/* Operations with very long integers.
   Copyright (C) 2012-2013 Free Software Foundation, Inc.
   Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "hwint.h"
#include "wide-int.h"
#include "tree.h"
#include "dumpfile.h"
#if GCC_VERSION >= 3000
#define W_TYPE_SIZE HOST_BITS_PER_WIDE_INT
typedef unsigned HOST_HALF_WIDE_INT UHWtype;
typedef unsigned HOST_WIDE_INT UWtype;
typedef unsigned int UQItype __attribute__ ((mode (QI)));
typedef unsigned int USItype __attribute__ ((mode (SI)));
typedef unsigned int UDItype __attribute__ ((mode (DI)));
#include "longlong.h"
#endif
/* This is the maximal size of the buffer needed for dump.  */
const unsigned int MAX_SIZE = (4 * (MAX_BITSIZE_MODE_ANY_INT / 4
				    + (MAX_BITSIZE_MODE_ANY_INT
				       / HOST_BITS_PER_WIDE_INT)
				    + 32));
static const HOST_WIDE_INT zeros[WIDE_INT_MAX_ELTS] = {};
/* Quantities to deal with values that hold half of a wide int.  Used
   in multiply and divide.  */
#define HALF_INT_MASK (((HOST_WIDE_INT) 1 << HOST_BITS_PER_HALF_WIDE_INT) - 1)

#define BLOCK_OF(TARGET) ((TARGET) / HOST_BITS_PER_WIDE_INT)
#define BLOCKS_NEEDED(PREC) \
  (PREC ? (((PREC) + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT) : 1)
#define SIGN_MASK(X) ((HOST_WIDE_INT) (X) < 0 ? -1 : 0)
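
/* Illustrative values, assuming the common case of a 64-bit
   HOST_WIDE_INT: BLOCKS_NEEDED (1) == 1, BLOCKS_NEEDED (64) == 1,
   BLOCKS_NEEDED (65) == 2 and BLOCKS_NEEDED (0) == 1, so even a
   degenerate zero precision owns one block.  SIGN_MASK yields -1 for
   any block with the top host bit set and 0 otherwise.  */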
/* Return the value of VAL[I] if I < LEN, otherwise, return 0 or -1
   based on the top existing bit of VAL.  */

static unsigned HOST_WIDE_INT
safe_uhwi (const HOST_WIDE_INT *val, unsigned int len, unsigned int i)
{
  return i < len ? val[i] : val[len - 1] < 0 ? (HOST_WIDE_INT) -1 : 0;
}
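
/* For example, reading element 5 of the single-block array { -1 }
   yields -1 (an implicit sign extension), while reading element 5 of
   { 1 } yields 0.  */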
/* Convert the integer in VAL to canonical form, returning its new length.
   LEN is the number of blocks currently in VAL and PRECISION is the number
   of bits in the integer it represents.

   This function only changes the representation, not the value.  */
static unsigned int
canonize (HOST_WIDE_INT *val, unsigned int len, unsigned int precision)
{
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  HOST_WIDE_INT top;
  int i;

  if (len > blocks_needed)
    len = blocks_needed;

  if (len == 1)
    return len;

  top = val[len - 1];
  if (len * HOST_BITS_PER_WIDE_INT > precision)
    top = sext_hwi (top, precision % HOST_BITS_PER_WIDE_INT);
  if (top != 0 && top != (HOST_WIDE_INT)-1)
    return len;

  /* At this point we know that the top is either 0 or -1.  Find the
     first block that is not a copy of this.  */
  for (i = len - 2; i >= 0; i--)
    {
      HOST_WIDE_INT x = val[i];
      if (x != top)
	{
	  if (SIGN_MASK (x) == top)
	    return i + 1;

	  /* We need an extra block because the top bit block i does
	     not match the extension.  */
	  return i + 2;
	}
    }

  /* The number is 0 or -1.  */
  return 1;
}
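
/* As a concrete illustration, with 64-bit blocks and PRECISION == 128:
   the value -1 is canonically { -1 } with length 1 rather than
   { -1, -1 }, since the upper block is just a sign copy of the lower
   one.  By contrast, 2^64 must stay { 0, 1 } with length 2 because its
   high block carries real information.  */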
/*
 * Conversion routines in and out of wide_int.
 */
/* Copy XLEN elements from XVAL to VAL.  If NEED_CANON, canonize the
   result for an integer with precision PRECISION.  Return the length
   of VAL (after any canonization).  */
unsigned int
wi::from_array (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		unsigned int xlen, unsigned int precision, bool need_canon)
{
  for (unsigned i = 0; i < xlen; i++)
    val[i] = xval[i];
  return need_canon ? canonize (val, xlen, precision) : xlen;
}
/* Construct a wide int from a buffer of length LEN.  BUFFER will be
   read according to byte endianness and word endianness of the target.
   Only the lower LEN bytes of the result are set; the remaining high
   bytes are cleared.  */
wide_int
wi::from_buffer (const unsigned char *buffer, unsigned int buffer_len)
{
  unsigned int precision = buffer_len * BITS_PER_UNIT;
  wide_int result = wide_int::create (precision);
  unsigned int words = buffer_len / UNITS_PER_WORD;

  /* We have to clear all the bits ourselves, as we merely or in values
     below.  */
  unsigned int len = BLOCKS_NEEDED (precision);
  HOST_WIDE_INT *val = result.write_val ();
  for (unsigned int i = 0; i < len; ++i)
    val[i] = 0;

  for (unsigned int byte = 0; byte < buffer_len; byte++)
    {
      unsigned int offset;
      unsigned int index;
      unsigned int bitpos = byte * BITS_PER_UNIT;
      unsigned HOST_WIDE_INT value;

      if (buffer_len > UNITS_PER_WORD)
	{
	  unsigned int word = byte / UNITS_PER_WORD;

	  if (WORDS_BIG_ENDIAN)
	    word = (words - 1) - word;

	  offset = word * UNITS_PER_WORD;

	  if (BYTES_BIG_ENDIAN)
	    offset += (UNITS_PER_WORD - 1) - (byte % UNITS_PER_WORD);
	  else
	    offset += byte % UNITS_PER_WORD;
	}
      else
	offset = BYTES_BIG_ENDIAN ? (buffer_len - 1) - byte : byte;

      value = (unsigned HOST_WIDE_INT) buffer[offset];

      index = bitpos / HOST_BITS_PER_WIDE_INT;
      val[index] |= value << (bitpos % HOST_BITS_PER_WIDE_INT);
    }

  result.set_len (canonize (val, len, precision));

  return result;
}
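
/* For example, on a 64-bit little-endian target the four-byte buffer
   { 0x01, 0x02, 0x03, 0x04 } produces a 32-bit wide_int whose low
   block is 0x04030201, while a big-endian target reads the same
   buffer back as 0x01020304.  */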
/* Sets RESULT from X; the sign is taken according to SGN.  */
void
wi::to_mpz (wide_int x, mpz_t result, signop sgn)
{
  bool negative = false;
  int len = x.get_len ();
  const HOST_WIDE_INT *v = x.get_val ();
  int small_prec = x.get_precision () & (HOST_BITS_PER_WIDE_INT - 1);

  if (wi::neg_p (x, sgn))
    {
      negative = true;
      /* We use ones complement to avoid the -0x80..0 edge case that
	 negation does not handle.  */
      x = ~x;
    }

  if (sgn == UNSIGNED && small_prec)
    {
      HOST_WIDE_INT t[WIDE_INT_MAX_ELTS];

      for (int i = 0; i < len - 1; i++)
	t[i] = v[i];
      t[len-1] = zext_hwi (v[len-1], small_prec);
      mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
    }
  else
    mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, v);

  if (negative)
    mpz_com (result, result);
}
/* Returns X converted to TYPE.  If WRAP is true, then out-of-range
   values of X will be wrapped; otherwise, they will be set to the
   appropriate minimum or maximum TYPE bound.  */
wide_int
wi::from_mpz (const_tree type, mpz_t x, bool wrap)
{
  size_t count, numb;
  int prec = TYPE_PRECISION (type);
  wide_int res = wide_int::create (prec);

  if (!wrap)
    {
      mpz_t min, max;

      mpz_init (min);
      mpz_init (max);
      get_type_static_bounds (type, min, max);

      if (mpz_cmp (x, min) < 0)
	mpz_set (x, min);
      else if (mpz_cmp (x, max) > 0)
	mpz_set (x, max);

      mpz_clear (min);
      mpz_clear (max);
    }

  /* Determine the number of unsigned HOST_WIDE_INTs that are required
     for representing the value.  The code to calculate count is
     extracted from the GMP manual, section "Integer Import and Export":
     http://gmplib.org/manual/Integer-Import-and-Export.html  */
  numb = 8 * sizeof (HOST_WIDE_INT);
  count = (mpz_sizeinbase (x, 2) + numb - 1) / numb;
  HOST_WIDE_INT *val = res.write_val ();
  mpz_export (val, &count, -1, sizeof (HOST_WIDE_INT), 0, 0, x);
  if (count < 1)
    {
      val[0] = 0;
      count = 1;
    }
  res.set_len (canonize (val, count, prec));

  if (mpz_sgn (x) < 0)
    res = -res;

  return res;
}
/*
 * Largest and smallest values in a mode.
 */
/* Return the largest SGNed number that is representable in PRECISION bits.

   TODO: There is still code from the double_int era that tries to
   make up for the fact that double_ints could not represent the
   min and max values of all types.  This code should be removed
   because the min and max values can always be represented in
   wide_ints and int-csts.  */
wide_int
wi::max_value (unsigned int precision, signop sgn)
{
  gcc_checking_assert (precision != 0);
  if (sgn == UNSIGNED)
    /* The unsigned max is just all ones.  */
    return shwi (-1, precision);
  else
    /* The signed max is all ones except the top bit.  This must be
       explicitly represented.  */
    return mask (precision - 1, false, precision);
}
/* Return the smallest SGNed number that is representable in PRECISION bits.  */
wide_int
wi::min_value (unsigned int precision, signop sgn)
{
  gcc_checking_assert (precision != 0);
  if (sgn == UNSIGNED)
    /* The unsigned min is just all zeros.  */
    return uhwi (0, precision);
  else
    /* The signed min is all zeros except the top bit.  This must be
       explicitly represented.  */
    return wi::set_bit_in_zero (precision - 1, precision);
}
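
/* At PRECISION 8, for instance, these give: max_value (8, UNSIGNED)
   == 0xff, max_value (8, SIGNED) == 0x7f, min_value (8, UNSIGNED) == 0
   and min_value (8, SIGNED) == -0x80.  */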
/* Convert the number represented by XVAL, XLEN and XPRECISION, which has
   signedness SGN, to an integer that has PRECISION bits.  Store the blocks
   in VAL and return the number of blocks used.

   This function can handle both extension (PRECISION > XPRECISION)
   and truncation (PRECISION < XPRECISION).  */
unsigned int
wi::force_to_size (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		   unsigned int xlen, unsigned int xprecision,
		   unsigned int precision, signop sgn)
{
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int len = blocks_needed < xlen ? blocks_needed : xlen;
  for (unsigned i = 0; i < len; i++)
    val[i] = xval[i];

  if (precision > xprecision)
    {
      unsigned int small_xprecision = xprecision % HOST_BITS_PER_WIDE_INT;

      /* Expanding.  */
      if (sgn == UNSIGNED)
	{
	  if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
	    val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
	  else if (val[len - 1] < 0)
	    {
	      while (len < BLOCKS_NEEDED (xprecision))
		val[len++] = -1;
	      if (small_xprecision)
		val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
	      else
		val[len++] = 0;
	    }
	}
      else
	{
	  if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
	    val[len - 1] = sext_hwi (val[len - 1], small_xprecision);
	}
    }
  len = canonize (val, len, precision);

  return len;
}
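
/* For example, forcing the canonical 8-bit value 0xff (stored as the
   single block -1) to PRECISION 16 yields 0x00ff under UNSIGNED but
   0xffff (still the single block -1) under SIGNED.  */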
/* This function hides the fact that we cannot rely on the bits beyond
   the precision.  This issue comes up in the relational comparisons
   where we do allow comparisons of values of different precisions.  */
static inline HOST_WIDE_INT
selt (const HOST_WIDE_INT *a, unsigned int len,
      unsigned int blocks_needed,
      unsigned int small_prec,
      unsigned int index, signop sgn)
{
  HOST_WIDE_INT val;
  if (index < len)
    val = a[index];
  else if (index < blocks_needed || sgn == SIGNED)
    /* Signed or within the precision.  */
    val = SIGN_MASK (a[len - 1]);
  else
    /* Unsigned extension beyond the precision.  */
    val = 0;

  if (small_prec && index == blocks_needed - 1)
    return (sgn == SIGNED
	    ? sext_hwi (val, small_prec)
	    : zext_hwi (val, small_prec));
  else
    return val;
}
/* Find the highest bit represented in a wide int.  This will in
   general have the same value as the sign bit.  */
static inline HOST_WIDE_INT
top_bit_of (const HOST_WIDE_INT *a, unsigned int len, unsigned int prec)
{
  int excess = len * HOST_BITS_PER_WIDE_INT - prec;
  unsigned HOST_WIDE_INT val = a[len - 1];
  if (excess > 0)
    val <<= excess;
  return val >> (HOST_BITS_PER_WIDE_INT - 1);
}
/*
 * Comparisons, note that only equality is an operator.  The other
 * comparisons cannot be operators since they are inherently signed or
 * unsigned and C++ has no such operators.
 */
/* Return true if OP0 == OP1.  */
bool
wi::eq_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
		const HOST_WIDE_INT *op1, unsigned int op1len,
		unsigned int prec)
{
  int l0 = op0len - 1;
  unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);

  if (op0len != op1len)
    return false;

  if (op0len == BLOCKS_NEEDED (prec) && small_prec)
    {
      /* It does not matter if we zext or sext here, we just have to
	 do both the same way.  */
      if (zext_hwi (op0[l0], small_prec) != zext_hwi (op1[l0], small_prec))
	return false;
      l0--;
    }

  while (l0 >= 0)
    {
      if (op0[l0] != op1[l0])
	return false;
      l0--;
    }

  return true;
}
/* Return true if OP0 < OP1 using signed comparisons.  */
bool
wi::lts_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
		 unsigned int precision,
		 const HOST_WIDE_INT *op1, unsigned int op1len)
{
  HOST_WIDE_INT s0, s1;
  unsigned HOST_WIDE_INT u0, u1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  /* Only the top block is compared as signed.  The rest are unsigned
     comparisons.  */
  s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
  s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
  if (s0 < s1)
    return true;
  if (s0 > s1)
    return false;

  l--;
  while (l >= 0)
    {
      u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
      u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);

      if (u0 < u1)
	return true;
      if (u0 > u1)
	return false;
      l--;
    }

  return false;
}
/* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
   signed compares.  */
int
wi::cmps_large (const HOST_WIDE_INT *op0, unsigned int op0len,
		unsigned int precision,
		const HOST_WIDE_INT *op1, unsigned int op1len)
{
  HOST_WIDE_INT s0, s1;
  unsigned HOST_WIDE_INT u0, u1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  /* Only the top block is compared as signed.  The rest are unsigned
     comparisons.  */
  s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
  s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
  if (s0 < s1)
    return -1;
  if (s0 > s1)
    return 1;

  l--;
  while (l >= 0)
    {
      u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
      u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);

      if (u0 < u1)
	return -1;
      if (u0 > u1)
	return 1;
      l--;
    }

  return 0;
}
/* Return true if OP0 < OP1 using unsigned comparisons.  */
bool
wi::ltu_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
		 unsigned int precision,
		 const HOST_WIDE_INT *op1, unsigned int op1len)
{
  unsigned HOST_WIDE_INT x0;
  unsigned HOST_WIDE_INT x1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  while (l >= 0)
    {
      x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
      x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
      if (x0 < x1)
	return true;
      if (x0 > x1)
	return false;
      l--;
    }

  return false;
}
/* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
   unsigned compares.  */
int
wi::cmpu_large (const HOST_WIDE_INT *op0, unsigned int op0len,
		unsigned int precision,
		const HOST_WIDE_INT *op1, unsigned int op1len)
{
  unsigned HOST_WIDE_INT x0;
  unsigned HOST_WIDE_INT x1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  while (l >= 0)
    {
      x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
      x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
      if (x0 < x1)
	return -1;
      if (x0 > x1)
	return 1;
      l--;
    }

  return 0;
}
/* Sign-extend the number represented by XVAL and XLEN into VAL,
   starting at OFFSET.  Return the number of blocks in VAL.  Both XVAL
   and VAL have PRECISION bits.  */
unsigned int
wi::sext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		unsigned int xlen, unsigned int precision, unsigned int offset)
{
  unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
  /* Extending beyond the precision is a no-op.  If we have only stored
     OFFSET bits or fewer, the rest are already signs.  */
  if (offset >= precision || len >= xlen)
    {
      for (unsigned i = 0; i < xlen; ++i)
	val[i] = xval[i];
      return xlen;
    }
  unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
  for (unsigned int i = 0; i < len; i++)
    val[i] = xval[i];
  if (suboffset > 0)
    {
      val[len] = sext_hwi (xval[len], suboffset);
      len += 1;
    }
  return canonize (val, len, precision);
}
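
/* For example, sign-extending the 16-bit value 0x80 from OFFSET 8
   treats bit 7 as the sign, giving 0xff80: bits at OFFSET and above
   are replaced by copies of bit OFFSET - 1.  */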
/* Zero-extend the number represented by XVAL and XLEN into VAL,
   starting at OFFSET.  Return the number of blocks in VAL.  Both XVAL
   and VAL have PRECISION bits.  */
unsigned int
wi::zext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		unsigned int xlen, unsigned int precision, unsigned int offset)
{
  unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
  /* Extending beyond the precision is a no-op.  If we have only stored
     OFFSET bits or fewer, and the upper stored bit is zero, then there
     is nothing to do.  */
  if (offset >= precision || (len >= xlen && xval[xlen - 1] >= 0))
    {
      for (unsigned i = 0; i < xlen; ++i)
	val[i] = xval[i];
      return xlen;
    }
  unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
  for (unsigned int i = 0; i < len; i++)
    val[i] = i < xlen ? xval[i] : -1;
  if (suboffset > 0)
    val[len] = zext_hwi (len < xlen ? xval[len] : -1, suboffset);
  else
    val[len] = 0;
  return canonize (val, len + 1, precision);
}
/*
 * Masking, inserting, shifting, rotating.
 */
/* Insert WIDTH bits from Y into X starting at START.  */
wide_int
wi::insert (const wide_int &x, const wide_int &y, unsigned int start,
	    unsigned int width)
{
  wide_int result;
  wide_int mask;
  wide_int tmp;

  unsigned int precision = x.get_precision ();
  if (start >= precision)
    return x;

  gcc_checking_assert (precision >= width);

  if (start + width >= precision)
    width = precision - start;

  mask = wi::shifted_mask (start, width, false, precision);
  tmp = wi::lshift (wide_int::from (y, precision, UNSIGNED), start);
  result = tmp & mask;

  tmp = wi::bit_and_not (x, mask);
  result = result | tmp;

  return result;
}
/* Copy the number represented by XVAL and XLEN into VAL, setting bit BIT.
   Return the number of blocks in VAL.  Both XVAL and VAL have PRECISION
   bits.  */
unsigned int
wi::set_bit_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		   unsigned int xlen, unsigned int precision, unsigned int bit)
{
  unsigned int block = bit / HOST_BITS_PER_WIDE_INT;
  unsigned int subbit = bit % HOST_BITS_PER_WIDE_INT;

  if (block + 1 >= xlen)
    {
      /* The operation either affects the last current block or needs
	 a new block.  */
      unsigned int len = block + 1;
      for (unsigned int i = 0; i < len; i++)
	val[i] = safe_uhwi (xval, xlen, i);
      val[block] |= (unsigned HOST_WIDE_INT) 1 << subbit;

      /* If the bit we just set is at the msb of the block, make sure
	 that any higher bits are zeros.  */
      if (bit + 1 < precision && subbit == HOST_BITS_PER_WIDE_INT - 1)
	val[len++] = 0;
      return len;
    }
  else
    {
      for (unsigned int i = 0; i < xlen; i++)
	val[i] = xval[i];
      val[block] |= (unsigned HOST_WIDE_INT) 1 << subbit;
      return canonize (val, xlen, precision);
    }
}
/* bswap THIS.  */
wide_int
wide_int_storage::bswap () const
{
  wide_int result = wide_int::create (precision);
  unsigned int i, s;
  unsigned int len = BLOCKS_NEEDED (precision);
  unsigned int xlen = get_len ();
  const HOST_WIDE_INT *xval = get_val ();
  HOST_WIDE_INT *val = result.write_val ();

  /* This is not a well defined operation if the precision is not a
     multiple of 8.  */
  gcc_assert ((precision & 0x7) == 0);

  for (i = 0; i < len; i++)
    val[i] = 0;

  /* Only swap the bytes that are not the padding.  */
  for (s = 0; s < precision; s += 8)
    {
      unsigned int d = precision - s - 8;
      unsigned HOST_WIDE_INT byte;

      unsigned int block = s / HOST_BITS_PER_WIDE_INT;
      unsigned int offset = s & (HOST_BITS_PER_WIDE_INT - 1);

      byte = (safe_uhwi (xval, xlen, block) >> offset) & 0xff;

      block = d / HOST_BITS_PER_WIDE_INT;
      offset = d & (HOST_BITS_PER_WIDE_INT - 1);

      val[block] |= byte << offset;
    }

  result.set_len (canonize (val, len, precision));
  return result;
}
/* Fill VAL with a mask where the lower WIDTH bits are ones and the bits
   above that up to PREC are zeros.  The result is inverted if NEGATE
   is true.  Return the number of blocks in VAL.  */
unsigned int
wi::mask (HOST_WIDE_INT *val, unsigned int width, bool negate,
	  unsigned int prec)
{
  gcc_assert (width < 4 * MAX_BITSIZE_MODE_ANY_INT);
  gcc_assert (prec <= 4 * MAX_BITSIZE_MODE_ANY_INT);

  if (width == prec)
    {
      val[0] = negate ? 0 : -1;
      return 1;
    }
  else if (width == 0)
    {
      val[0] = negate ? -1 : 0;
      return 1;
    }

  unsigned int i = 0;
  while (i < width / HOST_BITS_PER_WIDE_INT)
    val[i++] = negate ? 0 : -1;

  unsigned int shift = width & (HOST_BITS_PER_WIDE_INT - 1);
  if (shift != 0)
    {
      HOST_WIDE_INT last = (((unsigned HOST_WIDE_INT) 1) << shift) - 1;
      val[i++] = negate ? ~last : last;
    }
  else
    val[i++] = negate ? -1 : 0;

  return i;
}
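
/* For example, wi::mask (val, 70, false, 128) with 64-bit blocks
   produces { -1, 0x3f }: block 0 holds 64 ones and block 1 the
   remaining 70 % 64 == 6 ones.  With NEGATE it would produce
   { 0, ~0x3f }.  */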
/* Fill VAL with a mask where the lower START bits are zeros, the next WIDTH
   bits are ones, and the bits above that up to PREC are zeros.  The result
   is inverted if NEGATE is true.  Return the number of blocks in VAL.  */
unsigned int
wi::shifted_mask (HOST_WIDE_INT *val, unsigned int start, unsigned int width,
		  bool negate, unsigned int prec)
{
  if (start >= prec || width == 0)
    {
      val[0] = negate ? -1 : 0;
      return 1;
    }

  if (width > prec - start)
    width = prec - start;
  unsigned int end = start + width;

  unsigned int i = 0;
  while (i < start / HOST_BITS_PER_WIDE_INT)
    val[i++] = negate ? -1 : 0;

  unsigned int shift = start & (HOST_BITS_PER_WIDE_INT - 1);
  if (shift)
    {
      HOST_WIDE_INT block = (((unsigned HOST_WIDE_INT) 1) << shift) - 1;
      shift = end & (HOST_BITS_PER_WIDE_INT - 1);
      if (shift)
	{
	  /* case 000111000 */
	  block = (((unsigned HOST_WIDE_INT) 1) << shift) - block - 1;
	  val[i++] = negate ? ~block : block;
	  return i;
	}
      else
	/* ...111000 */
	val[i++] = negate ? block : ~block;
    }

  while (i < end / HOST_BITS_PER_WIDE_INT)
    /* 1111111 */
    val[i++] = negate ? 0 : -1;

  shift = end & (HOST_BITS_PER_WIDE_INT - 1);
  if (shift != 0)
    {
      /* 000011111 */
      HOST_WIDE_INT block = (((unsigned HOST_WIDE_INT) 1) << shift) - 1;
      val[i++] = negate ? ~block : block;
    }
  else if (end < prec)
    val[i++] = negate ? -1 : 0;

  return i;
}
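
/* For example, wi::shifted_mask (val, 4, 8, false, 64) builds the
   single block 0xff0 (four zeros, eight ones, zeros above); with
   NEGATE true it builds ~0xff0.  */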
/*
 * Logical operations.
 */
/* Set VAL to OP0 & OP1.  Return the number of blocks used.  */
unsigned int
wi::and_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
	       unsigned int op0len, const HOST_WIDE_INT *op1,
	       unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      if (op1mask == 0)
	{
	  l0 = l1;
	  len = l1 + 1;
	}
      else
	{
	  need_canon = false;
	  while (l0 > l1)
	    {
	      val[l0] = op0[l0];
	      l0--;
	    }
	}
    }
  else if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      if (op0mask == 0)
	len = l0 + 1;
      else
	{
	  need_canon = false;
	  while (l1 > l0)
	    {
	      val[l1] = op1[l1];
	      l1--;
	    }
	}
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] & op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}
/* Set VAL to OP0 & ~OP1.  Return the number of blocks used.  */
unsigned int
wi::and_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
		   unsigned int op0len, const HOST_WIDE_INT *op1,
		   unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      if (op1mask != 0)
	{
	  l0 = l1;
	  len = l1 + 1;
	}
      else
	{
	  need_canon = false;
	  while (l0 > l1)
	    {
	      val[l0] = op0[l0];
	      l0--;
	    }
	}
    }
  else if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      if (op0mask == 0)
	len = l0 + 1;
      else
	{
	  need_canon = false;
	  while (l1 > l0)
	    {
	      val[l1] = ~op1[l1];
	      l1--;
	    }
	}
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] & ~op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}
/* Set VAL to OP0 | OP1.  Return the number of blocks used.  */
unsigned int
wi::or_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
	      unsigned int op0len, const HOST_WIDE_INT *op1,
	      unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      if (op1mask != 0)
	{
	  l0 = l1;
	  len = l1 + 1;
	}
      else
	{
	  need_canon = false;
	  while (l0 > l1)
	    {
	      val[l0] = op0[l0];
	      l0--;
	    }
	}
    }
  else if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      if (op0mask != 0)
	len = l0 + 1;
      else
	{
	  need_canon = false;
	  while (l1 > l0)
	    {
	      val[l1] = op1[l1];
	      l1--;
	    }
	}
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] | op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}
/* Set VAL to OP0 | ~OP1.  Return the number of blocks used.  */
unsigned int
wi::or_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
		  unsigned int op0len, const HOST_WIDE_INT *op1,
		  unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      if (op1mask == 0)
	{
	  l0 = l1;
	  len = l1 + 1;
	}
      else
	{
	  need_canon = false;
	  while (l0 > l1)
	    {
	      val[l0] = op0[l0];
	      l0--;
	    }
	}
    }
  else if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      if (op0mask != 0)
	len = l0 + 1;
      else
	{
	  need_canon = false;
	  while (l1 > l0)
	    {
	      val[l1] = ~op1[l1];
	      l1--;
	    }
	}
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] | ~op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}
/* Set VAL to OP0 ^ OP1.  Return the number of blocks used.  */
unsigned int
wi::xor_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
	       unsigned int op0len, const HOST_WIDE_INT *op1,
	       unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      while (l0 > l1)
	{
	  val[l0] = op0[l0] ^ op1mask;
	  l0--;
	}
    }

  if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      while (l1 > l0)
	{
	  val[l1] = op0mask ^ op1[l1];
	  l1--;
	}
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] ^ op1[l0];
      l0--;
    }

  return canonize (val, len, prec);
}
/* Set VAL to OP0 + OP1.  If OVERFLOW is nonnull, record in *OVERFLOW
   whether the result overflows when OP0 and OP1 are treated as having
   signedness SGN.  Return the number of blocks in VAL.  */
unsigned int
wi::add_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
	       unsigned int op0len, const HOST_WIDE_INT *op1,
	       unsigned int op1len, unsigned int prec,
	       signop sgn, bool *overflow)
{
  unsigned HOST_WIDE_INT o0 = 0;
  unsigned HOST_WIDE_INT o1 = 0;
  unsigned HOST_WIDE_INT x = 0;
  unsigned HOST_WIDE_INT carry = 0;
  unsigned HOST_WIDE_INT old_carry = 0;
  unsigned HOST_WIDE_INT mask0, mask1;
  unsigned int i;

  unsigned int len = MAX (op0len, op1len);
  mask0 = -top_bit_of (op0, op0len, prec);
  mask1 = -top_bit_of (op1, op1len, prec);
  /* Add all of the explicitly defined elements.  */

  for (i = 0; i < len; i++)
    {
      o0 = i < op0len ? (unsigned HOST_WIDE_INT) op0[i] : mask0;
      o1 = i < op1len ? (unsigned HOST_WIDE_INT) op1[i] : mask1;
      x = o0 + o1 + carry;
      val[i] = x;
      old_carry = carry;
      carry = carry == 0 ? x < o0 : x <= o0;
    }

  if (len * HOST_BITS_PER_WIDE_INT < prec)
    {
      val[len] = mask0 + mask1 + carry;
      len++;
      if (overflow)
	*overflow = false;
    }
  else if (overflow)
    {
      unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
      if (sgn == SIGNED)
	{
	  unsigned HOST_WIDE_INT x = (val[len - 1] ^ o0) & (val[len - 1] ^ o1);
	  *overflow = (HOST_WIDE_INT) (x << shift) < 0;
	}
      else
	{
	  /* Put the MSBs of X and O0 in the top of the HWI.  */
	  x <<= shift;
	  o0 <<= shift;
	  if (old_carry)
	    *overflow = (x <= o0);
	  else
	    *overflow = (x < o0);
	}
    }

  return canonize (val, len, prec);
}
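
/* A worked example of the signed overflow test above: adding the
   8-bit values 0x70 and 0x70 gives 0xe0.  Both inputs have the sign
   bit clear while the result has it set, so (val ^ o0) & (val ^ o1)
   has bit 7 set; shifting that bit to the top of the HWI makes the
   test value negative, flagging overflow.  */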
/* This is bogus.  We should always return the precision and leave the
   caller to handle target dependencies.  */
static int
clz_zero (unsigned int precision)
{
  unsigned int count;

  enum machine_mode mode = mode_for_size (precision, MODE_INT, 0);
  if (mode == BLKmode)
    mode = mode_for_size (precision, MODE_PARTIAL_INT, 0);

  /* Even if the value at zero is undefined, we have to come up
     with some replacement.  Seems good enough.  */
  if (mode == BLKmode)
    count = precision;
  else if (!CLZ_DEFINED_VALUE_AT_ZERO (mode, count))
    count = precision;
  return count;
}
/* This is bogus.  We should always return the precision and leave the
   caller to handle target dependencies.  */
static int
ctz_zero (unsigned int precision)
{
  unsigned int count;

  enum machine_mode mode = mode_for_size (precision, MODE_INT, 0);
  if (mode == BLKmode)
    mode = mode_for_size (precision, MODE_PARTIAL_INT, 0);

  /* Even if the value at zero is undefined, we have to come up
     with some replacement.  Seems good enough.  */
  if (mode == BLKmode)
    count = precision;
  else if (!CTZ_DEFINED_VALUE_AT_ZERO (mode, count))
    count = precision;
  return count;
}
/* Subroutines of the multiplication and division operations.  Unpack
   the first IN_LEN HOST_WIDE_INTs in INPUT into 2 * IN_LEN
   HOST_HALF_WIDE_INTs of RESULT.  The rest of RESULT is filled by
   uncompressing the top bit of INPUT[IN_LEN - 1].  */
static void
wi_unpack (unsigned HOST_HALF_WIDE_INT *result,
	   const unsigned HOST_WIDE_INT *input,
	   unsigned int in_len, unsigned int out_len,
	   unsigned int prec, signop sgn)
{
  unsigned int i;
  unsigned int j = 0;
  unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
  unsigned int blocks_needed = BLOCKS_NEEDED (prec);
  HOST_WIDE_INT mask;

  if (sgn == SIGNED)
    {
      mask = -top_bit_of ((const HOST_WIDE_INT *) input, in_len, prec);
      mask &= HALF_INT_MASK;
    }
  else
    mask = 0;

  for (i = 0; i < in_len; i++)
    {
      HOST_WIDE_INT x = input[i];
      if (i == blocks_needed - 1 && small_prec)
	{
	  if (sgn == SIGNED)
	    x = sext_hwi (x, small_prec);
	  else
	    x = zext_hwi (x, small_prec);
	}
      result[j++] = x;
      result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;
    }

  /* Smear the sign bit.  */
  while (j < out_len)
    result[j++] = mask;
}
/* The inverse of wi_unpack.  IN_LEN is the number of input
   blocks.  The number of output blocks will be half this amount.  */
static void
wi_pack (unsigned HOST_WIDE_INT *result,
	 const unsigned HOST_HALF_WIDE_INT *input,
	 unsigned int in_len)
{
  unsigned int i = 0;
  unsigned int j = 0;

  while (i + 2 < in_len)
    {
      result[j++] = (unsigned HOST_WIDE_INT)input[i]
	| ((unsigned HOST_WIDE_INT)input[i + 1]
	   << HOST_BITS_PER_HALF_WIDE_INT);
      i += 2;
    }

  /* Handle the case where in_len is odd.   For this we zero extend.  */
  if (in_len & 1)
    result[j++] = (unsigned HOST_WIDE_INT)input[i];
  else
    result[j++] = (unsigned HOST_WIDE_INT)input[i]
      | ((unsigned HOST_WIDE_INT)input[i + 1] << HOST_BITS_PER_HALF_WIDE_INT);
}
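
/* For instance, with 64-bit HOST_WIDE_INTs and 32-bit halves,
   wi_unpack splits { 0x0000000400000003 } into the halfword digits
   { 3, 4 }, and wi_pack reassembles them; an odd IN_LEN leaves the
   final output block zero-extended.  */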
/* Multiply Op1 by Op2.  If HIGH is set, only the upper half of the
   result is returned.

   If HIGH is not set, throw away the upper half after the check is
   made to see if it overflows.  Unfortunately there is no better way
   to check for overflow than to do this.  If OVERFLOW is nonnull,
   record in *OVERFLOW whether the result overflowed.  SGN controls
   the signedness and is used to check overflow or if HIGH is set.  */
unsigned int
wi::mul_internal (HOST_WIDE_INT *val, const HOST_WIDE_INT *op1val,
		  unsigned int op1len, const HOST_WIDE_INT *op2val,
		  unsigned int op2len, unsigned int prec, signop sgn,
		  bool *overflow, bool high)
{
  unsigned HOST_WIDE_INT o0, o1, k, t;
  unsigned int i;
  unsigned int j;
  unsigned int blocks_needed = BLOCKS_NEEDED (prec);
  unsigned int half_blocks_needed = blocks_needed * 2;
  /* The sizes here are scaled to support a 2x largest mode by 2x
     largest mode yielding a 4x largest mode result.  This is what is
     needed.  */

  unsigned HOST_HALF_WIDE_INT
    u[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned HOST_HALF_WIDE_INT
    v[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  /* The '2' in 'R' is because we are internally doing a full
     multiply.  */
  unsigned HOST_HALF_WIDE_INT
    r[2 * 4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  HOST_WIDE_INT mask = ((HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT) - 1;

  /* If the top level routine did not really pass in an overflow, then
     just make sure that we never attempt to set it.  */
  bool needs_overflow = (overflow != 0);
  if (needs_overflow)
    *overflow = false;

  wide_int_ref op1 = wi::storage_ref (op1val, op1len, prec);
  wide_int_ref op2 = wi::storage_ref (op2val, op2len, prec);

  /* This is a surprisingly common case, so do it first.  */
  if (op1 == 0 || op2 == 0)
    {
      val[0] = 0;
      return 1;
    }

#ifdef umul_ppmm
  if (sgn == UNSIGNED)
    {
      /* If the inputs are single HWIs and the output has room for at
	 least two HWIs, we can use umul_ppmm directly.  */
      if (prec >= HOST_BITS_PER_WIDE_INT * 2
	  && wi::fits_uhwi_p (op1)
	  && wi::fits_uhwi_p (op2))
	{
	  umul_ppmm (val[1], val[0], op1.ulow (), op2.ulow ());
	  return 1 + (val[1] != 0 || val[0] < 0);
	}
      /* Likewise if the output is a full single HWI, except that the
	 upper HWI of the result is only used for determining overflow.
	 (We handle this case inline when overflow isn't needed.)  */
      else if (prec == HOST_BITS_PER_WIDE_INT)
	{
	  unsigned HOST_WIDE_INT upper;
	  umul_ppmm (upper, val[0], op1.ulow (), op2.ulow ());
	  if (needs_overflow)
	    *overflow = (upper != 0);
	  return 1;
	}
    }
#endif

  /* Handle multiplications by 1.  */
  if (op1 == 1)
    {
      for (i = 0; i < op2len; i++)
	val[i] = op2val[i];
      return op2len;
    }
  if (op2 == 1)
    {
      for (i = 0; i < op1len; i++)
	val[i] = op1val[i];
      return op1len;
    }

  /* If we need to check for overflow, we can only do half wide
     multiplies quickly because we need to look at the top bits to
     check for the overflow.  */
  if ((high || needs_overflow)
      && (prec <= HOST_BITS_PER_HALF_WIDE_INT))
    {
      unsigned HOST_WIDE_INT r;

      if (sgn == SIGNED)
	{
	  o0 = op1.to_shwi ();
	  o1 = op2.to_shwi ();
	}
      else
	{
	  o0 = op1.to_uhwi ();
	  o1 = op2.to_uhwi ();
	}

      r = o0 * o1;
      if (needs_overflow)
	{
	  if (sgn == SIGNED)
	    {
	      if ((HOST_WIDE_INT) (r) != sext_hwi (r, prec))
		*overflow = true;
	    }
	  else
	    {
	      if ((r >> prec) != 0)
		*overflow = true;
	    }
	}
      val[0] = high ? r >> prec : r;
      return 1;
    }

  /* We do unsigned mul and then correct it.  */
  wi_unpack (u, (const unsigned HOST_WIDE_INT *) op1val, op1len,
	     half_blocks_needed, prec, SIGNED);
  wi_unpack (v, (const unsigned HOST_WIDE_INT *) op2val, op2len,
	     half_blocks_needed, prec, SIGNED);

  /* The 2 is for a full mult.  */
  memset (r, 0, half_blocks_needed * 2
	  * HOST_BITS_PER_HALF_WIDE_INT / CHAR_BIT);

  for (j = 0; j < half_blocks_needed; j++)
    {
      k = 0;
      for (i = 0; i < half_blocks_needed; i++)
	{
	  t = ((unsigned HOST_WIDE_INT)u[i] * (unsigned HOST_WIDE_INT)v[j]
	       + r[i + j] + k);
	  r[i + j] = t & HALF_INT_MASK;
	  k = t >> HOST_BITS_PER_HALF_WIDE_INT;
	}
      r[j + half_blocks_needed] = k;
    }

  /* We did unsigned math above.  For signed we must adjust the
     product (assuming we need to see that).  */
  if (sgn == SIGNED && (high || needs_overflow))
    {
      unsigned HOST_WIDE_INT b;
      if (wi::neg_p (op1))
	{
	  b = 0;
	  for (i = 0; i < half_blocks_needed; i++)
	    {
	      t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
		- (unsigned HOST_WIDE_INT)v[i] - b;
	      r[i + half_blocks_needed] = t & HALF_INT_MASK;
	      b = t >> (HOST_BITS_PER_WIDE_INT - 1);
	    }
	}
      if (wi::neg_p (op2))
	{
	  b = 0;
	  for (i = 0; i < half_blocks_needed; i++)
	    {
	      t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
		- (unsigned HOST_WIDE_INT)u[i] - b;
	      r[i + half_blocks_needed] = t & HALF_INT_MASK;
	      b = t >> (HOST_BITS_PER_WIDE_INT - 1);
	    }
	}
    }

  if (needs_overflow)
    {
      HOST_WIDE_INT top;

      /* For unsigned, overflow is true if any of the top bits are set.
	 For signed, overflow is true if any of the top bits are not equal
	 to the sign bit.  */
      if (sgn == UNSIGNED)
	top = 0;
      else
	{
	  top = r[(half_blocks_needed) - 1];
	  top = SIGN_MASK (top << (HOST_BITS_PER_WIDE_INT / 2));
	  top &= mask;
	}

      for (i = half_blocks_needed; i < half_blocks_needed * 2; i++)
	if (((HOST_WIDE_INT)(r[i] & mask)) != top)
	  *overflow = true;
    }

  if (high)
    {
      /* compute [prec] <- ([prec] * [prec]) >> [prec] */
      wi_pack ((unsigned HOST_WIDE_INT *) val,
	       &r[half_blocks_needed], half_blocks_needed);
      return canonize (val, blocks_needed, prec);
    }
  else
    {
      /* compute [prec] <- ([prec] * [prec]) && ((1 << [prec]) - 1) */
      wi_pack ((unsigned HOST_WIDE_INT *) val, r, half_blocks_needed);
      return canonize (val, blocks_needed, prec);
    }
}
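
/* The inner loops above are the classic O(n^2) schoolbook multiply on
   half-word digits.  As an illustration with 32-bit halves: squaring
   the 64-bit value 0x100000001 unpacks to u == v == { 1, 1 } and
   accumulates the partial products into r as { 1, 2, 1, 0 }, i.e.
   0x1_00000002_00000001 before the result is truncated to the
   requested precision.  */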
/* Compute the population count of X.  */
int
wi::popcount (const wide_int_ref &x)
{
  unsigned int i;
  int count;

  /* The high order block is special if it is the last block and the
     precision is not an even multiple of HOST_BITS_PER_WIDE_INT.  We
     have to clear out any ones above the precision before doing
     popcount on this block.  */
  count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
  unsigned int stop = x.len;
  if (count < 0)
    {
      count = popcount_hwi (x.uhigh () << -count);
      stop -= 1;
    }
  else
    {
      if (x.sign_mask () >= 0)
	count = 0;
    }

  for (i = 0; i < stop; ++i)
    count += popcount_hwi (x.val[i]);

  return count;
}
/* Set VAL to OP0 - OP1.  If OVERFLOW is nonnull, record in *OVERFLOW
   whether the result overflows when OP0 and OP1 are treated as having
   signedness SGN.  Return the number of blocks in VAL.  */
unsigned int
wi::sub_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
	       unsigned int op0len, const HOST_WIDE_INT *op1,
	       unsigned int op1len, unsigned int prec,
	       signop sgn, bool *overflow)
{
  unsigned HOST_WIDE_INT o0 = 0;
  unsigned HOST_WIDE_INT o1 = 0;
  unsigned HOST_WIDE_INT x = 0;
  /* We implement subtraction as an in place negate and add.  Negation
     is just inversion and add 1, so we can do the add of 1 by just
     starting the borrow in of the first element at 1.  */
  unsigned HOST_WIDE_INT borrow = 0;
  unsigned HOST_WIDE_INT old_borrow = 0;

  unsigned HOST_WIDE_INT mask0, mask1;
  unsigned int i;

  unsigned int len = MAX (op0len, op1len);
  mask0 = -top_bit_of (op0, op0len, prec);
  mask1 = -top_bit_of (op1, op1len, prec);

  /* Subtract all of the explicitly defined elements.  */
  for (i = 0; i < len; i++)
    {
      o0 = i < op0len ? (unsigned HOST_WIDE_INT)op0[i] : mask0;
      o1 = i < op1len ? (unsigned HOST_WIDE_INT)op1[i] : mask1;
      x = o0 - o1 - borrow;
      val[i] = x;
      old_borrow = borrow;
      borrow = borrow == 0 ? o0 < o1 : o0 <= o1;
    }

  if (len * HOST_BITS_PER_WIDE_INT < prec)
    {
      val[len] = mask0 - mask1 - borrow;
      len++;
      if (overflow)
	*overflow = false;
    }
  else if (overflow)
    {
      unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
      if (sgn == SIGNED)
	{
	  unsigned HOST_WIDE_INT x = (o0 ^ o1) & (val[len - 1] ^ o0);
	  *overflow = (HOST_WIDE_INT) (x << shift) < 0;
	}
      else
	{
	  /* Put the MSBs of X and O0 in the top of the HWI.  */
	  x <<= shift;
	  o0 <<= shift;
	  if (old_borrow)
	    *overflow = (x >= o0);
	  else
	    *overflow = (x > o0);
	}
    }

  return canonize (val, len, prec);
}
/* Compute B_QUOTIENT and B_REMAINDER from B_DIVIDEND/B_DIVISOR.  The
   algorithm is a small modification of the algorithm in Hacker's
   Delight by Warren, which itself is a small modification of Knuth's
   algorithm.  M is the number of significant elements of U however
   there needs to be at least one extra element of B_DIVIDEND
   allocated, N is the number of elements of B_DIVISOR.  */
static void
divmod_internal_2 (unsigned HOST_HALF_WIDE_INT *b_quotient,
		   unsigned HOST_HALF_WIDE_INT *b_remainder,
		   unsigned HOST_HALF_WIDE_INT *b_dividend,
		   unsigned HOST_HALF_WIDE_INT *b_divisor,
		   unsigned int m, unsigned int n)
{
  /* The "digits" are a HOST_HALF_WIDE_INT, which is the size of half
     of a HOST_WIDE_INT and stored in the lower bits of each word.  This
     algorithm should work properly on both 32 and 64 bit
     machines.  */
  unsigned HOST_WIDE_INT b
    = (unsigned HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT;
  unsigned HOST_WIDE_INT qhat;   /* Estimate of quotient digit.  */
  unsigned HOST_WIDE_INT rhat;   /* A remainder.  */
  unsigned HOST_WIDE_INT p;      /* Product of two digits.  */
  HOST_WIDE_INT s, i, j, t, k;

  /* Single digit divisor.  */
  if (n == 1)
    {
      k = 0;
      for (j = m - 1; j >= 0; j--)
	{
	  b_quotient[j] = (k * b + b_dividend[j])/b_divisor[0];
	  k = ((k * b + b_dividend[j])
	       - ((unsigned HOST_WIDE_INT)b_quotient[j]
		  * (unsigned HOST_WIDE_INT)b_divisor[0]));
	}
      b_remainder[0] = k;
      return;
    }

  s = clz_hwi (b_divisor[n-1]) - HOST_BITS_PER_HALF_WIDE_INT; /* CHECK clz */

  if (s)
    {
      /* Normalize B_DIVIDEND and B_DIVISOR.  Unlike the published
	 algorithm, we can overwrite b_dividend and b_divisor, so we do
	 that.  */
      for (i = n - 1; i > 0; i--)
	b_divisor[i] = (b_divisor[i] << s)
	  | (b_divisor[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
      b_divisor[0] = b_divisor[0] << s;

      b_dividend[m] = b_dividend[m-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s);
      for (i = m - 1; i > 0; i--)
	b_dividend[i] = (b_dividend[i] << s)
	  | (b_dividend[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
      b_dividend[0] = b_dividend[0] << s;
    }

  /* Main loop.  */
  for (j = m - n; j >= 0; j--)
    {
      qhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) / b_divisor[n-1];
      rhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) - qhat * b_divisor[n-1];
    again:
      if (qhat >= b || qhat * b_divisor[n-2] > b * rhat + b_dividend[j+n-2])
	{
	  qhat -= 1;
	  rhat += b_divisor[n-1];
	  if (rhat < b)
	    goto again;
	}

      /* Multiply and subtract.  */
      k = 0;
      for (i = 0; i < n; i++)
	{
	  p = qhat * b_divisor[i];
	  t = b_dividend[i+j] - k - (p & HALF_INT_MASK);
	  b_dividend[i + j] = t;
	  k = ((p >> HOST_BITS_PER_HALF_WIDE_INT)
	       - (t >> HOST_BITS_PER_HALF_WIDE_INT));
	}
      t = b_dividend[j+n] - k;
      b_dividend[j+n] = t;

      b_quotient[j] = qhat;
      if (t < 0)
	{
	  b_quotient[j] -= 1;
	  k = 0;
	  for (i = 0; i < n; i++)
	    {
	      t = (HOST_WIDE_INT)b_dividend[i+j] + b_divisor[i] + k;
	      b_dividend[i+j] = t;
	      k = t >> HOST_BITS_PER_HALF_WIDE_INT;
	    }
	  b_dividend[j+n] += k;
	}
    }
  if (s)
    for (i = 0; i < n; i++)
      b_remainder[i] = (b_dividend[i] >> s)
	| (b_dividend[i+1] << (HOST_BITS_PER_HALF_WIDE_INT - s));
  else
    for (i = 0; i < n; i++)
      b_remainder[i] = b_dividend[i];
}
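
/* A note on the normalization step: for a divisor whose leading
   half-word digit is 0x1, s == clz_hwi (0x1) - 32 == 31 on a host
   with 64-bit HWIs, so the divisor is shifted until the top bit of
   its leading digit is set (0x80000000).  Knuth's analysis relies on
   this to keep the QHAT estimate within 2 of the true quotient
   digit.  */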
/* Divide DIVIDEND by DIVISOR, which have signedness SGN, and truncate
   the result.  If QUOTIENT is nonnull, store the value of the quotient
   there and return the number of blocks in it.  The return value is
   not defined otherwise.  If REMAINDER is nonnull, store the value
   of the remainder there and store the number of blocks in
   *REMAINDER_LEN.  If OFLOW is not null, store in *OFLOW whether
   the division overflowed.  */
unsigned int
wi::divmod_internal (HOST_WIDE_INT *quotient, unsigned int *remainder_len,
		     HOST_WIDE_INT *remainder,
		     const HOST_WIDE_INT *dividend_val,
		     unsigned int dividend_len, unsigned int dividend_prec,
		     const HOST_WIDE_INT *divisor_val, unsigned int divisor_len,
		     unsigned int divisor_prec, signop sgn,
		     bool *oflow)
{
  unsigned int dividend_blocks_needed = 2 * BLOCKS_NEEDED (dividend_prec);
  unsigned int divisor_blocks_needed = 2 * BLOCKS_NEEDED (divisor_prec);
  unsigned HOST_HALF_WIDE_INT
    b_quotient[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned HOST_HALF_WIDE_INT
    b_remainder[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned HOST_HALF_WIDE_INT
    b_dividend[(4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT) + 1];
  unsigned HOST_HALF_WIDE_INT
    b_divisor[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned int m, n;
  bool dividend_neg = false;
  bool divisor_neg = false;
  bool overflow = false;
  wide_int neg_dividend, neg_divisor;

  wide_int_ref dividend = wi::storage_ref (dividend_val, dividend_len,
					   dividend_prec);
  wide_int_ref divisor = wi::storage_ref (divisor_val, divisor_len,
					  divisor_prec);
  if (divisor == 0)
    overflow = true;

  /* The smallest signed number / -1 causes overflow.  The dividend_len
     check is for speed rather than correctness.  */
  if (sgn == SIGNED
      && dividend_len == BLOCKS_NEEDED (dividend_prec)
      && divisor == -1
      && wi::only_sign_bit_p (dividend))
    overflow = true;

  /* Handle the overflow cases.  Viewed as unsigned value, the quotient of
     (signed min / -1) has the same representation as the original dividend.
     We have traditionally made division by zero act as division by one,
     so there too we use the original dividend.  */
  if (overflow)
    {
      if (remainder)
	{
	  *remainder_len = 1;
	  remainder[0] = 0;
	}
      if (oflow != 0)
	*oflow = true;
      if (quotient)
	for (unsigned int i = 0; i < dividend_len; ++i)
	  quotient[i] = dividend_val[i];
      return dividend_len;
    }

  if (oflow)
    *oflow = false;

  /* Do it on the host if you can.  */
  if (sgn == SIGNED
      && wi::fits_shwi_p (dividend)
      && wi::fits_shwi_p (divisor))
    {
      HOST_WIDE_INT o0 = dividend.to_shwi ();
      HOST_WIDE_INT o1 = divisor.to_shwi ();

      if (o0 == HOST_WIDE_INT_MIN && o1 == -1)
	{
	  gcc_checking_assert (dividend_prec > HOST_BITS_PER_WIDE_INT);
	  if (quotient)
	    {
	      quotient[0] = HOST_WIDE_INT_MIN;
	      quotient[1] = 0;
	    }
	  if (remainder)
	    {
	      remainder[0] = 0;
	      *remainder_len = 1;
	    }
	  return 2;
	}
      else
	{
	  if (quotient)
	    quotient[0] = o0 / o1;
	  if (remainder)
	    {
	      remainder[0] = o0 % o1;
	      *remainder_len = 1;
	    }
	  return 1;
	}
    }

  if (sgn == UNSIGNED
      && wi::fits_uhwi_p (dividend)
      && wi::fits_uhwi_p (divisor))
    {
      unsigned HOST_WIDE_INT o0 = dividend.to_uhwi ();
      unsigned HOST_WIDE_INT o1 = divisor.to_uhwi ();

      if (quotient)
	quotient[0] = o0 / o1;
      if (remainder)
	{
	  remainder[0] = o0 % o1;
	  *remainder_len = 1;
	}
      return 1;
    }

  /* Make the divisor and dividend positive and remember what we
     did.  */
  if (sgn == SIGNED)
    {
      if (wi::neg_p (dividend))
	{
	  neg_dividend = -dividend;
	  dividend = neg_dividend;
	  dividend_neg = true;
	}
      if (wi::neg_p (divisor))
	{
	  neg_divisor = -divisor;
	  divisor = neg_divisor;
	  divisor_neg = true;
	}
    }

  wi_unpack (b_dividend, (const unsigned HOST_WIDE_INT *) dividend.get_val (),
	     dividend.get_len (), dividend_blocks_needed, dividend_prec, sgn);
  wi_unpack (b_divisor, (const unsigned HOST_WIDE_INT *) divisor.get_val (),
	     divisor.get_len (), divisor_blocks_needed, divisor_prec, sgn);

  if (wi::neg_p (dividend, sgn))
    m = dividend_blocks_needed;
  else
    m = 2 * dividend.get_len ();

  if (wi::neg_p (divisor, sgn))
    n = divisor_blocks_needed;
  else
    n = 2 * divisor.get_len ();

  /* We need to find the top non zero block of b_divisor.  At most the
     top two blocks are zero.  */
  if (b_divisor[n - 1] == 0)
    n--;
  if (b_divisor[n - 1] == 0)
    n--;

  memset (b_quotient, 0, sizeof (b_quotient));

  divmod_internal_2 (b_quotient, b_remainder, b_dividend, b_divisor, m, n);

  unsigned int quotient_len = 0;
  if (quotient)
    {
      wi_pack ((unsigned HOST_WIDE_INT *) quotient, b_quotient, m);
      quotient_len = canonize (quotient, (m + 1) / 2, dividend_prec);
      /* The quotient is neg if exactly one of the divisor or dividend is
	 neg.  */
      if (dividend_neg != divisor_neg)
	quotient_len = wi::sub_large (quotient, zeros, 1, quotient,
				      quotient_len, dividend_prec,
				      UNSIGNED, 0);
    }

  if (remainder)
    {
      wi_pack ((unsigned HOST_WIDE_INT *) remainder, b_remainder, n);
      *remainder_len = canonize (remainder, (n + 1) / 2, dividend_prec);
      /* The remainder is always the same sign as the dividend.  */
      if (dividend_neg)
	*remainder_len = wi::sub_large (remainder, zeros, 1, remainder,
					*remainder_len, dividend_prec,
					UNSIGNED, 0);
    }

  return quotient_len;
}
/*
 * Shifting, rotating and extraction.
 */
/* Left shift XVAL by SHIFT and store the result in VAL.  Return the
   number of blocks in VAL.  Both XVAL and VAL have PRECISION bits.  */
unsigned int
wi::lshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		  unsigned int xlen, unsigned int precision,
		  unsigned int shift)
{
  /* Split the shift into a whole-block shift and a subblock shift.  */
  unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
  unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;

  /* The whole-block shift fills with zeros.  */
  unsigned int len = BLOCKS_NEEDED (precision);
  for (unsigned int i = 0; i < skip; ++i)
    val[i] = 0;

  /* It's easier to handle the simple block case specially.  */
  if (small_shift == 0)
    for (unsigned int i = skip; i < len; ++i)
      val[i] = safe_uhwi (xval, xlen, i - skip);
  else
    {
      /* The first unfilled output block is a left shift of the first
	 block in XVAL.  The other output blocks contain bits from two
	 consecutive input blocks.  */
      unsigned HOST_WIDE_INT carry = 0;
      for (unsigned int i = skip; i < len; ++i)
	{
	  unsigned HOST_WIDE_INT x = safe_uhwi (xval, xlen, i - skip);
	  val[i] = (x << small_shift) | carry;
	  carry = x >> (-small_shift % HOST_BITS_PER_WIDE_INT);
	}
    }
  return canonize (val, len, precision);
}
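
/* For example, shifting the single-block value 1 left by 65 at
   PRECISION 128 (with 64-bit blocks) skips one whole block
   (skip == 1), then applies small_shift == 1, producing { 0, 2 }.  */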
/* Right shift XVAL by SHIFT and store the result in VAL.  Return the
   number of blocks in VAL.  The input has XPRECISION bits and the
   output has XPRECISION - SHIFT bits.  */
static unsigned int
rshift_large_common (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		     unsigned int xlen, unsigned int xprecision,
		     unsigned int shift)
{
  /* Split the shift into a whole-block shift and a subblock shift.  */
  unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
  unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;

  /* Work out how many blocks are needed to store the significant bits
     (excluding the upper zeros or signs).  */
  unsigned int len = BLOCKS_NEEDED (xprecision - shift);

  /* It's easier to handle the simple block case specially.  */
  if (small_shift == 0)
    for (unsigned int i = 0; i < len; ++i)
      val[i] = safe_uhwi (xval, xlen, i + skip);
  else
    {
      /* Each output block but the last is a combination of two input blocks.
	 The last block is a right shift of the last block in XVAL.  */
      unsigned HOST_WIDE_INT curr = safe_uhwi (xval, xlen, skip);
      for (unsigned int i = 0; i < len; ++i)
	{
	  val[i] = curr >> small_shift;
	  curr = safe_uhwi (xval, xlen, i + skip + 1);
	  val[i] |= curr << (-small_shift % HOST_BITS_PER_WIDE_INT);
	}
    }
  return len;
}
/* Logically right shift XVAL by SHIFT and store the result in VAL.
   Return the number of blocks in VAL.  XVAL has XPRECISION bits and
   VAL has PRECISION bits.  */
unsigned int
wi::lrshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		   unsigned int xlen, unsigned int xprecision,
		   unsigned int precision, unsigned int shift)
{
  unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);

  /* The value we just created has precision XPRECISION - SHIFT.
     Zero-extend it to wider precisions.  */
  if (precision > xprecision - shift)
    {
      unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
      if (small_prec)
	val[len - 1] = zext_hwi (val[len - 1], small_prec);
      else if (val[len - 1] < 0)
	{
	  /* Add a new block with a zero. */
	  val[len++] = 0;
	  return len;
	}
    }
  return canonize (val, len, precision);
}
/* Arithmetically right shift XVAL by SHIFT and store the result in VAL.
   Return the number of blocks in VAL.  XVAL has XPRECISION bits and
   VAL has PRECISION bits.  */
unsigned int
wi::arshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		   unsigned int xlen, unsigned int xprecision,
		   unsigned int precision, unsigned int shift)
{
  unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);

  /* The value we just created has precision XPRECISION - SHIFT.
     Sign-extend it to wider types.  */
  if (precision > xprecision - shift)
    {
      unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
      if (small_prec)
	val[len - 1] = sext_hwi (val[len - 1], small_prec);
    }
  return canonize (val, len, precision);
}
/* Return the number of leading (upper) zeros in X.  */
int
wi::clz (const wide_int_ref &x)
{
  /* Calculate how many bits there are above the highest represented block.  */
  int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;

  unsigned HOST_WIDE_INT high = x.uhigh ();
  if (count < 0)
    /* The upper -COUNT bits of HIGH are not part of the value.
       Clear them out.  */
    high = (high << -count) >> -count;
  else if (x.sign_mask () < 0)
    /* The upper bit is set, so there are no leading zeros.  */
    return 0;

  /* Check whether the value is zero.  */
  if (high == 0 && x.len == 1)
    return clz_zero (x.precision);

  /* We don't need to look below HIGH.  Either HIGH is nonzero,
     or the top bit of the block below is nonzero; clz_hwi is
     HOST_BITS_PER_WIDE_INT in the latter case.  */
  return count + clz_hwi (high);
}
/* Return the number of redundant sign bits in X.  (That is, the number
   of bits immediately below the sign bit that have the same value as
   the sign bit.)  */
int
wi::clrsb (const wide_int_ref &x)
{
  /* Calculate how many bits there are above the highest represented block.  */
  int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;

  unsigned HOST_WIDE_INT high = x.uhigh ();
  unsigned HOST_WIDE_INT mask = -1;
  if (count < 0)
    {
      /* The upper -COUNT bits of HIGH are not part of the value.
	 Clear them from both MASK and HIGH.  */
      mask >>= -count;
      high &= mask;
    }

  /* If the top bit is 1, count the number of leading 1s.  If the top
     bit is zero, count the number of leading zeros.  */
  if (high > mask / 2)
    high ^= mask;

  /* There are no sign bits below the top block, so we don't need to look
     beyond HIGH.  Note that clz_hwi is HOST_BITS_PER_WIDE_INT when
     HIGH is 0.  */
  return count + clz_hwi (high) - 1;
}
/* Return the number of trailing (lower) zeros in X.  */
int
wi::ctz (const wide_int_ref &x)
{
  if (x.len == 1 && x.ulow () == 0)
    return ctz_zero (x.precision);

  /* Having dealt with the zero case, there must be a block with a
     nonzero bit.  We don't care about the bits above the first 1.  */
  unsigned int i = 0;
  while (x.val[i] == 0)
    ++i;
  return i * HOST_BITS_PER_WIDE_INT + ctz_hwi (x.val[i]);
}
/* If X is an exact power of 2, return the base-2 logarithm, otherwise
   return -1.  */
int
wi::exact_log2 (const wide_int_ref &x)
{
  /* Reject cases where there are implicit -1 blocks above HIGH.  */
  if (x.len * HOST_BITS_PER_WIDE_INT < x.precision && x.sign_mask () < 0)
    return -1;

  /* Set CRUX to the index of the entry that should be nonzero.
     If the top block is zero then the next lowest block (if any)
     must have the high bit set.  */
  unsigned int crux = x.len - 1;
  if (crux > 0 && x.val[crux] == 0)
    crux -= 1;

  /* Check that all lower blocks are zero.  */
  for (unsigned int i = 0; i < crux; ++i)
    if (x.val[i] != 0)
      return -1;

  /* Get a zero-extended form of block CRUX.  */
  unsigned HOST_WIDE_INT hwi = x.val[crux];
  if ((crux + 1) * HOST_BITS_PER_WIDE_INT > x.precision)
    hwi = zext_hwi (hwi, x.precision % HOST_BITS_PER_WIDE_INT);

  /* Now it's down to whether HWI is a power of 2.  */
  int res = ::exact_log2 (hwi);
  if (res >= 0)
    res += crux * HOST_BITS_PER_WIDE_INT;
  return res;
}
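
/* For example, the 128-bit value 2^64 (blocks { 0, 1 }) has CRUX 1;
   the lower block is verified to be zero and ::exact_log2 (1) == 0
   gives 0 + 1 * 64 == 64.  */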
/* Return the base-2 logarithm of X, rounding down.  Return -1 if X is 0.  */
int
wi::floor_log2 (const wide_int_ref &x)
{
  return x.precision - 1 - clz (x);
}
/* Return the index of the first (lowest) set bit in X, counting from 1.
   Return 0 if X is 0.  */
int
wi::ffs (const wide_int_ref &x)
{
  return eq_p (x, 0) ? 0 : ctz (x) + 1;
}
/* Return true if sign-extending X to have precision PRECISION would give
   the minimum signed value at that precision.  */
bool
wi::only_sign_bit_p (const wide_int_ref &x, unsigned int precision)
{
  return ctz (x) + 1 == int (precision);
}
/* Return true if X represents the minimum signed value.  */
bool
wi::only_sign_bit_p (const wide_int_ref &x)
{
  return only_sign_bit_p (x, x.precision);
}
/*
 * Private utilities.
 */

void gt_ggc_mx (widest_int *) { }
void gt_pch_nx (widest_int *, void (*) (void *, void *), void *) { }
void gt_pch_nx (widest_int *) { }
/*
 * Private debug printing routines.
 */
#ifdef DEBUG_WIDE_INT
/* The debugging routines print results of wide operations into the
   dump files of the respective passes in which they were called.  */
static char *
dumpa (const HOST_WIDE_INT *val, unsigned int len, unsigned int prec, char *buf)
{
  int i;
  unsigned int l;
  const char * sep = "";

  l = sprintf (buf, "[%d (", prec);
  for (i = len - 1; i >= 0; i--)
    {
      l += sprintf (&buf[l], "%s" HOST_WIDE_INT_PRINT_HEX, sep, val[i]);
      sep = " ";
    }

  gcc_assert (len != 0);

  l += sprintf (&buf[l], ")]");

  gcc_assert (l < MAX_SIZE);
  return buf;
}
#endif

/* The debugging routines print results of wide operations into the
   dump files of the respective passes in which they were called.  */
char *
wide_int_ro::dump (char* buf) const
{
  int i;
  unsigned int l;
  const char * sep = "";

  l = sprintf (buf, "[%d (", precision);
  for (i = len - 1; i >= 0; i--)
    {
      l += sprintf (&buf[l], "%s" HOST_WIDE_INT_PRINT_HEX, sep, val[i]);
      sep = " ";
    }

  gcc_assert (len != 0);

  l += sprintf (&buf[l], ")]");

  gcc_assert (l < MAX_SIZE);
  return buf;
}