1 /* Operations with very long integers.
2 Copyright (C) 2012-2013 Free Software Foundation, Inc.
3 Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by the
9 Free Software Foundation; either version 3, or (at your option) any
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
23 #include "coretypes.h"
30 /* This is the maximal size of the buffer needed for dump. */
31 const unsigned int MAX_SIZE
= (4 * (MAX_BITSIZE_MODE_ANY_INT
/ 4
32 + (MAX_BITSIZE_MODE_ANY_INT
33 / HOST_BITS_PER_WIDE_INT
)
36 static const HOST_WIDE_INT zeros
[WIDE_INT_MAX_ELTS
] = {};
/* Quantities to deal with values that hold half of a wide int.  Used
   in multiply and divide.  */
#define HALF_INT_MASK (((HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT) - 1)

/* Index of the HWI block that contains bit TARGET.  */
#define BLOCK_OF(TARGET) ((TARGET) / HOST_BITS_PER_WIDE_INT)
/* Number of HWI blocks needed to hold PREC bits (at least one).  */
#define BLOCKS_NEEDED(PREC) \
  (PREC ? (((PREC) + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT) : 1)
/* -1 if the top bit of X is set, 0 otherwise.  */
#define SIGN_MASK(X) ((HOST_WIDE_INT) (X) < 0 ? -1 : 0)
51 /* Return the value a VAL[I] if I < LEN, otherwise, return 0 or -1
52 based on the top existing bit of VAL. */
54 static unsigned HOST_WIDE_INT
55 safe_uhwi (const HOST_WIDE_INT
*val
, unsigned int len
, unsigned int i
)
57 return i
< len
? val
[i
] : val
[len
- 1] < 0 ? (HOST_WIDE_INT
) -1 : 0;
60 /* Convert the integer in VAL to canonical form, returning its new length.
61 LEN is the number of blocks currently in VAL and PRECISION is the number
62 of bits in the integer it represents.
64 This function only changes the representation, not the value. */
66 canonize (HOST_WIDE_INT
*val
, unsigned int len
, unsigned int precision
)
68 unsigned int blocks_needed
= BLOCKS_NEEDED (precision
);
72 if (len
> blocks_needed
)
79 if (len
* HOST_BITS_PER_WIDE_INT
> precision
)
80 top
= sext_hwi (top
, precision
% HOST_BITS_PER_WIDE_INT
);
81 if (top
!= 0 && top
!= (HOST_WIDE_INT
)-1)
84 /* At this point we know that the top is either 0 or -1. Find the
85 first block that is not a copy of this. */
86 for (i
= len
- 2; i
>= 0; i
--)
88 HOST_WIDE_INT x
= val
[i
];
91 if (SIGN_MASK (x
) == top
)
94 /* We need an extra block because the top bit block i does
95 not match the extension. */
100 /* The number is 0 or -1. */
105 * Conversion routines in and out of wide_int.
108 /* Copy XLEN elements from XVAL to VAL. If NEED_CANON, canonize the
109 result for an integer with precision PRECISION. Return the length
110 of VAL (after any canonization. */
112 wi::from_array (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*xval
,
113 unsigned int xlen
, unsigned int precision
, bool need_canon
)
115 for (unsigned i
= 0; i
< xlen
; i
++)
117 return need_canon
? canonize (val
, xlen
, precision
) : xlen
;
120 /* Construct a wide int from a buffer of length LEN. BUFFER will be
121 read according to byte endianess and word endianess of the target.
122 Only the lower LEN bytes of the result are set; the remaining high
123 bytes are cleared. */
125 wi::from_buffer (const unsigned char *buffer
, unsigned int buffer_len
)
127 unsigned int precision
= buffer_len
* BITS_PER_UNIT
;
128 wide_int result
= wide_int::create (precision
);
129 unsigned int words
= buffer_len
/ UNITS_PER_WORD
;
131 /* We have to clear all the bits ourself, as we merely or in values
133 unsigned int len
= BLOCKS_NEEDED (precision
);
134 HOST_WIDE_INT
*val
= result
.write_val ();
135 for (unsigned int i
= 0; i
< len
; ++i
)
138 for (unsigned int byte
= 0; byte
< buffer_len
; byte
++)
142 unsigned int bitpos
= byte
* BITS_PER_UNIT
;
143 unsigned HOST_WIDE_INT value
;
145 if (buffer_len
> UNITS_PER_WORD
)
147 unsigned int word
= byte
/ UNITS_PER_WORD
;
149 if (WORDS_BIG_ENDIAN
)
150 word
= (words
- 1) - word
;
152 offset
= word
* UNITS_PER_WORD
;
154 if (BYTES_BIG_ENDIAN
)
155 offset
+= (UNITS_PER_WORD
- 1) - (byte
% UNITS_PER_WORD
);
157 offset
+= byte
% UNITS_PER_WORD
;
160 offset
= BYTES_BIG_ENDIAN
? (buffer_len
- 1) - byte
: byte
;
162 value
= (unsigned HOST_WIDE_INT
) buffer
[offset
];
164 index
= bitpos
/ HOST_BITS_PER_WIDE_INT
;
165 val
[index
] |= value
<< (bitpos
% HOST_BITS_PER_WIDE_INT
);
168 result
.set_len (canonize (val
, len
, precision
));
173 /* Sets RESULT from THIS, the sign is taken according to SGN. */
175 wi::to_mpz (wide_int x
, mpz_t result
, signop sgn
)
177 bool negative
= false;
178 int len
= x
.get_len ();
179 const HOST_WIDE_INT
*v
= x
.get_val ();
180 int small_prec
= x
.get_precision () & (HOST_BITS_PER_WIDE_INT
- 1);
182 if (wi::neg_p (x
, sgn
))
185 /* We use ones complement to avoid -x80..0 edge case that -
190 if (sgn
== UNSIGNED
&& small_prec
)
192 HOST_WIDE_INT t
[WIDE_INT_MAX_ELTS
];
194 for (int i
= 0; i
< len
- 1; i
++)
196 t
[len
-1] = zext_hwi (v
[len
-1], small_prec
);
197 mpz_import (result
, len
, -1, sizeof (HOST_WIDE_INT
), 0, 0, t
);
200 mpz_import (result
, len
, -1, sizeof (HOST_WIDE_INT
), 0, 0, v
);
203 mpz_com (result
, result
);
206 /* Returns VAL converted to TYPE. If WRAP is true, then out-of-range
207 values of VAL will be wrapped; otherwise, they will be set to the
208 appropriate minimum or maximum TYPE bound. */
210 wi::from_mpz (const_tree type
, mpz_t x
, bool wrap
)
213 int prec
= TYPE_PRECISION (type
);
214 wide_int res
= wide_int::create (prec
);
222 get_type_static_bounds (type
, min
, max
);
224 if (mpz_cmp (x
, min
) < 0)
226 else if (mpz_cmp (x
, max
) > 0)
233 /* Determine the number of unsigned HOST_WIDE_INTs that are required
234 for representing the value. The code to calculate count is
235 extracted from the GMP manual, section "Integer Import and Export":
236 http://gmplib.org/manual/Integer-Import-and-Export.html */
237 numb
= 8*sizeof(HOST_WIDE_INT
);
238 count
= (mpz_sizeinbase (x
, 2) + numb
-1) / numb
;
239 HOST_WIDE_INT
*val
= res
.write_val ();
240 mpz_export (val
, &count
, -1, sizeof (HOST_WIDE_INT
), 0, 0, x
);
255 * Largest and smallest values in a mode.
258 /* Return the largest SGNed number that is representable in PRECISION bits.
260 TODO: There is still code from the double_int era that trys to
261 make up for the fact that double int's could not represent the
262 min and max values of all types. This code should be removed
263 because the min and max values can always be represented in
264 wide_ints and int-csts. */
266 wi::max_value (unsigned int precision
, signop sgn
)
269 return shwi (0, precision
);
270 else if (sgn
== UNSIGNED
)
271 /* The unsigned max is just all ones. */
272 return shwi (-1, precision
);
274 /* The signed max is all ones except the top bit. This must be
275 explicitly represented. */
276 return mask (precision
- 1, false, precision
);
279 /* Return the largest SGNed number that is representable in PRECISION bits. */
281 wi::min_value (unsigned int precision
, signop sgn
)
283 if (precision
== 0 || sgn
== UNSIGNED
)
284 return uhwi (0, precision
);
286 /* The signed min is all zeros except the top bit. This must be
287 explicitly represented. */
288 return wi::set_bit_in_zero (precision
- 1, precision
);
295 /* Convert the number represented by XVAL, XLEN and XPRECISION, which has
296 signedness SGN, to an integer that has PRECISION bits. Store the blocks
297 in VAL and return the number of blocks used.
299 This function can handle both extension (PRECISION > XPRECISION)
300 and truncation (PRECISION < XPRECISION). */
302 wi::force_to_size (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*xval
,
303 unsigned int xlen
, unsigned int xprecision
,
304 unsigned int precision
, signop sgn
)
306 unsigned int blocks_needed
= BLOCKS_NEEDED (precision
);
307 unsigned int len
= blocks_needed
< xlen
? blocks_needed
: xlen
;
308 for (unsigned i
= 0; i
< len
; i
++)
311 if (precision
> xprecision
)
313 unsigned int small_xprecision
= xprecision
% HOST_BITS_PER_WIDE_INT
;
318 if (small_xprecision
&& len
== BLOCKS_NEEDED (xprecision
))
319 val
[len
- 1] = zext_hwi (val
[len
- 1], small_xprecision
);
320 else if (val
[len
- 1] < 0)
322 while (len
< BLOCKS_NEEDED (xprecision
))
324 if (small_xprecision
)
325 val
[len
- 1] = zext_hwi (val
[len
- 1], small_xprecision
);
332 if (small_xprecision
&& len
== BLOCKS_NEEDED (xprecision
))
333 val
[len
- 1] = sext_hwi (val
[len
- 1], small_xprecision
);
336 len
= canonize (val
, len
, precision
);
341 /* This function hides the fact that we cannot rely on the bits beyond
342 the precision. This issue comes up in the relational comparisions
343 where we do allow comparisons of values of different precisions. */
344 static inline HOST_WIDE_INT
345 selt (const HOST_WIDE_INT
*a
, unsigned int len
,
346 unsigned int blocks_needed
,
347 unsigned int small_prec
,
348 unsigned int index
, signop sgn
)
353 else if (index
< blocks_needed
|| sgn
== SIGNED
)
354 /* Signed or within the precision. */
355 val
= SIGN_MASK (a
[len
- 1]);
357 /* Unsigned extension beyond the precision. */
360 if (small_prec
&& index
== blocks_needed
- 1)
361 return (sgn
== SIGNED
362 ? sext_hwi (val
, small_prec
)
363 : zext_hwi (val
, small_prec
));
368 /* Find the highest bit represented in a wide int. This will in
369 general have the same value as the sign bit. */
370 static inline HOST_WIDE_INT
371 top_bit_of (const HOST_WIDE_INT
*a
, unsigned int len
, unsigned int prec
)
373 int excess
= len
* HOST_BITS_PER_WIDE_INT
- prec
;
374 unsigned HOST_WIDE_INT val
= a
[len
- 1];
377 return val
>> (HOST_BITS_PER_WIDE_INT
- 1);
381 * Comparisons, note that only equality is an operator. The other
382 * comparisons cannot be operators since they are inherently singed or
383 * unsigned and C++ has no such operators.
386 /* Return true if OP0 == OP1. */
388 wi::eq_p_large (const HOST_WIDE_INT
*op0
, unsigned int op0len
,
389 const HOST_WIDE_INT
*op1
, unsigned int op1len
,
393 unsigned int small_prec
= prec
& (HOST_BITS_PER_WIDE_INT
- 1);
395 while (op0len
!= op1len
)
398 if (op0len
== BLOCKS_NEEDED (prec
) && small_prec
)
400 /* It does not matter if we zext or sext here, we just have to
401 do both the same way. */
402 if (zext_hwi (op0
[l0
], small_prec
) != zext_hwi (op1
[l0
], small_prec
))
408 if (op0
[l0
] != op1
[l0
])
416 /* Return true if OP0 < OP1 using signed comparisons. */
418 wi::lts_p_large (const HOST_WIDE_INT
*op0
, unsigned int op0len
,
419 unsigned int precision
,
420 const HOST_WIDE_INT
*op1
, unsigned int op1len
)
422 HOST_WIDE_INT s0
, s1
;
423 unsigned HOST_WIDE_INT u0
, u1
;
424 unsigned int blocks_needed
= BLOCKS_NEEDED (precision
);
425 unsigned int small_prec
= precision
& (HOST_BITS_PER_WIDE_INT
- 1);
426 int l
= MAX (op0len
- 1, op1len
- 1);
428 /* Only the top block is compared as signed. The rest are unsigned
430 s0
= selt (op0
, op0len
, blocks_needed
, small_prec
, l
, SIGNED
);
431 s1
= selt (op1
, op1len
, blocks_needed
, small_prec
, l
, SIGNED
);
440 u0
= selt (op0
, op0len
, blocks_needed
, small_prec
, l
, SIGNED
);
441 u1
= selt (op1
, op1len
, blocks_needed
, small_prec
, l
, SIGNED
);
453 /* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
456 wi::cmps_large (const HOST_WIDE_INT
*op0
, unsigned int op0len
,
457 unsigned int precision
,
458 const HOST_WIDE_INT
*op1
, unsigned int op1len
)
460 HOST_WIDE_INT s0
, s1
;
461 unsigned HOST_WIDE_INT u0
, u1
;
462 unsigned int blocks_needed
= BLOCKS_NEEDED (precision
);
463 unsigned int small_prec
= precision
& (HOST_BITS_PER_WIDE_INT
- 1);
464 int l
= MAX (op0len
- 1, op1len
- 1);
466 /* Only the top block is compared as signed. The rest are unsigned
468 s0
= selt (op0
, op0len
, blocks_needed
, small_prec
, l
, SIGNED
);
469 s1
= selt (op1
, op1len
, blocks_needed
, small_prec
, l
, SIGNED
);
478 u0
= selt (op0
, op0len
, blocks_needed
, small_prec
, l
, SIGNED
);
479 u1
= selt (op1
, op1len
, blocks_needed
, small_prec
, l
, SIGNED
);
491 /* Return true if OP0 < OP1 using unsigned comparisons. */
493 wi::ltu_p_large (const HOST_WIDE_INT
*op0
, unsigned int op0len
,
494 unsigned int precision
,
495 const HOST_WIDE_INT
*op1
, unsigned int op1len
)
497 unsigned HOST_WIDE_INT x0
;
498 unsigned HOST_WIDE_INT x1
;
499 unsigned int blocks_needed
= BLOCKS_NEEDED (precision
);
500 unsigned int small_prec
= precision
& (HOST_BITS_PER_WIDE_INT
- 1);
501 int l
= MAX (op0len
- 1, op1len
- 1);
505 x0
= selt (op0
, op0len
, blocks_needed
, small_prec
, l
, UNSIGNED
);
506 x1
= selt (op1
, op1len
, blocks_needed
, small_prec
, l
, UNSIGNED
);
517 /* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
518 unsigned compares. */
520 wi::cmpu_large (const HOST_WIDE_INT
*op0
, unsigned int op0len
,
521 unsigned int precision
,
522 const HOST_WIDE_INT
*op1
, unsigned int op1len
)
524 unsigned HOST_WIDE_INT x0
;
525 unsigned HOST_WIDE_INT x1
;
526 unsigned int blocks_needed
= BLOCKS_NEEDED (precision
);
527 unsigned int small_prec
= precision
& (HOST_BITS_PER_WIDE_INT
- 1);
528 int l
= MAX (op0len
- 1, op1len
- 1);
532 x0
= selt (op0
, op0len
, blocks_needed
, small_prec
, l
, UNSIGNED
);
533 x1
= selt (op1
, op1len
, blocks_needed
, small_prec
, l
, UNSIGNED
);
548 /* Sign-extend the number represented by XVAL and XLEN into VAL,
549 starting at OFFSET. Return the number of blocks in VAL. Both XVAL
550 and VAL have PRECISION bits. */
552 wi::sext_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*xval
,
553 unsigned int xlen
, unsigned int precision
, unsigned int offset
)
555 unsigned int len
= offset
/ HOST_BITS_PER_WIDE_INT
;
556 /* Extending beyond the precision is a no-op. If we have only stored
557 OFFSET bits or fewer, the rest are already signs. */
558 if (offset
>= precision
|| len
>= xlen
)
560 for (unsigned i
= 0; i
< xlen
; ++i
)
564 unsigned int suboffset
= offset
% HOST_BITS_PER_WIDE_INT
;
565 for (unsigned int i
= 0; i
< len
; i
++)
569 val
[len
] = sext_hwi (xval
[len
], suboffset
);
572 return canonize (val
, len
, precision
);
575 /* Zero-extend the number represented by XVAL and XLEN into VAL,
576 starting at OFFSET. Return the number of blocks in VAL. Both XVAL
577 and VAL have PRECISION bits. */
579 wi::zext_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*xval
,
580 unsigned int xlen
, unsigned int precision
, unsigned int offset
)
582 unsigned int len
= offset
/ HOST_BITS_PER_WIDE_INT
;
583 /* Extending beyond the precision is a no-op. If we have only stored
584 OFFSET bits or fewer, and the upper stored bit is zero, then there
586 if (offset
>= precision
|| (len
>= xlen
&& xval
[xlen
- 1] >= 0))
588 for (unsigned i
= 0; i
< xlen
; ++i
)
592 unsigned int suboffset
= offset
% HOST_BITS_PER_WIDE_INT
;
593 for (unsigned int i
= 0; i
< len
; i
++)
594 val
[i
] = i
< xlen
? xval
[i
] : -1;
596 val
[len
] = zext_hwi (len
< xlen
? xval
[len
] : -1, suboffset
);
599 return canonize (val
, len
+ 1, precision
);
603 * Masking, inserting, shifting, rotating.
606 /* Insert WIDTH bits from Y into X starting at START. */
608 wi::insert (const wide_int
&x
, const wide_int
&y
, unsigned int start
,
615 unsigned int precision
= x
.get_precision ();
616 if (start
>= precision
)
619 gcc_checking_assert (precision
>= width
);
621 if (start
+ width
>= precision
)
622 width
= precision
- start
;
624 mask
= wi::shifted_mask (start
, width
, false, precision
);
625 tmp
= wi::lshift (wide_int::from (y
, precision
, UNSIGNED
), start
);
628 tmp
= wi::bit_and_not (x
, mask
);
629 result
= result
| tmp
;
634 /* Copy the number represented by XVAL and XLEN into VAL, setting bit BIT.
635 Return the number of blocks in VAL. Both XVAL and VAL have PRECISION
638 wi::set_bit_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*xval
,
639 unsigned int xlen
, unsigned int precision
, unsigned int bit
)
641 unsigned int block
= bit
/ HOST_BITS_PER_WIDE_INT
;
642 unsigned int subbit
= bit
% HOST_BITS_PER_WIDE_INT
;
644 if (block
+ 1 >= xlen
)
646 /* The operation either affects the last current block or needs
648 unsigned int len
= block
+ 1;
649 for (unsigned int i
= 0; i
< len
; i
++)
650 val
[i
] = safe_uhwi (xval
, xlen
, i
);
651 val
[block
] |= (unsigned HOST_WIDE_INT
) 1 << subbit
;
653 /* If the bit we just set is at the msb of the block, make sure
654 that any higher bits are zeros. */
655 if (bit
+ 1 < precision
&& subbit
== HOST_BITS_PER_WIDE_INT
- 1)
661 for (unsigned int i
= 0; i
< xlen
; i
++)
663 val
[block
] |= (unsigned HOST_WIDE_INT
) 1 << subbit
;
664 return canonize (val
, xlen
, precision
);
670 wide_int_storage::bswap () const
672 wide_int result
= wide_int::create (precision
);
674 unsigned int len
= BLOCKS_NEEDED (precision
);
675 unsigned int xlen
= get_len ();
676 const HOST_WIDE_INT
*xval
= get_val ();
677 HOST_WIDE_INT
*val
= result
.write_val ();
679 /* This is not a well defined operation if the precision is not a
681 gcc_assert ((precision
& 0x7) == 0);
683 for (i
= 0; i
< len
; i
++)
686 /* Only swap the bytes that are not the padding. */
687 for (s
= 0; s
< precision
; s
+= 8)
689 unsigned int d
= precision
- s
- 8;
690 unsigned HOST_WIDE_INT byte
;
692 unsigned int block
= s
/ HOST_BITS_PER_WIDE_INT
;
693 unsigned int offset
= s
& (HOST_BITS_PER_WIDE_INT
- 1);
695 byte
= (safe_uhwi (xval
, xlen
, block
) >> offset
) & 0xff;
697 block
= d
/ HOST_BITS_PER_WIDE_INT
;
698 offset
= d
& (HOST_BITS_PER_WIDE_INT
- 1);
700 val
[block
] |= byte
<< offset
;
703 result
.set_len (canonize (val
, len
, precision
));
707 /* Fill VAL with a mask where the lower WIDTH bits are ones and the bits
708 above that up to PREC are zeros. The result is inverted if NEGATE
709 is true. Return the number of blocks in VAL. */
711 wi::mask (HOST_WIDE_INT
*val
, unsigned int width
, bool negate
,
714 gcc_assert (width
< 4 * MAX_BITSIZE_MODE_ANY_INT
);
715 gcc_assert (prec
<= 4 * MAX_BITSIZE_MODE_ANY_INT
);
719 val
[0] = negate
? 0 : -1;
724 val
[0] = negate
? -1 : 0;
729 while (i
< width
/ HOST_BITS_PER_WIDE_INT
)
730 val
[i
++] = negate
? 0 : -1;
732 unsigned int shift
= width
& (HOST_BITS_PER_WIDE_INT
- 1);
735 HOST_WIDE_INT last
= (((unsigned HOST_WIDE_INT
) 1) << shift
) - 1;
736 val
[i
++] = negate
? ~last
: last
;
739 val
[i
++] = negate
? -1 : 0;
744 /* Fill VAL with a mask where the lower START bits are zeros, the next WIDTH
745 bits are ones, and the bits above that up to PREC are zeros. The result
746 is inverted if NEGATE is true. Return the number of blocks in VAL. */
748 wi::shifted_mask (HOST_WIDE_INT
*val
, unsigned int start
, unsigned int width
,
749 bool negate
, unsigned int prec
)
751 if (start
>= prec
|| width
== 0)
753 val
[0] = negate
? -1 : 0;
757 if (width
> prec
- start
)
758 width
= prec
- start
;
759 unsigned int end
= start
+ width
;
762 while (i
< start
/ HOST_BITS_PER_WIDE_INT
)
763 val
[i
++] = negate
? -1 : 0;
765 unsigned int shift
= start
& (HOST_BITS_PER_WIDE_INT
- 1);
768 HOST_WIDE_INT block
= (((unsigned HOST_WIDE_INT
) 1) << shift
) - 1;
769 shift
= end
& (HOST_BITS_PER_WIDE_INT
- 1);
773 block
= (((unsigned HOST_WIDE_INT
) 1) << shift
) - block
- 1;
774 val
[i
++] = negate
? ~block
: block
;
779 val
[i
++] = negate
? block
: ~block
;
782 while (i
< end
/ HOST_BITS_PER_WIDE_INT
)
784 val
[i
++] = negate
? 0 : -1;
786 shift
= end
& (HOST_BITS_PER_WIDE_INT
- 1);
790 HOST_WIDE_INT block
= (((unsigned HOST_WIDE_INT
) 1) << shift
) - 1;
791 val
[i
++] = negate
? ~block
: block
;
794 val
[i
++] = negate
? -1 : 0;
800 * logical operations.
803 /* Set VAL to OP0 & OP1. Return the number of blocks used. */
805 wi::and_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*op0
,
806 unsigned int op0len
, const HOST_WIDE_INT
*op1
,
807 unsigned int op1len
, unsigned int prec
)
811 bool need_canon
= true;
813 unsigned int len
= MAX (op0len
, op1len
);
816 HOST_WIDE_INT op1mask
= -top_bit_of (op1
, op1len
, prec
);
834 HOST_WIDE_INT op0mask
= -top_bit_of (op0
, op0len
, prec
);
850 val
[l0
] = op0
[l0
] & op1
[l0
];
855 len
= canonize (val
, len
, prec
);
860 /* Set VAL to OP0 & ~OP1. Return the number of blocks used. */
862 wi::and_not_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*op0
,
863 unsigned int op0len
, const HOST_WIDE_INT
*op1
,
864 unsigned int op1len
, unsigned int prec
)
869 bool need_canon
= true;
871 unsigned int len
= MAX (op0len
, op1len
);
874 HOST_WIDE_INT op1mask
= -top_bit_of (op1
, op1len
, prec
);
892 HOST_WIDE_INT op0mask
= -top_bit_of (op0
, op0len
, prec
);
908 val
[l0
] = op0
[l0
] & ~op1
[l0
];
913 len
= canonize (val
, len
, prec
);
918 /* Set VAL to OP0 | OP1. Return the number of blocks used. */
920 wi::or_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*op0
,
921 unsigned int op0len
, const HOST_WIDE_INT
*op1
,
922 unsigned int op1len
, unsigned int prec
)
927 bool need_canon
= true;
929 unsigned int len
= MAX (op0len
, op1len
);
932 HOST_WIDE_INT op1mask
= -top_bit_of (op1
, op1len
, prec
);
950 HOST_WIDE_INT op0mask
= -top_bit_of (op0
, op0len
, prec
);
966 val
[l0
] = op0
[l0
] | op1
[l0
];
971 len
= canonize (val
, len
, prec
);
976 /* Set VAL to OP0 | ~OP1. Return the number of blocks used. */
978 wi::or_not_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*op0
,
979 unsigned int op0len
, const HOST_WIDE_INT
*op1
,
980 unsigned int op1len
, unsigned int prec
)
985 bool need_canon
= true;
987 unsigned int len
= MAX (op0len
, op1len
);
990 HOST_WIDE_INT op1mask
= -top_bit_of (op1
, op1len
, prec
);
1008 HOST_WIDE_INT op0mask
= -top_bit_of (op0
, op0len
, prec
);
1024 val
[l0
] = op0
[l0
] | ~op1
[l0
];
1029 len
= canonize (val
, len
, prec
);
1034 /* Set VAL to OP0 ^ OP1. Return the number of blocks used. */
1036 wi::xor_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*op0
,
1037 unsigned int op0len
, const HOST_WIDE_INT
*op1
,
1038 unsigned int op1len
, unsigned int prec
)
1041 int l0
= op0len
- 1;
1042 int l1
= op1len
- 1;
1044 unsigned int len
= MAX (op0len
, op1len
);
1047 HOST_WIDE_INT op1mask
= -top_bit_of (op1
, op1len
, prec
);
1050 val
[l0
] = op0
[l0
] ^ op1mask
;
1057 HOST_WIDE_INT op0mask
= -top_bit_of (op0
, op0len
, prec
);
1060 val
[l1
] = op0mask
^ op1
[l1
];
1067 val
[l0
] = op0
[l0
] ^ op1
[l0
];
1071 return canonize (val
, len
, prec
);
1078 /* Set VAL to OP0 + OP1. If OVERFLOW is nonnull, record in *OVERFLOW
1079 whether the result overflows when OP0 and OP1 are treated as having
1080 signedness SGN. Return the number of blocks in VAL. */
1082 wi::add_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*op0
,
1083 unsigned int op0len
, const HOST_WIDE_INT
*op1
,
1084 unsigned int op1len
, unsigned int prec
,
1085 signop sgn
, bool *overflow
)
1087 unsigned HOST_WIDE_INT o0
= 0;
1088 unsigned HOST_WIDE_INT o1
= 0;
1089 unsigned HOST_WIDE_INT x
= 0;
1090 unsigned HOST_WIDE_INT carry
= 0;
1091 unsigned HOST_WIDE_INT old_carry
= 0;
1092 unsigned HOST_WIDE_INT mask0
, mask1
;
1095 unsigned int len
= MAX (op0len
, op1len
);
1096 mask0
= -top_bit_of (op0
, op0len
, prec
);
1097 mask1
= -top_bit_of (op1
, op1len
, prec
);
1098 /* Add all of the explicitly defined elements. */
1100 for (i
= 0; i
< len
; i
++)
1102 o0
= i
< op0len
? (unsigned HOST_WIDE_INT
) op0
[i
] : mask0
;
1103 o1
= i
< op1len
? (unsigned HOST_WIDE_INT
) op1
[i
] : mask1
;
1104 x
= o0
+ o1
+ carry
;
1107 carry
= carry
== 0 ? x
< o0
: x
<= o0
;
1110 if (len
* HOST_BITS_PER_WIDE_INT
< prec
)
1112 val
[len
] = mask0
+ mask1
+ carry
;
1119 unsigned int shift
= -prec
% HOST_BITS_PER_WIDE_INT
;
1122 unsigned HOST_WIDE_INT x
= (val
[len
- 1] ^ o0
) & (val
[len
- 1] ^ o1
);
1123 *overflow
= HOST_WIDE_INT (x
<< shift
) < 0;
1127 /* Put the MSB of X and O0 and in the top of the HWI. */
1131 *overflow
= (x
<= o0
);
1133 *overflow
= (x
< o0
);
1137 return canonize (val
, len
, prec
);
1140 /* This is bogus. We should always return the precision and leave the
1141 caller to handle target dependencies. */
1143 clz_zero (unsigned int precision
)
1147 enum machine_mode mode
= mode_for_size (precision
, MODE_INT
, 0);
1148 if (mode
== BLKmode
)
1149 mode_for_size (precision
, MODE_PARTIAL_INT
, 0);
1151 /* Even if the value at zero is undefined, we have to come up
1152 with some replacement. Seems good enough. */
1153 if (mode
== BLKmode
)
1155 else if (!CLZ_DEFINED_VALUE_AT_ZERO (mode
, count
))
1160 /* This is bogus. We should always return the precision and leave the
1161 caller to handle target dependencies. */
1163 ctz_zero (unsigned int precision
)
1167 enum machine_mode mode
= mode_for_size (precision
, MODE_INT
, 0);
1168 if (mode
== BLKmode
)
1169 mode_for_size (precision
, MODE_PARTIAL_INT
, 0);
1171 /* Even if the value at zero is undefined, we have to come up
1172 with some replacement. Seems good enough. */
1173 if (mode
== BLKmode
)
1175 else if (!CTZ_DEFINED_VALUE_AT_ZERO (mode
, count
))
1180 /* Subroutines of the multiplication and division operations. Unpack
1181 the first IN_LEN HOST_WIDE_INTs in INPUT into 2 * IN_LEN
1182 HOST_HALF_WIDE_INTs of RESULT. The rest of RESULT is filled by
1183 uncompressing the top bit of INPUT[IN_LEN - 1]. */
1185 wi_unpack (unsigned HOST_HALF_WIDE_INT
*result
,
1186 const unsigned HOST_WIDE_INT
*input
,
1187 unsigned int in_len
, unsigned int out_len
,
1188 unsigned int prec
, signop sgn
)
1192 unsigned int small_prec
= prec
& (HOST_BITS_PER_WIDE_INT
- 1);
1193 unsigned int blocks_needed
= BLOCKS_NEEDED (prec
);
1198 mask
= -top_bit_of ((const HOST_WIDE_INT
*) input
, in_len
, prec
);
1199 mask
&= HALF_INT_MASK
;
1204 for (i
= 0; i
< in_len
; i
++)
1206 HOST_WIDE_INT x
= input
[i
];
1207 if (i
== blocks_needed
- 1 && small_prec
)
1210 x
= sext_hwi (x
, small_prec
);
1212 x
= zext_hwi (x
, small_prec
);
1215 result
[j
++] = x
>> HOST_BITS_PER_HALF_WIDE_INT
;
1218 /* Smear the sign bit. */
1223 /* The inverse of wi_unpack. IN_LEN is the the number of input
1224 blocks. The number of output blocks will be half this amount. */
1226 wi_pack (unsigned HOST_WIDE_INT
*result
,
1227 const unsigned HOST_HALF_WIDE_INT
*input
,
1228 unsigned int in_len
)
1233 while (i
+ 2 < in_len
)
1235 result
[j
++] = (unsigned HOST_WIDE_INT
)input
[i
]
1236 | ((unsigned HOST_WIDE_INT
)input
[i
+ 1]
1237 << HOST_BITS_PER_HALF_WIDE_INT
);
1241 /* Handle the case where in_len is odd. For this we zero extend. */
1243 result
[j
++] = (unsigned HOST_WIDE_INT
)input
[i
];
1245 result
[j
++] = (unsigned HOST_WIDE_INT
)input
[i
]
1246 | ((unsigned HOST_WIDE_INT
)input
[i
+ 1] << HOST_BITS_PER_HALF_WIDE_INT
);
1249 /* Multiply Op1 by Op2. If HIGH is set, only the upper half of the
1250 result is returned. If FULL is set, the entire result is returned
1251 in a mode that is twice the width of the inputs. However, that
1252 mode needs to exist if the value is to be usable. Clients that use
1253 FULL need to check for this.
1255 If HIGH or FULL are not set, throw away the upper half after the check
1256 is made to see if it overflows. Unfortunately there is no better
1257 way to check for overflow than to do this. OVERFLOW is assumed to
1258 be sticky so it should be initialized. SGN controls the signedness
1259 and is used to check overflow or if HIGH or FULL is set. */
1261 wi::mul_internal (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*op1
,
1262 unsigned int op1len
, const HOST_WIDE_INT
*op2
,
1263 unsigned int op2len
, unsigned int prec
, signop sgn
,
1264 bool *overflow
, bool high
, bool full
)
1266 unsigned HOST_WIDE_INT o0
, o1
, k
, t
;
1269 unsigned int blocks_needed
= BLOCKS_NEEDED (prec
);
1270 unsigned int half_blocks_needed
= blocks_needed
* 2;
1271 /* The sizes here are scaled to support a 2x largest mode by 2x
1272 largest mode yielding a 4x largest mode result. This is what is
1275 unsigned HOST_HALF_WIDE_INT
1276 u
[4 * MAX_BITSIZE_MODE_ANY_INT
/ HOST_BITS_PER_HALF_WIDE_INT
];
1277 unsigned HOST_HALF_WIDE_INT
1278 v
[4 * MAX_BITSIZE_MODE_ANY_INT
/ HOST_BITS_PER_HALF_WIDE_INT
];
1279 /* The '2' in 'R' is because we are internally doing a full
1281 unsigned HOST_HALF_WIDE_INT
1282 r
[2 * 4 * MAX_BITSIZE_MODE_ANY_INT
/ HOST_BITS_PER_HALF_WIDE_INT
];
1283 HOST_WIDE_INT mask
= ((HOST_WIDE_INT
)1 << HOST_BITS_PER_HALF_WIDE_INT
) - 1;
1285 /* If the top level routine did not really pass in an overflow, then
1286 just make sure that we never attempt to set it. */
1287 bool needs_overflow
= (overflow
!= 0);
1291 /* If we need to check for overflow, we can only do half wide
1292 multiplies quickly because we need to look at the top bits to
1293 check for the overflow. */
1294 if ((high
|| full
|| needs_overflow
)
1295 && (prec
<= HOST_BITS_PER_HALF_WIDE_INT
))
1297 unsigned HOST_WIDE_INT r
;
1301 o0
= sext_hwi (op1
[0], prec
);
1302 o1
= sext_hwi (op2
[0], prec
);
1306 o0
= zext_hwi (op1
[0], prec
);
1307 o1
= zext_hwi (op2
[0], prec
);
1315 if (HOST_WIDE_INT (r
) != sext_hwi (r
, prec
))
1320 if ((r
>> prec
) != 0)
1324 val
[0] = high
? r
>> prec
: r
;
1328 /* We do unsigned mul and then correct it. */
1329 wi_unpack (u
, (const unsigned HOST_WIDE_INT
*)op1
, op1len
,
1330 half_blocks_needed
, prec
, SIGNED
);
1331 wi_unpack (v
, (const unsigned HOST_WIDE_INT
*)op2
, op2len
,
1332 half_blocks_needed
, prec
, SIGNED
);
1334 /* The 2 is for a full mult. */
1335 memset (r
, 0, half_blocks_needed
* 2
1336 * HOST_BITS_PER_HALF_WIDE_INT
/ CHAR_BIT
);
1338 for (j
= 0; j
< half_blocks_needed
; j
++)
1341 for (i
= 0; i
< half_blocks_needed
; i
++)
1343 t
= ((unsigned HOST_WIDE_INT
)u
[i
] * (unsigned HOST_WIDE_INT
)v
[j
]
1345 r
[i
+ j
] = t
& HALF_INT_MASK
;
1346 k
= t
>> HOST_BITS_PER_HALF_WIDE_INT
;
1348 r
[j
+ half_blocks_needed
] = k
;
1351 /* We did unsigned math above. For signed we must adjust the
1352 product (assuming we need to see that). */
1353 if (sgn
== SIGNED
&& (full
|| high
|| needs_overflow
))
1355 unsigned HOST_WIDE_INT b
;
1356 if (op1
[op1len
-1] < 0)
1359 for (i
= 0; i
< half_blocks_needed
; i
++)
1361 t
= (unsigned HOST_WIDE_INT
)r
[i
+ half_blocks_needed
]
1362 - (unsigned HOST_WIDE_INT
)v
[i
] - b
;
1363 r
[i
+ half_blocks_needed
] = t
& HALF_INT_MASK
;
1364 b
= t
>> (HOST_BITS_PER_WIDE_INT
- 1);
1367 if (op2
[op2len
-1] < 0)
1370 for (i
= 0; i
< half_blocks_needed
; i
++)
1372 t
= (unsigned HOST_WIDE_INT
)r
[i
+ half_blocks_needed
]
1373 - (unsigned HOST_WIDE_INT
)u
[i
] - b
;
1374 r
[i
+ half_blocks_needed
] = t
& HALF_INT_MASK
;
1375 b
= t
>> (HOST_BITS_PER_WIDE_INT
- 1);
1384 /* For unsigned, overflow is true if any of the top bits are set.
1385 For signed, overflow is true if any of the top bits are not equal
1387 if (sgn
== UNSIGNED
)
1391 top
= r
[(half_blocks_needed
) - 1];
1392 top
= SIGN_MASK (top
<< (HOST_BITS_PER_WIDE_INT
/ 2));
1396 for (i
= half_blocks_needed
; i
< half_blocks_needed
* 2; i
++)
1397 if (((HOST_WIDE_INT
)(r
[i
] & mask
)) != top
)
1403 /* compute [2prec] <- [prec] * [prec] */
1404 wi_pack ((unsigned HOST_WIDE_INT
*) val
, r
, 2 * half_blocks_needed
);
1405 return canonize (val
, blocks_needed
* 2, prec
* 2);
1409 /* compute [prec] <- ([prec] * [prec]) >> [prec] */
1410 wi_pack ((unsigned HOST_WIDE_INT
*) val
,
1411 &r
[half_blocks_needed
], half_blocks_needed
);
1412 return canonize (val
, blocks_needed
, prec
);
1416 /* compute [prec] <- ([prec] * [prec]) && ((1 << [prec]) - 1) */
1417 wi_pack ((unsigned HOST_WIDE_INT
*) val
, r
, half_blocks_needed
);
1418 return canonize (val
, blocks_needed
, prec
);
1422 /* Compute the population count of X. */
1424 wi::popcount (const wide_int_ref
&x
)
1429 if (x
.precision
== 0)
1432 /* The high order block is special if it is the last block and the
1433 precision is not an even multiple of HOST_BITS_PER_WIDE_INT. We
1434 have to clear out any ones above the precision before doing
1435 popcount on this block. */
1436 count
= x
.precision
- x
.len
* HOST_BITS_PER_WIDE_INT
;
1437 unsigned int stop
= x
.len
;
1440 count
= popcount_hwi (x
.uhigh () << -count
);
1445 if (x
.sign_mask () >= 0)
1449 for (i
= 0; i
< stop
; ++i
)
1450 count
+= popcount_hwi (x
.val
[i
]);
1455 /* Set VAL to OP0 - OP1. If OVERFLOW is nonnull, record in *OVERFLOW
1456 whether the result overflows when OP0 and OP1 are treated as having
1457 signedness SGN. Return the number of blocks in VAL. */
1459 wi::sub_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*op0
,
1460 unsigned int op0len
, const HOST_WIDE_INT
*op1
,
1461 unsigned int op1len
, unsigned int prec
,
1462 signop sgn
, bool *overflow
)
1464 unsigned HOST_WIDE_INT o0
= 0;
1465 unsigned HOST_WIDE_INT o1
= 0;
1466 unsigned HOST_WIDE_INT x
= 0;
1467 /* We implement subtraction as an in place negate and add. Negation
1468 is just inversion and add 1, so we can do the add of 1 by just
1469 starting the borrow in of the first element at 1. */
1470 unsigned HOST_WIDE_INT borrow
= 0;
1471 unsigned HOST_WIDE_INT old_borrow
= 0;
1473 unsigned HOST_WIDE_INT mask0
, mask1
;
1476 unsigned int len
= MAX (op0len
, op1len
);
1477 mask0
= -top_bit_of (op0
, op0len
, prec
);
1478 mask1
= -top_bit_of (op1
, op1len
, prec
);
1480 /* Subtract all of the explicitly defined elements. */
1481 for (i
= 0; i
< len
; i
++)
1483 o0
= i
< op0len
? (unsigned HOST_WIDE_INT
)op0
[i
] : mask0
;
1484 o1
= i
< op1len
? (unsigned HOST_WIDE_INT
)op1
[i
] : mask1
;
1485 x
= o0
- o1
- borrow
;
1487 old_borrow
= borrow
;
1488 borrow
= borrow
== 0 ? o0
< o1
: o0
<= o1
;
1491 if (len
* HOST_BITS_PER_WIDE_INT
< prec
)
1493 val
[len
] = mask0
- mask1
- borrow
;
1500 unsigned int shift
= -prec
% HOST_BITS_PER_WIDE_INT
;
1503 unsigned HOST_WIDE_INT x
= (o0
^ o1
) & (val
[len
- 1] ^ o0
);
1504 *overflow
= HOST_WIDE_INT (x
<< shift
) < 0;
1508 /* Put the MSB of X and O0 and in the top of the HWI. */
1512 *overflow
= (x
>= o0
);
1514 *overflow
= (x
> o0
);
1518 return canonize (val
, len
, prec
);
1526 /* Compute B_QUOTIENT and B_REMAINDER from B_DIVIDEND/B_DIVISOR. The
1527 algorithm is a small modification of the algorithm in Hacker's
1528 Delight by Warren, which itself is a small modification of Knuth's
1529 algorithm. M is the number of significant elements of U however
1530 there needs to be at least one extra element of B_DIVIDEND
1531 allocated, N is the number of elements of B_DIVISOR. */
1533 divmod_internal_2 (unsigned HOST_HALF_WIDE_INT
*b_quotient
,
1534 unsigned HOST_HALF_WIDE_INT
*b_remainder
,
1535 unsigned HOST_HALF_WIDE_INT
*b_dividend
,
1536 unsigned HOST_HALF_WIDE_INT
*b_divisor
,
1537 unsigned int m
, unsigned int n
)
1539 /* The "digits" are a HOST_HALF_WIDE_INT which the size of half of a
1540 HOST_WIDE_INT and stored in the lower bits of each word. This
1541 algorithm should work properly on both 32 and 64 bit
1543 unsigned HOST_WIDE_INT b
1544 = (unsigned HOST_WIDE_INT
)1 << HOST_BITS_PER_HALF_WIDE_INT
;
1545 unsigned HOST_WIDE_INT qhat
; /* Estimate of quotient digit. */
1546 unsigned HOST_WIDE_INT rhat
; /* A remainder. */
1547 unsigned HOST_WIDE_INT p
; /* Product of two digits. */
1548 HOST_WIDE_INT s
, i
, j
, t
, k
;
1550 /* Single digit divisor. */
1554 for (j
= m
- 1; j
>= 0; j
--)
1556 b_quotient
[j
] = (k
* b
+ b_dividend
[j
])/b_divisor
[0];
1557 k
= ((k
* b
+ b_dividend
[j
])
1558 - ((unsigned HOST_WIDE_INT
)b_quotient
[j
]
1559 * (unsigned HOST_WIDE_INT
)b_divisor
[0]));
1565 s
= clz_hwi (b_divisor
[n
-1]) - HOST_BITS_PER_HALF_WIDE_INT
; /* CHECK clz */
1569 /* Normalize B_DIVIDEND and B_DIVISOR. Unlike the published
1570 algorithm, we can overwrite b_dividend and b_divisor, so we do
1572 for (i
= n
- 1; i
> 0; i
--)
1573 b_divisor
[i
] = (b_divisor
[i
] << s
)
1574 | (b_divisor
[i
-1] >> (HOST_BITS_PER_HALF_WIDE_INT
- s
));
1575 b_divisor
[0] = b_divisor
[0] << s
;
1577 b_dividend
[m
] = b_dividend
[m
-1] >> (HOST_BITS_PER_HALF_WIDE_INT
- s
);
1578 for (i
= m
- 1; i
> 0; i
--)
1579 b_dividend
[i
] = (b_dividend
[i
] << s
)
1580 | (b_dividend
[i
-1] >> (HOST_BITS_PER_HALF_WIDE_INT
- s
));
1581 b_dividend
[0] = b_dividend
[0] << s
;
1585 for (j
= m
- n
; j
>= 0; j
--)
1587 qhat
= (b_dividend
[j
+n
] * b
+ b_dividend
[j
+n
-1]) / b_divisor
[n
-1];
1588 rhat
= (b_dividend
[j
+n
] * b
+ b_dividend
[j
+n
-1]) - qhat
* b_divisor
[n
-1];
1590 if (qhat
>= b
|| qhat
* b_divisor
[n
-2] > b
* rhat
+ b_dividend
[j
+n
-2])
1593 rhat
+= b_divisor
[n
-1];
1598 /* Multiply and subtract. */
1600 for (i
= 0; i
< n
; i
++)
1602 p
= qhat
* b_divisor
[i
];
1603 t
= b_dividend
[i
+j
] - k
- (p
& HALF_INT_MASK
);
1604 b_dividend
[i
+ j
] = t
;
1605 k
= ((p
>> HOST_BITS_PER_HALF_WIDE_INT
)
1606 - (t
>> HOST_BITS_PER_HALF_WIDE_INT
));
1608 t
= b_dividend
[j
+n
] - k
;
1609 b_dividend
[j
+n
] = t
;
1611 b_quotient
[j
] = qhat
;
1616 for (i
= 0; i
< n
; i
++)
1618 t
= (HOST_WIDE_INT
)b_dividend
[i
+j
] + b_divisor
[i
] + k
;
1619 b_dividend
[i
+j
] = t
;
1620 k
= t
>> HOST_BITS_PER_HALF_WIDE_INT
;
1622 b_dividend
[j
+n
] += k
;
1626 for (i
= 0; i
< n
; i
++)
1627 b_remainder
[i
] = (b_dividend
[i
] >> s
)
1628 | (b_dividend
[i
+1] << (HOST_BITS_PER_HALF_WIDE_INT
- s
));
1630 for (i
= 0; i
< n
; i
++)
1631 b_remainder
[i
] = b_dividend
[i
];
1635 /* Divide DIVIDEND by DIVISOR, which have signedness SGN, and truncate
1636 the result. If QUOTIENT is nonnull, store the value of the quotient
1637 there and return the number of blocks in it. The return value is
1638 not defined otherwise. If REMAINDER is nonnull, store the value
1639 of the remainder there and store the number of blocks in
1640 *REMAINDER_LEN. If OFLOW is not null, store in *OFLOW whether
1641 the division overflowed. */
1643 wi::divmod_internal (HOST_WIDE_INT
*quotient
, unsigned int *remainder_len
,
1644 HOST_WIDE_INT
*remainder
, const HOST_WIDE_INT
*dividend
,
1645 unsigned int dividend_len
, unsigned int dividend_prec
,
1646 const HOST_WIDE_INT
*divisor
, unsigned int divisor_len
,
1647 unsigned int divisor_prec
, signop sgn
,
1650 unsigned int dividend_blocks_needed
= 2 * BLOCKS_NEEDED (dividend_prec
);
1651 unsigned int divisor_blocks_needed
= 2 * BLOCKS_NEEDED (divisor_prec
);
1652 unsigned HOST_HALF_WIDE_INT
1653 b_quotient
[4 * MAX_BITSIZE_MODE_ANY_INT
/ HOST_BITS_PER_HALF_WIDE_INT
];
1654 unsigned HOST_HALF_WIDE_INT
1655 b_remainder
[4 * MAX_BITSIZE_MODE_ANY_INT
/ HOST_BITS_PER_HALF_WIDE_INT
];
1656 unsigned HOST_HALF_WIDE_INT
1657 b_dividend
[(4 * MAX_BITSIZE_MODE_ANY_INT
/ HOST_BITS_PER_HALF_WIDE_INT
) + 1];
1658 unsigned HOST_HALF_WIDE_INT
1659 b_divisor
[4 * MAX_BITSIZE_MODE_ANY_INT
/ HOST_BITS_PER_HALF_WIDE_INT
];
1661 HOST_WIDE_INT u0
[WIDE_INT_MAX_ELTS
];
1662 HOST_WIDE_INT u1
[WIDE_INT_MAX_ELTS
];
1663 bool dividend_neg
= false;
1664 bool divisor_neg
= false;
1665 bool overflow
= false;
1667 if (divisor
[0] == 0 && divisor_len
== 1)
1670 /* The smallest signed number / -1 causes overflow. */
1672 && dividend_len
== BLOCKS_NEEDED (dividend_prec
)
1673 && divisor_len
== 1)
1675 HOST_WIDE_INT divisor_low
= divisor
[0];
1676 if (divisor_prec
< HOST_BITS_PER_WIDE_INT
)
1677 divisor_low
= sext_hwi (divisor_low
, divisor_prec
);
1678 unsigned HOST_WIDE_INT dividend_high
= dividend
[dividend_len
- 1];
1679 dividend_high
<<= -dividend_prec
% HOST_BITS_PER_WIDE_INT
;
1680 if (divisor_low
== -1
1681 && HOST_WIDE_INT (dividend_high
) == HOST_WIDE_INT_MIN
)
1683 /* The smallest neg number is 100...00. The high word was
1684 checked above, now check the rest of the words are zero. */
1686 bool all_zero
= true;
1687 for (i
= 0; i
+ 1 < dividend_len
; i
++)
1688 if (dividend
[i
] != 0)
1698 /* If overflow is set, just get out. There will only be grief by
1717 /* Do it on the host if you can. */
1718 if (dividend_prec
<= HOST_BITS_PER_WIDE_INT
1719 && divisor_prec
<= HOST_BITS_PER_WIDE_INT
)
1723 HOST_WIDE_INT o0
= sext_hwi (dividend
[0], dividend_prec
);
1724 HOST_WIDE_INT o1
= sext_hwi (divisor
[0], divisor_prec
);
1727 quotient
[0] = o0
/ o1
;
1730 remainder
[0] = o0
% o1
;
1736 unsigned HOST_WIDE_INT o0
= zext_hwi (dividend
[0], dividend_prec
);
1737 unsigned HOST_WIDE_INT o1
= zext_hwi (divisor
[0], divisor_prec
);
1740 quotient
[0] = o0
/ o1
;
1743 remainder
[0] = o0
% o1
;
1751 /* Make the divisor and dividend positive and remember what we
1755 if (top_bit_of (dividend
, dividend_len
, dividend_prec
))
1757 dividend_len
= wi::sub_large (u0
, zeros
, 1, dividend
, dividend_len
,
1758 dividend_prec
, UNSIGNED
, 0);
1760 dividend_neg
= true;
1762 if (top_bit_of (divisor
, divisor_len
, divisor_prec
))
1764 divisor_len
= wi::sub_large (u1
, zeros
, 1, divisor
, divisor_len
,
1765 divisor_prec
, UNSIGNED
, 0);
1771 wi_unpack (b_dividend
, (const unsigned HOST_WIDE_INT
*)dividend
,
1772 dividend_len
, dividend_blocks_needed
, dividend_prec
, sgn
);
1773 wi_unpack (b_divisor
, (const unsigned HOST_WIDE_INT
*)divisor
,
1774 divisor_len
, divisor_blocks_needed
, divisor_prec
, sgn
);
1776 if (top_bit_of (dividend
, dividend_len
, dividend_prec
) && sgn
== SIGNED
)
1777 m
= dividend_blocks_needed
;
1779 m
= 2 * dividend_len
;
1781 if (top_bit_of (divisor
, divisor_len
, divisor_prec
) && sgn
== SIGNED
)
1782 n
= divisor_blocks_needed
;
1784 n
= 2 * divisor_len
;
1786 /* We need to find the top non zero block of b_divisor. At most the
1787 top two blocks are zero. */
1788 if (b_divisor
[n
- 1] == 0)
1790 if (b_divisor
[n
- 1] == 0)
1793 memset (b_quotient
, 0, sizeof (b_quotient
));
1795 divmod_internal_2 (b_quotient
, b_remainder
, b_dividend
, b_divisor
, m
, n
);
1797 unsigned int quotient_len
= 0;
1800 wi_pack ((unsigned HOST_WIDE_INT
*) quotient
, b_quotient
, m
);
1801 quotient_len
= canonize (quotient
, (m
+ 1) / 2, dividend_prec
);
1802 /* The quotient is neg if exactly one of the divisor or dividend is
1804 if (dividend_neg
!= divisor_neg
)
1805 quotient_len
= wi::sub_large (quotient
, zeros
, 1, quotient
,
1806 quotient_len
, dividend_prec
,
1812 wi_pack ((unsigned HOST_WIDE_INT
*) remainder
, b_remainder
, n
);
1813 *remainder_len
= canonize (remainder
, (n
+ 1) / 2, dividend_prec
);
1814 /* The remainder is always the same sign as the dividend. */
1816 *remainder_len
= wi::sub_large (remainder
, zeros
, 1, remainder
,
1817 *remainder_len
, dividend_prec
,
1821 return quotient_len
;
1825 * Shifting, rotating and extraction.
1828 /* Left shift XVAL by SHIFT and store the result in VAL. Return the
1829 number of blocks in VAL. Both XVAL and VAL have PRECISION bits. */
1831 wi::lshift_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*xval
,
1832 unsigned int xlen
, unsigned int precision
,
1835 /* Split the shift into a whole-block shift and a subblock shift. */
1836 unsigned int skip
= shift
/ HOST_BITS_PER_WIDE_INT
;
1837 unsigned int small_shift
= shift
% HOST_BITS_PER_WIDE_INT
;
1839 /* The whole-block shift fills with zeros. */
1840 unsigned int len
= BLOCKS_NEEDED (precision
);
1841 for (unsigned int i
= 0; i
< skip
; ++i
)
1844 /* It's easier to handle the simple block case specially. */
1845 if (small_shift
== 0)
1846 for (unsigned int i
= skip
; i
< len
; ++i
)
1847 val
[i
] = safe_uhwi (xval
, xlen
, i
- skip
);
1850 /* The first unfilled output block is a left shift of the first
1851 block in XVAL. The other output blocks contain bits from two
1852 consecutive input blocks. */
1853 unsigned HOST_WIDE_INT carry
= 0;
1854 for (unsigned int i
= skip
; i
< len
; ++i
)
1856 unsigned HOST_WIDE_INT x
= safe_uhwi (xval
, xlen
, i
- skip
);
1857 val
[i
] = (x
<< small_shift
) | carry
;
1858 carry
= x
>> (-small_shift
% HOST_BITS_PER_WIDE_INT
);
1861 return canonize (val
, len
, precision
);
1864 /* Right shift XVAL by SHIFT and store the result in VAL. Return the
1865 number of blocks in VAL. The input has XPRECISION bits and the
1866 output has XPRECISION - SHIFT bits. */
1868 rshift_large_common (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*xval
,
1869 unsigned int xlen
, unsigned int xprecision
,
1872 /* Split the shift into a whole-block shift and a subblock shift. */
1873 unsigned int skip
= shift
/ HOST_BITS_PER_WIDE_INT
;
1874 unsigned int small_shift
= shift
% HOST_BITS_PER_WIDE_INT
;
1876 /* Work out how many blocks are needed to store the significant bits
1877 (excluding the upper zeros or signs). */
1878 unsigned int len
= BLOCKS_NEEDED (xprecision
- shift
);
1880 /* It's easier to handle the simple block case specially. */
1881 if (small_shift
== 0)
1882 for (unsigned int i
= 0; i
< len
; ++i
)
1883 val
[i
] = safe_uhwi (xval
, xlen
, i
+ skip
);
1886 /* Each output block but the last is a combination of two input blocks.
1887 The last block is a right shift of the last block in XVAL. */
1888 unsigned HOST_WIDE_INT curr
= safe_uhwi (xval
, xlen
, skip
);
1889 for (unsigned int i
= 0; i
< len
; ++i
)
1891 val
[i
] = curr
>> small_shift
;
1892 curr
= safe_uhwi (xval
, xlen
, i
+ skip
+ 1);
1893 val
[i
] |= curr
<< (-small_shift
% HOST_BITS_PER_WIDE_INT
);
1899 /* Logically right shift XVAL by SHIFT and store the result in VAL.
1900 Return the number of blocks in VAL. XVAL has XPRECISION bits and
1901 VAL has PRECISION bits. */
1903 wi::lrshift_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*xval
,
1904 unsigned int xlen
, unsigned int xprecision
,
1905 unsigned int precision
, unsigned int shift
)
1907 unsigned int len
= rshift_large_common (val
, xval
, xlen
, xprecision
, shift
);
1909 /* The value we just created has precision XPRECISION - SHIFT.
1910 Zero-extend it to wider precisions. */
1911 if (precision
> xprecision
- shift
)
1913 unsigned int small_prec
= (xprecision
- shift
) % HOST_BITS_PER_WIDE_INT
;
1915 val
[len
- 1] = zext_hwi (val
[len
- 1], small_prec
);
1916 else if (val
[len
- 1] < 0)
1918 /* Add a new block with a zero. */
1923 return canonize (val
, len
, precision
);
1926 /* Arithmetically right shift XVAL by SHIFT and store the result in VAL.
1927 Return the number of blocks in VAL. XVAL has XPRECISION bits and
1928 VAL has PRECISION bits. */
1930 wi::arshift_large (HOST_WIDE_INT
*val
, const HOST_WIDE_INT
*xval
,
1931 unsigned int xlen
, unsigned int xprecision
,
1932 unsigned int precision
, unsigned int shift
)
1934 unsigned int len
= rshift_large_common (val
, xval
, xlen
, xprecision
, shift
);
1936 /* The value we just created has precision XPRECISION - SHIFT.
1937 Sign-extend it to wider types. */
1938 if (precision
> xprecision
- shift
)
1940 unsigned int small_prec
= (xprecision
- shift
) % HOST_BITS_PER_WIDE_INT
;
1942 val
[len
- 1] = sext_hwi (val
[len
- 1], small_prec
);
1944 return canonize (val
, len
, precision
);
1947 /* Return the number of leading (upper) zeros in X. */
1949 wi::clz (const wide_int_ref
&x
)
1951 /* Calculate how many bits there above the highest represented block. */
1952 int count
= x
.precision
- x
.len
* HOST_BITS_PER_WIDE_INT
;
1954 unsigned HOST_WIDE_INT high
= x
.uhigh ();
1956 /* The upper -COUNT bits of HIGH are not part of the value.
1958 high
= (high
<< -count
) >> -count
;
1959 else if (x
.sign_mask () < 0)
1960 /* The upper bit is set, so there are no leading zeros. */
1963 /* Check whether the value is zero. */
1964 if (high
== 0 && x
.len
== 1)
1965 return clz_zero (x
.precision
);
1967 /* We don't need to look below HIGH. Either HIGH is nonzero,
1968 or the top bit of the block below is nonzero; clz_hwi is
1969 HOST_BITS_PER_WIDE_INT in the latter case. */
1970 return count
+ clz_hwi (high
);
1973 /* Return the number of redundant sign bits in X. (That is, the number
1974 of bits immediately below the sign bit that have the same value as
1977 wi::clrsb (const wide_int_ref
&x
)
1979 /* Calculate how many bits there above the highest represented block. */
1980 int count
= x
.precision
- x
.len
* HOST_BITS_PER_WIDE_INT
;
1982 unsigned HOST_WIDE_INT high
= x
.uhigh ();
1983 unsigned HOST_WIDE_INT mask
= -1;
1986 /* The upper -COUNT bits of HIGH are not part of the value.
1987 Clear them from both MASK and HIGH. */
1992 /* If the top bit is 1, count the number of leading 1s. If the top
1993 bit is zero, count the number of leading zeros. */
1994 if (high
> mask
/ 2)
1997 /* There are no sign bits below the top block, so we don't need to look
1998 beyond HIGH. Note that clz_hwi is HOST_BITS_PER_WIDE_INT when
2000 return count
+ clz_hwi (high
) - 1;
2003 /* Return the number of trailing (lower) zeros in X. */
2005 wi::ctz (const wide_int_ref
&x
)
2007 if (x
.len
== 1 && x
.ulow () == 0)
2008 return ctz_zero (x
.precision
);
2010 /* Having dealt with the zero case, there must be a block with a
2011 nonzero bit. We don't care about the bits above the first 1. */
2013 while (x
.val
[i
] == 0)
2015 return i
* HOST_BITS_PER_WIDE_INT
+ ctz_hwi (x
.val
[i
]);
2018 /* If X is an exact power of 2, return the base-2 logarithm, otherwise
2021 wi::exact_log2 (const wide_int_ref
&x
)
2023 /* 0-precision values can only hold 0. */
2024 if (x
.precision
== 0)
2027 /* Reject cases where there are implicit -1 blocks above HIGH. */
2028 if (x
.len
* HOST_BITS_PER_WIDE_INT
< x
.precision
&& x
.sign_mask () < 0)
2031 /* Set CRUX to the index of the entry that should be nonzero.
2032 If the top block is zero then the next lowest block (if any)
2033 must have the high bit set. */
2034 unsigned int crux
= x
.len
- 1;
2035 if (crux
> 0 && x
.val
[crux
] == 0)
2038 /* Check that all lower blocks are zero. */
2039 for (unsigned int i
= 0; i
< crux
; ++i
)
2043 /* Get a zero-extended form of block CRUX. */
2044 unsigned HOST_WIDE_INT hwi
= x
.val
[crux
];
2045 if ((crux
+ 1) * HOST_BITS_PER_WIDE_INT
> x
.precision
)
2046 hwi
= zext_hwi (hwi
, x
.precision
% HOST_BITS_PER_WIDE_INT
);
2048 /* Now it's down to whether HWI is a power of 2. */
2049 int res
= ::exact_log2 (hwi
);
2051 res
+= crux
* HOST_BITS_PER_WIDE_INT
;
2055 /* Return the base-2 logarithm of X, rounding down. Return -1 if X is 0. */
2057 wi::floor_log2 (const wide_int_ref
&x
)
2059 return x
.precision
- 1 - clz (x
);
2062 /* Return the index of the first (lowest) set bit in X, counting from 1.
2063 Return 0 if X is 0. */
2065 wi::ffs (const wide_int_ref
&x
)
2067 return eq_p (x
, 0) ? 0 : ctz (x
) + 1;
2070 /* Return true if sign-extending X to have precision PRECISION would give
2071 the minimum signed value at that precision. */
2073 wi::only_sign_bit_p (const wide_int_ref
&x
, unsigned int precision
)
2075 return ctz (x
) + 1 == int (precision
);
2078 /* Return true if X represents the minimum signed value. */
2080 wi::only_sign_bit_p (const wide_int_ref
&x
)
2082 return only_sign_bit_p (x
, x
.precision
);
2086 * Private utilities.
2089 void gt_ggc_mx (widest_int
*) { }
2090 void gt_pch_nx (widest_int
*, void (*) (void *, void *), void *) { }
2091 void gt_pch_nx (widest_int
*) { }
2094 * Private debug printing routines.
2096 #ifdef DEBUG_WIDE_INT
2097 /* The debugging routines print results of wide operations into the
2098 dump files of the respective passes in which they were called. */
2100 dumpa (const HOST_WIDE_INT
*val
, unsigned int len
, unsigned int prec
, char *buf
)
2104 const char * sep
= "";
2106 l
= sprintf (buf
, "[%d (", prec
);
2107 for (i
= len
- 1; i
>= 0; i
--)
2109 l
+= sprintf (&buf
[l
], "%s" HOST_WIDE_INT_PRINT_HEX
, sep
, val
[i
]);
2113 gcc_assert (len
!= 0);
2115 l
+= sprintf (&buf
[l
], ")]");
2117 gcc_assert (l
< MAX_SIZE
);
2125 /* The debugging routines print results of wide operations into the
2126 dump files of the respective passes in which they were called. */
2128 wide_int_ro::dump (char* buf
) const
2132 const char * sep
= "";
2134 l
= sprintf (buf
, "[%d (", precision
);
2135 for (i
= len
- 1; i
>= 0; i
--)
2137 l
+= sprintf (&buf
[l
], "%s" HOST_WIDE_INT_PRINT_HEX
, sep
, val
[i
]);
2141 gcc_assert (len
!= 0);
2143 l
+= sprintf (&buf
[l
], ")]");
2145 gcc_assert (l
< MAX_SIZE
);