/* Operations with long integers.
   Copyright (C) 2006-2013 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"   /* For SHIFT_COUNT_TRUNCATED.  */
#include "tree.h"
static int add_double_with_sign (unsigned HOST_WIDE_INT, HOST_WIDE_INT,
                                 unsigned HOST_WIDE_INT, HOST_WIDE_INT,
                                 unsigned HOST_WIDE_INT *, HOST_WIDE_INT *,
                                 bool);
#define add_double(l1,h1,l2,h2,lv,hv) \
  add_double_with_sign (l1, h1, l2, h2, lv, hv, false)
static int neg_double (unsigned HOST_WIDE_INT, HOST_WIDE_INT,
                       unsigned HOST_WIDE_INT *, HOST_WIDE_INT *);
static int mul_double_with_sign (unsigned HOST_WIDE_INT, HOST_WIDE_INT,
                                 unsigned HOST_WIDE_INT, HOST_WIDE_INT,
                                 unsigned HOST_WIDE_INT *, HOST_WIDE_INT *,
                                 bool);

static int mul_double_wide_with_sign (unsigned HOST_WIDE_INT, HOST_WIDE_INT,
                                      unsigned HOST_WIDE_INT, HOST_WIDE_INT,
                                      unsigned HOST_WIDE_INT *, HOST_WIDE_INT *,
                                      unsigned HOST_WIDE_INT *, HOST_WIDE_INT *,
                                      bool);

#define mul_double(l1,h1,l2,h2,lv,hv) \
  mul_double_with_sign (l1, h1, l2, h2, lv, hv, false)
static void lshift_double (unsigned HOST_WIDE_INT, HOST_WIDE_INT,
                           HOST_WIDE_INT, unsigned int,
                           unsigned HOST_WIDE_INT *, HOST_WIDE_INT *, bool);

static int div_and_round_double (unsigned, int, unsigned HOST_WIDE_INT,
                                 HOST_WIDE_INT, unsigned HOST_WIDE_INT,
                                 HOST_WIDE_INT, unsigned HOST_WIDE_INT *,
                                 HOST_WIDE_INT *, unsigned HOST_WIDE_INT *,
                                 HOST_WIDE_INT *);
/* We know that A1 + B1 = SUM1, using 2's complement arithmetic and ignoring
   overflow.  Suppose A, B and SUM have the same respective signs as A1, B1,
   and SUM1.  Then this yields nonzero if overflow occurred during the
   addition.

   Overflow occurs if A and B have the same sign, but A and SUM differ in
   sign.  Use `^' to test whether signs differ, and `< 0' to isolate the
   sign.  */
#define OVERFLOW_SUM_SIGN(a, b, sum) ((~((a) ^ (b)) & ((a) ^ (sum))) < 0)
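
/* Illustrative sketch (not part of the original sources): if A and B are both
   the most positive HOST_WIDE_INT, their two's complement sum wraps to -2;
   ~(a ^ b) then has the sign bit set (equal signs) and (a ^ sum) has the sign
   bit set (a and sum differ in sign), so the macro yields nonzero.  Adding
   values of opposite signs can never overflow, and indeed ~(a ^ b) then has a
   clear sign bit, forcing the result to zero.  */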
/* To do constant folding on INTEGER_CST nodes requires two-word arithmetic.
   We do that by representing the two-word integer in 4 words, with only
   HOST_BITS_PER_WIDE_INT / 2 bits stored in each word, as a positive
   number.  The value of the word is LOWPART + HIGHPART * BASE.  */

#define LOWPART(x) \
  ((x) & (((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)) - 1))
#define HIGHPART(x) \
  ((unsigned HOST_WIDE_INT) (x) >> HOST_BITS_PER_WIDE_INT / 2)
#define BASE ((unsigned HOST_WIDE_INT) 1 << HOST_BITS_PER_WIDE_INT / 2)
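
/* A worked example of the representation, assuming HOST_BITS_PER_WIDE_INT is
   64 (an assumption made only for illustration): BASE is then 1 << 32, and

     LOWPART (0x123456789abcdef0)  == 0x9abcdef0
     HIGHPART (0x123456789abcdef0) == 0x12345678

   so each HOST_WIDE_INT splits into two 32-bit digits and a doubleword value
   becomes four digits; the product of any two digits fits in a single
   HOST_WIDE_INT, which is what the multiplication routine below relies on.  */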
/* Unpack a two-word integer into 4 words.
   LOW and HI are the integer, as two `HOST_WIDE_INT' pieces.
   WORDS points to the array of HOST_WIDE_INTs.  */

static void
encode (HOST_WIDE_INT *words, unsigned HOST_WIDE_INT low, HOST_WIDE_INT hi)
{
  words[0] = LOWPART (low);
  words[1] = HIGHPART (low);
  words[2] = LOWPART (hi);
  words[3] = HIGHPART (hi);
}
/* Pack an array of 4 words into a two-word integer.
   WORDS points to the array of words.
   The integer is stored into *LOW and *HI as two `HOST_WIDE_INT' pieces.  */

static void
decode (HOST_WIDE_INT *words, unsigned HOST_WIDE_INT *low,
        HOST_WIDE_INT *hi)
{
  *low = words[0] + words[1] * BASE;
  *hi = words[2] + words[3] * BASE;
}
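
/* Round-trip sketch (illustration only, not used anywhere in GCC):

     HOST_WIDE_INT words[4];
     unsigned HOST_WIDE_INT lo;
     HOST_WIDE_INT hi;

     encode (words, 0xdeadbeef, 7);   // split into four half-word digits
     decode (words, &lo, &hi);        // lo == 0xdeadbeef, hi == 7 again

   decode re-assembles each HOST_WIDE_INT as LOWPART + HIGHPART * BASE, so it
   undoes encode exactly as long as every digit stays below BASE.  */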
/* Add two doubleword integers with doubleword result.
   Return nonzero if the operation overflows according to UNSIGNED_P.
   Each argument is given as two `HOST_WIDE_INT' pieces.
   One argument is L1 and H1; the other, L2 and H2.
   The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

static int
add_double_with_sign (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
                      unsigned HOST_WIDE_INT l2, HOST_WIDE_INT h2,
                      unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv,
                      bool unsigned_p)
{
  unsigned HOST_WIDE_INT l;
  HOST_WIDE_INT h;

  l = l1 + l2;
  h = (HOST_WIDE_INT) ((unsigned HOST_WIDE_INT) h1
                       + (unsigned HOST_WIDE_INT) h2
                       + (l < l1));

  *lv = l;
  *hv = h;

  if (unsigned_p)
    return ((unsigned HOST_WIDE_INT) h < (unsigned HOST_WIDE_INT) h1
            || (h == h1
                && l < l1));
  else
    return OVERFLOW_SUM_SIGN (h1, h2, h);
}
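
/* Usage sketch (illustrative only, not part of the original file): a carry
   out of the low word is not by itself a doubleword overflow.

     unsigned HOST_WIDE_INT lv;
     HOST_WIDE_INT hv;
     int ovf = add_double (~(unsigned HOST_WIDE_INT) 0, 0, 1, 0, &lv, &hv);
     // lv == 0, hv == 1, ovf == 0: the carry simply propagated into *HV.

   Overflow is reported only when the high word wraps (UNSIGNED_P true) or
   when the signs of H1, H2 and the resulting high word are inconsistent
   (UNSIGNED_P false, via OVERFLOW_SUM_SIGN).  */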
/* Negate a doubleword integer with doubleword result.
   Return nonzero if the operation overflows, assuming it's signed.
   The argument is given as two `HOST_WIDE_INT' pieces in L1 and H1.
   The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

static int
neg_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
            unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv)
{
  if (l1 == 0)
    {
      *lv = 0;
      *hv = - h1;
      return (*hv & h1) < 0;
    }
  else
    {
      *lv = -l1;
      *hv = ~h1;
      return 0;
    }
}
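
/* A note on the identity used here (illustration only): in two's complement
   -X == ~X + 1.  When L1 is nonzero the "+ 1" is absorbed by the low word
   (*LV = -L1, *HV = ~H1) and negation cannot overflow; when L1 is zero the
   carry reaches the high word (*HV = -H1), and the only value whose negation
   overflows is the most negative doubleword, which negates to itself; hence
   the (*hv & h1) < 0 test.  */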
/* Multiply two doubleword integers with doubleword result.
   Return nonzero if the operation overflows according to UNSIGNED_P.
   Each argument is given as two `HOST_WIDE_INT' pieces.
   One argument is L1 and H1; the other, L2 and H2.
   The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

static int
mul_double_with_sign (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
                      unsigned HOST_WIDE_INT l2, HOST_WIDE_INT h2,
                      unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv,
                      bool unsigned_p)
{
  unsigned HOST_WIDE_INT toplow;
  HOST_WIDE_INT tophigh;

  return mul_double_wide_with_sign (l1, h1, l2, h2,
                                    lv, hv, &toplow, &tophigh,
                                    unsigned_p);
}
static int
mul_double_wide_with_sign (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
                           unsigned HOST_WIDE_INT l2, HOST_WIDE_INT h2,
                           unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv,
                           unsigned HOST_WIDE_INT *lw, HOST_WIDE_INT *hw,
                           bool unsigned_p)
{
  HOST_WIDE_INT arg1[4];
  HOST_WIDE_INT arg2[4];
  HOST_WIDE_INT prod[4 * 2];
  unsigned HOST_WIDE_INT carry;
  int i, j, k;
  unsigned HOST_WIDE_INT neglow;
  HOST_WIDE_INT neghigh;

  encode (arg1, l1, h1);
  encode (arg2, l2, h2);

  memset (prod, 0, sizeof prod);

  for (i = 0; i < 4; i++)
    {
      carry = 0;
      for (j = 0; j < 4; j++)
        {
          k = i + j;
          /* This product is <= 0xFFFE0001, the sum <= 0xFFFF0000.  */
          carry += (unsigned HOST_WIDE_INT) arg1[i] * arg2[j];
          /* Since prod[p] < 0xFFFF, this sum <= 0xFFFFFFFF.  */
          carry += prod[k];
          prod[k] = LOWPART (carry);
          carry = HIGHPART (carry);
        }
      prod[i + 4] = carry;
    }

  decode (prod, lv, hv);
  decode (prod + 4, lw, hw);

  /* Unsigned overflow is immediate.  */
  if (unsigned_p)
    return (*lw | *hw) != 0;

  /* Check for signed overflow by calculating the signed representation of the
     top half of the result; it should agree with the low half's sign bit.  */
  if (h1 < 0)
    {
      neg_double (l2, h2, &neglow, &neghigh);
      add_double (neglow, neghigh, *lw, *hw, lw, hw);
    }
  if (h2 < 0)
    {
      neg_double (l1, h1, &neglow, &neghigh);
      add_double (neglow, neghigh, *lw, *hw, lw, hw);
    }
  return (*hv < 0 ? ~(*lw & *hw) : *lw | *hw) != 0;
}
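
/* Digit-scheme sketch (illustration only): writing each operand in base
   B == BASE as

     a = a3*B^3 + a2*B^2 + a1*B + a0
     b = b3*B^3 + b2*B^2 + b1*B + b0,

   the loops above accumulate every partial product a[i] * b[j] into
   prod[i + j], propagating carries one digit at a time.  prod[0..3] decode
   into the low doubleword (*LV, *HV) and prod[4..7] into the high doubleword
   (*LW, *HW), which is why a nonzero top half signals unsigned overflow.  */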
/* Shift the doubleword integer in L1, H1 right by COUNT places
   keeping only PREC bits of result.  ARITH nonzero specifies
   arithmetic shifting; otherwise use logical shift.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

static void
rshift_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
               unsigned HOST_WIDE_INT count, unsigned int prec,
               unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv,
               bool arith)
{
  unsigned HOST_WIDE_INT signmask;

  signmask = (arith
              ? -((unsigned HOST_WIDE_INT) h1 >> (HOST_BITS_PER_WIDE_INT - 1))
              : 0);

  if (SHIFT_COUNT_TRUNCATED)
    count %= prec;

  if (count >= HOST_BITS_PER_DOUBLE_INT)
    {
      /* Shifting by the host word size is undefined according to the
         ANSI standard, so we must handle this as a special case.  */
      *hv = 0;
      *lv = 0;
    }
  else if (count >= HOST_BITS_PER_WIDE_INT)
    {
      *hv = 0;
      *lv = (unsigned HOST_WIDE_INT) h1 >> (count - HOST_BITS_PER_WIDE_INT);
    }
  else
    {
      *hv = (unsigned HOST_WIDE_INT) h1 >> count;
      *lv = ((l1 >> count)
             | ((unsigned HOST_WIDE_INT) h1
                << (HOST_BITS_PER_WIDE_INT - count - 1) << 1));
    }

  /* Zero / sign extend all bits that are beyond the precision.  */

  if (count >= prec)
    {
      *hv = signmask;
      *lv = signmask;
    }
  else if ((prec - count) >= HOST_BITS_PER_DOUBLE_INT)
    ;
  else if ((prec - count) >= HOST_BITS_PER_WIDE_INT)
    {
      *hv &= ~((HOST_WIDE_INT) (-1) << (prec - count - HOST_BITS_PER_WIDE_INT));
      *hv |= signmask << (prec - count - HOST_BITS_PER_WIDE_INT);
    }
  else
    {
      *hv = signmask;
      *lv &= ~((unsigned HOST_WIDE_INT) (-1) << (prec - count));
      *lv |= signmask << (prec - count);
    }
}
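
/* Usage sketch (illustrative, assuming HOST_BITS_PER_WIDE_INT == 64):

     unsigned HOST_WIDE_INT lv;
     HOST_WIDE_INT hv;

     rshift_double (0, -1, 4, 128, &lv, &hv, true);   // arithmetic
     // hv == -1, lv == 0xf000000000000000: the sign bit is replicated.

     rshift_double (0, -1, 4, 128, &lv, &hv, false);  // logical
     // hv == 0x0fffffffffffffff, lv == 0xf000000000000000: zeros shift in.
  */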
/* Shift the doubleword integer in L1, H1 left by COUNT places
   keeping only PREC bits of result.
   Shift right if COUNT is negative.
   ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

static void
lshift_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
               HOST_WIDE_INT count, unsigned int prec,
               unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv, bool arith)
{
  unsigned HOST_WIDE_INT signmask;

  if (count < 0)
    {
      rshift_double (l1, h1, absu_hwi (count), prec, lv, hv, arith);
      return;
    }

  if (SHIFT_COUNT_TRUNCATED)
    count %= prec;

  if (count >= HOST_BITS_PER_DOUBLE_INT)
    {
      /* Shifting by the host word size is undefined according to the
         ANSI standard, so we must handle this as a special case.  */
      *hv = 0;
      *lv = 0;
    }
  else if (count >= HOST_BITS_PER_WIDE_INT)
    {
      *hv = l1 << (count - HOST_BITS_PER_WIDE_INT);
      *lv = 0;
    }
  else
    {
      *hv = (((unsigned HOST_WIDE_INT) h1 << count)
             | (l1 >> (HOST_BITS_PER_WIDE_INT - count - 1) >> 1));
      *lv = l1 << count;
    }

  /* Sign extend all bits that are beyond the precision.  */

  signmask = -((prec > HOST_BITS_PER_WIDE_INT
                ? ((unsigned HOST_WIDE_INT) *hv
                   >> (prec - HOST_BITS_PER_WIDE_INT - 1))
                : (*lv >> (prec - 1))) & 1);

  if (prec >= HOST_BITS_PER_DOUBLE_INT)
    ;
  else if (prec >= HOST_BITS_PER_WIDE_INT)
    {
      *hv &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
      *hv |= signmask << (prec - HOST_BITS_PER_WIDE_INT);
    }
  else
    {
      *hv = signmask;
      *lv &= ~((unsigned HOST_WIDE_INT) (-1) << prec);
      *lv |= signmask << prec;
    }
}
/* Divide doubleword integer LNUM, HNUM by doubleword integer LDEN, HDEN
   for a quotient (stored in *LQUO, *HQUO) and remainder (in *LREM, *HREM).
   CODE is a tree code for a kind of division, one of
   TRUNC_DIV_EXPR, FLOOR_DIV_EXPR, CEIL_DIV_EXPR, ROUND_DIV_EXPR
   or EXACT_DIV_EXPR.
   It controls how the quotient is rounded to an integer.
   Return nonzero if the operation overflows.
   UNS nonzero says do unsigned division.  */
static int
div_and_round_double (unsigned code, int uns,
                      /* num == numerator == dividend */
                      unsigned HOST_WIDE_INT lnum_orig,
                      HOST_WIDE_INT hnum_orig,
                      /* den == denominator == divisor */
                      unsigned HOST_WIDE_INT lden_orig,
                      HOST_WIDE_INT hden_orig,
                      unsigned HOST_WIDE_INT *lquo,
                      HOST_WIDE_INT *hquo, unsigned HOST_WIDE_INT *lrem,
                      HOST_WIDE_INT *hrem)
{
  int quo_neg = 0;
  HOST_WIDE_INT num[4 + 1];     /* extra element for scaling.  */
  HOST_WIDE_INT den[4], quo[4];
  int i, j;
  unsigned HOST_WIDE_INT work;
  unsigned HOST_WIDE_INT carry = 0;
  unsigned HOST_WIDE_INT lnum = lnum_orig;
  HOST_WIDE_INT hnum = hnum_orig;
  unsigned HOST_WIDE_INT lden = lden_orig;
  HOST_WIDE_INT hden = hden_orig;
  int overflow = 0;

  if (hden == 0 && lden == 0)
    overflow = 1, lden = 1;

  /* Calculate quotient sign and convert operands to unsigned.  */
  if (!uns)
    {
      if (hnum < 0)
        {
          quo_neg = ~ quo_neg;
          /* (minimum integer) / (-1) is the only overflow case.  */
          if (neg_double (lnum, hnum, &lnum, &hnum)
              && ((HOST_WIDE_INT) lden & hden) == -1)
            overflow = 1;
        }
      if (hden < 0)
        {
          quo_neg = ~ quo_neg;
          neg_double (lden, hden, &lden, &hden);
        }
    }

  if (hnum == 0 && hden == 0)
    {                           /* single precision */
      *hquo = *hrem = 0;
      /* This unsigned division rounds toward zero.  */
      *lquo = lnum / lden;
      goto finish_up;
    }

  if (hnum == 0)
    {                           /* trivial case: dividend < divisor */
      /* hden != 0 already checked.  */
      *hquo = *lquo = 0;
      *hrem = hnum;
      *lrem = lnum;
      goto finish_up;
    }

  memset (quo, 0, sizeof quo);

  memset (num, 0, sizeof num);  /* to zero 9th element */
  memset (den, 0, sizeof den);

  encode (num, lnum, hnum);
  encode (den, lden, hden);

  /* Special code for when the divisor < BASE.  */
  if (hden == 0 && lden < (unsigned HOST_WIDE_INT) BASE)
    {
      /* hnum != 0 already checked.  */
      for (i = 4 - 1; i >= 0; i--)
        {
          work = num[i] + carry * BASE;
          quo[i] = work / lden;
          carry = work % lden;
        }
    }
  else
    {
      /* Full double precision division,
         with thanks to Don Knuth's "Seminumerical Algorithms".  */
      int num_hi_sig, den_hi_sig;
      unsigned HOST_WIDE_INT quo_est, scale;

      /* Find the highest nonzero divisor digit.  */
      for (i = 4 - 1;; i--)
        if (den[i] != 0)
          {
            den_hi_sig = i;
            break;
          }

      /* Insure that the first digit of the divisor is at least BASE/2.
         This is required by the quotient digit estimation algorithm.  */

      scale = BASE / (den[den_hi_sig] + 1);
      if (scale > 1)
        {               /* scale divisor and dividend */
          carry = 0;
          for (i = 0; i <= 4 - 1; i++)
            {
              work = (num[i] * scale) + carry;
              num[i] = LOWPART (work);
              carry = HIGHPART (work);
            }

          num[4] = carry;
          carry = 0;
          for (i = 0; i <= 4 - 1; i++)
            {
              work = (den[i] * scale) + carry;
              den[i] = LOWPART (work);
              carry = HIGHPART (work);
              if (den[i] != 0) den_hi_sig = i;
            }
        }

      num_hi_sig = 4;

      for (i = num_hi_sig - den_hi_sig - 1; i >= 0; i--)
        {
          /* Guess the next quotient digit, quo_est, by dividing the first
             two remaining dividend digits by the high order quotient digit.
             quo_est is never low and is at most 2 high.  */
          unsigned HOST_WIDE_INT tmp;

          num_hi_sig = i + den_hi_sig + 1;
          work = num[num_hi_sig] * BASE + num[num_hi_sig - 1];
          if (num[num_hi_sig] != den[den_hi_sig])
            quo_est = work / den[den_hi_sig];
          else
            quo_est = BASE - 1;

          /* Refine quo_est so it's usually correct, and at most one high.  */
          tmp = work - quo_est * den[den_hi_sig];
          if (tmp < BASE
              && (den[den_hi_sig - 1] * quo_est
                  > (tmp * BASE + num[num_hi_sig - 2])))
            quo_est--;

          /* Try QUO_EST as the quotient digit, by multiplying the
             divisor by QUO_EST and subtracting from the remaining dividend.
             Keep in mind that QUO_EST is the I - 1st digit.  */

          carry = 0;
          for (j = 0; j <= den_hi_sig; j++)
            {
              work = quo_est * den[j] + carry;
              carry = HIGHPART (work);
              work = num[i + j] - LOWPART (work);
              num[i + j] = LOWPART (work);
              carry += HIGHPART (work) != 0;
            }

          /* If quo_est was high by one, then num[i] went negative and
             we need to correct things.  */
          if (num[num_hi_sig] < (HOST_WIDE_INT) carry)
            {
              quo_est--;
              carry = 0;                /* add divisor back in */
              for (j = 0; j <= den_hi_sig; j++)
                {
                  work = num[i + j] + den[j] + carry;
                  carry = HIGHPART (work);
                  num[i + j] = LOWPART (work);
                }

              num[num_hi_sig] += carry;
            }

          /* Store the quotient digit.  */
          quo[i] = quo_est;
        }
    }

  decode (quo, lquo, hquo);

 finish_up:
  /* If result is negative, make it so.  */
  if (quo_neg)
    neg_double (*lquo, *hquo, lquo, hquo);

  /* Compute trial remainder:  rem = num - (quo * den)  */
  mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
  neg_double (*lrem, *hrem, lrem, hrem);
  add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);

  switch (code)
    {
    case TRUNC_DIV_EXPR:
    case TRUNC_MOD_EXPR:        /* round toward zero */
    case EXACT_DIV_EXPR:        /* for this one, it shouldn't matter */
      return overflow;

    case FLOOR_DIV_EXPR:
    case FLOOR_MOD_EXPR:        /* round toward negative infinity */
      if (quo_neg && (*lrem != 0 || *hrem != 0))   /* ratio < 0 && rem != 0 */
        {
          /* quo = quo - 1;  */
          add_double (*lquo, *hquo, (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1,
                      lquo, hquo);
        }
      else
        return overflow;
      break;

    case CEIL_DIV_EXPR:
    case CEIL_MOD_EXPR:         /* round toward positive infinity */
      if (!quo_neg && (*lrem != 0 || *hrem != 0))  /* ratio > 0 && rem != 0 */
        {
          add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
                      lquo, hquo);
        }
      else
        return overflow;
      break;

    case ROUND_DIV_EXPR:
    case ROUND_MOD_EXPR:        /* round to closest integer */
      {
        unsigned HOST_WIDE_INT labs_rem = *lrem;
        HOST_WIDE_INT habs_rem = *hrem;
        unsigned HOST_WIDE_INT labs_den = lden, ltwice;
        HOST_WIDE_INT habs_den = hden, htwice;

        /* Get absolute values.  */
        if (*hrem < 0)
          neg_double (*lrem, *hrem, &labs_rem, &habs_rem);
        if (hden < 0)
          neg_double (lden, hden, &labs_den, &habs_den);

        /* If (2 * abs (lrem) >= abs (lden)), adjust the quotient.  */
        mul_double ((HOST_WIDE_INT) 2, (HOST_WIDE_INT) 0,
                    labs_rem, habs_rem, &ltwice, &htwice);

        if (((unsigned HOST_WIDE_INT) habs_den
             < (unsigned HOST_WIDE_INT) htwice)
            || (((unsigned HOST_WIDE_INT) habs_den
                 == (unsigned HOST_WIDE_INT) htwice)
                && (labs_den <= ltwice)))
          {
            if (*hquo < 0)
              /* quo = quo - 1;  */
              add_double (*lquo, *hquo,
                          (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1, lquo, hquo);
            else
              /* quo = quo + 1;  */
              add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
                          lquo, hquo);
          }
        else
          return overflow;
      }
      break;

    default:
      gcc_unreachable ();
    }

  /* Compute true remainder:  rem = num - (quo * den)  */
  mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
  neg_double (*lrem, *hrem, lrem, hrem);
  add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);
  return overflow;
}
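
/* A worked example of the rounding modes (illustration only): dividing -7 by
   2 produces a trial quotient of -3 and a trial remainder of -1, which the
   switch above then adjusts as follows.

     TRUNC_DIV_EXPR  ->  -3   (round toward zero)
     FLOOR_DIV_EXPR  ->  -4   (round toward negative infinity)
     CEIL_DIV_EXPR   ->  -3   (round toward positive infinity)
     ROUND_DIV_EXPR  ->  -4   (round to closest; here 2 * |rem| >= |den|)

   The *_MOD_EXPR codes select the same quotient, and the matching remainder
   is recomputed at the end of the function.  */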
/* Returns mask for PREC bits.  */

double_int
double_int::mask (unsigned prec)
{
  unsigned HOST_WIDE_INT m;
  double_int mask;

  if (prec > HOST_BITS_PER_WIDE_INT)
    {
      prec -= HOST_BITS_PER_WIDE_INT;
      m = ((unsigned HOST_WIDE_INT) 2 << (prec - 1)) - 1;
      mask.high = (HOST_WIDE_INT) m;
      mask.low = ~(unsigned HOST_WIDE_INT) 0;
    }
  else
    {
      mask.high = 0;
      mask.low = prec ? ((unsigned HOST_WIDE_INT) 2 << (prec - 1)) - 1 : 0;
    }

  return mask;
}
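
/* Illustration (not part of the original file): double_int::mask (8) has
   low == 0xff and high == 0, while with a 64-bit HOST_WIDE_INT (an assumption
   made only for this example) double_int::mask (65) has low == all ones and
   high == 1.  The `2 << (prec - 1)' form is used instead of `1 << prec' so
   that prec == HOST_BITS_PER_WIDE_INT never shifts by the full word width,
   which C leaves undefined.  */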
/* Returns a maximum value for signed or unsigned integer
   of precision PREC.  */

double_int
double_int::max_value (unsigned int prec, bool uns)
{
  return double_int::mask (prec - (uns ? 0 : 1));
}

/* Returns a minimum value for signed or unsigned integer
   of precision PREC.  */

double_int
double_int::min_value (unsigned int prec, bool uns)
{
  if (uns)
    return double_int_zero;
  return double_int_one.lshift (prec - 1, prec, false);
}
/* Clears the bits of CST over the precision PREC.  If UNS is false, the bits
   outside of the precision are set to the sign bit (i.e., the PREC-th one),
   otherwise they are set to zero.

   This corresponds to returning the value represented by PREC lowermost bits
   of CST, with the given signedness.  */

double_int
double_int::ext (unsigned prec, bool uns) const
{
  if (uns)
    return this->zext (prec);
  else
    return this->sext (prec);
}
/* The same as double_int::ext with UNS = true.  */

double_int
double_int::zext (unsigned prec) const
{
  const double_int &cst = *this;
  double_int mask = double_int::mask (prec);
  double_int r;

  r.low = cst.low & mask.low;
  r.high = cst.high & mask.high;

  return r;
}
/* The same as double_int::ext with UNS = false.  */

double_int
double_int::sext (unsigned prec) const
{
  const double_int &cst = *this;
  double_int mask = double_int::mask (prec);
  double_int r;
  unsigned HOST_WIDE_INT snum;

  if (prec <= HOST_BITS_PER_WIDE_INT)
    snum = cst.low;
  else
    {
      prec -= HOST_BITS_PER_WIDE_INT;
      snum = (unsigned HOST_WIDE_INT) cst.high;
    }
  if (((snum >> (prec - 1)) & 1) == 1)
    {
      r.low = cst.low | ~mask.low;
      r.high = cst.high | ~mask.high;
    }
  else
    {
      r.low = cst.low & mask.low;
      r.high = cst.high & mask.high;
    }

  return r;
}
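
/* Sign- versus zero-extension sketch (illustration only): for a double_int
   whose low word is 0xff and whose high word is 0, zext (8) leaves the value
   as 255, while sext (8) turns it into -1 (low all ones, high == -1), because
   bit 7, the sign bit of an 8-bit quantity, is set and is copied into every
   bit above the precision.  With bit 7 clear, sext and zext agree.  */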
/* Returns true if CST fits in signed HOST_WIDE_INT.  */

bool
double_int::fits_shwi () const
{
  const double_int &cst = *this;
  if (cst.high == 0)
    return (HOST_WIDE_INT) cst.low >= 0;
  else if (cst.high == -1)
    return (HOST_WIDE_INT) cst.low < 0;
  else
    return false;
}

/* Returns true if CST fits in HOST_WIDE_INT if UNS is false, or in
   unsigned HOST_WIDE_INT if UNS is true.  */

bool
double_int::fits_hwi (bool uns) const
{
  if (uns)
    return this->fits_uhwi ();
  else
    return this->fits_shwi ();
}
double_int
double_int::operator * (double_int b) const
{
  const double_int &a = *this;
  double_int ret;

  mul_double (a.low, a.high, b.low, b.high, &ret.low, &ret.high);
  return ret;
}

/* Returns A * B.  If the operation overflows according to UNSIGNED_P,
   *OVERFLOW is set to nonzero.  */

double_int
double_int::mul_with_sign (double_int b, bool unsigned_p, bool *overflow) const
{
  const double_int &a = *this;
  double_int ret;

  *overflow = mul_double_with_sign (a.low, a.high, b.low, b.high,
                                    &ret.low, &ret.high, unsigned_p);
  return ret;
}

double_int
double_int::wide_mul_with_sign (double_int b, bool unsigned_p,
                                double_int *higher, bool *overflow) const
{
  double_int lower;

  *overflow = mul_double_wide_with_sign (low, high, b.low, b.high,
                                         &lower.low, &lower.high,
                                         &higher->low, &higher->high,
                                         unsigned_p);
  return lower;
}
double_int
double_int::operator + (double_int b) const
{
  const double_int &a = *this;
  double_int ret;

  add_double (a.low, a.high, b.low, b.high, &ret.low, &ret.high);
  return ret;
}

/* Returns A + B.  If the operation overflows according to UNSIGNED_P,
   *OVERFLOW is set to nonzero.  */

double_int
double_int::add_with_sign (double_int b, bool unsigned_p, bool *overflow) const
{
  const double_int &a = *this;
  double_int ret;

  *overflow = add_double_with_sign (a.low, a.high, b.low, b.high,
                                    &ret.low, &ret.high, unsigned_p);
  return ret;
}
double_int
double_int::operator - (double_int b) const
{
  const double_int &a = *this;
  double_int ret;

  neg_double (b.low, b.high, &b.low, &b.high);
  add_double (a.low, a.high, b.low, b.high, &ret.low, &ret.high);
  return ret;
}

/* Returns A - B.  If the operation overflows via inconsistent sign bits,
   *OVERFLOW is set to nonzero.  */

double_int
double_int::sub_with_overflow (double_int b, bool *overflow) const
{
  double_int ret;

  neg_double (b.low, b.high, &ret.low, &ret.high);
  add_double (low, high, ret.low, ret.high, &ret.low, &ret.high);
  *overflow = OVERFLOW_SUM_SIGN (ret.high, b.high, high);

  return ret;
}

double_int
double_int::operator - () const
{
  const double_int &a = *this;
  double_int ret;

  neg_double (a.low, a.high, &ret.low, &ret.high);
  return ret;
}

double_int
double_int::neg_with_overflow (bool *overflow) const
{
  double_int ret;

  *overflow = neg_double (low, high, &ret.low, &ret.high);
  return ret;
}
/* Returns A / B (computed as unsigned depending on UNS, and rounded as
   specified by CODE).  CODE is enum tree_code in fact, but double_int.h
   must be included before tree.h.  The remainder after the division is
   stored to *MOD.  */

double_int
double_int::divmod_with_overflow (double_int b, bool uns, unsigned code,
                                  double_int *mod, bool *overflow) const
{
  const double_int &a = *this;
  double_int ret;

  *overflow = div_and_round_double (code, uns, a.low, a.high,
                                    b.low, b.high, &ret.low, &ret.high,
                                    &mod->low, &mod->high);
  return ret;
}

double_int
double_int::divmod (double_int b, bool uns, unsigned code,
                    double_int *mod) const
{
  const double_int &a = *this;
  double_int ret;

  div_and_round_double (code, uns, a.low, a.high,
                        b.low, b.high, &ret.low, &ret.high,
                        &mod->low, &mod->high);
  return ret;
}

/* The same as double_int::divmod with UNS = false.  */

double_int
double_int::sdivmod (double_int b, unsigned code, double_int *mod) const
{
  return this->divmod (b, false, code, mod);
}

/* The same as double_int::divmod with UNS = true.  */

double_int
double_int::udivmod (double_int b, unsigned code, double_int *mod) const
{
  return this->divmod (b, true, code, mod);
}
/* Returns A / B (computed as unsigned depending on UNS, and rounded as
   specified by CODE).  CODE is enum tree_code in fact, but double_int.h
   must be included before tree.h.  */

double_int
double_int::div (double_int b, bool uns, unsigned code) const
{
  double_int mod;

  return this->divmod (b, uns, code, &mod);
}

/* The same as double_int::div with UNS = false.  */

double_int
double_int::sdiv (double_int b, unsigned code) const
{
  return this->div (b, false, code);
}

/* The same as double_int::div with UNS = true.  */

double_int
double_int::udiv (double_int b, unsigned code) const
{
  return this->div (b, true, code);
}

/* Returns A % B (computed as unsigned depending on UNS, and rounded as
   specified by CODE).  CODE is enum tree_code in fact, but double_int.h
   must be included before tree.h.  */

double_int
double_int::mod (double_int b, bool uns, unsigned code) const
{
  double_int mod;

  this->divmod (b, uns, code, &mod);
  return mod;
}

/* The same as double_int::mod with UNS = false.  */

double_int
double_int::smod (double_int b, unsigned code) const
{
  return this->mod (b, false, code);
}

/* The same as double_int::mod with UNS = true.  */

double_int
double_int::umod (double_int b, unsigned code) const
{
  return this->mod (b, true, code);
}
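
/* Usage sketch (illustrative only; the rounding codes come from tree.h and
   from_shwi from double-int.h, both assumed rather than shown in this file):

     double_int q, r;
     double_int a = double_int::from_shwi (-7);
     double_int b = double_int::from_shwi (2);

     q = a.sdivmod (b, TRUNC_DIV_EXPR, &r);   // q == -3, r == -1
     q = a.sdiv (b, FLOOR_DIV_EXPR);          // q == -4
     r = a.smod (b, FLOOR_MOD_EXPR);          // r == 1
  */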
/* Return TRUE iff PRODUCT is an integral multiple of FACTOR, and return
   the multiple in *MULTIPLE.  Otherwise return FALSE and leave *MULTIPLE
   unchanged.  */

bool
double_int::multiple_of (double_int factor,
                         bool unsigned_p, double_int *multiple) const
{
  double_int remainder;
  double_int quotient = this->divmod (factor, unsigned_p,
                                      TRUNC_DIV_EXPR, &remainder);
  if (remainder.is_zero ())
    {
      *multiple = quotient;
      return true;
    }

  return false;
}
/* Set BITPOS bit in A.  */

double_int
double_int::set_bit (unsigned bitpos) const
{
  double_int a = *this;

  if (bitpos < HOST_BITS_PER_WIDE_INT)
    a.low |= (unsigned HOST_WIDE_INT) 1 << bitpos;
  else
    a.high |= (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);

  return a;
}

/* Count trailing zeros in A.  */

int
double_int::trailing_zeros () const
{
  const double_int &a = *this;
  unsigned HOST_WIDE_INT w = a.low ? a.low : (unsigned HOST_WIDE_INT) a.high;
  unsigned bits = a.low ? 0 : HOST_BITS_PER_WIDE_INT;

  if (!w)
    return HOST_BITS_PER_DOUBLE_INT;
  bits += ctz_hwi (w);

  return bits;
}
/* Shift A left by COUNT places keeping only PREC bits of result.  Shift
   right if COUNT is negative.  ARITH true specifies arithmetic shifting;
   otherwise use logical shift.  */

double_int
double_int::lshift (HOST_WIDE_INT count, unsigned int prec, bool arith) const
{
  const double_int &a = *this;
  double_int ret;

  lshift_double (a.low, a.high, count, prec, &ret.low, &ret.high, arith);
  return ret;
}

/* Shift A right by COUNT places keeping only PREC bits of result.  Shift
   left if COUNT is negative.  ARITH true specifies arithmetic shifting;
   otherwise use logical shift.  */

double_int
double_int::rshift (HOST_WIDE_INT count, unsigned int prec, bool arith) const
{
  const double_int &a = *this;
  double_int ret;

  lshift_double (a.low, a.high, -count, prec, &ret.low, &ret.high, arith);
  return ret;
}

/* Arithmetic shift A left by COUNT places keeping only PREC bits of result.
   Shift right if COUNT is negative.  */

double_int
double_int::alshift (HOST_WIDE_INT count, unsigned int prec) const
{
  double_int r;

  lshift_double (low, high, count, prec, &r.low, &r.high, true);
  return r;
}

/* Arithmetic shift A right by COUNT places keeping only PREC bits of result.
   Shift left if COUNT is negative.  */

double_int
double_int::arshift (HOST_WIDE_INT count, unsigned int prec) const
{
  double_int r;

  lshift_double (low, high, -count, prec, &r.low, &r.high, true);
  return r;
}

/* Logical shift A left by COUNT places keeping only PREC bits of result.
   Shift right if COUNT is negative.  */

double_int
double_int::llshift (HOST_WIDE_INT count, unsigned int prec) const
{
  double_int r;

  lshift_double (low, high, count, prec, &r.low, &r.high, false);
  return r;
}

/* Logical shift A right by COUNT places keeping only PREC bits of result.
   Shift left if COUNT is negative.  */

double_int
double_int::lrshift (HOST_WIDE_INT count, unsigned int prec) const
{
  double_int r;

  lshift_double (low, high, -count, prec, &r.low, &r.high, false);
  return r;
}
/* Rotate A left by COUNT places keeping only PREC bits of result.
   Rotate right if COUNT is negative.  */

double_int
double_int::lrotate (HOST_WIDE_INT count, unsigned int prec) const
{
  double_int t1, t2;

  count %= prec;
  if (count < 0)
    count += prec;

  t1 = this->lshift (count, prec, false);
  t2 = this->rshift (prec - count, prec, false);

  return t1 | t2;
}
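
/* Rotation as two shifts (illustration only, with PREC == 8): rotating the
   bit pattern 10010110 left by 3 gives

     (0x96 << 3 | 0x96 >> 5) masked to 8 bits  ==  0xb4  ==  10110100,

   which is exactly t1 | t2 above, the two logical double_int shifts each
   keeping only PREC bits.  */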
/* Rotate A right by COUNT places keeping only PREC bits of result.
   Rotate left if COUNT is negative.  */

double_int
double_int::rrotate (HOST_WIDE_INT count, unsigned int prec) const
{
  double_int t1, t2;

  count %= prec;
  if (count < 0)
    count += prec;

  t1 = this->rshift (count, prec, false);
  t2 = this->lshift (prec - count, prec, false);

  return t1 | t2;
}
/* Returns -1 if A < B, 0 if A == B and 1 if A > B.  Signedness of the
   comparison is given by UNS.  */

int
double_int::cmp (double_int b, bool uns) const
{
  if (uns)
    return this->ucmp (b);
  else
    return this->scmp (b);
}

/* Compares two unsigned values A and B.  Returns -1 if A < B, 0 if A == B,
   and 1 if A > B.  */

int
double_int::ucmp (double_int b) const
{
  const double_int &a = *this;
  if ((unsigned HOST_WIDE_INT) a.high < (unsigned HOST_WIDE_INT) b.high)
    return -1;
  if ((unsigned HOST_WIDE_INT) a.high > (unsigned HOST_WIDE_INT) b.high)
    return 1;
  if (a.low < b.low)
    return -1;
  if (a.low > b.low)
    return 1;

  return 0;
}

/* Compares two signed values A and B.  Returns -1 if A < B, 0 if A == B,
   and 1 if A > B.  */

int
double_int::scmp (double_int b) const
{
  const double_int &a = *this;
  if (a.high < b.high)
    return -1;
  if (a.high > b.high)
    return 1;
  if (a.low < b.low)
    return -1;
  if (a.low > b.low)
    return 1;

  return 0;
}
/* Compares two unsigned values A and B for less-than.  */

bool
double_int::ult (double_int b) const
{
  if ((unsigned HOST_WIDE_INT) high < (unsigned HOST_WIDE_INT) b.high)
    return true;
  if ((unsigned HOST_WIDE_INT) high > (unsigned HOST_WIDE_INT) b.high)
    return false;
  if (low < b.low)
    return true;
  return false;
}

/* Compares two unsigned values A and B for less-than or equal-to.  */

bool
double_int::ule (double_int b) const
{
  if ((unsigned HOST_WIDE_INT) high < (unsigned HOST_WIDE_INT) b.high)
    return true;
  if ((unsigned HOST_WIDE_INT) high > (unsigned HOST_WIDE_INT) b.high)
    return false;
  if (low <= b.low)
    return true;
  return false;
}

/* Compares two unsigned values A and B for greater-than.  */

bool
double_int::ugt (double_int b) const
{
  if ((unsigned HOST_WIDE_INT) high > (unsigned HOST_WIDE_INT) b.high)
    return true;
  if ((unsigned HOST_WIDE_INT) high < (unsigned HOST_WIDE_INT) b.high)
    return false;
  if (low > b.low)
    return true;
  return false;
}

/* Compares two signed values A and B for less-than.  */

bool
double_int::slt (double_int b) const
{
  if (high < b.high)
    return true;
  if (high > b.high)
    return false;
  if (low < b.low)
    return true;
  return false;
}

/* Compares two signed values A and B for less-than or equal-to.  */

bool
double_int::sle (double_int b) const
{
  if (high < b.high)
    return true;
  if (high > b.high)
    return false;
  if (low <= b.low)
    return true;
  return false;
}

/* Compares two signed values A and B for greater-than.  */

bool
double_int::sgt (double_int b) const
{
  if (high > b.high)
    return true;
  if (high < b.high)
    return false;
  if (low > b.low)
    return true;
  return false;
}
/* Compares two values A and B.  Returns max value.  Signedness of the
   comparison is given by UNS.  */

double_int
double_int::max (double_int b, bool uns)
{
  return (this->cmp (b, uns) == 1) ? *this : b;
}

/* Compares two signed values A and B.  Returns max value.  */

double_int
double_int::smax (double_int b)
{
  return (this->scmp (b) == 1) ? *this : b;
}

/* Compares two unsigned values A and B.  Returns max value.  */

double_int
double_int::umax (double_int b)
{
  return (this->ucmp (b) == 1) ? *this : b;
}

/* Compares two values A and B.  Returns min value.  Signedness of the
   comparison is given by UNS.  */

double_int
double_int::min (double_int b, bool uns)
{
  return (this->cmp (b, uns) == -1) ? *this : b;
}

/* Compares two signed values A and B.  Returns min value.  */

double_int
double_int::smin (double_int b)
{
  return (this->scmp (b) == -1) ? *this : b;
}

/* Compares two unsigned values A and B.  Returns min value.  */

double_int
double_int::umin (double_int b)
{
  return (this->ucmp (b) == -1) ? *this : b;
}
/* Splits last digit of *CST (taken as unsigned) in BASE and returns it.  */

static unsigned
double_int_split_digit (double_int *cst, unsigned base)
{
  unsigned HOST_WIDE_INT resl, reml;
  HOST_WIDE_INT resh, remh;

  div_and_round_double (FLOOR_DIV_EXPR, true, cst->low, cst->high, base, 0,
                        &resl, &resh, &reml, &remh);
  cst->high = resh;
  cst->low = resl;

  return reml;
}
/* Dumps CST to FILE.  If UNS is true, CST is considered to be unsigned,
   otherwise it is signed.  */

void
dump_double_int (FILE *file, double_int cst, bool uns)
{
  unsigned digits[100], n;
  int i;

  if (cst.is_zero ())
    {
      fprintf (file, "0");
      return;
    }

  if (!uns && cst.is_negative ())
    {
      fprintf (file, "-");
      cst = -cst;
    }

  for (n = 0; !cst.is_zero (); n++)
    digits[n] = double_int_split_digit (&cst, 10);
  for (i = n - 1; i >= 0; i--)
    fprintf (file, "%u", digits[i]);
}
/* Sets RESULT to VAL, taken unsigned if UNS is true and as signed
   otherwise.  */

void
mpz_set_double_int (mpz_t result, double_int val, bool uns)
{
  bool negate = false;
  unsigned HOST_WIDE_INT vp[2];

  if (!uns && val.is_negative ())
    {
      negate = true;
      val = -val;
    }

  vp[0] = val.low;
  vp[1] = (unsigned HOST_WIDE_INT) val.high;
  mpz_import (result, 2, -1, sizeof (HOST_WIDE_INT), 0, 0, vp);

  if (negate)
    mpz_neg (result, result);
}
/* Returns VAL converted to TYPE.  If WRAP is true, then out-of-range
   values of VAL will be wrapped; otherwise, they will be set to the
   appropriate minimum or maximum TYPE bound.  */

double_int
mpz_get_double_int (const_tree type, mpz_t val, bool wrap)
{
  unsigned HOST_WIDE_INT *vp;
  size_t count, numb;
  double_int res;

  if (!wrap)
    {
      mpz_t min, max;

      mpz_init (min);
      mpz_init (max);
      get_type_static_bounds (type, min, max);

      if (mpz_cmp (val, min) < 0)
        mpz_set (val, min);
      else if (mpz_cmp (val, max) > 0)
        mpz_set (val, max);

      mpz_clear (min);
      mpz_clear (max);
    }

  /* Determine the number of unsigned HOST_WIDE_INT that are required
     for representing the value.  The code to calculate count is
     extracted from the GMP manual, section "Integer Import and Export":
     http://gmplib.org/manual/Integer-Import-and-Export.html  */
  numb = 8 * sizeof (HOST_WIDE_INT);
  count = (mpz_sizeinbase (val, 2) + numb - 1) / numb;
  if (count < 2)
    count = 2;
  vp = (unsigned HOST_WIDE_INT *) alloca (count * sizeof (HOST_WIDE_INT));

  /* Zero the buffer so that any words mpz_export leaves untouched are 0.  */
  memset (vp, 0, count * sizeof (HOST_WIDE_INT));

  mpz_export (vp, &count, -1, sizeof (HOST_WIDE_INT), 0, 0, val);

  gcc_assert (wrap || count <= 2);

  res.low = vp[0];
  res.high = (HOST_WIDE_INT) vp[1];

  res = res.ext (TYPE_PRECISION (type), TYPE_UNSIGNED (type));
  if (mpz_sgn (val) < 0)
    res = -res;

  return res;
}
) < 0)