/* Operations with long integers.
   Copyright (C) 2006, 2007, 2009, 2010 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
/* We know that A1 + B1 = SUM1, using 2's complement arithmetic and ignoring
   overflow.  Suppose A, B and SUM have the same respective signs as A1, B1,
   and SUM1.  Then this yields nonzero if overflow occurred during the
   addition.

   Overflow occurs if A and B have the same sign, but A and SUM differ in
   sign.  Use `^' to test whether signs differ, and `< 0' to isolate the
   sign.  */

#define OVERFLOW_SUM_SIGN(a, b, sum) ((~((a) ^ (b)) & ((a) ^ (sum))) < 0)
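
/* Worked example (illustrative, using 8-bit values for brevity): for
   a = 0x70 and b = 0x50 the two's complement sum is 0xc0.  a ^ b is 0x20,
   so ~(a ^ b) has the sign bit set, and a ^ sum is 0xb0, which also has it
   set; their AND is negative, so the macro reports the signed overflow of
   112 + 80.  */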
/* To do constant folding on INTEGER_CST nodes requires two-word arithmetic.
   We do that by representing the two-word integer in 4 words, with only
   HOST_BITS_PER_WIDE_INT / 2 bits stored in each word, as a positive
   number.  The value of the word is LOWPART + HIGHPART * BASE.  */

#define LOWPART(x) \
  ((x) & (((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)) - 1))
#define HIGHPART(x) \
  ((unsigned HOST_WIDE_INT) (x) >> HOST_BITS_PER_WIDE_INT / 2)
#define BASE ((unsigned HOST_WIDE_INT) 1 << HOST_BITS_PER_WIDE_INT / 2)
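
/* For example, on a host where HOST_WIDE_INT has 64 bits, BASE is 1 << 32,
   LOWPART (0x123456789abcdef0) is 0x9abcdef0 and
   HIGHPART (0x123456789abcdef0) is 0x12345678.  */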
/* Unpack a two-word integer into 4 words.
   LOW and HI are the integer, as two `HOST_WIDE_INT' pieces.
   WORDS points to the array of HOST_WIDE_INTs.  */

static void
encode (HOST_WIDE_INT *words, unsigned HOST_WIDE_INT low, HOST_WIDE_INT hi)
{
  words[0] = LOWPART (low);
  words[1] = HIGHPART (low);
  words[2] = LOWPART (hi);
  words[3] = HIGHPART (hi);
}
/* Pack an array of 4 words into a two-word integer.
   WORDS points to the array of words.
   The integer is stored into *LOW and *HI as two `HOST_WIDE_INT' pieces.  */

static void
decode (HOST_WIDE_INT *words, unsigned HOST_WIDE_INT *low, HOST_WIDE_INT *hi)
{
  *low = words[0] + words[1] * BASE;
  *hi = words[2] + words[3] * BASE;
}
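
/* Usage sketch (illustrative values): encode and decode are inverses, so

     HOST_WIDE_INT words[4];
     unsigned HOST_WIDE_INT low;
     HOST_WIDE_INT high;
     encode (words, 0xdeadbeef, 7);
     decode (words, &low, &high);

   leaves low == 0xdeadbeef and high == 7 again.  */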
/* Force the double-word integer L1, H1 to be within the range of the
   integer type TYPE.  Stores the properly truncated and sign-extended
   double-word integer in *LV, *HV.  Returns true if the operation
   overflows, that is, argument and result are different.  */

int
fit_double_type (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
                 unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv,
                 const_tree type)
{
  unsigned HOST_WIDE_INT low0 = l1;
  HOST_WIDE_INT high0 = h1;
  unsigned int prec = TYPE_PRECISION (type);
  int sign_extended_type;

  /* Size types *are* sign extended.  */
  sign_extended_type = (!TYPE_UNSIGNED (type)
                        || (TREE_CODE (type) == INTEGER_TYPE
                            && TYPE_IS_SIZETYPE (type)));

  /* First clear all bits that are beyond the type's precision.  */
  if (prec >= 2 * HOST_BITS_PER_WIDE_INT)
    ;
  else if (prec > HOST_BITS_PER_WIDE_INT)
    h1 &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
  else
    {
      h1 = 0;
      if (prec < HOST_BITS_PER_WIDE_INT)
        l1 &= ~((HOST_WIDE_INT) (-1) << prec);
    }

  /* Then do sign extension if necessary.  */
  if (!sign_extended_type)
    /* No sign extension */;
  else if (prec >= 2 * HOST_BITS_PER_WIDE_INT)
    /* Correct width already.  */;
  else if (prec > HOST_BITS_PER_WIDE_INT)
    {
      /* Sign extend top half? */
      if (h1 & ((unsigned HOST_WIDE_INT) 1
                << (prec - HOST_BITS_PER_WIDE_INT - 1)))
        h1 |= (HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT);
    }
  else if (prec == HOST_BITS_PER_WIDE_INT)
    {
      if ((HOST_WIDE_INT) l1 < 0)
        h1 = -1;
    }
  else
    {
      /* Sign extend bottom half? */
      if (l1 & ((unsigned HOST_WIDE_INT) 1 << (prec - 1)))
        {
          h1 = -1;
          l1 |= (HOST_WIDE_INT) (-1) << prec;
        }
    }

  *lv = l1;
  *hv = h1;

  /* If the value didn't fit, signal overflow.  */
  return l1 != low0 || h1 != high0;
}
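
/* For example, fitting the value 200 (L1 == 200, H1 == 0) to an 8-bit
   unsigned type leaves it unchanged and returns 0, while fitting it to an
   8-bit signed type sign extends bit 7 and yields -56, so the function
   returns nonzero to signal that the value did not fit.  */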
/* We force the double-int HIGH:LOW to the range of the type TYPE by
   sign or zero extending it.

   OVERFLOWABLE indicates whether we are interested in overflow of the
   value: when >0 we are only interested in signed overflow, when <0 we
   are interested in any overflow.  OVERFLOWED indicates whether overflow
   has already occurred.  We force the value to be within range of the
   type (by setting to 0 or 1 all the bits outside the type's range).
   We set TREE_OVERFLOW if
        OVERFLOWED is nonzero,
        or OVERFLOWABLE is >0 and signed overflow occurs,
        or OVERFLOWABLE is <0 and any overflow occurs.
   We return a new tree node for the extended double-int.  The node
   is shared if no overflow flags are set.  */
tree
force_fit_type_double (tree type, unsigned HOST_WIDE_INT low,
                       HOST_WIDE_INT high, int overflowable,
                       bool overflowed)
{
  int sign_extended_type;
  bool overflow;

  /* Size types *are* sign extended.  */
  sign_extended_type = (!TYPE_UNSIGNED (type)
                        || (TREE_CODE (type) == INTEGER_TYPE
                            && TYPE_IS_SIZETYPE (type)));

  overflow = fit_double_type (low, high, &low, &high, type);

  /* If we need to set overflow flags, return a new unshared node.  */
  if (overflowed || overflow)
    {
      if (overflowed
          || overflowable < 0
          || (overflowable > 0 && sign_extended_type))
        {
          tree t = make_node (INTEGER_CST);
          TREE_INT_CST_LOW (t) = low;
          TREE_INT_CST_HIGH (t) = high;
          TREE_TYPE (t) = type;
          TREE_OVERFLOW (t) = 1;
          return t;
        }
    }

  /* Else build a shared node.  */
  return build_int_cst_wide (type, low, high);
}
/* Add two doubleword integers with doubleword result.
   Return nonzero if the operation overflows according to UNSIGNED_P.
   Each argument is given as two `HOST_WIDE_INT' pieces.
   One argument is L1 and H1; the other, L2 and H2.
   The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

int
add_double_with_sign (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
                      unsigned HOST_WIDE_INT l2, HOST_WIDE_INT h2,
                      unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv,
                      bool unsigned_p)
{
  unsigned HOST_WIDE_INT l;
  HOST_WIDE_INT h;

  l = l1 + l2;
  h = (HOST_WIDE_INT) ((unsigned HOST_WIDE_INT) h1
                       + (unsigned HOST_WIDE_INT) h2
                       + (l < l1));

  *lv = l;
  *hv = h;

  if (unsigned_p)
    return ((unsigned HOST_WIDE_INT) h < (unsigned HOST_WIDE_INT) h1
            || (h == h1
                && l < l1));
  else
    return OVERFLOW_SUM_SIGN (h1, h2, h);
}
/* Negate a doubleword integer with doubleword result.
   Return nonzero if the operation overflows, assuming it's signed.
   The argument is given as two `HOST_WIDE_INT' pieces in L1 and H1.
   The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

int
neg_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
            unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv)
{
  if (l1 == 0)
    {
      *lv = 0;
      *hv = - h1;
      return (*hv & h1) < 0;
    }
  else
    {
      *lv = -l1;
      *hv = ~h1;
      return 0;
    }
}
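
/* Note that negation can only overflow when L1 is zero and H1 is the most
   negative HOST_WIDE_INT: then -H1 equals H1, the sign bit survives the AND
   above, and the comparison with zero reports the overflow.  For any other
   input the negated value is representable and 0 is returned.  */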
/* Multiply two doubleword integers with doubleword result.
   Return nonzero if the operation overflows according to UNSIGNED_P.
   Each argument is given as two `HOST_WIDE_INT' pieces.
   One argument is L1 and H1; the other, L2 and H2.
   The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

int
mul_double_with_sign (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
                      unsigned HOST_WIDE_INT l2, HOST_WIDE_INT h2,
                      unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv,
                      bool unsigned_p)
{
  HOST_WIDE_INT arg1[4];
  HOST_WIDE_INT arg2[4];
  HOST_WIDE_INT prod[4 * 2];
  unsigned HOST_WIDE_INT carry;
  int i, j, k;
  unsigned HOST_WIDE_INT toplow, neglow;
  HOST_WIDE_INT tophigh, neghigh;

  encode (arg1, l1, h1);
  encode (arg2, l2, h2);

  memset (prod, 0, sizeof prod);

  for (i = 0; i < 4; i++)
    {
      carry = 0;
      for (j = 0; j < 4; j++)
        {
          k = i + j;
          /* This product is <= 0xFFFE0001, the sum <= 0xFFFF0000.  */
          carry += arg1[i] * arg2[j];
          /* Since prod[p] < 0xFFFF, this sum <= 0xFFFFFFFF.  */
          carry += prod[k];
          prod[k] = LOWPART (carry);
          carry = HIGHPART (carry);
        }
      prod[i + 4] = carry;
    }

  decode (prod, lv, hv);
  decode (prod + 4, &toplow, &tophigh);

  /* Unsigned overflow is immediate.  */
  if (unsigned_p)
    return (toplow | tophigh) != 0;

  /* Check for signed overflow by calculating the signed representation of the
     top half of the result; it should agree with the low half's sign bit.  */
  if (h1 < 0)
    {
      neg_double (l2, h2, &neglow, &neghigh);
      add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
    }
  if (h2 < 0)
    {
      neg_double (l1, h1, &neglow, &neghigh);
      add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
    }
  return (*hv < 0 ? ~(toplow & tophigh) : toplow | tophigh) != 0;
}
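
/* The signed overflow check above works on the unsigned 8-half-word product:
   if an operand is negative, its unsigned reading exceeds the signed one by
   2^(2 * HOST_BITS_PER_WIDE_INT), so the other operand is subtracted from
   the top half (via neg_double/add_double) to obtain the top half of the
   signed product.  The product then fits exactly when that corrected top
   half is the sign extension of *LV/*HV: all-zero bits for a nonnegative
   result, all-one bits for a negative one.  */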
/* Shift the doubleword integer in L1, H1 left by COUNT places
   keeping only PREC bits of result.
   Shift right if COUNT is negative.
   ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

void
lshift_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
               HOST_WIDE_INT count, unsigned int prec,
               unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv, bool arith)
{
  unsigned HOST_WIDE_INT signmask;

  if (count < 0)
    {
      rshift_double (l1, h1, -count, prec, lv, hv, arith);
      return;
    }

  if (SHIFT_COUNT_TRUNCATED)
    count %= prec;

  if (count >= 2 * HOST_BITS_PER_WIDE_INT)
    {
      /* Shifting by the host word size is undefined according to the
         ANSI standard, so we must handle this as a special case.  */
      *hv = 0;
      *lv = 0;
    }
  else if (count >= HOST_BITS_PER_WIDE_INT)
    {
      *hv = l1 << (count - HOST_BITS_PER_WIDE_INT);
      *lv = 0;
    }
  else
    {
      *hv = (((unsigned HOST_WIDE_INT) h1 << count)
             | (l1 >> (HOST_BITS_PER_WIDE_INT - count - 1) >> 1));
      *lv = l1 << count;
    }

  /* Sign extend all bits that are beyond the precision.  */

  signmask = -((prec > HOST_BITS_PER_WIDE_INT
                ? ((unsigned HOST_WIDE_INT) *hv
                   >> (prec - HOST_BITS_PER_WIDE_INT - 1))
                : (*lv >> (prec - 1))) & 1);

  if (prec >= 2 * HOST_BITS_PER_WIDE_INT)
    ;
  else if (prec >= HOST_BITS_PER_WIDE_INT)
    {
      *hv &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
      *hv |= signmask << (prec - HOST_BITS_PER_WIDE_INT);
    }
  else
    {
      *hv = signmask;
      *lv &= ~((unsigned HOST_WIDE_INT) (-1) << prec);
      *lv |= signmask << prec;
    }
}
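
/* The double shift `l1 >> (HOST_BITS_PER_WIDE_INT - count - 1) >> 1' above
   is deliberate: it keeps each shift count strictly below the word width,
   so the expression stays well defined when COUNT is 0, where a single
   shift by HOST_BITS_PER_WIDE_INT would be undefined behaviour.  */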
/* Shift the doubleword integer in L1, H1 right by COUNT places
   keeping only PREC bits of result.  Shift left if COUNT is negative.
   ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

void
rshift_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
               HOST_WIDE_INT count, unsigned int prec,
               unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv,
               bool arith)
{
  unsigned HOST_WIDE_INT signmask;

  if (count < 0)
    {
      lshift_double (l1, h1, -count, prec, lv, hv, arith);
      return;
    }

  signmask = (arith
              ? -((unsigned HOST_WIDE_INT) h1 >> (HOST_BITS_PER_WIDE_INT - 1))
              : 0);

  if (SHIFT_COUNT_TRUNCATED)
    count %= prec;

  if (count >= 2 * HOST_BITS_PER_WIDE_INT)
    {
      /* Shifting by the host word size is undefined according to the
         ANSI standard, so we must handle this as a special case.  */
      *hv = 0;
      *lv = 0;
    }
  else if (count >= HOST_BITS_PER_WIDE_INT)
    {
      *hv = 0;
      *lv = (unsigned HOST_WIDE_INT) h1 >> (count - HOST_BITS_PER_WIDE_INT);
    }
  else
    {
      *hv = (unsigned HOST_WIDE_INT) h1 >> count;
      *lv = ((l1 >> count)
             | ((unsigned HOST_WIDE_INT) h1
                << (HOST_BITS_PER_WIDE_INT - count - 1) << 1));
    }

  /* Zero / sign extend all bits that are beyond the precision.  */

  if (count >= (HOST_WIDE_INT) prec)
    {
      *hv = signmask;
      *lv = signmask;
    }
  else if ((prec - count) >= 2 * HOST_BITS_PER_WIDE_INT)
    ;
  else if ((prec - count) >= HOST_BITS_PER_WIDE_INT)
    {
      *hv &= ~((HOST_WIDE_INT) (-1) << (prec - count - HOST_BITS_PER_WIDE_INT));
      *hv |= signmask << (prec - count - HOST_BITS_PER_WIDE_INT);
    }
  else
    {
      *hv = signmask;
      *lv &= ~((unsigned HOST_WIDE_INT) (-1) << (prec - count));
      *lv |= signmask << (prec - count);
    }
}
/* Rotate the doubleword integer in L1, H1 left by COUNT places
   keeping only PREC bits of result.
   Rotate right if COUNT is negative.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

void
lrotate_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
                HOST_WIDE_INT count, unsigned int prec,
                unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv)
{
  unsigned HOST_WIDE_INT s1l, s2l;
  HOST_WIDE_INT s1h, s2h;

  count %= prec;
  if (count < 0)
    count += prec;

  lshift_double (l1, h1, count, prec, &s1l, &s1h, 0);
  rshift_double (l1, h1, prec - count, prec, &s2l, &s2h, 0);
  *lv = s1l | s2l;
  *hv = s1h | s2h;
}
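
/* The rotation is thus the OR of two complementary logical shifts within
   PREC bits, with COUNT first reduced modulo PREC so that negative counts
   rotate in the opposite direction.  */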
/* Rotate the doubleword integer in L1, H1 right by COUNT places
   keeping only PREC bits of result.  COUNT must be positive.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

void
rrotate_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
                HOST_WIDE_INT count, unsigned int prec,
                unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv)
{
  unsigned HOST_WIDE_INT s1l, s2l;
  HOST_WIDE_INT s1h, s2h;

  count %= prec;
  if (count < 0)
    count += prec;

  rshift_double (l1, h1, count, prec, &s1l, &s1h, 0);
  lshift_double (l1, h1, prec - count, prec, &s2l, &s2h, 0);
  *lv = s1l | s2l;
  *hv = s1h | s2h;
}
/* Divide doubleword integer LNUM, HNUM by doubleword integer LDEN, HDEN
   for a quotient (stored in *LQUO, *HQUO) and remainder (in *LREM, *HREM).
   CODE is a tree code for a kind of division, one of
   TRUNC_DIV_EXPR, FLOOR_DIV_EXPR, CEIL_DIV_EXPR, ROUND_DIV_EXPR
   or EXACT_DIV_EXPR.
   It controls how the quotient is rounded to an integer.
   Return nonzero if the operation overflows.
   UNS nonzero says do unsigned division.  */
int
div_and_round_double (unsigned code, int uns,
                      /* num == numerator == dividend */
                      unsigned HOST_WIDE_INT lnum_orig,
                      HOST_WIDE_INT hnum_orig,
                      /* den == denominator == divisor */
                      unsigned HOST_WIDE_INT lden_orig,
                      HOST_WIDE_INT hden_orig,
                      unsigned HOST_WIDE_INT *lquo,
                      HOST_WIDE_INT *hquo, unsigned HOST_WIDE_INT *lrem,
                      HOST_WIDE_INT *hrem)
{
  int quo_neg = 0;
  HOST_WIDE_INT num[4 + 1];     /* extra element for scaling.  */
  HOST_WIDE_INT den[4], quo[4];
  int i, j;
  unsigned HOST_WIDE_INT work;
  unsigned HOST_WIDE_INT carry = 0;
  unsigned HOST_WIDE_INT lnum = lnum_orig;
  HOST_WIDE_INT hnum = hnum_orig;
  unsigned HOST_WIDE_INT lden = lden_orig;
  HOST_WIDE_INT hden = hden_orig;
  int overflow = 0;

  if (hden == 0 && lden == 0)
    overflow = 1, lden = 1;
  /* Calculate quotient sign and convert operands to unsigned.  */
  if (!uns)
    {
      if (hnum < 0)
        {
          quo_neg = ~ quo_neg;
          /* (minimum integer) / (-1) is the only overflow case.  */
          if (neg_double (lnum, hnum, &lnum, &hnum)
              && ((HOST_WIDE_INT) lden & hden) == -1)
            overflow = 1;
        }
      if (hden < 0)
        {
          quo_neg = ~ quo_neg;
          neg_double (lden, hden, &lden, &hden);
        }
    }
  if (hnum == 0 && hden == 0)
    {                           /* single precision */
      *hquo = *hrem = 0;
      /* This unsigned division rounds toward zero.  */
      *lquo = lnum / lden;
      goto finish_up;
    }

  if (hnum == 0)
    {                           /* trivial case: dividend < divisor */
      /* hden != 0 already checked.  */
      *hquo = *lquo = 0;
      *hrem = hnum;
      *lrem = lnum;
      goto finish_up;
    }

  memset (quo, 0, sizeof quo);

  memset (num, 0, sizeof num);  /* to zero 9th element */
  memset (den, 0, sizeof den);

  encode (num, lnum, hnum);
  encode (den, lden, hden);

  /* Special code for when the divisor < BASE.  */
  if (hden == 0 && lden < (unsigned HOST_WIDE_INT) BASE)
    {
      /* hnum != 0 already checked.  */
      for (i = 4 - 1; i >= 0; i--)
        {
          work = num[i] + carry * BASE;
          quo[i] = work / lden;
          carry = work % lden;
        }
    }
  else
    {
      /* Full double precision division,
         with thanks to Don Knuth's "Seminumerical Algorithms".  */
      int num_hi_sig, den_hi_sig;
      unsigned HOST_WIDE_INT quo_est, scale;

      /* Find the highest nonzero divisor digit.  */
      for (i = 4 - 1;; i--)
        if (den[i] != 0)
          {
            den_hi_sig = i;
            break;
          }

      /* Ensure that the first digit of the divisor is at least BASE/2.
         This is required by the quotient digit estimation algorithm.  */

      scale = BASE / (den[den_hi_sig] + 1);
      if (scale > 1)
        {                       /* scale divisor and dividend */
          carry = 0;
          for (i = 0; i <= 4 - 1; i++)
            {
              work = (num[i] * scale) + carry;
              num[i] = LOWPART (work);
              carry = HIGHPART (work);
            }

          num[4] = carry;
          carry = 0;
          for (i = 0; i <= 4 - 1; i++)
            {
              work = (den[i] * scale) + carry;
              den[i] = LOWPART (work);
              carry = HIGHPART (work);
              if (den[i] != 0) den_hi_sig = i;
            }
        }

      num_hi_sig = 4;

      /* Main loop */
      for (i = num_hi_sig - den_hi_sig - 1; i >= 0; i--)
        {
          /* Guess the next quotient digit, quo_est, by dividing the first
             two remaining dividend digits by the high order quotient digit.
             quo_est is never low and is at most 2 high.  */
          unsigned HOST_WIDE_INT tmp;

          num_hi_sig = i + den_hi_sig + 1;
          work = num[num_hi_sig] * BASE + num[num_hi_sig - 1];
          if (num[num_hi_sig] != den[den_hi_sig])
            quo_est = work / den[den_hi_sig];
          else
            quo_est = BASE - 1;

          /* Refine quo_est so it's usually correct, and at most one high.  */
          tmp = work - quo_est * den[den_hi_sig];
          if (tmp < BASE
              && (den[den_hi_sig - 1] * quo_est
                  > (tmp * BASE + num[num_hi_sig - 2])))
            quo_est--;

          /* Try QUO_EST as the quotient digit, by multiplying the
             divisor by QUO_EST and subtracting from the remaining dividend.
             Keep in mind that QUO_EST is the I - 1st digit.  */

          carry = 0;
          for (j = 0; j <= den_hi_sig; j++)
            {
              work = quo_est * den[j] + carry;
              carry = HIGHPART (work);
              work = num[i + j] - LOWPART (work);
              num[i + j] = LOWPART (work);
              carry += HIGHPART (work) != 0;
            }

          /* If quo_est was high by one, then num[i] went negative and
             we need to correct things.  */
          if (num[num_hi_sig] < (HOST_WIDE_INT) carry)
            {
              quo_est--;
              carry = 0;        /* add divisor back in */
              for (j = 0; j <= den_hi_sig; j++)
                {
                  work = num[i + j] + den[j] + carry;
                  carry = HIGHPART (work);
                  num[i + j] = LOWPART (work);
                }

              num[num_hi_sig] += carry;
            }

          /* Store the quotient digit.  */
          quo[i] = quo_est;
        }
    }
  decode (quo, lquo, hquo);

 finish_up:
  /* If result is negative, make it so.  */
  if (quo_neg)
    neg_double (*lquo, *hquo, lquo, hquo);

  /* Compute trial remainder:  rem = num - (quo * den)  */
  mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
  neg_double (*lrem, *hrem, lrem, hrem);
  add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);
  switch (code)
    {
    case TRUNC_DIV_EXPR:
    case TRUNC_MOD_EXPR:        /* round toward zero */
    case EXACT_DIV_EXPR:        /* for this one, it shouldn't matter */
      break;

    case FLOOR_DIV_EXPR:
    case FLOOR_MOD_EXPR:        /* round toward negative infinity */
      if (quo_neg && (*lrem != 0 || *hrem != 0))   /* ratio < 0 && rem != 0 */
        {
          /* quo = quo - 1;  */
          add_double (*lquo, *hquo, (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1,
                      lquo, hquo);
        }
      else
        return overflow;
      break;

    case CEIL_DIV_EXPR:
    case CEIL_MOD_EXPR:         /* round toward positive infinity */
      if (!quo_neg && (*lrem != 0 || *hrem != 0))  /* ratio > 0 && rem != 0 */
        {
          add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
                      lquo, hquo);
        }
      else
        return overflow;
      break;

    case ROUND_DIV_EXPR:
    case ROUND_MOD_EXPR:        /* round to closest integer */
      {
        unsigned HOST_WIDE_INT labs_rem = *lrem;
        HOST_WIDE_INT habs_rem = *hrem;
        unsigned HOST_WIDE_INT labs_den = lden, ltwice;
        HOST_WIDE_INT habs_den = hden, htwice;

        /* Get absolute values.  */
        if (*hrem < 0)
          neg_double (*lrem, *hrem, &labs_rem, &habs_rem);
        if (hden < 0)
          neg_double (lden, hden, &labs_den, &habs_den);

        /* If (2 * abs (lrem) >= abs (lden)), adjust the quotient.  */
        mul_double ((HOST_WIDE_INT) 2, (HOST_WIDE_INT) 0,
                    labs_rem, habs_rem, &ltwice, &htwice);

        if (((unsigned HOST_WIDE_INT) habs_den
             < (unsigned HOST_WIDE_INT) htwice)
            || (((unsigned HOST_WIDE_INT) habs_den
                 == (unsigned HOST_WIDE_INT) htwice)
                && (labs_den <= ltwice)))
          {
            if (*hquo < 0)
              /* quo = quo - 1;  */
              add_double (*lquo, *hquo,
                          (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1, lquo, hquo);
            else
              /* quo = quo + 1;  */
              add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
                          lquo, hquo);
          }
        else
          return overflow;
      }
      break;

    default:
      gcc_unreachable ();
    }
  /* Compute true remainder:  rem = num - (quo * den)  */
  mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
  neg_double (*lrem, *hrem, lrem, hrem);
  add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);
  return overflow;
}
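
/* Worked example for the signed rounding modes: dividing -7 by 2 produces a
   trial quotient of -3 with remainder -1.  TRUNC_DIV_EXPR keeps -3,
   FLOOR_DIV_EXPR adjusts to -4 (remainder 1), CEIL_DIV_EXPR keeps -3, and
   ROUND_DIV_EXPR adjusts to -4 because twice the remainder's magnitude
   reaches the divisor's magnitude.  */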
/* Returns mask for PREC bits.  */

static inline double_int
double_int_mask (unsigned prec)
{
  unsigned HOST_WIDE_INT m;
  double_int mask;

  if (prec > HOST_BITS_PER_WIDE_INT)
    {
      prec -= HOST_BITS_PER_WIDE_INT;
      m = ((unsigned HOST_WIDE_INT) 2 << (prec - 1)) - 1;
      mask.high = (HOST_WIDE_INT) m;
      mask.low = ~(unsigned HOST_WIDE_INT) 0;
    }
  else
    {
      mask.high = 0;
      mask.low = ((unsigned HOST_WIDE_INT) 2 << (prec - 1)) - 1;
    }

  return mask;
}
/* Clears the bits of CST over the precision PREC.  If UNS is false, the bits
   outside of the precision are set to the sign bit (i.e., the most
   significant bit within the precision), otherwise they are set to zero.

   This corresponds to returning the value represented by the PREC lowermost
   bits of CST, with the given signedness.  */

double_int
double_int_ext (double_int cst, unsigned prec, bool uns)
{
  if (uns)
    return double_int_zext (cst, prec);
  else
    return double_int_sext (cst, prec);
}
/* The same as double_int_ext with UNS = true.  */

double_int
double_int_zext (double_int cst, unsigned prec)
{
  double_int mask = double_int_mask (prec);
  double_int r;

  r.low = cst.low & mask.low;
  r.high = cst.high & mask.high;

  return r;
}
/* The same as double_int_ext with UNS = false.  */

double_int
double_int_sext (double_int cst, unsigned prec)
{
  double_int mask = double_int_mask (prec);
  double_int r;
  unsigned HOST_WIDE_INT snum;

  if (prec <= HOST_BITS_PER_WIDE_INT)
    snum = cst.low;
  else
    {
      prec -= HOST_BITS_PER_WIDE_INT;
      snum = (unsigned HOST_WIDE_INT) cst.high;
    }
  if (((snum >> (prec - 1)) & 1) == 1)
    {
      r.low = cst.low | ~mask.low;
      r.high = cst.high | ~mask.high;
    }
  else
    {
      r.low = cst.low & mask.low;
      r.high = cst.high & mask.high;
    }

  return r;
}
/* Returns true if CST fits in unsigned HOST_WIDE_INT.  */

bool
double_int_fits_in_uhwi_p (double_int cst)
{
  return cst.high == 0;
}
/* Returns true if CST fits in signed HOST_WIDE_INT.  */

bool
double_int_fits_in_shwi_p (double_int cst)
{
  if (cst.high == 0)
    return (HOST_WIDE_INT) cst.low >= 0;
  else if (cst.high == -1)
    return (HOST_WIDE_INT) cst.low < 0;
  else
    return false;
}
/* Returns true if CST fits in HOST_WIDE_INT if UNS is false, or in
   unsigned HOST_WIDE_INT if UNS is true.  */

bool
double_int_fits_in_hwi_p (double_int cst, bool uns)
{
  if (uns)
    return double_int_fits_in_uhwi_p (cst);
  else
    return double_int_fits_in_shwi_p (cst);
}
/* Returns value of CST as a signed number.  CST must satisfy
   double_int_fits_in_shwi_p.  */

HOST_WIDE_INT
double_int_to_shwi (double_int cst)
{
  return (HOST_WIDE_INT) cst.low;
}
/* Returns value of CST as an unsigned number.  CST must satisfy
   double_int_fits_in_uhwi_p.  */

unsigned HOST_WIDE_INT
double_int_to_uhwi (double_int cst)
{
  return cst.low;
}
/* Returns A * B.  */

double_int
double_int_mul (double_int a, double_int b)
{
  double_int ret;
  mul_double (a.low, a.high, b.low, b.high, &ret.low, &ret.high);
  return ret;
}
/* Returns A + B.  */

double_int
double_int_add (double_int a, double_int b)
{
  double_int ret;
  add_double (a.low, a.high, b.low, b.high, &ret.low, &ret.high);
  return ret;
}
/* Returns -A.  */

double_int
double_int_neg (double_int a)
{
  double_int ret;
  neg_double (a.low, a.high, &ret.low, &ret.high);
  return ret;
}
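
/* Usage sketch (illustrative only): the helpers compose in the obvious way.
   For instance, a * b - c reduced to a 32-bit unsigned precision could be
   written as

     double_int r = double_int_add (double_int_mul (a, b), double_int_neg (c));
     r = double_int_ext (r, 32, true);

   where a, b and c stand for arbitrary double_int values.  */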
/* Returns A / B (computed as unsigned depending on UNS, and rounded as
   specified by CODE).  CODE is enum tree_code in fact, but double_int.h
   must be included before tree.h.  The remainder after the division is
   stored to *MOD.  */

double_int
double_int_divmod (double_int a, double_int b, bool uns, unsigned code,
                   double_int *mod)
{
  double_int ret;

  div_and_round_double (code, uns, a.low, a.high,
                        b.low, b.high, &ret.low, &ret.high,
                        &mod->low, &mod->high);
  return ret;
}
/* The same as double_int_divmod with UNS = false.  */

double_int
double_int_sdivmod (double_int a, double_int b, unsigned code, double_int *mod)
{
  return double_int_divmod (a, b, false, code, mod);
}
/* The same as double_int_divmod with UNS = true.  */

double_int
double_int_udivmod (double_int a, double_int b, unsigned code, double_int *mod)
{
  return double_int_divmod (a, b, true, code, mod);
}
/* Returns A / B (computed as unsigned depending on UNS, and rounded as
   specified by CODE).  CODE is enum tree_code in fact, but double_int.h
   must be included before tree.h.  */

double_int
double_int_div (double_int a, double_int b, bool uns, unsigned code)
{
  double_int mod;

  return double_int_divmod (a, b, uns, code, &mod);
}
/* The same as double_int_div with UNS = false.  */

double_int
double_int_sdiv (double_int a, double_int b, unsigned code)
{
  return double_int_div (a, b, false, code);
}
/* The same as double_int_div with UNS = true.  */

double_int
double_int_udiv (double_int a, double_int b, unsigned code)
{
  return double_int_div (a, b, true, code);
}
/* Returns A % B (computed as unsigned depending on UNS, and rounded as
   specified by CODE).  CODE is enum tree_code in fact, but double_int.h
   must be included before tree.h.  */

double_int
double_int_mod (double_int a, double_int b, bool uns, unsigned code)
{
  double_int mod;

  double_int_divmod (a, b, uns, code, &mod);
  return mod;
}
/* The same as double_int_mod with UNS = false.  */

double_int
double_int_smod (double_int a, double_int b, unsigned code)
{
  return double_int_mod (a, b, false, code);
}
/* The same as double_int_mod with UNS = true.  */

double_int
double_int_umod (double_int a, double_int b, unsigned code)
{
  return double_int_mod (a, b, true, code);
}
/* Shift A left by COUNT places keeping only PREC bits of result.  Shift
   right if COUNT is negative.  ARITH true specifies arithmetic shifting;
   otherwise use logical shift.  */

double_int
double_int_lshift (double_int a, HOST_WIDE_INT count, unsigned int prec, bool arith)
{
  double_int ret;
  lshift_double (a.low, a.high, count, prec, &ret.low, &ret.high, arith);
  return ret;
}
/* Shift A right by COUNT places keeping only PREC bits of result.  Shift
   left if COUNT is negative.  ARITH true specifies arithmetic shifting;
   otherwise use logical shift.  */

double_int
double_int_rshift (double_int a, HOST_WIDE_INT count, unsigned int prec, bool arith)
{
  double_int ret;
  rshift_double (a.low, a.high, count, prec, &ret.low, &ret.high, arith);
  return ret;
}
/* Constructs a tree in type TYPE with the value given by CST.  The
   signedness of CST is assumed to be the same as the signedness of TYPE.  */

tree
double_int_to_tree (tree type, double_int cst)
{
  cst = double_int_ext (cst, TYPE_PRECISION (type), TYPE_UNSIGNED (type));

  return build_int_cst_wide (type, cst.low, cst.high);
}
/* Returns true if CST fits into the range of TYPE.  The signedness of CST
   is assumed to be the same as the signedness of TYPE.  */

bool
double_int_fits_to_tree_p (const_tree type, double_int cst)
{
  double_int ext = double_int_ext (cst,
                                   TYPE_PRECISION (type),
                                   TYPE_UNSIGNED (type));

  return double_int_equal_p (cst, ext);
}
/* Returns -1 if A < B, 0 if A == B and 1 if A > B.  Signedness of the
   comparison is given by UNS.  */

int
double_int_cmp (double_int a, double_int b, bool uns)
{
  if (uns)
    return double_int_ucmp (a, b);
  else
    return double_int_scmp (a, b);
}
/* Compares two unsigned values A and B.  Returns -1 if A < B, 0 if A == B,
   and 1 if A > B.  */

int
double_int_ucmp (double_int a, double_int b)
{
  if ((unsigned HOST_WIDE_INT) a.high < (unsigned HOST_WIDE_INT) b.high)
    return -1;
  if ((unsigned HOST_WIDE_INT) a.high > (unsigned HOST_WIDE_INT) b.high)
    return 1;
  if (a.low < b.low)
    return -1;
  if (a.low > b.low)
    return 1;

  return 0;
}
/* Compares two signed values A and B.  Returns -1 if A < B, 0 if A == B,
   and 1 if A > B.  */

int
double_int_scmp (double_int a, double_int b)
{
  if (a.high < b.high)
    return -1;
  if (a.high > b.high)
    return 1;
  if (a.low < b.low)
    return -1;
  if (a.low > b.low)
    return 1;

  return 0;
}
/* Splits the last digit of *CST (taken as unsigned) in BASE and returns it.  */

static unsigned
double_int_split_digit (double_int *cst, unsigned base)
{
  unsigned HOST_WIDE_INT resl, reml;
  HOST_WIDE_INT resh, remh;

  div_and_round_double (FLOOR_DIV_EXPR, true, cst->low, cst->high, base, 0,
                        &resl, &resh, &reml, &remh);
  cst->high = resh;
  cst->low = resl;

  return reml;
}
/* Dumps CST to FILE.  If UNS is true, CST is considered to be unsigned,
   otherwise it is signed.  */

void
dump_double_int (FILE *file, double_int cst, bool uns)
{
  unsigned digits[100], n;
  int i;

  if (double_int_zero_p (cst))
    {
      fprintf (file, "0");
      return;
    }

  if (!uns && double_int_negative_p (cst))
    {
      fprintf (file, "-");
      cst = double_int_neg (cst);
    }

  for (n = 0; !double_int_zero_p (cst); n++)
    digits[n] = double_int_split_digit (&cst, 10);
  for (i = n - 1; i >= 0; i--)
    fprintf (file, "%u", digits[i]);
}
/* Sets RESULT to VAL, interpreted as unsigned if UNS is true and as signed
   otherwise.  */

void
mpz_set_double_int (mpz_t result, double_int val, bool uns)
{
  bool negate = false;
  unsigned HOST_WIDE_INT vp[2];

  if (!uns && double_int_negative_p (val))
    {
      negate = true;
      val = double_int_neg (val);
    }

  vp[0] = val.low;
  vp[1] = (unsigned HOST_WIDE_INT) val.high;
  mpz_import (result, 2, -1, sizeof (HOST_WIDE_INT), 0, 0, vp);

  if (negate)
    mpz_neg (result, result);
}
/* Returns VAL converted to TYPE.  If WRAP is true, then out-of-range
   values of VAL will be wrapped; otherwise, they will be set to the
   appropriate minimum or maximum TYPE bound.  */

double_int
mpz_get_double_int (const_tree type, mpz_t val, bool wrap)
{
  unsigned HOST_WIDE_INT *vp;
  size_t count, numb;
  double_int res;

  if (!wrap)
    {
      mpz_t min, max;

      mpz_init (min);
      mpz_init (max);
      get_type_static_bounds (type, min, max);

      if (mpz_cmp (val, min) < 0)
        mpz_set (val, min);
      else if (mpz_cmp (val, max) > 0)
        mpz_set (val, max);

      mpz_clear (min);
      mpz_clear (max);
    }

  /* Determine the number of unsigned HOST_WIDE_INT that are required
     for representing the value.  The code to calculate count is
     extracted from the GMP manual, section "Integer Import and Export":
     http://gmplib.org/manual/Integer-Import-and-Export.html  */
  numb = 8 * sizeof (HOST_WIDE_INT);
  count = (mpz_sizeinbase (val, 2) + numb - 1) / numb;
  if (count < 2)
    count = 2;
  vp = (unsigned HOST_WIDE_INT *) alloca (count * sizeof (HOST_WIDE_INT));

  vp[0] = 0;
  vp[1] = 0;
  mpz_export (vp, &count, -1, sizeof (HOST_WIDE_INT), 0, 0, val);

  gcc_assert (wrap || count <= 2);

  res.low = vp[0];
  res.high = (HOST_WIDE_INT) vp[1];

  res = double_int_ext (res, TYPE_PRECISION (type), TYPE_UNSIGNED (type));
  if (mpz_sgn (val) < 0)
    res = double_int_neg (res);

  return res;
}