/* Operations with long integers.
   Copyright (C) 2006, 2007, 2009, 2010 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
/* We know that A1 + B1 = SUM1, using 2's complement arithmetic and ignoring
   overflow.  Suppose A, B and SUM have the same respective signs as A1, B1,
   and SUM1.  Then this yields nonzero if overflow occurred during the
   addition.

   Overflow occurs if A and B have the same sign, but A and SUM differ in
   sign.  Use `^' to test whether signs differ, and `< 0' to isolate the
   sign.  */

#define OVERFLOW_SUM_SIGN(a, b, sum) ((~((a) ^ (b)) & ((a) ^ (sum))) < 0)
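
/* A worked example of the test above, a sketch using 8-bit values for
   brevity (the macro itself operates on HOST_WIDE_INT):

     a = 0x64 (100), b = 0x64 (100), sum = 0xC8 (-56 after wraparound)
     ~(a ^ b)    = ~0x00 = 0xFF        same sign on input
     a ^ sum     = 0x64 ^ 0xC8 = 0xAC  result sign differs from A
     0xFF & 0xAC = 0xAC < 0            so overflow is reported.

   With operands of mixed sign (say a = 100, b = -50) the ~(a ^ b) term
   has a clear sign bit, so the macro never reports overflow, which is
   correct because such an addition cannot overflow.  */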
/* To do constant folding on INTEGER_CST nodes requires two-word arithmetic.
   We do that by representing the two-word integer in 4 words, with only
   HOST_BITS_PER_WIDE_INT / 2 bits stored in each word, as a positive
   number.  The value of the word is LOWPART + HIGHPART * BASE.  */

#define LOWPART(x) \
  ((x) & (((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)) - 1))
#define HIGHPART(x) \
  ((unsigned HOST_WIDE_INT) (x) >> HOST_BITS_PER_WIDE_INT / 2)
#define BASE ((unsigned HOST_WIDE_INT) 1 << HOST_BITS_PER_WIDE_INT / 2)
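
/* For instance, assuming a 64-bit HOST_WIDE_INT (illustration only),
   BASE is 2^32 and x = 0x123456789abcdef0 splits as

     LOWPART (x)  = 0x9abcdef0
     HIGHPART (x) = 0x12345678

   so x == LOWPART (x) + HIGHPART (x) * BASE.  Storing only half a word
   per array element leaves headroom for the carries produced while
   multiplying two half-words.  */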
/* Unpack a two-word integer into 4 words.
   LOW and HI are the integer, as two `HOST_WIDE_INT' pieces.
   WORDS points to the array of HOST_WIDE_INTs.  */

static void
encode (HOST_WIDE_INT *words, unsigned HOST_WIDE_INT low, HOST_WIDE_INT hi)
{
  words[0] = LOWPART (low);
  words[1] = HIGHPART (low);
  words[2] = LOWPART (hi);
  words[3] = HIGHPART (hi);
}
/* Pack an array of 4 words into a two-word integer.
   WORDS points to the array of words.
   The integer is stored into *LOW and *HI as two `HOST_WIDE_INT' pieces.  */

static void
decode (HOST_WIDE_INT *words, unsigned HOST_WIDE_INT *low,
        HOST_WIDE_INT *hi)
{
  *low = words[0] + words[1] * BASE;
  *hi = words[2] + words[3] * BASE;
}
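
/* A round-trip sketch with hypothetical values, assuming a 64-bit
   HOST_WIDE_INT:

     HOST_WIDE_INT w[4];
     unsigned HOST_WIDE_INT lo;
     HOST_WIDE_INT hi;

     encode (w, 0xffffffff00000001, 7);
     decode (w, &lo, &hi);

   After encode, w is { 0x1, 0xffffffff, 0x7, 0x0 }; decode then
   recovers lo == 0xffffffff00000001 and hi == 7.  The half-words are
   stored as nonnegative values even when the double-word integer
   itself is negative.  */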
/* Force the double-word integer L1, H1 to be within the range of the
   integer type TYPE.  Stores the properly truncated and sign-extended
   double-word integer in *LV, *HV.  Returns true if the operation
   overflows, that is, argument and result are different.  */

int
fit_double_type (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
                 unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv, const_tree type)
{
  unsigned HOST_WIDE_INT low0 = l1;
  HOST_WIDE_INT high0 = h1;
  unsigned int prec = TYPE_PRECISION (type);
  int sign_extended_type;

  /* Size types *are* sign extended.  */
  sign_extended_type = (!TYPE_UNSIGNED (type)
                        || (TREE_CODE (type) == INTEGER_TYPE
                            && TYPE_IS_SIZETYPE (type)));

  /* First clear all bits that are beyond the type's precision.  */
  if (prec >= 2 * HOST_BITS_PER_WIDE_INT)
    ;
  else if (prec > HOST_BITS_PER_WIDE_INT)
    h1 &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
  else
    {
      h1 = 0;
      if (prec < HOST_BITS_PER_WIDE_INT)
        l1 &= ~((HOST_WIDE_INT) (-1) << prec);
    }

  /* Then do sign extension if necessary.  */
  if (!sign_extended_type)
    /* No sign extension */;
  else if (prec >= 2 * HOST_BITS_PER_WIDE_INT)
    /* Correct width already.  */;
  else if (prec > HOST_BITS_PER_WIDE_INT)
    {
      /* Sign extend top half? */
      if (h1 & ((unsigned HOST_WIDE_INT)1
                << (prec - HOST_BITS_PER_WIDE_INT - 1)))
        h1 |= (HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT);
    }
  else if (prec == HOST_BITS_PER_WIDE_INT)
    {
      if ((HOST_WIDE_INT)l1 < 0)
        h1 = -1;
    }
  else
    {
      /* Sign extend bottom half? */
      if (l1 & ((unsigned HOST_WIDE_INT)1 << (prec - 1)))
        {
          h1 = -1;
          l1 |= (HOST_WIDE_INT)(-1) << prec;
        }
    }

  *lv = l1;
  *hv = h1;

  /* If the value didn't fit, signal overflow.  */
  return l1 != low0 || h1 != high0;
}
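
/* For example (a sketch with TYPE a signed 8-bit integer type, so
   PREC == 8, and a 64-bit HOST_WIDE_INT assumed):

     l1 = 0xff, h1 = 0
       truncation keeps the low 8 bits: 0xff
       bit 7 is set, so the value is sign-extended:
         *lv = 0xffffffffffffffff, *hv = -1  (the value -1)
       the pair changed, so the function returns nonzero (overflow).

   For an unsigned 8-bit type the same input is merely truncated to
   0xff, nothing changes, and no overflow is reported.  */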
/* We force the double-int HIGH:LOW to the range of the type TYPE by
   sign or zero extending it.
   OVERFLOWABLE indicates if we are interested
   in overflow of the value, when >0 we are only interested in signed
   overflow, for <0 we are interested in any overflow.  OVERFLOWED
   indicates whether overflow has already occurred.  We force
   the value to be within range of the type (by setting to 0 or 1 all
   the bits outside the type's range).  We set TREE_OVERFLOW if
        OVERFLOWED is nonzero,
        or OVERFLOWABLE is >0 and signed overflow occurs,
        or OVERFLOWABLE is <0 and any overflow occurs.
   We return a new tree node for the extended double-int.  The node
   is shared if no overflow flags are set.  */

tree
force_fit_type_double (tree type, unsigned HOST_WIDE_INT low,
                       HOST_WIDE_INT high, int overflowable,
                       bool overflowed)
{
  int sign_extended_type;
  bool overflow;

  /* Size types *are* sign extended.  */
  sign_extended_type = (!TYPE_UNSIGNED (type)
                        || (TREE_CODE (type) == INTEGER_TYPE
                            && TYPE_IS_SIZETYPE (type)));

  overflow = fit_double_type (low, high, &low, &high, type);

  /* If we need to set overflow flags, return a new unshared node.  */
  if (overflowed || overflow)
    {
      if (overflowed
          || overflowable < 0
          || (overflowable > 0 && sign_extended_type))
        {
          tree t = make_node (INTEGER_CST);
          TREE_INT_CST_LOW (t) = low;
          TREE_INT_CST_HIGH (t) = high;
          TREE_TYPE (t) = type;
          TREE_OVERFLOW (t) = 1;
          return t;
        }
    }

  /* Else build a shared node.  */
  return build_int_cst_wide (type, low, high);
}
/* Add two doubleword integers with doubleword result.
   Return nonzero if the operation overflows according to UNSIGNED_P.
   Each argument is given as two `HOST_WIDE_INT' pieces.
   One argument is L1 and H1; the other, L2 and H2.
   The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

int
add_double_with_sign (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
                      unsigned HOST_WIDE_INT l2, HOST_WIDE_INT h2,
                      unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv,
                      bool unsigned_p)
{
  unsigned HOST_WIDE_INT l;
  HOST_WIDE_INT h;

  l = l1 + l2;
  h = (HOST_WIDE_INT) ((unsigned HOST_WIDE_INT) h1
                       + (unsigned HOST_WIDE_INT) h2
                       + (l < l1));

  *lv = l;
  *hv = h;

  if (unsigned_p)
    return ((unsigned HOST_WIDE_INT) h < (unsigned HOST_WIDE_INT) h1
            || (h == h1
                && l < l1));
  else
    return OVERFLOW_SUM_SIGN (h1, h2, h);
}
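
/* Carry propagation sketch (hypothetical values, 64-bit HOST_WIDE_INT
   assumed): adding the pairs 0:0xffffffffffffffff and 0:1 gives l == 0
   with (l < l1) == 1, so the carry is folded into the high word and
   the result is 1:0.  For UNSIGNED_P the overflow test just checks
   whether the high word of the sum wrapped around below H1.  */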
/* Negate a doubleword integer with doubleword result.
   Return nonzero if the operation overflows, assuming it's signed.
   The argument is given as two `HOST_WIDE_INT' pieces in L1 and H1.
   The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

int
neg_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
            unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv)
{
  if (l1 == 0)
    {
      *lv = 0;
      *hv = - h1;
      return (*hv & h1) < 0;
    }
  else
    {
      *lv = -l1;
      *hv = ~h1;
      return 0;
    }
}
/* Multiply two doubleword integers with doubleword result.
   Return nonzero if the operation overflows according to UNSIGNED_P.
   Each argument is given as two `HOST_WIDE_INT' pieces.
   One argument is L1 and H1; the other, L2 and H2.
   The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

int
mul_double_with_sign (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
                      unsigned HOST_WIDE_INT l2, HOST_WIDE_INT h2,
                      unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv,
                      bool unsigned_p)
{
  HOST_WIDE_INT arg1[4];
  HOST_WIDE_INT arg2[4];
  HOST_WIDE_INT prod[4 * 2];
  unsigned HOST_WIDE_INT carry;
  int i, j, k;
  unsigned HOST_WIDE_INT toplow, neglow;
  HOST_WIDE_INT tophigh, neghigh;

  encode (arg1, l1, h1);
  encode (arg2, l2, h2);

  memset (prod, 0, sizeof prod);

  for (i = 0; i < 4; i++)
    {
      carry = 0;
      for (j = 0; j < 4; j++)
        {
          k = i + j;
          /* This product is <= 0xFFFE0001, the sum <= 0xFFFF0000.  */
          carry += arg1[i] * arg2[j];
          /* Since prod[p] < 0xFFFF, this sum <= 0xFFFFFFFF.  */
          carry += prod[k];
          prod[k] = LOWPART (carry);
          carry = HIGHPART (carry);
        }
      prod[i + 4] = carry;
    }

  decode (prod, lv, hv);
  decode (prod + 4, &toplow, &tophigh);

  /* Unsigned overflow is immediate.  */
  if (unsigned_p)
    return (toplow | tophigh) != 0;

  /* Check for signed overflow by calculating the signed representation of the
     top half of the result; it should agree with the low half's sign bit.  */
  if (h1 < 0)
    {
      neg_double (l2, h2, &neglow, &neghigh);
      add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
    }
  if (h2 < 0)
    {
      neg_double (l1, h1, &neglow, &neghigh);
      add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
    }
  return (*hv < 0 ? ~(toplow & tophigh) : toplow | tophigh) != 0;
}
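
/* Sketch of the unsigned overflow test above (64-bit HOST_WIDE_INT
   assumed): multiplying 2^96 (l1 = 0, h1 = 2^32) by 2^32 (l2 = 2^32,
   h2 = 0) yields 2^128; the low 128 bits of the product are zero and
   the discarded top half is 1, so (toplow | tophigh) != 0 and overflow
   is reported.  In the signed case the discarded top half must instead
   match the sign of *HV: all zero bits for a nonnegative result, all
   one bits for a negative one.  */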
/* Shift the doubleword integer in L1, H1 left by COUNT places
   keeping only PREC bits of result.
   Shift right if COUNT is negative.
   ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

void
lshift_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
               HOST_WIDE_INT count, unsigned int prec,
               unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv, bool arith)
{
  unsigned HOST_WIDE_INT signmask;

  if (count < 0)
    {
      rshift_double (l1, h1, -count, prec, lv, hv, arith);
      return;
    }

  if (SHIFT_COUNT_TRUNCATED)
    count %= prec;

  if (count >= 2 * HOST_BITS_PER_WIDE_INT)
    {
      /* Shifting by the host word size is undefined according to the
         ANSI standard, so we must handle this as a special case.  */
      *hv = 0;
      *lv = 0;
    }
  else if (count >= HOST_BITS_PER_WIDE_INT)
    {
      *hv = l1 << (count - HOST_BITS_PER_WIDE_INT);
      *lv = 0;
    }
  else
    {
      *hv = (((unsigned HOST_WIDE_INT) h1 << count)
             | (l1 >> (HOST_BITS_PER_WIDE_INT - count - 1) >> 1));
      *lv = l1 << count;
    }

  /* Sign extend all bits that are beyond the precision.  */

  signmask = -((prec > HOST_BITS_PER_WIDE_INT
                ? ((unsigned HOST_WIDE_INT) *hv
                   >> (prec - HOST_BITS_PER_WIDE_INT - 1))
                : (*lv >> (prec - 1))) & 1);

  if (prec >= 2 * HOST_BITS_PER_WIDE_INT)
    ;
  else if (prec >= HOST_BITS_PER_WIDE_INT)
    {
      *hv &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
      *hv |= signmask << (prec - HOST_BITS_PER_WIDE_INT);
    }
  else
    {
      *hv = signmask;
      *lv &= ~((unsigned HOST_WIDE_INT) (-1) << prec);
      *lv |= signmask << prec;
    }
}
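
/* A worked sketch (PREC == 8, COUNT == 1, 64-bit HOST_WIDE_INT
   assumed): shifting l1 = 0x40, h1 = 0 left by one gives 0x80 within
   the 8-bit precision; bit 7 of the result is set, so the bits above
   PREC are filled with copies of it and the function stores
   *lv = 0xffffffffffffff80, *hv = -1, i.e. the value -128.  */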
/* Shift the doubleword integer in L1, H1 right by COUNT places
   keeping only PREC bits of result.  Shift left if COUNT is negative.
   ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

void
rshift_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
               HOST_WIDE_INT count, unsigned int prec,
               unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv,
               bool arith)
{
  unsigned HOST_WIDE_INT signmask;

  if (count < 0)
    {
      lshift_double (l1, h1, -count, prec, lv, hv, arith);
      return;
    }

  signmask = (arith
              ? -((unsigned HOST_WIDE_INT) h1 >> (HOST_BITS_PER_WIDE_INT - 1))
              : 0);

  if (SHIFT_COUNT_TRUNCATED)
    count %= prec;

  if (count >= 2 * HOST_BITS_PER_WIDE_INT)
    {
      /* Shifting by the host word size is undefined according to the
         ANSI standard, so we must handle this as a special case.  */
      *hv = 0;
      *lv = 0;
    }
  else if (count >= HOST_BITS_PER_WIDE_INT)
    {
      *hv = 0;
      *lv = (unsigned HOST_WIDE_INT) h1 >> (count - HOST_BITS_PER_WIDE_INT);
    }
  else
    {
      *hv = (unsigned HOST_WIDE_INT) h1 >> count;
      *lv = ((l1 >> count)
             | ((unsigned HOST_WIDE_INT) h1
                << (HOST_BITS_PER_WIDE_INT - count - 1) << 1));
    }

  /* Zero / sign extend all bits that are beyond the precision.  */

  if (count >= (HOST_WIDE_INT)prec)
    {
      *hv = signmask;
      *lv = signmask;
    }
  else if ((prec - count) >= 2 * HOST_BITS_PER_WIDE_INT)
    ;
  else if ((prec - count) >= HOST_BITS_PER_WIDE_INT)
    {
      *hv &= ~((HOST_WIDE_INT) (-1) << (prec - count - HOST_BITS_PER_WIDE_INT));
      *hv |= signmask << (prec - count - HOST_BITS_PER_WIDE_INT);
    }
  else
    {
      *hv = signmask;
      *lv &= ~((unsigned HOST_WIDE_INT) (-1) << (prec - count));
      *lv |= signmask << (prec - count);
    }
}
/* Rotate the doubleword integer in L1, H1 left by COUNT places
   keeping only PREC bits of result.
   Rotate right if COUNT is negative.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

void
lrotate_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
                HOST_WIDE_INT count, unsigned int prec,
                unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv)
{
  unsigned HOST_WIDE_INT s1l, s2l;
  HOST_WIDE_INT s1h, s2h;

  count %= prec;
  if (count < 0)
    count += prec;

  lshift_double (l1, h1, count, prec, &s1l, &s1h, 0);
  rshift_double (l1, h1, prec - count, prec, &s2l, &s2h, 0);
  *lv = s1l | s2l;
  *hv = s1h | s2h;
}
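
/* The rotation is simply the OR of the two complementary shifts.  For
   example (a sketch with PREC == 8): rotating 0x81 left by one gives
   (0x81 << 1 | 0x81 >> 7) == 0x02 | 0x01 == 0x03 within the 8-bit
   precision.  */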
/* Rotate the doubleword integer in L1, H1 right by COUNT places
   keeping only PREC bits of result.  Rotate left if COUNT is negative.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

void
rrotate_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
                HOST_WIDE_INT count, unsigned int prec,
                unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv)
{
  unsigned HOST_WIDE_INT s1l, s2l;
  HOST_WIDE_INT s1h, s2h;

  count %= prec;
  if (count < 0)
    count += prec;

  rshift_double (l1, h1, count, prec, &s1l, &s1h, 0);
  lshift_double (l1, h1, prec - count, prec, &s2l, &s2h, 0);
  *lv = s1l | s2l;
  *hv = s1h | s2h;
}
/* Divide doubleword integer LNUM, HNUM by doubleword integer LDEN, HDEN
   for a quotient (stored in *LQUO, *HQUO) and remainder (in *LREM, *HREM).
   CODE is a tree code for a kind of division, one of
   TRUNC_DIV_EXPR, FLOOR_DIV_EXPR, CEIL_DIV_EXPR, ROUND_DIV_EXPR
   or EXACT_DIV_EXPR.
   It controls how the quotient is rounded to an integer.
   Return nonzero if the operation overflows.
   UNS nonzero says do unsigned division.  */

int
div_and_round_double (unsigned code, int uns,
                      /* num == numerator == dividend */
                      unsigned HOST_WIDE_INT lnum_orig,
                      HOST_WIDE_INT hnum_orig,
                      /* den == denominator == divisor */
                      unsigned HOST_WIDE_INT lden_orig,
                      HOST_WIDE_INT hden_orig,
                      unsigned HOST_WIDE_INT *lquo,
                      HOST_WIDE_INT *hquo, unsigned HOST_WIDE_INT *lrem,
                      HOST_WIDE_INT *hrem)
{
  int quo_neg = 0;
  HOST_WIDE_INT num[4 + 1];     /* extra element for scaling.  */
  HOST_WIDE_INT den[4], quo[4];
  int i, j;
  unsigned HOST_WIDE_INT work;
  unsigned HOST_WIDE_INT carry = 0;
  unsigned HOST_WIDE_INT lnum = lnum_orig;
  HOST_WIDE_INT hnum = hnum_orig;
  unsigned HOST_WIDE_INT lden = lden_orig;
  HOST_WIDE_INT hden = hden_orig;
  int overflow = 0;

  if (hden == 0 && lden == 0)
    overflow = 1, lden = 1;

  /* Calculate quotient sign and convert operands to unsigned.  */
  if (!uns)
    {
      if (hnum < 0)
        {
          quo_neg = ~ quo_neg;
          /* (minimum integer) / (-1) is the only overflow case.  */
          if (neg_double (lnum, hnum, &lnum, &hnum)
              && ((HOST_WIDE_INT) lden & hden) == -1)
            overflow = 1;
        }
      if (hden < 0)
        {
          quo_neg = ~ quo_neg;
          neg_double (lden, hden, &lden, &hden);
        }
    }

  if (hnum == 0 && hden == 0)
    {                           /* single precision */
      *hquo = *hrem = 0;
      /* This unsigned division rounds toward zero.  */
      *lquo = lnum / lden;
      goto finish_up;
    }

  if (hnum == 0)
    {                           /* trivial case: dividend < divisor */
      /* hden != 0 already checked.  */
      *hquo = *lquo = 0;
      *hrem = hnum;
      *lrem = lnum;
      goto finish_up;
    }

  memset (quo, 0, sizeof quo);

  memset (num, 0, sizeof num);  /* to zero 9th element */
  memset (den, 0, sizeof den);

  encode (num, lnum, hnum);
  encode (den, lden, hden);

  /* Special code for when the divisor < BASE.  */
  if (hden == 0 && lden < (unsigned HOST_WIDE_INT) BASE)
    {
      /* hnum != 0 already checked.  */
      for (i = 4 - 1; i >= 0; i--)
        {
          work = num[i] + carry * BASE;
          quo[i] = work / lden;
          carry = work % lden;
        }
    }
  else
    {
      /* Full double precision division,
         with thanks to Don Knuth's "Seminumerical Algorithms".  */
      int num_hi_sig, den_hi_sig;
      unsigned HOST_WIDE_INT quo_est, scale;

      /* Find the highest nonzero divisor digit.  */
      for (i = 4 - 1;; i--)
        if (den[i] != 0)
          {
            den_hi_sig = i;
            break;
          }

      /* Insure that the first digit of the divisor is at least BASE/2.
         This is required by the quotient digit estimation algorithm.  */

      scale = BASE / (den[den_hi_sig] + 1);
      if (scale > 1)
        {                       /* scale divisor and dividend */
          carry = 0;
          for (i = 0; i <= 4 - 1; i++)
            {
              work = (num[i] * scale) + carry;
              num[i] = LOWPART (work);
              carry = HIGHPART (work);
            }

          num[4] = carry;
          carry = 0;
          for (i = 0; i <= 4 - 1; i++)
            {
              work = (den[i] * scale) + carry;
              den[i] = LOWPART (work);
              carry = HIGHPART (work);
              if (den[i] != 0) den_hi_sig = i;
            }
        }

      num_hi_sig = 4;

      /* Main loop */
      for (i = num_hi_sig - den_hi_sig - 1; i >= 0; i--)
        {
          /* Guess the next quotient digit, quo_est, by dividing the first
             two remaining dividend digits by the high order quotient digit.
             quo_est is never low and is at most 2 high.  */
          unsigned HOST_WIDE_INT tmp;

          num_hi_sig = i + den_hi_sig + 1;
          work = num[num_hi_sig] * BASE + num[num_hi_sig - 1];
          if (num[num_hi_sig] != den[den_hi_sig])
            quo_est = work / den[den_hi_sig];
          else
            quo_est = BASE - 1;

          /* Refine quo_est so it's usually correct, and at most one high.  */
          tmp = work - quo_est * den[den_hi_sig];
          if (tmp < BASE
              && (den[den_hi_sig - 1] * quo_est
                  > (tmp * BASE + num[num_hi_sig - 2])))
            quo_est--;

          /* Try QUO_EST as the quotient digit, by multiplying the
             divisor by QUO_EST and subtracting from the remaining dividend.
             Keep in mind that QUO_EST is the I - 1st digit.  */

          carry = 0;
          for (j = 0; j <= den_hi_sig; j++)
            {
              work = quo_est * den[j] + carry;
              carry = HIGHPART (work);
              work = num[i + j] - LOWPART (work);
              num[i + j] = LOWPART (work);
              carry += HIGHPART (work) != 0;
            }

          /* If quo_est was high by one, then num[i] went negative and
             we need to correct things.  */
          if (num[num_hi_sig] < (HOST_WIDE_INT) carry)
            {
              quo_est--;
              carry = 0;        /* add divisor back in */
              for (j = 0; j <= den_hi_sig; j++)
                {
                  work = num[i + j] + den[j] + carry;
                  carry = HIGHPART (work);
                  num[i + j] = LOWPART (work);
                }

              num [num_hi_sig] += carry;
            }

          /* Store the quotient digit.  */
          quo[i] = quo_est;
        }
    }

  decode (quo, lquo, hquo);

 finish_up:
  /* If result is negative, make it so.  */
  if (quo_neg)
    neg_double (*lquo, *hquo, lquo, hquo);

  /* Compute trial remainder:  rem = num - (quo * den)  */
  mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
  neg_double (*lrem, *hrem, lrem, hrem);
  add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);

  switch (code)
    {
    case TRUNC_DIV_EXPR:
    case TRUNC_MOD_EXPR:        /* round toward zero */
    case EXACT_DIV_EXPR:        /* for this one, it shouldn't matter */
      return overflow;

    case FLOOR_DIV_EXPR:
    case FLOOR_MOD_EXPR:        /* round toward negative infinity */
      if (quo_neg && (*lrem != 0 || *hrem != 0))   /* ratio < 0 && rem != 0 */
        {
          /* quo = quo - 1;  */
          add_double (*lquo, *hquo, (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1,
                      lquo, hquo);
        }
      else
        return overflow;
      break;

    case CEIL_DIV_EXPR:
    case CEIL_MOD_EXPR:         /* round toward positive infinity */
      if (!quo_neg && (*lrem != 0 || *hrem != 0))  /* ratio > 0 && rem != 0 */
        {
          add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
                      lquo, hquo);
        }
      else
        return overflow;
      break;

    case ROUND_DIV_EXPR:
    case ROUND_MOD_EXPR:        /* round to closest integer */
      {
        unsigned HOST_WIDE_INT labs_rem = *lrem;
        HOST_WIDE_INT habs_rem = *hrem;
        unsigned HOST_WIDE_INT labs_den = lden, ltwice;
        HOST_WIDE_INT habs_den = hden, htwice;

        /* Get absolute values.  */
        if (*hrem < 0)
          neg_double (*lrem, *hrem, &labs_rem, &habs_rem);
        if (hden < 0)
          neg_double (lden, hden, &labs_den, &habs_den);

        /* If (2 * abs (lrem) >= abs (lden)), adjust the quotient.  */
        mul_double ((HOST_WIDE_INT) 2, (HOST_WIDE_INT) 0,
                    labs_rem, habs_rem, &ltwice, &htwice);

        if (((unsigned HOST_WIDE_INT) habs_den
             < (unsigned HOST_WIDE_INT) htwice)
            || (((unsigned HOST_WIDE_INT) habs_den
                 == (unsigned HOST_WIDE_INT) htwice)
                && (labs_den <= ltwice)))
          {
            if (*hquo < 0)
              /* quo = quo - 1;  */
              add_double (*lquo, *hquo,
                          (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1, lquo, hquo);
            else
              /* quo = quo + 1; */
              add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
                          lquo, hquo);
          }
        else
          return overflow;
      }
      break;

    default:
      gcc_unreachable ();
    }

  /* Compute true remainder:  rem = num - (quo * den)  */
  mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
  neg_double (*lrem, *hrem, lrem, hrem);
  add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);
  return overflow;
}
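
/* Rounding-mode sketch for the signed division -7 / 2 (UNS == 0),
   illustrative values only:

     TRUNC_DIV_EXPR  quotient -3, remainder -1  (round toward zero)
     FLOOR_DIV_EXPR  quotient -4, remainder  1  (round toward -infinity)
     CEIL_DIV_EXPR   quotient -3, remainder -1  (round toward +infinity)
     ROUND_DIV_EXPR  quotient -4, remainder  1  (2*|rem| >= |den|, so
                                                 the quotient is bumped)

   The *_MOD_EXPR codes perform the same adjustment; callers simply
   consume *LREM/*HREM instead of *LQUO/*HQUO.  */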
/* Returns mask for PREC bits.  */

double_int
double_int_mask (unsigned prec)
{
  unsigned HOST_WIDE_INT m;
  double_int mask;

  if (prec > HOST_BITS_PER_WIDE_INT)
    {
      prec -= HOST_BITS_PER_WIDE_INT;
      m = ((unsigned HOST_WIDE_INT) 2 << (prec - 1)) - 1;
      mask.high = (HOST_WIDE_INT) m;
      mask.low = ALL_ONES;
    }
  else
    {
      mask.high = 0;
      mask.low = ((unsigned HOST_WIDE_INT) 2 << (prec - 1)) - 1;
    }

  return mask;
}
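
/* For instance, double_int_mask (3) yields low == 0x7, high == 0, and
   double_int_mask (HOST_BITS_PER_WIDE_INT + 1) yields low == ALL_ONES,
   high == 1 (a sketch assuming a 64-bit HOST_WIDE_INT).  */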
/* Clears the bits of CST over the precision PREC.  If UNS is false, the bits
   outside of the precision are set to the sign bit (i.e., the PREC-th one),
   otherwise they are set to zero.

   This corresponds to returning the value represented by PREC lowermost bits
   of CST, with the given signedness.  */

double_int
double_int_ext (double_int cst, unsigned prec, bool uns)
{
  if (uns)
    return double_int_zext (cst, prec);
  else
    return double_int_sext (cst, prec);
}

/* The same as double_int_ext with UNS = true.  */

double_int
double_int_zext (double_int cst, unsigned prec)
{
  double_int mask = double_int_mask (prec);
  double_int r;

  r.low = cst.low & mask.low;
  r.high = cst.high & mask.high;

  return r;
}
/* The same as double_int_ext with UNS = false.  */

double_int
double_int_sext (double_int cst, unsigned prec)
{
  double_int mask = double_int_mask (prec);
  double_int r;
  unsigned HOST_WIDE_INT snum;

  if (prec <= HOST_BITS_PER_WIDE_INT)
    snum = cst.low;
  else
    {
      prec -= HOST_BITS_PER_WIDE_INT;
      snum = (unsigned HOST_WIDE_INT) cst.high;
    }
  if (((snum >> (prec - 1)) & 1) == 1)
    {
      r.low = cst.low | ~mask.low;
      r.high = cst.high | ~mask.high;
    }
  else
    {
      r.low = cst.low & mask.low;
      r.high = cst.high & mask.high;
    }

  return r;
}
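
/* For example (PREC == 8, 64-bit HOST_WIDE_INT assumed):
   sign-extending cst.low == 0x80, cst.high == 0 yields
   low == 0xffffffffffffff80, high == -1, i.e. the value -128, whereas
   double_int_zext would return low == 0x80, high == 0.  */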
/* Constructs long integer from tree CST.  The extra bits over the precision of
   the number are filled with sign bit if CST is signed, and with zeros if it
   is unsigned.  */

double_int
tree_to_double_int (const_tree cst)
{
  /* We do not need to call double_int_restrict here to ensure the semantics as
     described, as this is the default one for trees.  */
  return TREE_INT_CST (cst);
}

/* Returns true if CST fits in unsigned HOST_WIDE_INT.  */

bool
double_int_fits_in_uhwi_p (double_int cst)
{
  return cst.high == 0;
}

/* Returns true if CST fits in signed HOST_WIDE_INT.  */

bool
double_int_fits_in_shwi_p (double_int cst)
{
  if (cst.high == 0)
    return (HOST_WIDE_INT) cst.low >= 0;
  else if (cst.high == -1)
    return (HOST_WIDE_INT) cst.low < 0;
  else
    return false;
}

/* Returns true if CST fits in HOST_WIDE_INT if UNS is false, or in
   unsigned HOST_WIDE_INT if UNS is true.  */

bool
double_int_fits_in_hwi_p (double_int cst, bool uns)
{
  if (uns)
    return double_int_fits_in_uhwi_p (cst);
  else
    return double_int_fits_in_shwi_p (cst);
}

/* Returns value of CST as a signed number.  CST must satisfy
   double_int_fits_in_shwi_p.  */

HOST_WIDE_INT
double_int_to_shwi (double_int cst)
{
  return (HOST_WIDE_INT) cst.low;
}

/* Returns value of CST as an unsigned number.  CST must satisfy
   double_int_fits_in_uhwi_p.  */

unsigned HOST_WIDE_INT
double_int_to_uhwi (double_int cst)
{
  return cst.low;
}
/* Returns A * B.  */

double_int
double_int_mul (double_int a, double_int b)
{
  double_int ret;
  mul_double (a.low, a.high, b.low, b.high, &ret.low, &ret.high);
  return ret;
}

/* Returns A + B.  */

double_int
double_int_add (double_int a, double_int b)
{
  double_int ret;
  add_double (a.low, a.high, b.low, b.high, &ret.low, &ret.high);
  return ret;
}

/* Returns -A.  */

double_int
double_int_neg (double_int a)
{
  double_int ret;
  neg_double (a.low, a.high, &ret.low, &ret.high);
  return ret;
}

/* Returns A / B (computed as unsigned depending on UNS, and rounded as
   specified by CODE).  CODE is enum tree_code in fact, but double_int.h
   must be included before tree.h.  The remainder after the division is
   stored to MOD.  */

double_int
double_int_divmod (double_int a, double_int b, bool uns, unsigned code,
                   double_int *mod)
{
  double_int ret;

  div_and_round_double (code, uns, a.low, a.high,
                        b.low, b.high, &ret.low, &ret.high,
                        &mod->low, &mod->high);
  return ret;
}

/* The same as double_int_divmod with UNS = false.  */

double_int
double_int_sdivmod (double_int a, double_int b, unsigned code, double_int *mod)
{
  return double_int_divmod (a, b, false, code, mod);
}

/* The same as double_int_divmod with UNS = true.  */

double_int
double_int_udivmod (double_int a, double_int b, unsigned code, double_int *mod)
{
  return double_int_divmod (a, b, true, code, mod);
}

/* Returns A / B (computed as unsigned depending on UNS, and rounded as
   specified by CODE).  CODE is enum tree_code in fact, but double_int.h
   must be included before tree.h.  */

double_int
double_int_div (double_int a, double_int b, bool uns, unsigned code)
{
  double_int mod;

  return double_int_divmod (a, b, uns, code, &mod);
}

/* The same as double_int_div with UNS = false.  */

double_int
double_int_sdiv (double_int a, double_int b, unsigned code)
{
  return double_int_div (a, b, false, code);
}

/* The same as double_int_div with UNS = true.  */

double_int
double_int_udiv (double_int a, double_int b, unsigned code)
{
  return double_int_div (a, b, true, code);
}

/* Returns A % B (computed as unsigned depending on UNS, and rounded as
   specified by CODE).  CODE is enum tree_code in fact, but double_int.h
   must be included before tree.h.  */

double_int
double_int_mod (double_int a, double_int b, bool uns, unsigned code)
{
  double_int mod;

  double_int_divmod (a, b, uns, code, &mod);
  return mod;
}

/* The same as double_int_mod with UNS = false.  */

double_int
double_int_smod (double_int a, double_int b, unsigned code)
{
  return double_int_mod (a, b, false, code);
}

/* The same as double_int_mod with UNS = true.  */

double_int
double_int_umod (double_int a, double_int b, unsigned code)
{
  return double_int_mod (a, b, true, code);
}
/* Shift A left by COUNT places keeping only PREC bits of result.  Shift
   right if COUNT is negative.  ARITH true specifies arithmetic shifting;
   otherwise use logical shift.  */

double_int
double_int_lshift (double_int a, HOST_WIDE_INT count, unsigned int prec, bool arith)
{
  double_int ret;
  lshift_double (a.low, a.high, count, prec, &ret.low, &ret.high, arith);
  return ret;
}

/* Shift A right by COUNT places keeping only PREC bits of result.  Shift
   left if COUNT is negative.  ARITH true specifies arithmetic shifting;
   otherwise use logical shift.  */

double_int
double_int_rshift (double_int a, HOST_WIDE_INT count, unsigned int prec, bool arith)
{
  double_int ret;
  rshift_double (a.low, a.high, count, prec, &ret.low, &ret.high, arith);
  return ret;
}
/* Constructs tree in type TYPE with value given by CST.  Signedness of CST
   is assumed to be the same as the signedness of TYPE.  */

tree
double_int_to_tree (tree type, double_int cst)
{
  cst = double_int_ext (cst, TYPE_PRECISION (type), TYPE_UNSIGNED (type));

  return build_int_cst_wide (type, cst.low, cst.high);
}
/* Returns true if CST fits into range of TYPE.  Signedness of CST is assumed
   to be the same as the signedness of TYPE.  */

bool
double_int_fits_to_tree_p (const_tree type, double_int cst)
{
  double_int ext = double_int_ext (cst,
                                   TYPE_PRECISION (type),
                                   TYPE_UNSIGNED (type));

  return double_int_equal_p (cst, ext);
}
/* Returns -1 if A < B, 0 if A == B and 1 if A > B.  Signedness of the
   comparison is given by UNS.  */

int
double_int_cmp (double_int a, double_int b, bool uns)
{
  if (uns)
    return double_int_ucmp (a, b);
  else
    return double_int_scmp (a, b);
}

/* Compares two unsigned values A and B.  Returns -1 if A < B, 0 if A == B,
   and 1 if A > B.  */

int
double_int_ucmp (double_int a, double_int b)
{
  if ((unsigned HOST_WIDE_INT) a.high < (unsigned HOST_WIDE_INT) b.high)
    return -1;
  if ((unsigned HOST_WIDE_INT) a.high > (unsigned HOST_WIDE_INT) b.high)
    return 1;
  if (a.low < b.low)
    return -1;
  if (a.low > b.low)
    return 1;

  return 0;
}

/* Compares two signed values A and B.  Returns -1 if A < B, 0 if A == B,
   and 1 if A > B.  */

int
double_int_scmp (double_int a, double_int b)
{
  if (a.high < b.high)
    return -1;
  if (a.high > b.high)
    return 1;
  if (a.low < b.low)
    return -1;
  if (a.low > b.low)
    return 1;

  return 0;
}
/* Splits last digit of *CST (taken as unsigned) in BASE and returns it.  */

static unsigned
double_int_split_digit (double_int *cst, unsigned base)
{
  unsigned HOST_WIDE_INT resl, reml;
  HOST_WIDE_INT resh, remh;

  div_and_round_double (FLOOR_DIV_EXPR, true, cst->low, cst->high, base, 0,
                        &resl, &resh, &reml, &remh);
  cst->high = resh;
  cst->low = resl;

  return reml;
}
/* Dumps CST to FILE.  If UNS is true, CST is considered to be unsigned,
   otherwise it is signed.  */

void
dump_double_int (FILE *file, double_int cst, bool uns)
{
  unsigned digits[100], n;
  int i;

  if (double_int_zero_p (cst))
    {
      fprintf (file, "0");
      return;
    }

  if (!uns && double_int_negative_p (cst))
    {
      fprintf (file, "-");
      cst = double_int_neg (cst);
    }

  for (n = 0; !double_int_zero_p (cst); n++)
    digits[n] = double_int_split_digit (&cst, 10);
  for (i = n - 1; i >= 0; i--)
    fprintf (file, "%u", digits[i]);
}
/* Sets RESULT to VAL, taken unsigned if UNS is true and as signed
   otherwise.  */

void
mpz_set_double_int (mpz_t result, double_int val, bool uns)
{
  bool negate = false;
  unsigned HOST_WIDE_INT vp[2];

  if (!uns && double_int_negative_p (val))
    {
      negate = true;
      val = double_int_neg (val);
    }

  vp[0] = val.low;
  vp[1] = (unsigned HOST_WIDE_INT) val.high;
  mpz_import (result, 2, -1, sizeof (HOST_WIDE_INT), 0, 0, vp);

  if (negate)
    mpz_neg (result, result);
}
/* Returns VAL converted to TYPE.  If WRAP is true, then out-of-range
   values of VAL will be wrapped; otherwise, they will be set to the
   appropriate minimum or maximum TYPE bound.  */

double_int
mpz_get_double_int (const_tree type, mpz_t val, bool wrap)
{
  unsigned HOST_WIDE_INT *vp;
  size_t count, numb;
  double_int res;

  if (!wrap)
    {
      mpz_t min, max;

      mpz_init (min);
      mpz_init (max);
      get_type_static_bounds (type, min, max);

      if (mpz_cmp (val, min) < 0)
        mpz_set (val, min);
      else if (mpz_cmp (val, max) > 0)
        mpz_set (val, max);

      mpz_clear (min);
      mpz_clear (max);
    }

  /* Determine the number of unsigned HOST_WIDE_INT that are required
     for representing the value.  The code to calculate count is
     extracted from the GMP manual, section "Integer Import and Export":
     http://gmplib.org/manual/Integer-Import-and-Export.html  */
  numb = 8 * sizeof (HOST_WIDE_INT);
  count = (mpz_sizeinbase (val, 2) + numb - 1) / numb;
  if (count < 2)
    count = 2;
  vp = (unsigned HOST_WIDE_INT *) alloca (count * sizeof (HOST_WIDE_INT));

  vp[0] = 0;
  vp[1] = 0;
  mpz_export (vp, &count, -1, sizeof (HOST_WIDE_INT), 0, 0, val);

  gcc_assert (wrap || count <= 2);

  res.low = vp[0];
  res.high = (HOST_WIDE_INT) vp[1];

  res = double_int_ext (res, TYPE_PRECISION (type), TYPE_UNSIGNED (type));
  if (mpz_sgn (val) < 0)
    res = double_int_neg (res);

  return res;
}