(high_{block,function}_linenum): New variables.
[official-gcc.git] / gcc / fold-const.c
blobc17c1d70a5e4c7df59bcfa18c8f2c6c998553576
1 /* Fold a constant sub-tree into a single node for C-compiler
2 Copyright (C) 1987, 88, 92, 93, 94, 1995 Free Software Foundation, Inc.
4 This file is part of GNU CC.
6 GNU CC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2, or (at your option)
9 any later version.
11 GNU CC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GNU CC; see the file COPYING. If not, write to
18 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
20 /*@@ This file should be rewritten to use an arbitrary precision
21 @@ representation for "struct tree_int_cst" and "struct tree_real_cst".
22 @@ Perhaps the routines could also be used for bc/dc, and made a lib.
23 @@ The routines that translate from the ap rep should
24 @@ warn if precision et. al. is lost.
25 @@ This would also make life easier when this technology is used
26 @@ for cross-compilers. */
29 /* The entry points in this file are fold, size_int and size_binop.
31 fold takes a tree as argument and returns a simplified tree.
33 size_binop takes a tree code for an arithmetic operation
34 and two operands that are trees, and produces a tree for the
35 result, assuming the type comes from `sizetype'.
37 size_int takes an integer value, and creates a tree constant
38 with type from `sizetype'. */
40 #include <stdio.h>
41 #include <setjmp.h>
42 #include "config.h"
43 #include "flags.h"
44 #include "tree.h"
46 /* Handle floating overflow for `const_binop'. */
47 static jmp_buf float_error;
49 static void encode PROTO((HOST_WIDE_INT *, HOST_WIDE_INT, HOST_WIDE_INT));
50 static void decode PROTO((HOST_WIDE_INT *, HOST_WIDE_INT *, HOST_WIDE_INT *));
51 int div_and_round_double PROTO((enum tree_code, int, HOST_WIDE_INT,
52 HOST_WIDE_INT, HOST_WIDE_INT,
53 HOST_WIDE_INT, HOST_WIDE_INT *,
54 HOST_WIDE_INT *, HOST_WIDE_INT *,
55 HOST_WIDE_INT *));
56 static int split_tree PROTO((tree, enum tree_code, tree *, tree *, int *));
57 static tree const_binop PROTO((enum tree_code, tree, tree, int));
58 static tree fold_convert PROTO((tree, tree));
59 static enum tree_code invert_tree_comparison PROTO((enum tree_code));
60 static enum tree_code swap_tree_comparison PROTO((enum tree_code));
61 static int truth_value_p PROTO((enum tree_code));
62 static int operand_equal_for_comparison_p PROTO((tree, tree, tree));
63 static int twoval_comparison_p PROTO((tree, tree *, tree *, int *));
64 static tree eval_subst PROTO((tree, tree, tree, tree, tree));
65 static tree omit_one_operand PROTO((tree, tree, tree));
66 static tree pedantic_omit_one_operand PROTO((tree, tree, tree));
67 static tree distribute_bit_expr PROTO((enum tree_code, tree, tree, tree));
68 static tree make_bit_field_ref PROTO((tree, tree, int, int, int));
69 static tree optimize_bit_field_compare PROTO((enum tree_code, tree,
70 tree, tree));
71 static tree decode_field_reference PROTO((tree, int *, int *,
72 enum machine_mode *, int *,
73 int *, tree *));
74 static int all_ones_mask_p PROTO((tree, int));
75 static int simple_operand_p PROTO((tree));
76 static tree range_test PROTO((enum tree_code, tree, enum tree_code,
77 enum tree_code, tree, tree, tree));
78 static tree fold_truthop PROTO((enum tree_code, tree, tree, tree));
79 static tree strip_compound_expr PROTO((tree, tree));
81 #ifndef BRANCH_COST
/* Default relative cost of a branch; targets may override in their config.  */
82 #define BRANCH_COST 1
83 #endif
85 /* Yield nonzero if a signed left shift of A by B bits overflows. */
86 #define left_shift_overflows(a, b) ((a) != ((a) << (b)) >> (b))
88 /* Suppose A1 + B1 = SUM1, using 2's complement arithmetic ignoring overflow.
89 Suppose A, B and SUM have the same respective signs as A1, B1, and SUM1.
90 Then this yields nonzero if overflow occurred during the addition.
91 Overflow occurs if A and B have the same sign, but A and SUM differ in sign.
92 Use `^' to test whether signs differ, and `< 0' to isolate the sign. */
93 #define overflow_sum_sign(a, b, sum) ((~((a) ^ (b)) & ((a) ^ (sum))) < 0)
95 /* To do constant folding on INTEGER_CST nodes requires two-word arithmetic.
96 We do that by representing the two-word integer in 4 words, with only
97 HOST_BITS_PER_WIDE_INT/2 bits stored in each word, as a positive number. */
/* LOWPART/HIGHPART split one host word into its two half-word "digits";
   BASE is the radix of those digits (2^(HOST_BITS_PER_WIDE_INT/2)).  */
99 #define LOWPART(x) \
100 ((x) & (((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT/2)) - 1))
101 #define HIGHPART(x) \
102 ((unsigned HOST_WIDE_INT) (x) >> HOST_BITS_PER_WIDE_INT/2)
103 #define BASE ((unsigned HOST_WIDE_INT) 1 << HOST_BITS_PER_WIDE_INT/2)
105 /* Unpack a two-word integer into 4 words.
106 LOW and HI are the integer, as two `HOST_WIDE_INT' pieces.
107 WORDS points to the array of HOST_WIDE_INTs. */
109 static void
110 encode (words, low, hi)
111 HOST_WIDE_INT *words;
112 HOST_WIDE_INT low, hi;
114 words[0] = LOWPART (low);
115 words[1] = HIGHPART (low);
116 words[2] = LOWPART (hi);
117 words[3] = HIGHPART (hi);
120 /* Pack an array of 4 words into a two-word integer.
121 WORDS points to the array of words.
122 The integer is stored into *LOW and *HI as two `HOST_WIDE_INT' pieces. */
124 static void
125 decode (words, low, hi)
126 HOST_WIDE_INT *words;
127 HOST_WIDE_INT *low, *hi;
129 *low = words[0] | words[1] * BASE;
130 *hi = words[2] | words[3] * BASE;
133 /* Make the integer constant T valid for its type
134 by setting to 0 or 1 all the bits in the constant
135 that don't belong in the type.
136 Yield 1 if a signed overflow occurs, 0 otherwise.
137 If OVERFLOW is nonzero, a signed overflow has already occurred
138 in calculating T, so propagate it.
140 Make the real constant T valid for its type by calling CHECK_FLOAT_VALUE,
141 if it exists. */
/* Implicit int return: nonzero iff a signed overflow occurred or was
   propagated in via OVERFLOW.  */
144 force_fit_type (t, overflow)
145 tree t;
146 int overflow;
148 HOST_WIDE_INT low, high;
149 register int prec;
151 if (TREE_CODE (t) == REAL_CST)
/* Reals are not masked here; the target may veto the value if it
   defines CHECK_FLOAT_VALUE.  */
153 #ifdef CHECK_FLOAT_VALUE
154 CHECK_FLOAT_VALUE (TYPE_MODE (TREE_TYPE (t)), TREE_REAL_CST (t),
155 overflow);
156 #endif
157 return overflow;
160 else if (TREE_CODE (t) != INTEGER_CST)
161 return overflow;
/* Save the original words so overflow can be detected by comparison
   after masking/sign-extension.  */
163 low = TREE_INT_CST_LOW (t);
164 high = TREE_INT_CST_HIGH (t);
166 if (TREE_CODE (TREE_TYPE (t)) == POINTER_TYPE)
167 prec = POINTER_SIZE;
168 else
169 prec = TYPE_PRECISION (TREE_TYPE (t));
171 /* First clear all bits that are beyond the type's precision. */
/* prec == 2 words: nothing to clear at all.  */
173 if (prec == 2 * HOST_BITS_PER_WIDE_INT)
175 else if (prec > HOST_BITS_PER_WIDE_INT)
177 TREE_INT_CST_HIGH (t)
178 &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
180 else
182 TREE_INT_CST_HIGH (t) = 0;
183 if (prec < HOST_BITS_PER_WIDE_INT)
184 TREE_INT_CST_LOW (t) &= ~((HOST_WIDE_INT) (-1) << prec);
187 /* Unsigned types do not suffer sign extension or overflow. */
188 if (TREE_UNSIGNED (TREE_TYPE (t)))
189 return 0;
191 /* If the value's sign bit is set, extend the sign. */
192 if (prec != 2 * HOST_BITS_PER_WIDE_INT
193 && (prec > HOST_BITS_PER_WIDE_INT
194 ? (TREE_INT_CST_HIGH (t)
195 & ((HOST_WIDE_INT) 1 << (prec - HOST_BITS_PER_WIDE_INT - 1)))
196 : TREE_INT_CST_LOW (t) & ((HOST_WIDE_INT) 1 << (prec - 1))))
198 /* Value is negative:
199 set to 1 all the bits that are outside this type's precision. */
200 if (prec > HOST_BITS_PER_WIDE_INT)
202 TREE_INT_CST_HIGH (t)
203 |= ((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
205 else
207 TREE_INT_CST_HIGH (t) = -1;
208 if (prec < HOST_BITS_PER_WIDE_INT)
209 TREE_INT_CST_LOW (t) |= ((HOST_WIDE_INT) (-1) << prec);
213 /* Yield nonzero if signed overflow occurred. */
/* Any change to either word relative to the saved copy means the value
   did not fit the type.  */
214 return
215 ((overflow | (low ^ TREE_INT_CST_LOW (t)) | (high ^ TREE_INT_CST_HIGH (t)))
216 != 0);
219 /* Add two doubleword integers with doubleword result.
220 Each argument is given as two `HOST_WIDE_INT' pieces.
221 One argument is L1 and H1; the other, L2 and H2.
222 The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */
225 add_double (l1, h1, l2, h2, lv, hv)
226 HOST_WIDE_INT l1, h1, l2, h2;
227 HOST_WIDE_INT *lv, *hv;
229 HOST_WIDE_INT l, h;
231 l = l1 + l2;
232 h = h1 + h2 + ((unsigned HOST_WIDE_INT) l < l1);
234 *lv = l;
235 *hv = h;
236 return overflow_sum_sign (h1, h2, h);
239 /* Negate a doubleword integer with doubleword result.
240 Return nonzero if the operation overflows, assuming it's signed.
241 The argument is given as two `HOST_WIDE_INT' pieces in L1 and H1.
242 The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */
245 neg_double (l1, h1, lv, hv)
246 HOST_WIDE_INT l1, h1;
247 HOST_WIDE_INT *lv, *hv;
249 if (l1 == 0)
251 *lv = 0;
252 *hv = - h1;
253 return (*hv & h1) < 0;
255 else
257 *lv = - l1;
258 *hv = ~ h1;
259 return 0;
263 /* Multiply two doubleword integers with doubleword result.
264 Return nonzero if the operation overflows, assuming it's signed.
265 Each argument is given as two `HOST_WIDE_INT' pieces.
266 One argument is L1 and H1; the other, L2 and H2.
267 The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */
/* Schoolbook multiply on the half-word digit representation, then an
   unsigned->signed correction of the top half for overflow detection.  */
270 mul_double (l1, h1, l2, h2, lv, hv)
271 HOST_WIDE_INT l1, h1, l2, h2;
272 HOST_WIDE_INT *lv, *hv;
274 HOST_WIDE_INT arg1[4];
275 HOST_WIDE_INT arg2[4];
276 HOST_WIDE_INT prod[4 * 2];
277 register unsigned HOST_WIDE_INT carry;
278 register int i, j, k;
279 HOST_WIDE_INT toplow, tophigh, neglow, neghigh;
281 encode (arg1, l1, h1);
282 encode (arg2, l2, h2);
284 bzero ((char *) prod, sizeof prod);
286 for (i = 0; i < 4; i++)
288 carry = 0;
289 for (j = 0; j < 4; j++)
291 k = i + j;
292 /* This product is <= 0xFFFE0001, the sum <= 0xFFFF0000. */
293 carry += arg1[i] * arg2[j];
294 /* Since prod[p] < 0xFFFF, this sum <= 0xFFFFFFFF. */
295 carry += prod[k];
296 prod[k] = LOWPART (carry);
297 carry = HIGHPART (carry);
299 prod[i + 4] = carry;
302 decode (prod, lv, hv); /* This ignores prod[4] through prod[4*2-1] */
304 /* Check for overflow by calculating the top half of the answer in full;
305 it should agree with the low half's sign bit. */
306 decode (prod+4, &toplow, &tophigh);
/* The digits were treated as unsigned; for each negative operand,
   subtract the other operand from the top half to get the signed
   product's true top half.  */
307 if (h1 < 0)
309 neg_double (l2, h2, &neglow, &neghigh);
310 add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
312 if (h2 < 0)
314 neg_double (l1, h1, &neglow, &neghigh);
315 add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
/* No overflow iff the top half is the sign extension of the low half:
   all zeros for a non-negative result, all ones for a negative one.  */
317 return (*hv < 0 ? ~(toplow & tophigh) : toplow | tophigh) != 0;
320 /* Shift the doubleword integer in L1, H1 left by COUNT places
321 keeping only PREC bits of result.
322 Shift right if COUNT is negative.
323 ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
324 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
326 void
327 lshift_double (l1, h1, count, prec, lv, hv, arith)
328 HOST_WIDE_INT l1, h1, count;
329 int prec;
330 HOST_WIDE_INT *lv, *hv;
331 int arith;
333 if (count < 0)
335 rshift_double (l1, h1, - count, prec, lv, hv, arith);
336 return;
339 if (count >= prec)
340 count = (unsigned HOST_WIDE_INT) count & prec;
342 if (count >= HOST_BITS_PER_WIDE_INT)
344 *hv = (unsigned HOST_WIDE_INT) l1 << count - HOST_BITS_PER_WIDE_INT;
345 *lv = 0;
347 else
349 *hv = (((unsigned HOST_WIDE_INT) h1 << count)
350 | ((unsigned HOST_WIDE_INT) l1 >> HOST_BITS_PER_WIDE_INT - count - 1 >> 1));
351 *lv = (unsigned HOST_WIDE_INT) l1 << count;
355 /* Shift the doubleword integer in L1, H1 right by COUNT places
356 keeping only PREC bits of result. COUNT must be positive.
357 ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
358 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
360 void
361 rshift_double (l1, h1, count, prec, lv, hv, arith)
362 HOST_WIDE_INT l1, h1, count;
363 int prec;
364 HOST_WIDE_INT *lv, *hv;
365 int arith;
/* SIGNMASK is all ones when doing an arithmetic shift of a negative
   value (the bits shifted in), otherwise zero.  */
367 unsigned HOST_WIDE_INT signmask;
368 signmask = (arith
369 ? -((unsigned HOST_WIDE_INT) h1 >> (HOST_BITS_PER_WIDE_INT - 1))
370 : 0);
372 if (count >= prec)
373 count = (unsigned HOST_WIDE_INT) count % prec;
375 if (count >= HOST_BITS_PER_WIDE_INT)
/* Only the high word contributes; low word is H1 shifted, with sign
   bits filled in above it via the split `<< ... << 1' idiom (which
   avoids a full-width shift when the count works out to zero).  */
377 *hv = signmask;
378 *lv = ((signmask << 2 * HOST_BITS_PER_WIDE_INT - count - 1 << 1)
379 | ((unsigned HOST_WIDE_INT) h1 >> count - HOST_BITS_PER_WIDE_INT));
381 else
383 *lv = (((unsigned HOST_WIDE_INT) l1 >> count)
384 | ((unsigned HOST_WIDE_INT) h1 << HOST_BITS_PER_WIDE_INT - count - 1 << 1));
/* NOTE(review): `signmask << HOST_BITS_PER_WIDE_INT - count' shifts by
   the full word width when COUNT == 0, which is undefined behavior in
   C — confirm callers never pass COUNT == 0, or guard it.  */
385 *hv = ((signmask << HOST_BITS_PER_WIDE_INT - count)
386 | ((unsigned HOST_WIDE_INT) h1 >> count));
390 /* Rotate the doubleword integer in L1, H1 left by COUNT places
391 keeping only PREC bits of result.
392 Rotate right if COUNT is negative.
393 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
395 void
396 lrotate_double (l1, h1, count, prec, lv, hv)
397 HOST_WIDE_INT l1, h1, count;
398 int prec;
399 HOST_WIDE_INT *lv, *hv;
401 HOST_WIDE_INT arg1[4];
402 register int i;
403 register int carry;
405 if (count < 0)
407 rrotate_double (l1, h1, - count, prec, lv, hv);
408 return;
411 encode (arg1, l1, h1);
413 if (count > prec)
414 count = prec;
416 carry = arg1[4 - 1] >> 16 - 1;
417 while (count > 0)
419 for (i = 0; i < 4; i++)
421 carry += arg1[i] << 1;
422 arg1[i] = LOWPART (carry);
423 carry = HIGHPART (carry);
425 count--;
428 decode (arg1, lv, hv);
431 /* Rotate the doubleword integer in L1, H1 left by COUNT places
432 keeping only PREC bits of result. COUNT must be positive.
433 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
435 void
436 rrotate_double (l1, h1, count, prec, lv, hv)
437 HOST_WIDE_INT l1, h1, count;
438 int prec;
439 HOST_WIDE_INT *lv, *hv;
441 HOST_WIDE_INT arg1[4];
442 register int i;
443 register int carry;
445 encode (arg1, l1, h1);
447 if (count > prec)
448 count = prec;
/* Seed the carry with bit 0, which wraps around to the top on the
   first right shift.  */
450 carry = arg1[0] & 1;
451 while (count > 0)
/* Rotate right one bit, most significant digit first: prepend the
   incoming carry above each digit, shift right, and let the bit
   falling off the bottom become the next digit's carry.  */
453 for (i = 4 - 1; i >= 0; i--)
455 carry *= BASE;
456 carry += arg1[i];
457 arg1[i] = LOWPART (carry >> 1);
459 count--;
462 decode (arg1, lv, hv);
465 /* Divide doubleword integer LNUM, HNUM by doubleword integer LDEN, HDEN
466 for a quotient (stored in *LQUO, *HQUO) and remainder (in *LREM, *HREM).
467 CODE is a tree code for a kind of division, one of
468 TRUNC_DIV_EXPR, FLOOR_DIV_EXPR, CEIL_DIV_EXPR, ROUND_DIV_EXPR
469 or EXACT_DIV_EXPR
470 It controls how the quotient is rounded to a integer.
471 Return nonzero if the operation overflows.
472 UNS nonzero says do unsigned division. */
/* Implicit int return: nonzero iff the signed division overflowed
   (only (minimum integer) / -1 can).  The full-precision branch is
   Knuth's Algorithm D (TAOCP vol. 2, 4.3.1) on half-word digits.  */
475 div_and_round_double (code, uns,
476 lnum_orig, hnum_orig, lden_orig, hden_orig,
477 lquo, hquo, lrem, hrem)
478 enum tree_code code;
479 int uns;
480 HOST_WIDE_INT lnum_orig, hnum_orig; /* num == numerator == dividend */
481 HOST_WIDE_INT lden_orig, hden_orig; /* den == denominator == divisor */
482 HOST_WIDE_INT *lquo, *hquo, *lrem, *hrem;
484 int quo_neg = 0;
485 HOST_WIDE_INT num[4 + 1]; /* extra element for scaling. */
486 HOST_WIDE_INT den[4], quo[4];
487 register int i, j;
488 unsigned HOST_WIDE_INT work;
489 register int carry = 0;
490 HOST_WIDE_INT lnum = lnum_orig;
491 HOST_WIDE_INT hnum = hnum_orig;
492 HOST_WIDE_INT lden = lden_orig;
493 HOST_WIDE_INT hden = hden_orig;
494 int overflow = 0;
/* Division by zero is a caller bug.  */
496 if ((hden == 0) && (lden == 0))
497 abort ();
499 /* calculate quotient sign and convert operands to unsigned. */
500 if (!uns)
502 if (hnum < 0)
504 quo_neg = ~ quo_neg;
505 /* (minimum integer) / (-1) is the only overflow case. */
506 if (neg_double (lnum, hnum, &lnum, &hnum) && (lden & hden) == -1)
507 overflow = 1;
509 if (hden < 0)
511 quo_neg = ~ quo_neg;
512 neg_double (lden, hden, &lden, &hden);
516 if (hnum == 0 && hden == 0)
517 { /* single precision */
518 *hquo = *hrem = 0;
519 /* This unsigned division rounds toward zero. */
520 *lquo = lnum / (unsigned HOST_WIDE_INT) lden;
521 goto finish_up;
524 if (hnum == 0)
525 { /* trivial case: dividend < divisor */
526 /* hden != 0 already checked. */
527 *hquo = *lquo = 0;
528 *hrem = hnum;
529 *lrem = lnum;
530 goto finish_up;
533 bzero ((char *) quo, sizeof quo);
535 bzero ((char *) num, sizeof num); /* to zero 9th element */
536 bzero ((char *) den, sizeof den);
538 encode (num, lnum, hnum);
539 encode (den, lden, hden);
541 /* Special code for when the divisor < BASE. */
542 if (hden == 0 && lden < BASE)
/* Single-digit divisor: ordinary long division, high digit first.  */
544 /* hnum != 0 already checked. */
545 for (i = 4 - 1; i >= 0; i--)
547 work = num[i] + carry * BASE;
548 quo[i] = work / (unsigned HOST_WIDE_INT) lden;
549 carry = work % (unsigned HOST_WIDE_INT) lden;
552 else
554 /* Full double precision division,
555 with thanks to Don Knuth's "Seminumerical Algorithms". */
556 int quo_est, scale, num_hi_sig, den_hi_sig;
558 /* Find the highest non-zero divisor digit. */
559 for (i = 4 - 1; ; i--)
560 if (den[i] != 0) {
561 den_hi_sig = i;
562 break;
565 /* Insure that the first digit of the divisor is at least BASE/2.
566 This is required by the quotient digit estimation algorithm. */
568 scale = BASE / (den[den_hi_sig] + 1);
569 if (scale > 1) { /* scale divisor and dividend */
570 carry = 0;
571 for (i = 0; i <= 4 - 1; i++) {
572 work = (num[i] * scale) + carry;
573 num[i] = LOWPART (work);
574 carry = HIGHPART (work);
575 } num[4] = carry;
576 carry = 0;
577 for (i = 0; i <= 4 - 1; i++) {
578 work = (den[i] * scale) + carry;
579 den[i] = LOWPART (work);
580 carry = HIGHPART (work);
581 if (den[i] != 0) den_hi_sig = i;
585 num_hi_sig = 4;
587 /* Main loop */
588 for (i = num_hi_sig - den_hi_sig - 1; i >= 0; i--) {
589 /* guess the next quotient digit, quo_est, by dividing the first
590 two remaining dividend digits by the high order quotient digit.
591 quo_est is never low and is at most 2 high. */
592 unsigned HOST_WIDE_INT tmp;
594 num_hi_sig = i + den_hi_sig + 1;
595 work = num[num_hi_sig] * BASE + num[num_hi_sig - 1];
596 if (num[num_hi_sig] != den[den_hi_sig])
597 quo_est = work / den[den_hi_sig];
598 else
599 quo_est = BASE - 1;
601 /* refine quo_est so it's usually correct, and at most one high. */
602 tmp = work - quo_est * den[den_hi_sig];
603 if (tmp < BASE
604 && den[den_hi_sig - 1] * quo_est > (tmp * BASE + num[num_hi_sig - 2]))
605 quo_est--;
607 /* Try QUO_EST as the quotient digit, by multiplying the
608 divisor by QUO_EST and subtracting from the remaining dividend.
609 Keep in mind that QUO_EST is the I - 1st digit. */
611 carry = 0;
612 for (j = 0; j <= den_hi_sig; j++)
614 work = quo_est * den[j] + carry;
615 carry = HIGHPART (work);
616 work = num[i + j] - LOWPART (work);
617 num[i + j] = LOWPART (work);
618 carry += HIGHPART (work) != 0;
621 /* if quo_est was high by one, then num[i] went negative and
622 we need to correct things. */
624 if (num[num_hi_sig] < carry)
626 quo_est--;
627 carry = 0; /* add divisor back in */
628 for (j = 0; j <= den_hi_sig; j++)
630 work = num[i + j] + den[j] + carry;
631 carry = HIGHPART (work);
632 num[i + j] = LOWPART (work);
634 num [num_hi_sig] += carry;
637 /* store the quotient digit. */
638 quo[i] = quo_est;
642 decode (quo, lquo, hquo);
644 finish_up:
645 /* if result is negative, make it so. */
646 if (quo_neg)
647 neg_double (*lquo, *hquo, lquo, hquo);
649 /* compute trial remainder: rem = num - (quo * den) */
650 mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
651 neg_double (*lrem, *hrem, lrem, hrem);
652 add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);
/* Adjust the truncated quotient for the requested rounding mode,
   then recompute the remainder from the adjusted quotient.  */
654 switch (code)
656 case TRUNC_DIV_EXPR:
657 case TRUNC_MOD_EXPR: /* round toward zero */
658 case EXACT_DIV_EXPR: /* for this one, it shouldn't matter */
659 return overflow;
661 case FLOOR_DIV_EXPR:
662 case FLOOR_MOD_EXPR: /* round toward negative infinity */
663 if (quo_neg && (*lrem != 0 || *hrem != 0)) /* ratio < 0 && rem != 0 */
665 /* quo = quo - 1; */
666 add_double (*lquo, *hquo, (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1,
667 lquo, hquo);
669 else return overflow;
670 break;
672 case CEIL_DIV_EXPR:
673 case CEIL_MOD_EXPR: /* round toward positive infinity */
674 if (!quo_neg && (*lrem != 0 || *hrem != 0)) /* ratio > 0 && rem != 0 */
676 add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
677 lquo, hquo);
679 else return overflow;
680 break;
682 case ROUND_DIV_EXPR:
683 case ROUND_MOD_EXPR: /* round to closest integer */
685 HOST_WIDE_INT labs_rem = *lrem, habs_rem = *hrem;
686 HOST_WIDE_INT labs_den = lden, habs_den = hden, ltwice, htwice;
688 /* get absolute values */
689 if (*hrem < 0) neg_double (*lrem, *hrem, &labs_rem, &habs_rem);
690 if (hden < 0) neg_double (lden, hden, &labs_den, &habs_den);
692 /* if (2 * abs (lrem) >= abs (lden)) */
693 mul_double ((HOST_WIDE_INT) 2, (HOST_WIDE_INT) 0,
694 labs_rem, habs_rem, &ltwice, &htwice);
695 if (((unsigned HOST_WIDE_INT) habs_den
696 < (unsigned HOST_WIDE_INT) htwice)
697 || (((unsigned HOST_WIDE_INT) habs_den
698 == (unsigned HOST_WIDE_INT) htwice)
/* NOTE(review): `(HOST_WIDE_INT unsigned)' is legal C (specifier order
   is free) but inconsistent with every other cast here — consider
   normalizing to `(unsigned HOST_WIDE_INT)'.  */
699 && ((HOST_WIDE_INT unsigned) labs_den
700 < (unsigned HOST_WIDE_INT) ltwice)))
702 if (*hquo < 0)
703 /* quo = quo - 1; */
704 add_double (*lquo, *hquo,
705 (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1, lquo, hquo);
706 else
707 /* quo = quo + 1; */
708 add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
709 lquo, hquo);
711 else return overflow;
713 break;
715 default:
716 abort ();
719 /* compute true remainder: rem = num - (quo * den) */
720 mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
721 neg_double (*lrem, *hrem, lrem, hrem);
722 add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);
723 return overflow;
726 #ifndef REAL_ARITHMETIC
727 /* Effectively truncate a real value to represent the nearest possible value
728 in a narrower mode. The result is actually represented in the same data
729 type as the argument, but its value is usually different.
731 A trap may occur during the FP operations and it is the responsibility
732 of the calling function to have a handler established. */
734 REAL_VALUE_TYPE
735 real_value_truncate (mode, arg)
736 enum machine_mode mode;
737 REAL_VALUE_TYPE arg;
/* Thin wrapper: the target macro does all the work.  */
739 return REAL_VALUE_TRUNCATE (mode, arg);
742 #if TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
744 /* Check for infinity in an IEEE double precision number. */
/* Implicit int return: nonzero iff X is an IEEE double infinity
   (exponent all ones, mantissa zero).  */
747 target_isinf (x)
748 REAL_VALUE_TYPE x;
750 /* The IEEE 64-bit double format. */
751 union {
752 REAL_VALUE_TYPE d;
753 struct {
754 unsigned sign : 1;
755 unsigned exponent : 11;
756 unsigned mantissa1 : 20;
757 unsigned mantissa2;
758 } little_endian;
759 struct {
760 unsigned mantissa2;
761 unsigned mantissa1 : 20;
762 unsigned exponent : 11;
763 unsigned sign : 1;
764 } big_endian;
765 } u;
/* Probe host endianness at run time: in -1.0 the sign bit is set, so
   whichever overlay sees it reflects the host's bit-field layout.  */
767 u.d = dconstm1;
768 if (u.big_endian.sign == 1)
770 u.d = x;
771 return (u.big_endian.exponent == 2047
772 && u.big_endian.mantissa1 == 0
773 && u.big_endian.mantissa2 == 0);
775 else
777 u.d = x;
778 return (u.little_endian.exponent == 2047
779 && u.little_endian.mantissa1 == 0
780 && u.little_endian.mantissa2 == 0);
784 /* Check whether an IEEE double precision number is a NaN. */
/* Implicit int return: nonzero iff X is an IEEE double NaN
   (exponent all ones, mantissa nonzero).  */
787 target_isnan (x)
788 REAL_VALUE_TYPE x;
790 /* The IEEE 64-bit double format. */
791 union {
792 REAL_VALUE_TYPE d;
793 struct {
794 unsigned sign : 1;
795 unsigned exponent : 11;
796 unsigned mantissa1 : 20;
797 unsigned mantissa2;
798 } little_endian;
799 struct {
800 unsigned mantissa2;
801 unsigned mantissa1 : 20;
802 unsigned exponent : 11;
803 unsigned sign : 1;
804 } big_endian;
805 } u;
/* Same run-time endianness probe as target_isinf: -1.0's sign bit
   identifies which overlay matches the host.  */
807 u.d = dconstm1;
808 if (u.big_endian.sign == 1)
810 u.d = x;
811 return (u.big_endian.exponent == 2047
812 && (u.big_endian.mantissa1 != 0
813 || u.big_endian.mantissa2 != 0));
815 else
817 u.d = x;
818 return (u.little_endian.exponent == 2047
819 && (u.little_endian.mantissa1 != 0
820 || u.little_endian.mantissa2 != 0));
824 /* Check for a negative IEEE double precision number. */
/* Implicit int return: the sign bit of X, so nonzero for negative
   values including IEEE minus zero.  */
827 target_negative (x)
828 REAL_VALUE_TYPE x;
830 /* The IEEE 64-bit double format. */
831 union {
832 REAL_VALUE_TYPE d;
833 struct {
834 unsigned sign : 1;
835 unsigned exponent : 11;
836 unsigned mantissa1 : 20;
837 unsigned mantissa2;
838 } little_endian;
839 struct {
840 unsigned mantissa2;
841 unsigned mantissa1 : 20;
842 unsigned exponent : 11;
843 unsigned sign : 1;
844 } big_endian;
845 } u;
/* Endianness probe, as in target_isinf/target_isnan.  */
847 u.d = dconstm1;
848 if (u.big_endian.sign == 1)
850 u.d = x;
851 return u.big_endian.sign;
853 else
855 u.d = x;
856 return u.little_endian.sign;
859 #else /* Target not IEEE */
861 /* Let's assume other float formats don't have infinity.
862 (This can be overridden by redefining REAL_VALUE_ISINF.) */
/* Non-IEEE stub: no representable infinity, so always false.  */
864 target_isinf (x)
865 REAL_VALUE_TYPE x;
867 return 0;
870 /* Let's assume other float formats don't have NaNs.
871 (This can be overridden by redefining REAL_VALUE_ISNAN.) */
/* Non-IEEE stub: no representable NaN, so always false.  */
873 target_isnan (x)
874 REAL_VALUE_TYPE x;
876 return 0;
879 /* Let's assume other float formats don't have minus zero.
880 (This can be overridden by redefining REAL_VALUE_NEGATIVE.) */
/* Non-IEEE stub: without minus zero, an ordinary comparison suffices.  */
882 target_negative (x)
883 REAL_VALUE_TYPE x;
885 return x < 0;
887 #endif /* Target not IEEE */
888 #endif /* no REAL_ARITHMETIC */
890 /* Split a tree IN into a constant and a variable part
891 that could be combined with CODE to make IN.
892 CODE must be a commutative arithmetic operation.
893 Store the constant part into *CONP and the variable in &VARP.
894 Return 1 if this was done; zero means the tree IN did not decompose
895 this way.
897 If CODE is PLUS_EXPR we also split trees that use MINUS_EXPR.
898 Therefore, we must tell the caller whether the variable part
899 was subtracted. We do this by storing 1 or -1 into *VARSIGNP.
900 The value stored is the coefficient for the variable term.
901 The constant term we return should always be added;
902 we negate it if necessary. */
904 static int
905 split_tree (in, code, varp, conp, varsignp)
906 tree in;
907 enum tree_code code;
908 tree *varp, *conp;
909 int *varsignp;
911 register tree outtype = TREE_TYPE (in);
912 *varp = 0;
913 *conp = 0;
915 /* Strip any conversions that don't change the machine mode. */
916 while ((TREE_CODE (in) == NOP_EXPR
917 || TREE_CODE (in) == CONVERT_EXPR)
918 && (TYPE_MODE (TREE_TYPE (in))
919 == TYPE_MODE (TREE_TYPE (TREE_OPERAND (in, 0)))))
920 in = TREE_OPERAND (in, 0);
922 if (TREE_CODE (in) == code
923 || (! FLOAT_TYPE_P (TREE_TYPE (in))
924 /* We can associate addition and subtraction together
925 (even though the C standard doesn't say so)
926 for integers because the value is not affected.
927 For reals, the value might be affected, so we can't. */
928 && ((code == PLUS_EXPR && TREE_CODE (in) == MINUS_EXPR)
929 || (code == MINUS_EXPR && TREE_CODE (in) == PLUS_EXPR))))
/* NOTE(review): this inner `code' shadows the CODE parameter for the
   rest of the function — intentional here, but worth renaming.  */
931 enum tree_code code = TREE_CODE (TREE_OPERAND (in, 0));
/* Case 1: first operand is the integer constant.  The sign of the
   variable term depends on whether IN is a MINUS_EXPR.  */
932 if (code == INTEGER_CST)
934 *conp = TREE_OPERAND (in, 0);
935 *varp = TREE_OPERAND (in, 1);
936 if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype)
937 && TREE_TYPE (*varp) != outtype)
938 *varp = convert (outtype, *varp);
939 *varsignp = (TREE_CODE (in) == MINUS_EXPR) ? -1 : 1;
940 return 1;
/* Case 2: second operand is constant.  */
942 if (TREE_CONSTANT (TREE_OPERAND (in, 1)))
944 *conp = TREE_OPERAND (in, 1);
945 *varp = TREE_OPERAND (in, 0);
946 *varsignp = 1;
947 if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype)
948 && TREE_TYPE (*varp) != outtype)
949 *varp = convert (outtype, *varp);
950 if (TREE_CODE (in) == MINUS_EXPR)
952 /* If operation is subtraction and constant is second,
953 must negate it to get an additive constant.
954 And this cannot be done unless it is a manifest constant.
955 It could also be the address of a static variable.
956 We cannot negate that, so give up. */
957 if (TREE_CODE (*conp) == INTEGER_CST)
958 /* Subtracting from integer_zero_node loses for long long. */
959 *conp = fold (build1 (NEGATE_EXPR, TREE_TYPE (*conp), *conp));
960 else
961 return 0;
963 return 1;
/* Case 3: first operand is a non-INTEGER_CST constant (e.g. an
   address constant).  */
965 if (TREE_CONSTANT (TREE_OPERAND (in, 0)))
967 *conp = TREE_OPERAND (in, 0);
968 *varp = TREE_OPERAND (in, 1);
969 if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype)
970 && TREE_TYPE (*varp) != outtype)
971 *varp = convert (outtype, *varp);
972 *varsignp = (TREE_CODE (in) == MINUS_EXPR) ? -1 : 1;
973 return 1;
976 return 0;
979 /* Combine two constants NUM and ARG2 under operation CODE
980 to produce a new constant.
981 We assume ARG1 and ARG2 have the same data type,
982 or at least are the same kind of constant and the same machine mode.
984 If NOTRUNC is nonzero, do not truncate the result to fit the data type. */
986 static tree
987 const_binop (code, arg1, arg2, notrunc)
988 enum tree_code code;
989 register tree arg1, arg2;
990 int notrunc;
992 if (TREE_CODE (arg1) == INTEGER_CST)
994 register HOST_WIDE_INT int1l = TREE_INT_CST_LOW (arg1);
995 register HOST_WIDE_INT int1h = TREE_INT_CST_HIGH (arg1);
996 HOST_WIDE_INT int2l = TREE_INT_CST_LOW (arg2);
997 HOST_WIDE_INT int2h = TREE_INT_CST_HIGH (arg2);
998 HOST_WIDE_INT low, hi;
999 HOST_WIDE_INT garbagel, garbageh;
1000 register tree t;
1001 int uns = TREE_UNSIGNED (TREE_TYPE (arg1));
1002 int overflow = 0;
1004 switch (code)
1006 case BIT_IOR_EXPR:
1007 t = build_int_2 (int1l | int2l, int1h | int2h);
1008 break;
1010 case BIT_XOR_EXPR:
1011 t = build_int_2 (int1l ^ int2l, int1h ^ int2h);
1012 break;
1014 case BIT_AND_EXPR:
1015 t = build_int_2 (int1l & int2l, int1h & int2h);
1016 break;
1018 case BIT_ANDTC_EXPR:
1019 t = build_int_2 (int1l & ~int2l, int1h & ~int2h);
1020 break;
1022 case RSHIFT_EXPR:
1023 int2l = - int2l;
1024 case LSHIFT_EXPR:
1025 /* It's unclear from the C standard whether shifts can overflow.
1026 The following code ignores overflow; perhaps a C standard
1027 interpretation ruling is needed. */
1028 lshift_double (int1l, int1h, int2l,
1029 TYPE_PRECISION (TREE_TYPE (arg1)),
1030 &low, &hi,
1031 !uns);
1032 t = build_int_2 (low, hi);
1033 TREE_TYPE (t) = TREE_TYPE (arg1);
1034 if (!notrunc)
1035 force_fit_type (t, 0);
1036 TREE_OVERFLOW (t) = TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2);
1037 TREE_CONSTANT_OVERFLOW (t)
1038 = TREE_CONSTANT_OVERFLOW (arg1) | TREE_CONSTANT_OVERFLOW (arg2);
1039 return t;
1041 case RROTATE_EXPR:
1042 int2l = - int2l;
1043 case LROTATE_EXPR:
1044 lrotate_double (int1l, int1h, int2l,
1045 TYPE_PRECISION (TREE_TYPE (arg1)),
1046 &low, &hi);
1047 t = build_int_2 (low, hi);
1048 break;
1050 case PLUS_EXPR:
1051 if (int1h == 0)
1053 int2l += int1l;
1054 if ((unsigned HOST_WIDE_INT) int2l < int1l)
1056 hi = int2h++;
1057 overflow = int2h < hi;
1059 t = build_int_2 (int2l, int2h);
1060 break;
1062 if (int2h == 0)
1064 int1l += int2l;
1065 if ((unsigned HOST_WIDE_INT) int1l < int2l)
1067 hi = int1h++;
1068 overflow = int1h < hi;
1070 t = build_int_2 (int1l, int1h);
1071 break;
1073 overflow = add_double (int1l, int1h, int2l, int2h, &low, &hi);
1074 t = build_int_2 (low, hi);
1075 break;
1077 case MINUS_EXPR:
1078 if (int2h == 0 && int2l == 0)
1080 t = build_int_2 (int1l, int1h);
1081 break;
1083 neg_double (int2l, int2h, &low, &hi);
1084 add_double (int1l, int1h, low, hi, &low, &hi);
1085 overflow = overflow_sum_sign (hi, int2h, int1h);
1086 t = build_int_2 (low, hi);
1087 break;
1089 case MULT_EXPR:
1090 overflow = mul_double (int1l, int1h, int2l, int2h, &low, &hi);
1091 t = build_int_2 (low, hi);
1092 break;
1094 case TRUNC_DIV_EXPR:
1095 case FLOOR_DIV_EXPR: case CEIL_DIV_EXPR:
1096 case EXACT_DIV_EXPR:
1097 /* This is a shortcut for a common special case.
1098 It reduces the number of tree nodes generated
1099 and saves time. */
1100 if (int2h == 0 && int2l > 0
1101 && TREE_TYPE (arg1) == sizetype
1102 && int1h == 0 && int1l >= 0)
1104 if (code == CEIL_DIV_EXPR)
1105 int1l += int2l-1;
1106 return size_int (int1l / int2l);
1108 case ROUND_DIV_EXPR:
1109 if (int2h == 0 && int2l == 1)
1111 t = build_int_2 (int1l, int1h);
1112 break;
1114 if (int1l == int2l && int1h == int2h)
1116 if ((int1l | int1h) == 0)
1117 abort ();
1118 t = build_int_2 (1, 0);
1119 break;
1121 overflow = div_and_round_double (code, uns,
1122 int1l, int1h, int2l, int2h,
1123 &low, &hi, &garbagel, &garbageh);
1124 t = build_int_2 (low, hi);
1125 break;
1127 case TRUNC_MOD_EXPR: case ROUND_MOD_EXPR:
1128 case FLOOR_MOD_EXPR: case CEIL_MOD_EXPR:
1129 overflow = div_and_round_double (code, uns,
1130 int1l, int1h, int2l, int2h,
1131 &garbagel, &garbageh, &low, &hi);
1132 t = build_int_2 (low, hi);
1133 break;
1135 case MIN_EXPR:
1136 case MAX_EXPR:
1137 if (uns)
1139 low = (((unsigned HOST_WIDE_INT) int1h
1140 < (unsigned HOST_WIDE_INT) int2h)
1141 || (((unsigned HOST_WIDE_INT) int1h
1142 == (unsigned HOST_WIDE_INT) int2h)
1143 && ((unsigned HOST_WIDE_INT) int1l
1144 < (unsigned HOST_WIDE_INT) int2l)));
1146 else
1148 low = ((int1h < int2h)
1149 || ((int1h == int2h)
1150 && ((unsigned HOST_WIDE_INT) int1l
1151 < (unsigned HOST_WIDE_INT) int2l)));
1153 if (low == (code == MIN_EXPR))
1154 t = build_int_2 (int1l, int1h);
1155 else
1156 t = build_int_2 (int2l, int2h);
1157 break;
1159 default:
1160 abort ();
1162 got_it:
1163 TREE_TYPE (t) = TREE_TYPE (arg1);
1164 TREE_OVERFLOW (t)
1165 = ((notrunc ? !uns && overflow : force_fit_type (t, overflow))
1166 | TREE_OVERFLOW (arg1)
1167 | TREE_OVERFLOW (arg2));
1168 TREE_CONSTANT_OVERFLOW (t) = (TREE_OVERFLOW (t)
1169 | TREE_CONSTANT_OVERFLOW (arg1)
1170 | TREE_CONSTANT_OVERFLOW (arg2));
1171 return t;
1173 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1174 if (TREE_CODE (arg1) == REAL_CST)
1176 REAL_VALUE_TYPE d1;
1177 REAL_VALUE_TYPE d2;
1178 int overflow = 0;
1179 REAL_VALUE_TYPE value;
1180 tree t;
1182 d1 = TREE_REAL_CST (arg1);
1183 d2 = TREE_REAL_CST (arg2);
1185 /* If either operand is a NaN, just return it. Otherwise, set up
1186 for floating-point trap; we return an overflow. */
1187 if (REAL_VALUE_ISNAN (d1))
1188 return arg1;
1189 else if (REAL_VALUE_ISNAN (d2))
1190 return arg2;
1191 else if (setjmp (float_error))
1193 t = copy_node (arg1);
1194 overflow = 1;
1195 goto got_float;
1198 set_float_handler (float_error);
1200 #ifdef REAL_ARITHMETIC
1201 REAL_ARITHMETIC (value, code, d1, d2);
1202 #else
1203 switch (code)
1205 case PLUS_EXPR:
1206 value = d1 + d2;
1207 break;
1209 case MINUS_EXPR:
1210 value = d1 - d2;
1211 break;
1213 case MULT_EXPR:
1214 value = d1 * d2;
1215 break;
1217 case RDIV_EXPR:
1218 #ifndef REAL_INFINITY
1219 if (d2 == 0)
1220 abort ();
1221 #endif
1223 value = d1 / d2;
1224 break;
1226 case MIN_EXPR:
1227 value = MIN (d1, d2);
1228 break;
1230 case MAX_EXPR:
1231 value = MAX (d1, d2);
1232 break;
1234 default:
1235 abort ();
1237 #endif /* no REAL_ARITHMETIC */
1238 t = build_real (TREE_TYPE (arg1),
1239 real_value_truncate (TYPE_MODE (TREE_TYPE (arg1)), value));
1240 got_float:
1241 set_float_handler (NULL_PTR);
1243 TREE_OVERFLOW (t)
1244 = (force_fit_type (t, overflow)
1245 | TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2));
1246 TREE_CONSTANT_OVERFLOW (t)
1247 = TREE_OVERFLOW (t)
1248 | TREE_CONSTANT_OVERFLOW (arg1)
1249 | TREE_CONSTANT_OVERFLOW (arg2);
1250 return t;
1252 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
1253 if (TREE_CODE (arg1) == COMPLEX_CST)
1255 register tree r1 = TREE_REALPART (arg1);
1256 register tree i1 = TREE_IMAGPART (arg1);
1257 register tree r2 = TREE_REALPART (arg2);
1258 register tree i2 = TREE_IMAGPART (arg2);
1259 register tree t;
1261 switch (code)
1263 case PLUS_EXPR:
1264 t = build_complex (const_binop (PLUS_EXPR, r1, r2, notrunc),
1265 const_binop (PLUS_EXPR, i1, i2, notrunc));
1266 break;
1268 case MINUS_EXPR:
1269 t = build_complex (const_binop (MINUS_EXPR, r1, r2, notrunc),
1270 const_binop (MINUS_EXPR, i1, i2, notrunc));
1271 break;
1273 case MULT_EXPR:
1274 t = build_complex (const_binop (MINUS_EXPR,
1275 const_binop (MULT_EXPR,
1276 r1, r2, notrunc),
1277 const_binop (MULT_EXPR,
1278 i1, i2, notrunc),
1279 notrunc),
1280 const_binop (PLUS_EXPR,
1281 const_binop (MULT_EXPR,
1282 r1, i2, notrunc),
1283 const_binop (MULT_EXPR,
1284 i1, r2, notrunc),
1285 notrunc));
1286 break;
1288 case RDIV_EXPR:
1290 register tree magsquared
1291 = const_binop (PLUS_EXPR,
1292 const_binop (MULT_EXPR, r2, r2, notrunc),
1293 const_binop (MULT_EXPR, i2, i2, notrunc),
1294 notrunc);
1296 t = build_complex
1297 (const_binop (INTEGRAL_TYPE_P (TREE_TYPE (r1))
1298 ? TRUNC_DIV_EXPR : RDIV_EXPR,
1299 const_binop (PLUS_EXPR,
1300 const_binop (MULT_EXPR, r1, r2,
1301 notrunc),
1302 const_binop (MULT_EXPR, i1, i2,
1303 notrunc),
1304 notrunc),
1305 magsquared, notrunc),
1306 const_binop (INTEGRAL_TYPE_P (TREE_TYPE (r1))
1307 ? TRUNC_DIV_EXPR : RDIV_EXPR,
1308 const_binop (MINUS_EXPR,
1309 const_binop (MULT_EXPR, i1, r2,
1310 notrunc),
1311 const_binop (MULT_EXPR, r1, i2,
1312 notrunc),
1313 notrunc),
1314 magsquared, notrunc));
1316 break;
1318 default:
1319 abort ();
1321 TREE_TYPE (t) = TREE_TYPE (arg1);
1322 return t;
1324 return 0;
1327 /* Return an INTEGER_CST with value V and type from `sizetype'. */
1329 tree
1330 size_int (number)
1331 unsigned int number;
1333 register tree t;
1334 /* Type-size nodes already made for small sizes. */
1335 static tree size_table[2*HOST_BITS_PER_WIDE_INT + 1];
1337 if (number < 2*HOST_BITS_PER_WIDE_INT + 1
1338 && size_table[number] != 0)
1339 return size_table[number];
1340 if (number < 2*HOST_BITS_PER_WIDE_INT + 1)
1342 push_obstacks_nochange ();
1343 /* Make this a permanent node. */
1344 end_temporary_allocation ();
1345 t = build_int_2 (number, 0);
1346 TREE_TYPE (t) = sizetype;
1347 size_table[number] = t;
1348 pop_obstacks ();
1350 else
1352 t = build_int_2 (number, 0);
1353 TREE_TYPE (t) = sizetype;
1355 return t;
1358 /* Combine operands OP1 and OP2 with arithmetic operation CODE.
1359 CODE is a tree code. Data type is taken from `sizetype',
1360 If the operands are constant, so is the result. */
1362 tree
1363 size_binop (code, arg0, arg1)
1364 enum tree_code code;
1365 tree arg0, arg1;
1367 /* Handle the special case of two integer constants faster. */
1368 if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST)
1370 /* And some specific cases even faster than that. */
1371 if (code == PLUS_EXPR
1372 && TREE_INT_CST_LOW (arg0) == 0
1373 && TREE_INT_CST_HIGH (arg0) == 0)
1374 return arg1;
1375 if (code == MINUS_EXPR
1376 && TREE_INT_CST_LOW (arg1) == 0
1377 && TREE_INT_CST_HIGH (arg1) == 0)
1378 return arg0;
1379 if (code == MULT_EXPR
1380 && TREE_INT_CST_LOW (arg0) == 1
1381 && TREE_INT_CST_HIGH (arg0) == 0)
1382 return arg1;
1383 /* Handle general case of two integer constants. */
1384 return const_binop (code, arg0, arg1, 1);
1387 if (arg0 == error_mark_node || arg1 == error_mark_node)
1388 return error_mark_node;
1390 return fold (build (code, sizetype, arg0, arg1));
1393 /* Given T, a tree representing type conversion of ARG1, a constant,
1394 return a constant tree representing the result of conversion. */
static tree
fold_convert (t, arg1)
     register tree t;
     register tree arg1;
{
  register tree type = TREE_TYPE (t);
  int overflow = 0;

  if (TREE_CODE (type) == POINTER_TYPE || INTEGRAL_TYPE_P (type))
    {
      if (TREE_CODE (arg1) == INTEGER_CST)
	{
	  /* If we would build a constant wider than GCC supports,
	     leave the conversion unfolded.  */
	  if (TYPE_PRECISION (type) > 2 * HOST_BITS_PER_WIDE_INT)
	    return t;

	  /* Given an integer constant, make new constant with new type,
	     appropriately sign-extended or truncated.  */
	  t = build_int_2 (TREE_INT_CST_LOW (arg1),
			   TREE_INT_CST_HIGH (arg1));
	  TREE_TYPE (t) = type;
	  /* Indicate an overflow if (1) ARG1 already overflowed,
	     or (2) force_fit_type indicates an overflow.
	     Tell force_fit_type that an overflow has already occurred
	     if ARG1 is a too-large unsigned value and T is signed.
	     (The operands of `&' are 0/1 ints, so bitwise AND behaves
	     like logical AND here.)  */
	  TREE_OVERFLOW (t)
	    = (TREE_OVERFLOW (arg1)
	       | force_fit_type (t,
				 (TREE_INT_CST_HIGH (arg1) < 0
				  & (TREE_UNSIGNED (type)
				     < TREE_UNSIGNED (TREE_TYPE (arg1))))));
	  TREE_CONSTANT_OVERFLOW (t)
	    = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg1);
	}
#if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
      else if (TREE_CODE (arg1) == REAL_CST)
	{
	  /* Real constant converted to integer type: range-check, then
	     truncate toward zero.
	     Don't initialize these, use assignments.
	     Initialized local aggregates don't work on old compilers.  */
	  REAL_VALUE_TYPE x;
	  REAL_VALUE_TYPE l;
	  REAL_VALUE_TYPE u;

	  x = TREE_REAL_CST (arg1);
	  l = real_value_from_int_cst (TYPE_MIN_VALUE (type));
	  u = real_value_from_int_cst (TYPE_MAX_VALUE (type));
	  /* See if X will be in range after truncation towards 0.
	     To compensate for truncation, move the bounds away from 0,
	     but reject if X exactly equals the adjusted bounds.  */
#ifdef REAL_ARITHMETIC
	  REAL_ARITHMETIC (l, MINUS_EXPR, l, dconst1);
	  REAL_ARITHMETIC (u, PLUS_EXPR, u, dconst1);
#else
	  l--;
	  u++;
#endif
	  /* If X is a NaN, use zero instead and show we have an overflow.
	     Otherwise, range check.  */
	  if (REAL_VALUE_ISNAN (x))
	    overflow = 1, x = dconst0;
	  else if (! (REAL_VALUES_LESS (l, x) && REAL_VALUES_LESS (x, u)))
	    overflow = 1;

#ifndef REAL_ARITHMETIC
	  {
	    /* Host doubles: split |X| into two HOST_WIDE_INT halves by
	       dividing out half_word^2, watching for the case where the
	       low part would set the sign bit.  */
	    HOST_WIDE_INT low, high;
	    HOST_WIDE_INT half_word
	      = (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2);

	    if (x < 0)
	      x = -x;

	    high = (HOST_WIDE_INT) (x / half_word / half_word);
	    x -= (REAL_VALUE_TYPE) high * half_word * half_word;
	    if (x >= (REAL_VALUE_TYPE) half_word * half_word / 2)
	      {
		low = x - (REAL_VALUE_TYPE) half_word * half_word / 2;
		low |= (HOST_WIDE_INT) -1 << (HOST_BITS_PER_WIDE_INT - 1);
	      }
	    else
	      low = (HOST_WIDE_INT) x;
	    if (TREE_REAL_CST (arg1) < 0)
	      neg_double (low, high, &low, &high);
	    t = build_int_2 (low, high);
	  }
#else
	  {
	    HOST_WIDE_INT low, high;
	    REAL_VALUE_TO_INT (&low, &high, x);
	    t = build_int_2 (low, high);
	  }
#endif
	  TREE_TYPE (t) = type;
	  TREE_OVERFLOW (t)
	    = TREE_OVERFLOW (arg1) | force_fit_type (t, overflow);
	  TREE_CONSTANT_OVERFLOW (t)
	    = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg1);
	}
#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
      TREE_TYPE (t) = type;
    }
  else if (TREE_CODE (type) == REAL_TYPE)
    {
#if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
      if (TREE_CODE (arg1) == INTEGER_CST)
	return build_real_from_int_cst (type, arg1);
#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
      if (TREE_CODE (arg1) == REAL_CST)
	{
	  /* NaNs pass through unchanged; otherwise arm the float trap
	     handler so an overflowing truncation lands in the setjmp
	     branch with OVERFLOW set.  */
	  if (REAL_VALUE_ISNAN (TREE_REAL_CST (arg1)))
	    return arg1;
	  else if (setjmp (float_error))
	    {
	      overflow = 1;
	      t = copy_node (arg1);
	      goto got_it;
	    }
	  set_float_handler (float_error);

	  t = build_real (type, real_value_truncate (TYPE_MODE (type),
						     TREE_REAL_CST (arg1)));
	  set_float_handler (NULL_PTR);

	got_it:
	  TREE_OVERFLOW (t)
	    = TREE_OVERFLOW (arg1) | force_fit_type (t, overflow);
	  TREE_CONSTANT_OVERFLOW (t)
	    = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg1);
	  return t;
	}
    }
  TREE_CONSTANT (t) = 1;
  return t;
}
1532 /* Return an expr equal to X but certainly not valid as an lvalue.
1533 Also make sure it is not valid as an null pointer constant. */
1535 tree
1536 non_lvalue (x)
1537 tree x;
1539 tree result;
1541 /* These things are certainly not lvalues. */
1542 if (TREE_CODE (x) == NON_LVALUE_EXPR
1543 || TREE_CODE (x) == INTEGER_CST
1544 || TREE_CODE (x) == REAL_CST
1545 || TREE_CODE (x) == STRING_CST
1546 || TREE_CODE (x) == ADDR_EXPR)
1548 if (TREE_CODE (x) == INTEGER_CST && integer_zerop (x))
1550 /* Use NOP_EXPR instead of NON_LVALUE_EXPR
1551 so convert_for_assignment won't strip it.
1552 This is so this 0 won't be treated as a null pointer constant. */
1553 result = build1 (NOP_EXPR, TREE_TYPE (x), x);
1554 TREE_CONSTANT (result) = TREE_CONSTANT (x);
1555 return result;
1557 return x;
1560 result = build1 (NON_LVALUE_EXPR, TREE_TYPE (x), x);
1561 TREE_CONSTANT (result) = TREE_CONSTANT (x);
1562 return result;
/* Nonzero means lvalues are limited to those valid in pedantic ANSI C.
   Zero means allow extended lvalues.
   Set by the front end; read by pedantic_non_lvalue below.  */

int pedantic_lvalues;
1570 /* When pedantic, return an expr equal to X but certainly not valid as a
1571 pedantic lvalue. Otherwise, return X. */
1573 tree
1574 pedantic_non_lvalue (x)
1575 tree x;
1577 if (pedantic_lvalues)
1578 return non_lvalue (x);
1579 else
1580 return x;
1583 /* Given a tree comparison code, return the code that is the logical inverse
1584 of the given code. It is not safe to do this for floating-point
1585 comparisons, except for NE_EXPR and EQ_EXPR. */
1587 static enum tree_code
1588 invert_tree_comparison (code)
1589 enum tree_code code;
1591 switch (code)
1593 case EQ_EXPR:
1594 return NE_EXPR;
1595 case NE_EXPR:
1596 return EQ_EXPR;
1597 case GT_EXPR:
1598 return LE_EXPR;
1599 case GE_EXPR:
1600 return LT_EXPR;
1601 case LT_EXPR:
1602 return GE_EXPR;
1603 case LE_EXPR:
1604 return GT_EXPR;
1605 default:
1606 abort ();
1610 /* Similar, but return the comparison that results if the operands are
1611 swapped. This is safe for floating-point. */
1613 static enum tree_code
1614 swap_tree_comparison (code)
1615 enum tree_code code;
1617 switch (code)
1619 case EQ_EXPR:
1620 case NE_EXPR:
1621 return code;
1622 case GT_EXPR:
1623 return LT_EXPR;
1624 case GE_EXPR:
1625 return LE_EXPR;
1626 case LT_EXPR:
1627 return GT_EXPR;
1628 case LE_EXPR:
1629 return GE_EXPR;
1630 default:
1631 abort ();
1635 /* Return nonzero if CODE is a tree code that represents a truth value. */
1637 static int
1638 truth_value_p (code)
1639 enum tree_code code;
1641 return (TREE_CODE_CLASS (code) == '<'
1642 || code == TRUTH_AND_EXPR || code == TRUTH_ANDIF_EXPR
1643 || code == TRUTH_OR_EXPR || code == TRUTH_ORIF_EXPR
1644 || code == TRUTH_XOR_EXPR || code == TRUTH_NOT_EXPR);
1647 /* Return nonzero if two operands are necessarily equal.
1648 If ONLY_CONST is non-zero, only return non-zero for constants.
1649 This function tests whether the operands are indistinguishable;
1650 it does not test whether they are equal using C's == operation.
1651 The distinction is important for IEEE floating point, because
1652 (1) -0.0 and 0.0 are distinguishable, but -0.0==0.0, and
1653 (2) two NaNs may be indistinguishable, but NaN!=NaN. */
operand_equal_p (arg0, arg1, only_const)
     tree arg0, arg1;
     int only_const;
{
  /* If both types don't have the same signedness, then we can't consider
     them equal.  We must check this before the STRIP_NOPS calls
     because they may change the signedness of the arguments.  */
  if (TREE_UNSIGNED (TREE_TYPE (arg0)) != TREE_UNSIGNED (TREE_TYPE (arg1)))
    return 0;

  STRIP_NOPS (arg0);
  STRIP_NOPS (arg1);

  /* If ARG0 and ARG1 are the same SAVE_EXPR, they are necessarily equal.
     We don't care about side effects in that case because the SAVE_EXPR
     takes care of that for us.  */
  if (TREE_CODE (arg0) == SAVE_EXPR && arg0 == arg1)
    return ! only_const;

  /* Anything with side effects can't be considered equal to anything.  */
  if (TREE_SIDE_EFFECTS (arg0) || TREE_SIDE_EFFECTS (arg1))
    return 0;

  /* Two ADDR_EXPRs of the identical object are equal.  */
  if (TREE_CODE (arg0) == TREE_CODE (arg1)
      && TREE_CODE (arg0) == ADDR_EXPR
      && TREE_OPERAND (arg0, 0) == TREE_OPERAND (arg1, 0))
    return 1;

  /* Integer constants are equal when both words match.  */
  if (TREE_CODE (arg0) == TREE_CODE (arg1)
      && TREE_CODE (arg0) == INTEGER_CST
      && TREE_INT_CST_LOW (arg0) == TREE_INT_CST_LOW (arg1)
      && TREE_INT_CST_HIGH (arg0) == TREE_INT_CST_HIGH (arg1))
    return 1;

  /* Detect when real constants are equal.  This is a bitwise comparison,
     so -0.0 and 0.0 remain distinguishable, as the head comment requires.  */
  if (TREE_CODE (arg0) == TREE_CODE (arg1)
      && TREE_CODE (arg0) == REAL_CST)
    return !bcmp ((char *) &TREE_REAL_CST (arg0),
		  (char *) &TREE_REAL_CST (arg1),
		  sizeof (REAL_VALUE_TYPE));

  /* Beyond this point only structural comparison of non-constants
     remains, which ONLY_CONST forbids.  */
  if (only_const)
    return 0;

  if (arg0 == arg1)
    return 1;

  if (TREE_CODE (arg0) != TREE_CODE (arg1))
    return 0;
  /* This is needed for conversions and for COMPONENT_REF.
     Might as well play it safe and always test this.  */
  if (TYPE_MODE (TREE_TYPE (arg0)) != TYPE_MODE (TREE_TYPE (arg1)))
    return 0;

  /* Recurse structurally by tree-code class.  */
  switch (TREE_CODE_CLASS (TREE_CODE (arg0)))
    {
    case '1':
      /* Two conversions are equal only if signedness and modes match.  */
      if ((TREE_CODE (arg0) == NOP_EXPR || TREE_CODE (arg0) == CONVERT_EXPR)
	  && (TREE_UNSIGNED (TREE_TYPE (arg0))
	      != TREE_UNSIGNED (TREE_TYPE (arg1))))
	return 0;

      return operand_equal_p (TREE_OPERAND (arg0, 0),
			      TREE_OPERAND (arg1, 0), 0);

    case '<':
    case '2':
      /* Binary operators and comparisons: both operands must match.  */
      return (operand_equal_p (TREE_OPERAND (arg0, 0),
			       TREE_OPERAND (arg1, 0), 0)
	      && operand_equal_p (TREE_OPERAND (arg0, 1),
				  TREE_OPERAND (arg1, 1), 0));

    case 'r':
      /* Only the reference kinds listed here can be compared; any other
	 reference falls out of the switch and returns 0.  */
      switch (TREE_CODE (arg0))
	{
	case INDIRECT_REF:
	  return operand_equal_p (TREE_OPERAND (arg0, 0),
				  TREE_OPERAND (arg1, 0), 0);

	case COMPONENT_REF:
	case ARRAY_REF:
	  return (operand_equal_p (TREE_OPERAND (arg0, 0),
				   TREE_OPERAND (arg1, 0), 0)
		  && operand_equal_p (TREE_OPERAND (arg0, 1),
				      TREE_OPERAND (arg1, 1), 0));

	case BIT_FIELD_REF:
	  return (operand_equal_p (TREE_OPERAND (arg0, 0),
				   TREE_OPERAND (arg1, 0), 0)
		  && operand_equal_p (TREE_OPERAND (arg0, 1),
				      TREE_OPERAND (arg1, 1), 0)
		  && operand_equal_p (TREE_OPERAND (arg0, 2),
				      TREE_OPERAND (arg1, 2), 0));
	}
      break;
    }

  return 0;
}
1756 /* Similar to operand_equal_p, but see if ARG0 might have been made by
1757 shorten_compare from ARG1 when ARG1 was being compared with OTHER.
1759 When in doubt, return 0. */
static int
operand_equal_for_comparison_p (arg0, arg1, other)
     tree arg0, arg1;
     tree other;
{
  int unsignedp1, unsignedpo;
  tree primarg1, primother;
  unsigned correct_width;

  /* Trivially true when the operands are indistinguishable outright.  */
  if (operand_equal_p (arg0, arg1, 0))
    return 1;

  /* shorten_compare only applies to integral comparisons.  */
  if (! INTEGRAL_TYPE_P (TREE_TYPE (arg0)))
    return 0;

  /* Duplicate what shorten_compare does to ARG1 and see if that gives the
     actual comparison operand, ARG0.

     First throw away any conversions to wider types
     already present in the operands.  */

  primarg1 = get_narrower (arg1, &unsignedp1);
  primother = get_narrower (other, &unsignedpo);

  correct_width = TYPE_PRECISION (TREE_TYPE (arg1));
  if (unsignedp1 == unsignedpo
      && TYPE_PRECISION (TREE_TYPE (primarg1)) < correct_width
      && TYPE_PRECISION (TREE_TYPE (primother)) < correct_width)
    {
      tree type = TREE_TYPE (arg0);

      /* Make sure shorter operand is extended the right way
	 to match the longer operand.  */
      primarg1 = convert (signed_or_unsigned_type (unsignedp1,
						   TREE_TYPE (primarg1)),
			  primarg1);

      if (operand_equal_p (arg0, convert (type, primarg1), 0))
	return 1;
    }

  /* When in doubt, say no.  */
  return 0;
}
1805 /* See if ARG is an expression that is either a comparison or is performing
1806 arithmetic on comparisons. The comparisons must only be comparing
1807 two different values, which will be stored in *CVAL1 and *CVAL2; if
1808 they are non-zero it means that some operands have already been found.
1809 No variables may be used anywhere else in the expression except in the
1810 comparisons. If SAVE_P is true it means we removed a SAVE_EXPR around
1811 the expression and save_expr needs to be called with CVAL1 and CVAL2.
1813 If this is true, return 1. Otherwise, return zero. */
static int
twoval_comparison_p (arg, cval1, cval2, save_p)
     tree arg;
     tree *cval1, *cval2;
     int *save_p;
{
  enum tree_code code = TREE_CODE (arg);
  char class = TREE_CODE_CLASS (code);

  /* We can handle some of the 'e' cases here.  Logical negation acts
     like a unary op; short-circuit connectives and comma act like
     binary ops for this analysis.  */
  if (class == 'e' && code == TRUTH_NOT_EXPR)
    class = '1';
  else if (class == 'e'
	   && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR
	       || code == COMPOUND_EXPR))
    class = '2';

  /* ??? Disable this since the SAVE_EXPR might already be in use outside
     the expression.  There may be no way to make this work, but it needs
     to be looked at again for 2.6.  */
#if 0
  else if (class == 'e' && code == SAVE_EXPR && SAVE_EXPR_RTL (arg) == 0)
    {
      /* If we've already found a CVAL1 or CVAL2, this expression is
	 too complex to handle.  */
      if (*cval1 || *cval2)
	return 0;

      class = '1';
      *save_p = 1;
    }
#endif

  switch (class)
    {
    case '1':
      return twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2, save_p);

    case '2':
      return (twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2, save_p)
	      && twoval_comparison_p (TREE_OPERAND (arg, 1),
				      cval1, cval2, save_p));

    case 'c':
      /* A constant uses no variables at all.  */
      return 1;

    case 'e':
      if (code == COND_EXPR)
	return (twoval_comparison_p (TREE_OPERAND (arg, 0),
				     cval1, cval2, save_p)
		&& twoval_comparison_p (TREE_OPERAND (arg, 1),
					cval1, cval2, save_p)
		&& twoval_comparison_p (TREE_OPERAND (arg, 2),
					cval1, cval2, save_p));
      return 0;

    case '<':
      /* First see if we can handle the first operand, then the second.  For
	 the second operand, we know *CVAL1 can't be zero.  It must be that
	 one side of the comparison is each of the values; test for the
	 case where this isn't true by failing if the two operands
	 are the same.  */

      if (operand_equal_p (TREE_OPERAND (arg, 0),
			   TREE_OPERAND (arg, 1), 0))
	return 0;

      /* Match operand 0 against a known value or record it in an
	 empty slot; an already-matching value needs no action.  */
      if (*cval1 == 0)
	*cval1 = TREE_OPERAND (arg, 0);
      else if (operand_equal_p (*cval1, TREE_OPERAND (arg, 0), 0))
	;
      else if (*cval2 == 0)
	*cval2 = TREE_OPERAND (arg, 0);
      else if (operand_equal_p (*cval2, TREE_OPERAND (arg, 0), 0))
	;
      else
	return 0;

      /* Likewise for operand 1; *CVAL1 is known non-zero here.  */
      if (operand_equal_p (*cval1, TREE_OPERAND (arg, 1), 0))
	;
      else if (*cval2 == 0)
	*cval2 = TREE_OPERAND (arg, 1);
      else if (operand_equal_p (*cval2, TREE_OPERAND (arg, 1), 0))
	;
      else
	return 0;

      return 1;
    }

  return 0;
}
1908 /* ARG is a tree that is known to contain just arithmetic operations and
1909 comparisons. Evaluate the operations in the tree substituting NEW0 for
1910 any occurrence of OLD0 as an operand of a comparison and likewise for
1911 NEW1 and OLD1. */
static tree
eval_subst (arg, old0, new0, old1, new1)
     tree arg;
     tree old0, new0, old1, new1;
{
  tree type = TREE_TYPE (arg);
  enum tree_code code = TREE_CODE (arg);
  char class = TREE_CODE_CLASS (code);

  /* We can handle some of the 'e' cases here.  Mirrors the remapping
     done in twoval_comparison_p above.  */
  if (class == 'e' && code == TRUTH_NOT_EXPR)
    class = '1';
  else if (class == 'e'
	   && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR))
    class = '2';

  switch (class)
    {
    case '1':
      /* Unary: substitute in the operand and refold.  */
      return fold (build1 (code, type,
			   eval_subst (TREE_OPERAND (arg, 0),
				       old0, new0, old1, new1)));

    case '2':
      /* Binary: substitute in both operands and refold.  */
      return fold (build (code, type,
			  eval_subst (TREE_OPERAND (arg, 0),
				      old0, new0, old1, new1),
			  eval_subst (TREE_OPERAND (arg, 1),
				      old0, new0, old1, new1)));

    case 'e':
      switch (code)
	{
	case SAVE_EXPR:
	  return eval_subst (TREE_OPERAND (arg, 0), old0, new0, old1, new1);

	case COMPOUND_EXPR:
	  /* The value of a comma expression is its second operand.  */
	  return eval_subst (TREE_OPERAND (arg, 1), old0, new0, old1, new1);

	case COND_EXPR:
	  return fold (build (code, type,
			      eval_subst (TREE_OPERAND (arg, 0),
					  old0, new0, old1, new1),
			      eval_subst (TREE_OPERAND (arg, 1),
					  old0, new0, old1, new1),
			      eval_subst (TREE_OPERAND (arg, 2),
					  old0, new0, old1, new1)));
	}
      /* NOTE(review): an 'e' code other than the above falls through
	 into the '<' case; presumably such codes never reach here given
	 the precondition that ARG contains only arithmetic and
	 comparisons -- confirm before relying on it.  */

    case '<':
      {
	tree arg0 = TREE_OPERAND (arg, 0);
	tree arg1 = TREE_OPERAND (arg, 1);

	/* We need to check both for exact equality and tree equality.  The
	   former will be true if the operand has a side-effect.  In that
	   case, we know the operand occurred exactly once.  */

	if (arg0 == old0 || operand_equal_p (arg0, old0, 0))
	  arg0 = new0;
	else if (arg0 == old1 || operand_equal_p (arg0, old1, 0))
	  arg0 = new1;

	if (arg1 == old0 || operand_equal_p (arg1, old0, 0))
	  arg1 = new0;
	else if (arg1 == old1 || operand_equal_p (arg1, old1, 0))
	  arg1 = new1;

	return fold (build (code, type, arg0, arg1));
      }
    }

  /* Anything else (e.g. a constant) is returned unchanged.  */
  return arg;
}
1988 /* Return a tree for the case when the result of an expression is RESULT
1989 converted to TYPE and OMITTED was previously an operand of the expression
1990 but is now not needed (e.g., we folded OMITTED * 0).
1992 If OMITTED has side effects, we must evaluate it. Otherwise, just do
1993 the conversion of RESULT to TYPE. */
1995 static tree
1996 omit_one_operand (type, result, omitted)
1997 tree type, result, omitted;
1999 tree t = convert (type, result);
2001 if (TREE_SIDE_EFFECTS (omitted))
2002 return build (COMPOUND_EXPR, type, omitted, t);
2004 return non_lvalue (t);
2007 /* Similar, but call pedantic_non_lvalue instead of non_lvalue. */
2009 static tree
2010 pedantic_omit_one_operand (type, result, omitted)
2011 tree type, result, omitted;
2013 tree t = convert (type, result);
2015 if (TREE_SIDE_EFFECTS (omitted))
2016 return build (COMPOUND_EXPR, type, omitted, t);
2018 return pedantic_non_lvalue (t);
2023 /* Return a simplified tree node for the truth-negation of ARG. This
2024 never alters ARG itself. We assume that ARG is an operation that
2025 returns a truth value (0 or 1). */
tree
invert_truthvalue (arg)
     tree arg;
{
  tree type = TREE_TYPE (arg);
  enum tree_code code = TREE_CODE (arg);

  if (code == ERROR_MARK)
    return arg;

  /* If this is a comparison, we can simply invert it, except for
     floating-point non-equality comparisons, in which case we just
     enclose a TRUTH_NOT_EXPR around what we have.  */

  if (TREE_CODE_CLASS (code) == '<')
    {
      if (FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
	  && code != NE_EXPR && code != EQ_EXPR)
	return build1 (TRUTH_NOT_EXPR, type, arg);
      else
	return build (invert_tree_comparison (code), type,
		      TREE_OPERAND (arg, 0), TREE_OPERAND (arg, 1));
    }

  switch (code)
    {
    case INTEGER_CST:
      /* A constant inverts to 1 exactly when both words are zero.  */
      return convert (type, build_int_2 (TREE_INT_CST_LOW (arg) == 0
					 && TREE_INT_CST_HIGH (arg) == 0, 0));

    case TRUTH_AND_EXPR:
      /* De Morgan: !(a && b) == !a || !b.  */
      return build (TRUTH_OR_EXPR, type,
		    invert_truthvalue (TREE_OPERAND (arg, 0)),
		    invert_truthvalue (TREE_OPERAND (arg, 1)));

    case TRUTH_OR_EXPR:
      /* De Morgan: !(a || b) == !a && !b.  */
      return build (TRUTH_AND_EXPR, type,
		    invert_truthvalue (TREE_OPERAND (arg, 0)),
		    invert_truthvalue (TREE_OPERAND (arg, 1)));

    case TRUTH_XOR_EXPR:
      /* Here we can invert either operand.  We invert the first operand
	 unless the second operand is a TRUTH_NOT_EXPR in which case our
	 result is the XOR of the first operand with the inside of the
	 negation of the second operand.  */

      if (TREE_CODE (TREE_OPERAND (arg, 1)) == TRUTH_NOT_EXPR)
	return build (TRUTH_XOR_EXPR, type, TREE_OPERAND (arg, 0),
		      TREE_OPERAND (TREE_OPERAND (arg, 1), 0));
      else
	return build (TRUTH_XOR_EXPR, type,
		      invert_truthvalue (TREE_OPERAND (arg, 0)),
		      TREE_OPERAND (arg, 1));

    case TRUTH_ANDIF_EXPR:
      /* De Morgan for the short-circuit forms, which preserves
	 evaluation order.  */
      return build (TRUTH_ORIF_EXPR, type,
		    invert_truthvalue (TREE_OPERAND (arg, 0)),
		    invert_truthvalue (TREE_OPERAND (arg, 1)));

    case TRUTH_ORIF_EXPR:
      return build (TRUTH_ANDIF_EXPR, type,
		    invert_truthvalue (TREE_OPERAND (arg, 0)),
		    invert_truthvalue (TREE_OPERAND (arg, 1)));

    case TRUTH_NOT_EXPR:
      /* Double negation cancels.  */
      return TREE_OPERAND (arg, 0);

    case COND_EXPR:
      /* Push the negation into both arms; the condition is unchanged.  */
      return build (COND_EXPR, type, TREE_OPERAND (arg, 0),
		    invert_truthvalue (TREE_OPERAND (arg, 1)),
		    invert_truthvalue (TREE_OPERAND (arg, 2)));

    case COMPOUND_EXPR:
      /* Only the second operand carries the value.  */
      return build (COMPOUND_EXPR, type, TREE_OPERAND (arg, 0),
		    invert_truthvalue (TREE_OPERAND (arg, 1)));

    case NON_LVALUE_EXPR:
      return invert_truthvalue (TREE_OPERAND (arg, 0));

    case NOP_EXPR:
    case CONVERT_EXPR:
    case FLOAT_EXPR:
      /* Push the negation through a conversion.  */
      return build1 (TREE_CODE (arg), type,
		     invert_truthvalue (TREE_OPERAND (arg, 0)));

    case BIT_AND_EXPR:
      /* (x & 1) inverts to (x & 1) == 0; any other mask falls through
	 to the generic handling below.  */
      if (!integer_onep (TREE_OPERAND (arg, 1)))
	break;
      return build (EQ_EXPR, type, arg, convert (type, integer_zero_node));

    case SAVE_EXPR:
      return build1 (TRUTH_NOT_EXPR, type, arg);
    }
  /* Only a boolean-typed value may legitimately reach here.  */
  if (TREE_CODE (TREE_TYPE (arg)) != BOOLEAN_TYPE)
    abort ();
  return build1 (TRUTH_NOT_EXPR, type, arg);
}
2125 /* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both
2126 operands are another bit-wise operation with a common input. If so,
2127 distribute the bit operations to save an operation and possibly two if
2128 constants are involved. For example, convert
2129 (A | B) & (A | C) into A | (B & C)
2130 Further simplification will occur if B and C are constants.
2132 If this optimization cannot be done, 0 will be returned. */
static tree
distribute_bit_expr (code, type, arg0, arg1)
     enum tree_code code;
     tree type;
     tree arg0, arg1;
{
  tree common;
  tree left, right;

  /* Both operands must be the same kind of bit operation (AND or IOR),
     and that kind must differ from the outer CODE -- otherwise there is
     nothing to distribute.  */
  if (TREE_CODE (arg0) != TREE_CODE (arg1)
      || TREE_CODE (arg0) == code
      || (TREE_CODE (arg0) != BIT_AND_EXPR
	  && TREE_CODE (arg0) != BIT_IOR_EXPR))
    return 0;

  /* Try each of the four ways the common operand can pair up
     between ARG0 and ARG1.  */
  if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0), 0))
    {
      common = TREE_OPERAND (arg0, 0);
      left = TREE_OPERAND (arg0, 1);
      right = TREE_OPERAND (arg1, 1);
    }
  else if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 1), 0))
    {
      common = TREE_OPERAND (arg0, 0);
      left = TREE_OPERAND (arg0, 1);
      right = TREE_OPERAND (arg1, 0);
    }
  else if (operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 0), 0))
    {
      common = TREE_OPERAND (arg0, 1);
      left = TREE_OPERAND (arg0, 0);
      right = TREE_OPERAND (arg1, 1);
    }
  else if (operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 1), 0))
    {
      common = TREE_OPERAND (arg0, 1);
      left = TREE_OPERAND (arg0, 0);
      right = TREE_OPERAND (arg1, 0);
    }
  else
    return 0;

  /* e.g. (A | B) & (A | C) becomes A | (B & C); folding the inner
     expression first may collapse constant B and C.  */
  return fold (build (TREE_CODE (arg0), type, common,
		      fold (build (code, type, left, right))));
}
2180 /* Return a BIT_FIELD_REF of type TYPE to refer to BITSIZE bits of INNER
2181 starting at BITPOS. The field is unsigned if UNSIGNEDP is non-zero. */
2183 static tree
2184 make_bit_field_ref (inner, type, bitsize, bitpos, unsignedp)
2185 tree inner;
2186 tree type;
2187 int bitsize, bitpos;
2188 int unsignedp;
2190 tree result = build (BIT_FIELD_REF, type, inner,
2191 size_int (bitsize), size_int (bitpos));
2193 TREE_UNSIGNED (result) = unsignedp;
2195 return result;
2198 /* Optimize a bit-field compare.
2200 There are two cases: First is a compare against a constant and the
2201 second is a comparison of two items where the fields are at the same
2202 bit position relative to the start of a chunk (byte, halfword, word)
2203 large enough to contain it. In these cases we can avoid the shift
2204 implicit in bitfield extractions.
2206 For constants, we emit a compare of the shifted constant with the
2207 BIT_AND_EXPR of a mask and a byte, halfword, or word of the operand being
2208 compared. For two fields at the same position, we do the ANDs with the
2209 similar mask and compare the result of the ANDs.
2211 CODE is the comparison code, known to be either NE_EXPR or EQ_EXPR.
2212 COMPARE_TYPE is the type of the comparison, and LHS and RHS
2213 are the left and right operands of the comparison, respectively.
2215 If the optimization described above can be done, we return the resulting
2216 tree. Otherwise we return zero. */
2218 static tree
2219 optimize_bit_field_compare (code, compare_type, lhs, rhs)
2220 enum tree_code code;
2221 tree compare_type;
2222 tree lhs, rhs;
2224 int lbitpos, lbitsize, rbitpos, rbitsize;
2225 int lnbitpos, lnbitsize, rnbitpos, rnbitsize;
2226 tree type = TREE_TYPE (lhs);
2227 tree signed_type, unsigned_type;
2228 int const_p = TREE_CODE (rhs) == INTEGER_CST;
2229 enum machine_mode lmode, rmode, lnmode, rnmode;
2230 int lunsignedp, runsignedp;
2231 int lvolatilep = 0, rvolatilep = 0;
2232 tree linner, rinner;
2233 tree mask;
2234 tree offset;
2236 /* Get all the information about the extractions being done. If the bit size
2237 if the same as the size of the underlying object, we aren't doing an
2238 extraction at all and so can do nothing. */
2239 linner = get_inner_reference (lhs, &lbitsize, &lbitpos, &offset, &lmode,
2240 &lunsignedp, &lvolatilep);
2241 if (linner == lhs || lbitsize == GET_MODE_BITSIZE (lmode) || lbitsize < 0
2242 || offset != 0)
2243 return 0;
2245 if (!const_p)
2247 /* If this is not a constant, we can only do something if bit positions,
2248 sizes, and signedness are the same. */
2249 rinner = get_inner_reference (rhs, &rbitsize, &rbitpos, &offset,
2250 &rmode, &runsignedp, &rvolatilep);
2252 if (rinner == rhs || lbitpos != rbitpos || lbitsize != rbitsize
2253 || lunsignedp != runsignedp || offset != 0)
2254 return 0;
2257 /* See if we can find a mode to refer to this field. We should be able to,
2258 but fail if we can't. */
2259 lnmode = get_best_mode (lbitsize, lbitpos,
2260 TYPE_ALIGN (TREE_TYPE (linner)), word_mode,
2261 lvolatilep);
2262 if (lnmode == VOIDmode)
2263 return 0;
2265 /* Set signed and unsigned types of the precision of this mode for the
2266 shifts below. */
2267 signed_type = type_for_mode (lnmode, 0);
2268 unsigned_type = type_for_mode (lnmode, 1);
2270 if (! const_p)
2272 rnmode = get_best_mode (rbitsize, rbitpos,
2273 TYPE_ALIGN (TREE_TYPE (rinner)), word_mode,
2274 rvolatilep);
2275 if (rnmode == VOIDmode)
2276 return 0;
2279 /* Compute the bit position and size for the new reference and our offset
2280 within it. If the new reference is the same size as the original, we
2281 won't optimize anything, so return zero. */
2282 lnbitsize = GET_MODE_BITSIZE (lnmode);
2283 lnbitpos = lbitpos & ~ (lnbitsize - 1);
2284 lbitpos -= lnbitpos;
2285 if (lnbitsize == lbitsize)
2286 return 0;
2288 if (! const_p)
2290 rnbitsize = GET_MODE_BITSIZE (rnmode);
2291 rnbitpos = rbitpos & ~ (rnbitsize - 1);
2292 rbitpos -= rnbitpos;
2293 if (rnbitsize == rbitsize)
2294 return 0;
2297 if (BYTES_BIG_ENDIAN)
2298 lbitpos = lnbitsize - lbitsize - lbitpos;
2300 /* Make the mask to be used against the extracted field. */
2301 mask = build_int_2 (~0, ~0);
2302 TREE_TYPE (mask) = unsigned_type;
2303 force_fit_type (mask, 0);
2304 mask = convert (unsigned_type, mask);
2305 mask = const_binop (LSHIFT_EXPR, mask, size_int (lnbitsize - lbitsize), 0);
2306 mask = const_binop (RSHIFT_EXPR, mask,
2307 size_int (lnbitsize - lbitsize - lbitpos), 0);
2309 if (! const_p)
2310 /* If not comparing with constant, just rework the comparison
2311 and return. */
2312 return build (code, compare_type,
2313 build (BIT_AND_EXPR, unsigned_type,
2314 make_bit_field_ref (linner, unsigned_type,
2315 lnbitsize, lnbitpos, 1),
2316 mask),
2317 build (BIT_AND_EXPR, unsigned_type,
2318 make_bit_field_ref (rinner, unsigned_type,
2319 rnbitsize, rnbitpos, 1),
2320 mask));
2322 /* Otherwise, we are handling the constant case. See if the constant is too
2323 big for the field. Warn and return a tree of for 0 (false) if so. We do
2324 this not only for its own sake, but to avoid having to test for this
2325 error case below. If we didn't, we might generate wrong code.
2327 For unsigned fields, the constant shifted right by the field length should
2328 be all zero. For signed fields, the high-order bits should agree with
2329 the sign bit. */
2331 if (lunsignedp)
2333 if (! integer_zerop (const_binop (RSHIFT_EXPR,
2334 convert (unsigned_type, rhs),
2335 size_int (lbitsize), 0)))
2337 warning ("comparison is always %s due to width of bitfield",
2338 code == NE_EXPR ? "one" : "zero");
2339 return convert (compare_type,
2340 (code == NE_EXPR
2341 ? integer_one_node : integer_zero_node));
2344 else
2346 tree tem = const_binop (RSHIFT_EXPR, convert (signed_type, rhs),
2347 size_int (lbitsize - 1), 0);
2348 if (! integer_zerop (tem) && ! integer_all_onesp (tem))
2350 warning ("comparison is always %s due to width of bitfield",
2351 code == NE_EXPR ? "one" : "zero");
2352 return convert (compare_type,
2353 (code == NE_EXPR
2354 ? integer_one_node : integer_zero_node));
2358 /* Single-bit compares should always be against zero. */
2359 if (lbitsize == 1 && ! integer_zerop (rhs))
2361 code = code == EQ_EXPR ? NE_EXPR : EQ_EXPR;
2362 rhs = convert (type, integer_zero_node);
2365 /* Make a new bitfield reference, shift the constant over the
2366 appropriate number of bits and mask it with the computed mask
2367 (in case this was a signed field). If we changed it, make a new one. */
2368 lhs = make_bit_field_ref (linner, unsigned_type, lnbitsize, lnbitpos, 1);
2369 if (lvolatilep)
2371 TREE_SIDE_EFFECTS (lhs) = 1;
2372 TREE_THIS_VOLATILE (lhs) = 1;
2375 rhs = fold (const_binop (BIT_AND_EXPR,
2376 const_binop (LSHIFT_EXPR,
2377 convert (unsigned_type, rhs),
2378 size_int (lbitpos), 0),
2379 mask, 0));
2381 return build (code, compare_type,
2382 build (BIT_AND_EXPR, unsigned_type, lhs, mask),
2383 rhs);
2386 /* Subroutine for fold_truthop: decode a field reference.
2388 If EXP is a comparison reference, we return the innermost reference.
2390 *PBITSIZE is set to the number of bits in the reference, *PBITPOS is
2391 set to the starting bit number.
2393 If the innermost field can be completely contained in a mode-sized
2394 unit, *PMODE is set to that mode. Otherwise, it is set to VOIDmode.
2396 *PVOLATILEP is set to 1 if the any expression encountered is volatile;
2397 otherwise it is not changed.
2399 *PUNSIGNEDP is set to the signedness of the field.
2401 *PMASK is set to the mask used. This is either contained in a
2402 BIT_AND_EXPR or derived from the width of the field.
2404 Return 0 if this is not a component reference or is one that we can't
2405 do anything with. */
2407 static tree
2408 decode_field_reference (exp, pbitsize, pbitpos, pmode, punsignedp,
2409 pvolatilep, pmask)
2410 tree exp;
2411 int *pbitsize, *pbitpos;
2412 enum machine_mode *pmode;
2413 int *punsignedp, *pvolatilep;
2414 tree *pmask;
2416 tree and_mask = 0;
2417 tree mask, inner, offset;
2418 tree unsigned_type;
2419 int precision;
2421 /* All the optimizations using this function assume integer fields.
2422 There are problems with FP fields since the type_for_size call
2423 below can fail for, e.g., XFmode. */
2424 if (! INTEGRAL_TYPE_P (TREE_TYPE (exp)))
2425 return 0;
2427 STRIP_NOPS (exp);
2429 if (TREE_CODE (exp) == BIT_AND_EXPR)
2431 and_mask = TREE_OPERAND (exp, 1);
2432 exp = TREE_OPERAND (exp, 0);
2433 STRIP_NOPS (exp); STRIP_NOPS (and_mask);
2434 if (TREE_CODE (and_mask) != INTEGER_CST)
2435 return 0;
2438 if (TREE_CODE (exp) != COMPONENT_REF && TREE_CODE (exp) != ARRAY_REF
2439 && TREE_CODE (exp) != BIT_FIELD_REF)
2440 return 0;
2442 inner = get_inner_reference (exp, pbitsize, pbitpos, &offset, pmode,
2443 punsignedp, pvolatilep);
2444 if (inner == exp || *pbitsize < 0 || offset != 0)
2445 return 0;
2447 /* Compute the mask to access the bitfield. */
2448 unsigned_type = type_for_size (*pbitsize, 1);
2449 precision = TYPE_PRECISION (unsigned_type);
2451 mask = build_int_2 (~0, ~0);
2452 TREE_TYPE (mask) = unsigned_type;
2453 force_fit_type (mask, 0);
2454 mask = const_binop (LSHIFT_EXPR, mask, size_int (precision - *pbitsize), 0);
2455 mask = const_binop (RSHIFT_EXPR, mask, size_int (precision - *pbitsize), 0);
2457 /* Merge it with the mask we found in the BIT_AND_EXPR, if any. */
2458 if (and_mask != 0)
2459 mask = fold (build (BIT_AND_EXPR, unsigned_type,
2460 convert (unsigned_type, and_mask), mask));
2462 *pmask = mask;
2463 return inner;
2466 /* Return non-zero if MASK represents a mask of SIZE ones in the low-order
2467 bit positions. */
2469 static int
2470 all_ones_mask_p (mask, size)
2471 tree mask;
2472 int size;
2474 tree type = TREE_TYPE (mask);
2475 int precision = TYPE_PRECISION (type);
2476 tree tmask;
2478 tmask = build_int_2 (~0, ~0);
2479 TREE_TYPE (tmask) = signed_type (type);
2480 force_fit_type (tmask, 0);
2481 return
2482 operand_equal_p (mask,
2483 const_binop (RSHIFT_EXPR,
2484 const_binop (LSHIFT_EXPR, tmask,
2485 size_int (precision - size), 0),
2486 size_int (precision - size), 0),
2490 /* Subroutine for fold_truthop: determine if an operand is simple enough
2491 to be evaluated unconditionally. */
2493 static int
2494 simple_operand_p (exp)
2495 tree exp;
2497 /* Strip any conversions that don't change the machine mode. */
2498 while ((TREE_CODE (exp) == NOP_EXPR
2499 || TREE_CODE (exp) == CONVERT_EXPR)
2500 && (TYPE_MODE (TREE_TYPE (exp))
2501 == TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)))))
2502 exp = TREE_OPERAND (exp, 0);
2504 return (TREE_CODE_CLASS (TREE_CODE (exp)) == 'c'
2505 || (TREE_CODE_CLASS (TREE_CODE (exp)) == 'd'
2506 && ! TREE_ADDRESSABLE (exp)
2507 && ! TREE_THIS_VOLATILE (exp)
2508 && ! DECL_NONLOCAL (exp)
2509 /* Don't regard global variables as simple. They may be
2510 allocated in ways unknown to the compiler (shared memory,
2511 #pragma weak, etc). */
2512 && ! TREE_PUBLIC (exp)
2513 && ! DECL_EXTERNAL (exp)
2514 /* Loading a static variable is unduly expensive, but global
2515 registers aren't expensive. */
2516 && (! TREE_STATIC (exp) || DECL_REGISTER (exp))));
2519 /* Subroutine for fold_truthop: try to optimize a range test.
2521 For example, "i >= 2 && i =< 9" can be done as "(unsigned) (i - 2) <= 7".
2523 JCODE is the logical combination of the two terms. It is TRUTH_AND_EXPR
2524 (representing TRUTH_ANDIF_EXPR and TRUTH_AND_EXPR) or TRUTH_OR_EXPR
2525 (representing TRUTH_ORIF_EXPR and TRUTH_OR_EXPR). TYPE is the type of
2526 the result.
2528 VAR is the value being tested. LO_CODE and HI_CODE are the comparison
2529 operators comparing VAR to LO_CST and HI_CST. LO_CST is known to be no
2530 larger than HI_CST (they may be equal).
2532 We return the simplified tree or 0 if no optimization is possible. */
2534 static tree
2535 range_test (jcode, type, lo_code, hi_code, var, lo_cst, hi_cst)
2536 enum tree_code jcode, lo_code, hi_code;
2537 tree type, var, lo_cst, hi_cst;
2539 tree utype;
2540 enum tree_code rcode;
2542 /* See if this is a range test and normalize the constant terms. */
2544 if (jcode == TRUTH_AND_EXPR)
2546 switch (lo_code)
2548 case NE_EXPR:
2549 /* See if we have VAR != CST && VAR != CST+1. */
2550 if (! (hi_code == NE_EXPR
2551 && TREE_INT_CST_LOW (hi_cst) - TREE_INT_CST_LOW (lo_cst) == 1
2552 && tree_int_cst_equal (integer_one_node,
2553 const_binop (MINUS_EXPR,
2554 hi_cst, lo_cst, 0))))
2555 return 0;
2557 rcode = GT_EXPR;
2558 break;
2560 case GT_EXPR:
2561 case GE_EXPR:
2562 if (hi_code == LT_EXPR)
2563 hi_cst = const_binop (MINUS_EXPR, hi_cst, integer_one_node, 0);
2564 else if (hi_code != LE_EXPR)
2565 return 0;
2567 if (lo_code == GT_EXPR)
2568 lo_cst = const_binop (PLUS_EXPR, lo_cst, integer_one_node, 0);
2570 /* We now have VAR >= LO_CST && VAR <= HI_CST. */
2571 rcode = LE_EXPR;
2572 break;
2574 default:
2575 return 0;
2578 else
2580 switch (lo_code)
2582 case EQ_EXPR:
2583 /* See if we have VAR == CST || VAR == CST+1. */
2584 if (! (hi_code == EQ_EXPR
2585 && TREE_INT_CST_LOW (hi_cst) - TREE_INT_CST_LOW (lo_cst) == 1
2586 && tree_int_cst_equal (integer_one_node,
2587 const_binop (MINUS_EXPR,
2588 hi_cst, lo_cst, 0))))
2589 return 0;
2591 rcode = LE_EXPR;
2592 break;
2594 case LE_EXPR:
2595 case LT_EXPR:
2596 if (hi_code == GE_EXPR)
2597 hi_cst = const_binop (MINUS_EXPR, hi_cst, integer_one_node, 0);
2598 else if (hi_code != GT_EXPR)
2599 return 0;
2601 if (lo_code == LE_EXPR)
2602 lo_cst = const_binop (PLUS_EXPR, lo_cst, integer_one_node, 0);
2604 /* We now have VAR < LO_CST || VAR > HI_CST. */
2605 rcode = GT_EXPR;
2606 break;
2608 default:
2609 return 0;
2613 /* When normalizing, it is possible to both increment the smaller constant
2614 and decrement the larger constant. See if they are still ordered. */
2615 if (tree_int_cst_lt (hi_cst, lo_cst))
2616 return 0;
2618 /* Fail if VAR isn't an integer. */
2619 utype = TREE_TYPE (var);
2620 if (! INTEGRAL_TYPE_P (utype))
2621 return 0;
2623 /* The range test is invalid if subtracting the two constants results
2624 in overflow. This can happen in traditional mode. */
2625 if (! int_fits_type_p (hi_cst, TREE_TYPE (var))
2626 || ! int_fits_type_p (lo_cst, TREE_TYPE (var)))
2627 return 0;
2629 if (! TREE_UNSIGNED (utype))
2631 utype = unsigned_type (utype);
2632 var = convert (utype, var);
2633 lo_cst = convert (utype, lo_cst);
2634 hi_cst = convert (utype, hi_cst);
2637 return fold (convert (type,
2638 build (rcode, utype,
2639 build (MINUS_EXPR, utype, var, lo_cst),
2640 const_binop (MINUS_EXPR, hi_cst, lo_cst, 0))));
2643 /* Find ways of folding logical expressions of LHS and RHS:
2644 Try to merge two comparisons to the same innermost item.
2645 Look for range tests like "ch >= '0' && ch <= '9'".
2646 Look for combinations of simple terms on machines with expensive branches
2647 and evaluate the RHS unconditionally.
2649 For example, if we have p->a == 2 && p->b == 4 and we can make an
2650 object large enough to span both A and B, we can do this with a comparison
2651 against the object ANDed with the a mask.
2653 If we have p->a == q->a && p->b == q->b, we may be able to use bit masking
2654 operations to do this with one comparison.
2656 We check for both normal comparisons and the BIT_AND_EXPRs made this by
2657 function and the one above.
2659 CODE is the logical operation being done. It can be TRUTH_ANDIF_EXPR,
2660 TRUTH_AND_EXPR, TRUTH_ORIF_EXPR, or TRUTH_OR_EXPR.
2662 TRUTH_TYPE is the type of the logical operand and LHS and RHS are its
2663 two operands.
2665 We return the simplified tree or 0 if no optimization is possible. */
2667 static tree
2668 fold_truthop (code, truth_type, lhs, rhs)
2669 enum tree_code code;
2670 tree truth_type, lhs, rhs;
2672 /* If this is the "or" of two comparisons, we can do something if we
2673 the comparisons are NE_EXPR. If this is the "and", we can do something
2674 if the comparisons are EQ_EXPR. I.e.,
2675 (a->b == 2 && a->c == 4) can become (a->new == NEW).
2677 WANTED_CODE is this operation code. For single bit fields, we can
2678 convert EQ_EXPR to NE_EXPR so we need not reject the "wrong"
2679 comparison for one-bit fields. */
2681 enum tree_code wanted_code;
2682 enum tree_code lcode, rcode;
2683 tree ll_arg, lr_arg, rl_arg, rr_arg;
2684 tree ll_inner, lr_inner, rl_inner, rr_inner;
2685 int ll_bitsize, ll_bitpos, lr_bitsize, lr_bitpos;
2686 int rl_bitsize, rl_bitpos, rr_bitsize, rr_bitpos;
2687 int xll_bitpos, xlr_bitpos, xrl_bitpos, xrr_bitpos;
2688 int lnbitsize, lnbitpos, rnbitsize, rnbitpos;
2689 int ll_unsignedp, lr_unsignedp, rl_unsignedp, rr_unsignedp;
2690 enum machine_mode ll_mode, lr_mode, rl_mode, rr_mode;
2691 enum machine_mode lnmode, rnmode;
2692 tree ll_mask, lr_mask, rl_mask, rr_mask;
2693 tree l_const, r_const;
2694 tree type, result;
2695 int first_bit, end_bit;
2696 int volatilep;
2698 /* Start by getting the comparison codes and seeing if this looks like
2699 a range test. Fail if anything is volatile. If one operand is a
2700 BIT_AND_EXPR with the constant one, treat it as if it were surrounded
2701 with a NE_EXPR. */
2703 if (TREE_SIDE_EFFECTS (lhs)
2704 || TREE_SIDE_EFFECTS (rhs))
2705 return 0;
2707 lcode = TREE_CODE (lhs);
2708 rcode = TREE_CODE (rhs);
2710 if (lcode == BIT_AND_EXPR && integer_onep (TREE_OPERAND (lhs, 1)))
2711 lcode = NE_EXPR, lhs = build (NE_EXPR, truth_type, lhs, integer_zero_node);
2713 if (rcode == BIT_AND_EXPR && integer_onep (TREE_OPERAND (rhs, 1)))
2714 rcode = NE_EXPR, rhs = build (NE_EXPR, truth_type, rhs, integer_zero_node);
2716 if (TREE_CODE_CLASS (lcode) != '<'
2717 || TREE_CODE_CLASS (rcode) != '<')
2718 return 0;
2720 code = ((code == TRUTH_AND_EXPR || code == TRUTH_ANDIF_EXPR)
2721 ? TRUTH_AND_EXPR : TRUTH_OR_EXPR);
2723 ll_arg = TREE_OPERAND (lhs, 0);
2724 lr_arg = TREE_OPERAND (lhs, 1);
2725 rl_arg = TREE_OPERAND (rhs, 0);
2726 rr_arg = TREE_OPERAND (rhs, 1);
2728 if (TREE_CODE (lr_arg) == INTEGER_CST
2729 && TREE_CODE (rr_arg) == INTEGER_CST
2730 && operand_equal_p (ll_arg, rl_arg, 0))
2732 if (tree_int_cst_lt (lr_arg, rr_arg))
2733 result = range_test (code, truth_type, lcode, rcode,
2734 ll_arg, lr_arg, rr_arg);
2735 else
2736 result = range_test (code, truth_type, rcode, lcode,
2737 ll_arg, rr_arg, lr_arg);
2739 /* If this isn't a range test, it also isn't a comparison that
2740 can be merged. However, it wins to evaluate the RHS unconditionally
2741 on machines with expensive branches. */
2743 if (result == 0 && BRANCH_COST >= 2)
2745 if (TREE_CODE (ll_arg) != VAR_DECL
2746 && TREE_CODE (ll_arg) != PARM_DECL)
2748 /* Avoid evaluating the variable part twice. */
2749 ll_arg = save_expr (ll_arg);
2750 lhs = build (lcode, TREE_TYPE (lhs), ll_arg, lr_arg);
2751 rhs = build (rcode, TREE_TYPE (rhs), ll_arg, rr_arg);
2753 return build (code, truth_type, lhs, rhs);
2755 return result;
2758 /* If the RHS can be evaluated unconditionally and its operands are
2759 simple, it wins to evaluate the RHS unconditionally on machines
2760 with expensive branches. In this case, this isn't a comparison
2761 that can be merged. */
2763 /* @@ I'm not sure it wins on the m88110 to do this if the comparisons
2764 are with zero (tmw). */
2766 if (BRANCH_COST >= 2
2767 && INTEGRAL_TYPE_P (TREE_TYPE (rhs))
2768 && simple_operand_p (rl_arg)
2769 && simple_operand_p (rr_arg))
2770 return build (code, truth_type, lhs, rhs);
2772 /* See if the comparisons can be merged. Then get all the parameters for
2773 each side. */
2775 if ((lcode != EQ_EXPR && lcode != NE_EXPR)
2776 || (rcode != EQ_EXPR && rcode != NE_EXPR))
2777 return 0;
2779 volatilep = 0;
2780 ll_inner = decode_field_reference (ll_arg,
2781 &ll_bitsize, &ll_bitpos, &ll_mode,
2782 &ll_unsignedp, &volatilep, &ll_mask);
2783 lr_inner = decode_field_reference (lr_arg,
2784 &lr_bitsize, &lr_bitpos, &lr_mode,
2785 &lr_unsignedp, &volatilep, &lr_mask);
2786 rl_inner = decode_field_reference (rl_arg,
2787 &rl_bitsize, &rl_bitpos, &rl_mode,
2788 &rl_unsignedp, &volatilep, &rl_mask);
2789 rr_inner = decode_field_reference (rr_arg,
2790 &rr_bitsize, &rr_bitpos, &rr_mode,
2791 &rr_unsignedp, &volatilep, &rr_mask);
2793 /* It must be true that the inner operation on the lhs of each
2794 comparison must be the same if we are to be able to do anything.
2795 Then see if we have constants. If not, the same must be true for
2796 the rhs's. */
2797 if (volatilep || ll_inner == 0 || rl_inner == 0
2798 || ! operand_equal_p (ll_inner, rl_inner, 0))
2799 return 0;
2801 if (TREE_CODE (lr_arg) == INTEGER_CST
2802 && TREE_CODE (rr_arg) == INTEGER_CST)
2803 l_const = lr_arg, r_const = rr_arg;
2804 else if (lr_inner == 0 || rr_inner == 0
2805 || ! operand_equal_p (lr_inner, rr_inner, 0))
2806 return 0;
2807 else
2808 l_const = r_const = 0;
2810 /* If either comparison code is not correct for our logical operation,
2811 fail. However, we can convert a one-bit comparison against zero into
2812 the opposite comparison against that bit being set in the field. */
2814 wanted_code = (code == TRUTH_AND_EXPR ? EQ_EXPR : NE_EXPR);
2815 if (lcode != wanted_code)
2817 if (l_const && integer_zerop (l_const) && integer_pow2p (ll_mask))
2818 l_const = ll_mask;
2819 else
2820 return 0;
2823 if (rcode != wanted_code)
2825 if (r_const && integer_zerop (r_const) && integer_pow2p (rl_mask))
2826 r_const = rl_mask;
2827 else
2828 return 0;
2831 /* See if we can find a mode that contains both fields being compared on
2832 the left. If we can't, fail. Otherwise, update all constants and masks
2833 to be relative to a field of that size. */
2834 first_bit = MIN (ll_bitpos, rl_bitpos);
2835 end_bit = MAX (ll_bitpos + ll_bitsize, rl_bitpos + rl_bitsize);
2836 lnmode = get_best_mode (end_bit - first_bit, first_bit,
2837 TYPE_ALIGN (TREE_TYPE (ll_inner)), word_mode,
2838 volatilep);
2839 if (lnmode == VOIDmode)
2840 return 0;
2842 lnbitsize = GET_MODE_BITSIZE (lnmode);
2843 lnbitpos = first_bit & ~ (lnbitsize - 1);
2844 type = type_for_size (lnbitsize, 1);
2845 xll_bitpos = ll_bitpos - lnbitpos, xrl_bitpos = rl_bitpos - lnbitpos;
2847 if (BYTES_BIG_ENDIAN)
2849 xll_bitpos = lnbitsize - xll_bitpos - ll_bitsize;
2850 xrl_bitpos = lnbitsize - xrl_bitpos - rl_bitsize;
2853 ll_mask = const_binop (LSHIFT_EXPR, convert (type, ll_mask),
2854 size_int (xll_bitpos), 0);
2855 rl_mask = const_binop (LSHIFT_EXPR, convert (type, rl_mask),
2856 size_int (xrl_bitpos), 0);
2858 /* Make sure the constants are interpreted as unsigned, so we
2859 don't have sign bits outside the range of their type. */
2861 if (l_const)
2863 l_const = convert (unsigned_type (TREE_TYPE (l_const)), l_const);
2864 l_const = const_binop (LSHIFT_EXPR, convert (type, l_const),
2865 size_int (xll_bitpos), 0);
2867 if (r_const)
2869 r_const = convert (unsigned_type (TREE_TYPE (r_const)), r_const);
2870 r_const = const_binop (LSHIFT_EXPR, convert (type, r_const),
2871 size_int (xrl_bitpos), 0);
2874 /* If the right sides are not constant, do the same for it. Also,
2875 disallow this optimization if a size or signedness mismatch occurs
2876 between the left and right sides. */
2877 if (l_const == 0)
2879 if (ll_bitsize != lr_bitsize || rl_bitsize != rr_bitsize
2880 || ll_unsignedp != lr_unsignedp || rl_unsignedp != rr_unsignedp
2881 /* Make sure the two fields on the right
2882 correspond to the left without being swapped. */
2883 || ll_bitpos - rl_bitpos != lr_bitpos - rr_bitpos)
2884 return 0;
2886 first_bit = MIN (lr_bitpos, rr_bitpos);
2887 end_bit = MAX (lr_bitpos + lr_bitsize, rr_bitpos + rr_bitsize);
2888 rnmode = get_best_mode (end_bit - first_bit, first_bit,
2889 TYPE_ALIGN (TREE_TYPE (lr_inner)), word_mode,
2890 volatilep);
2891 if (rnmode == VOIDmode)
2892 return 0;
2894 rnbitsize = GET_MODE_BITSIZE (rnmode);
2895 rnbitpos = first_bit & ~ (rnbitsize - 1);
2896 xlr_bitpos = lr_bitpos - rnbitpos, xrr_bitpos = rr_bitpos - rnbitpos;
2898 if (BYTES_BIG_ENDIAN)
2900 xlr_bitpos = rnbitsize - xlr_bitpos - lr_bitsize;
2901 xrr_bitpos = rnbitsize - xrr_bitpos - rr_bitsize;
2904 lr_mask = const_binop (LSHIFT_EXPR, convert (type, lr_mask),
2905 size_int (xlr_bitpos), 0);
2906 rr_mask = const_binop (LSHIFT_EXPR, convert (type, rr_mask),
2907 size_int (xrr_bitpos), 0);
2909 /* Make a mask that corresponds to both fields being compared.
2910 Do this for both items being compared. If the masks agree,
2911 we can do this by masking both and comparing the masked
2912 results. */
2913 ll_mask = const_binop (BIT_IOR_EXPR, ll_mask, rl_mask, 0);
2914 lr_mask = const_binop (BIT_IOR_EXPR, lr_mask, rr_mask, 0);
2915 if (operand_equal_p (ll_mask, lr_mask, 0) && lnbitsize == rnbitsize)
2917 lhs = make_bit_field_ref (ll_inner, type, lnbitsize, lnbitpos,
2918 ll_unsignedp || rl_unsignedp);
2919 rhs = make_bit_field_ref (lr_inner, type, rnbitsize, rnbitpos,
2920 lr_unsignedp || rr_unsignedp);
2921 if (! all_ones_mask_p (ll_mask, lnbitsize))
2923 lhs = build (BIT_AND_EXPR, type, lhs, ll_mask);
2924 rhs = build (BIT_AND_EXPR, type, rhs, ll_mask);
2926 return build (wanted_code, truth_type, lhs, rhs);
2929 /* There is still another way we can do something: If both pairs of
2930 fields being compared are adjacent, we may be able to make a wider
2931 field containing them both. */
2932 if ((ll_bitsize + ll_bitpos == rl_bitpos
2933 && lr_bitsize + lr_bitpos == rr_bitpos)
2934 || (ll_bitpos == rl_bitpos + rl_bitsize
2935 && lr_bitpos == rr_bitpos + rr_bitsize))
2936 return build (wanted_code, truth_type,
2937 make_bit_field_ref (ll_inner, type,
2938 ll_bitsize + rl_bitsize,
2939 MIN (ll_bitpos, rl_bitpos),
2940 ll_unsignedp),
2941 make_bit_field_ref (lr_inner, type,
2942 lr_bitsize + rr_bitsize,
2943 MIN (lr_bitpos, rr_bitpos),
2944 lr_unsignedp));
2946 return 0;
2949 /* Handle the case of comparisons with constants. If there is something in
2950 common between the masks, those bits of the constants must be the same.
2951 If not, the condition is always false. Test for this to avoid generating
2952 incorrect code below. */
2953 result = const_binop (BIT_AND_EXPR, ll_mask, rl_mask, 0);
2954 if (! integer_zerop (result)
2955 && simple_cst_equal (const_binop (BIT_AND_EXPR, result, l_const, 0),
2956 const_binop (BIT_AND_EXPR, result, r_const, 0)) != 1)
2958 if (wanted_code == NE_EXPR)
2960 warning ("`or' of unmatched not-equal tests is always 1");
2961 return convert (truth_type, integer_one_node);
2963 else
2965 warning ("`and' of mutually exclusive equal-tests is always zero");
2966 return convert (truth_type, integer_zero_node);
2970 /* Construct the expression we will return. First get the component
2971 reference we will make. Unless the mask is all ones the width of
2972 that field, perform the mask operation. Then compare with the
2973 merged constant. */
2974 result = make_bit_field_ref (ll_inner, type, lnbitsize, lnbitpos,
2975 ll_unsignedp || rl_unsignedp);
2977 ll_mask = const_binop (BIT_IOR_EXPR, ll_mask, rl_mask, 0);
2978 if (! all_ones_mask_p (ll_mask, lnbitsize))
2979 result = build (BIT_AND_EXPR, type, result, ll_mask);
2981 return build (wanted_code, truth_type, result,
2982 const_binop (BIT_IOR_EXPR, l_const, r_const, 0));
2985 /* If T contains a COMPOUND_EXPR which was inserted merely to evaluate
2986 S, a SAVE_EXPR, return the expression actually being evaluated. Note
2987 that we may sometimes modify the tree. */
2989 static tree
2990 strip_compound_expr (t, s)
2991 tree t;
2992 tree s;
2994 tree type = TREE_TYPE (t);
2995 enum tree_code code = TREE_CODE (t);
2997 /* See if this is the COMPOUND_EXPR we want to eliminate. */
2998 if (code == COMPOUND_EXPR && TREE_CODE (TREE_OPERAND (t, 0)) == CONVERT_EXPR
2999 && TREE_OPERAND (TREE_OPERAND (t, 0), 0) == s)
3000 return TREE_OPERAND (t, 1);
3002 /* See if this is a COND_EXPR or a simple arithmetic operator. We
3003 don't bother handling any other types. */
3004 else if (code == COND_EXPR)
3006 TREE_OPERAND (t, 0) = strip_compound_expr (TREE_OPERAND (t, 0), s);
3007 TREE_OPERAND (t, 1) = strip_compound_expr (TREE_OPERAND (t, 1), s);
3008 TREE_OPERAND (t, 2) = strip_compound_expr (TREE_OPERAND (t, 2), s);
3010 else if (TREE_CODE_CLASS (code) == '1')
3011 TREE_OPERAND (t, 0) = strip_compound_expr (TREE_OPERAND (t, 0), s);
3012 else if (TREE_CODE_CLASS (code) == '<'
3013 || TREE_CODE_CLASS (code) == '2')
3015 TREE_OPERAND (t, 0) = strip_compound_expr (TREE_OPERAND (t, 0), s);
3016 TREE_OPERAND (t, 1) = strip_compound_expr (TREE_OPERAND (t, 1), s);
3019 return t;
3022 /* Perform constant folding and related simplification of EXPR.
3023 The related simplifications include x*1 => x, x*0 => 0, etc.,
3024 and application of the associative law.
3025 NOP_EXPR conversions may be removed freely (as long as we
3026 are careful not to change the C type of the overall expression)
3027 We cannot simplify through a CONVERT_EXPR, FIX_EXPR or FLOAT_EXPR,
3028 but we can constant-fold them if they have constant operands. */
3030 tree
3031 fold (expr)
3032 tree expr;
3034 register tree t = expr;
3035 tree t1 = NULL_TREE;
3036 tree tem;
3037 tree type = TREE_TYPE (expr);
3038 register tree arg0, arg1;
3039 register enum tree_code code = TREE_CODE (t);
3040 register int kind;
3041 int invert;
3043 /* WINS will be nonzero when the switch is done
3044 if all operands are constant. */
3046 int wins = 1;
3048 /* Don't try to process an RTL_EXPR since its operands aren't trees. */
3049 if (code == RTL_EXPR)
3050 return t;
3052 /* Return right away if already constant. */
3053 if (TREE_CONSTANT (t))
3055 if (code == CONST_DECL)
3056 return DECL_INITIAL (t);
3057 return t;
3060 kind = TREE_CODE_CLASS (code);
3061 if (code == NOP_EXPR || code == FLOAT_EXPR || code == CONVERT_EXPR)
3063 tree subop;
3065 /* Special case for conversion ops that can have fixed point args. */
3066 arg0 = TREE_OPERAND (t, 0);
3068 /* Don't use STRIP_NOPS, because signedness of argument type matters. */
3069 if (arg0 != 0)
3070 STRIP_TYPE_NOPS (arg0);
3072 if (arg0 != 0 && TREE_CODE (arg0) == COMPLEX_CST)
3073 subop = TREE_REALPART (arg0);
3074 else
3075 subop = arg0;
3077 if (subop != 0 && TREE_CODE (subop) != INTEGER_CST
3078 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3079 && TREE_CODE (subop) != REAL_CST
3080 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
3082 /* Note that TREE_CONSTANT isn't enough:
3083 static var addresses are constant but we can't
3084 do arithmetic on them. */
3085 wins = 0;
3087 else if (kind == 'e' || kind == '<'
3088 || kind == '1' || kind == '2' || kind == 'r')
3090 register int len = tree_code_length[(int) code];
3091 register int i;
3092 for (i = 0; i < len; i++)
3094 tree op = TREE_OPERAND (t, i);
3095 tree subop;
3097 if (op == 0)
3098 continue; /* Valid for CALL_EXPR, at least. */
3100 if (kind == '<' || code == RSHIFT_EXPR)
3102 /* Signedness matters here. Perhaps we can refine this
3103 later. */
3104 STRIP_TYPE_NOPS (op);
3106 else
3108 /* Strip any conversions that don't change the mode. */
3109 STRIP_NOPS (op);
3112 if (TREE_CODE (op) == COMPLEX_CST)
3113 subop = TREE_REALPART (op);
3114 else
3115 subop = op;
3117 if (TREE_CODE (subop) != INTEGER_CST
3118 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3119 && TREE_CODE (subop) != REAL_CST
3120 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
3122 /* Note that TREE_CONSTANT isn't enough:
3123 static var addresses are constant but we can't
3124 do arithmetic on them. */
3125 wins = 0;
3127 if (i == 0)
3128 arg0 = op;
3129 else if (i == 1)
3130 arg1 = op;
3134 /* If this is a commutative operation, and ARG0 is a constant, move it
3135 to ARG1 to reduce the number of tests below. */
3136 if ((code == PLUS_EXPR || code == MULT_EXPR || code == MIN_EXPR
3137 || code == MAX_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR
3138 || code == BIT_AND_EXPR)
3139 && (TREE_CODE (arg0) == INTEGER_CST || TREE_CODE (arg0) == REAL_CST))
3141 tem = arg0; arg0 = arg1; arg1 = tem;
3143 tem = TREE_OPERAND (t, 0); TREE_OPERAND (t, 0) = TREE_OPERAND (t, 1);
3144 TREE_OPERAND (t, 1) = tem;
3147 /* Now WINS is set as described above,
3148 ARG0 is the first operand of EXPR,
3149 and ARG1 is the second operand (if it has more than one operand).
3151 First check for cases where an arithmetic operation is applied to a
3152 compound, conditional, or comparison operation. Push the arithmetic
3153 operation inside the compound or conditional to see if any folding
3154 can then be done. Convert comparison to conditional for this purpose.
3155 The also optimizes non-constant cases that used to be done in
3156 expand_expr.
3158 Before we do that, see if this is a BIT_AND_EXPR or a BIT_OR_EXPR,
3159 one of the operands is a comparison and the other is a comparison, a
3160 BIT_AND_EXPR with the constant 1, or a truth value. In that case, the
3161 code below would make the expression more complex. Change it to a
3162 TRUTH_{AND,OR}_EXPR. Likewise, convert a similar NE_EXPR to
3163 TRUTH_XOR_EXPR and an EQ_EXPR to the inversion of a TRUTH_XOR_EXPR. */
3165 if ((code == BIT_AND_EXPR || code == BIT_IOR_EXPR
3166 || code == EQ_EXPR || code == NE_EXPR)
3167 && ((truth_value_p (TREE_CODE (arg0))
3168 && (truth_value_p (TREE_CODE (arg1))
3169 || (TREE_CODE (arg1) == BIT_AND_EXPR
3170 && integer_onep (TREE_OPERAND (arg1, 1)))))
3171 || (truth_value_p (TREE_CODE (arg1))
3172 && (truth_value_p (TREE_CODE (arg0))
3173 || (TREE_CODE (arg0) == BIT_AND_EXPR
3174 && integer_onep (TREE_OPERAND (arg0, 1)))))))
3176 t = fold (build (code == BIT_AND_EXPR ? TRUTH_AND_EXPR
3177 : code == BIT_IOR_EXPR ? TRUTH_OR_EXPR
3178 : TRUTH_XOR_EXPR,
3179 type, arg0, arg1));
3181 if (code == EQ_EXPR)
3182 t = invert_truthvalue (t);
3184 return t;
3187 if (TREE_CODE_CLASS (code) == '1')
3189 if (TREE_CODE (arg0) == COMPOUND_EXPR)
3190 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
3191 fold (build1 (code, type, TREE_OPERAND (arg0, 1))));
3192 else if (TREE_CODE (arg0) == COND_EXPR)
3194 t = fold (build (COND_EXPR, type, TREE_OPERAND (arg0, 0),
3195 fold (build1 (code, type, TREE_OPERAND (arg0, 1))),
3196 fold (build1 (code, type, TREE_OPERAND (arg0, 2)))));
3198 /* If this was a conversion, and all we did was to move into
3199 inside the COND_EXPR, bring it back out. But leave it if
3200 it is a conversion from integer to integer and the
3201 result precision is no wider than a word since such a
3202 conversion is cheap and may be optimized away by combine,
3203 while it couldn't if it were outside the COND_EXPR. Then return
3204 so we don't get into an infinite recursion loop taking the
3205 conversion out and then back in. */
3207 if ((code == NOP_EXPR || code == CONVERT_EXPR
3208 || code == NON_LVALUE_EXPR)
3209 && TREE_CODE (t) == COND_EXPR
3210 && TREE_CODE (TREE_OPERAND (t, 1)) == code
3211 && TREE_CODE (TREE_OPERAND (t, 2)) == code
3212 && (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 1), 0))
3213 == TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 2), 0)))
3214 && ! (INTEGRAL_TYPE_P (TREE_TYPE (t))
3215 && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 1), 0)))
3216 && TYPE_PRECISION (TREE_TYPE (t)) <= BITS_PER_WORD))
3217 t = build1 (code, type,
3218 build (COND_EXPR,
3219 TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 1), 0)),
3220 TREE_OPERAND (t, 0),
3221 TREE_OPERAND (TREE_OPERAND (t, 1), 0),
3222 TREE_OPERAND (TREE_OPERAND (t, 2), 0)));
3223 return t;
3225 else if (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<')
3226 return fold (build (COND_EXPR, type, arg0,
3227 fold (build1 (code, type, integer_one_node)),
3228 fold (build1 (code, type, integer_zero_node))));
3230 else if (TREE_CODE_CLASS (code) == '2'
3231 || TREE_CODE_CLASS (code) == '<')
3233 if (TREE_CODE (arg1) == COMPOUND_EXPR)
3234 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg1, 0),
3235 fold (build (code, type,
3236 arg0, TREE_OPERAND (arg1, 1))));
3237 else if (TREE_CODE (arg1) == COND_EXPR
3238 || TREE_CODE_CLASS (TREE_CODE (arg1)) == '<')
3240 tree test, true_value, false_value;
3242 if (TREE_CODE (arg1) == COND_EXPR)
3244 test = TREE_OPERAND (arg1, 0);
3245 true_value = TREE_OPERAND (arg1, 1);
3246 false_value = TREE_OPERAND (arg1, 2);
3248 else
3250 test = arg1;
3251 true_value = integer_one_node;
3252 false_value = integer_zero_node;
3255 /* If ARG0 is complex we want to make sure we only evaluate
3256 it once. Though this is only required if it is volatile, it
3257 might be more efficient even if it is not. However, if we
3258 succeed in folding one part to a constant, we do not need
3259 to make this SAVE_EXPR. Since we do this optimization
3260 primarily to see if we do end up with constant and this
3261 SAVE_EXPR interfers with later optimizations, suppressing
3262 it when we can is important. */
3264 if (TREE_CODE (arg0) != SAVE_EXPR
3265 && ((TREE_CODE (arg0) != VAR_DECL
3266 && TREE_CODE (arg0) != PARM_DECL)
3267 || TREE_SIDE_EFFECTS (arg0)))
3269 tree lhs = fold (build (code, type, arg0, true_value));
3270 tree rhs = fold (build (code, type, arg0, false_value));
3272 if (TREE_CONSTANT (lhs) || TREE_CONSTANT (rhs))
3273 return fold (build (COND_EXPR, type, test, lhs, rhs));
3275 arg0 = save_expr (arg0);
3278 test = fold (build (COND_EXPR, type, test,
3279 fold (build (code, type, arg0, true_value)),
3280 fold (build (code, type, arg0, false_value))));
3281 if (TREE_CODE (arg0) == SAVE_EXPR)
3282 return build (COMPOUND_EXPR, type,
3283 convert (void_type_node, arg0),
3284 strip_compound_expr (test, arg0));
3285 else
3286 return convert (type, test);
3289 else if (TREE_CODE (arg0) == COMPOUND_EXPR)
3290 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
3291 fold (build (code, type, TREE_OPERAND (arg0, 1), arg1)));
3292 else if (TREE_CODE (arg0) == COND_EXPR
3293 || TREE_CODE_CLASS (TREE_CODE (arg0)) == '<')
3295 tree test, true_value, false_value;
3297 if (TREE_CODE (arg0) == COND_EXPR)
3299 test = TREE_OPERAND (arg0, 0);
3300 true_value = TREE_OPERAND (arg0, 1);
3301 false_value = TREE_OPERAND (arg0, 2);
3303 else
3305 test = arg0;
3306 true_value = integer_one_node;
3307 false_value = integer_zero_node;
3310 if (TREE_CODE (arg1) != SAVE_EXPR
3311 && ((TREE_CODE (arg1) != VAR_DECL
3312 && TREE_CODE (arg1) != PARM_DECL)
3313 || TREE_SIDE_EFFECTS (arg1)))
3315 tree lhs = fold (build (code, type, true_value, arg1));
3316 tree rhs = fold (build (code, type, false_value, arg1));
3318 if (TREE_CONSTANT (lhs) || TREE_CONSTANT (rhs)
3319 || TREE_CONSTANT (arg1))
3320 return fold (build (COND_EXPR, type, test, lhs, rhs));
3322 arg1 = save_expr (arg1);
3325 test = fold (build (COND_EXPR, type, test,
3326 fold (build (code, type, true_value, arg1)),
3327 fold (build (code, type, false_value, arg1))));
3328 if (TREE_CODE (arg1) == SAVE_EXPR)
3329 return build (COMPOUND_EXPR, type,
3330 convert (void_type_node, arg1),
3331 strip_compound_expr (test, arg1));
3332 else
3333 return convert (type, test);
3336 else if (TREE_CODE_CLASS (code) == '<'
3337 && TREE_CODE (arg0) == COMPOUND_EXPR)
3338 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
3339 fold (build (code, type, TREE_OPERAND (arg0, 1), arg1)));
3340 else if (TREE_CODE_CLASS (code) == '<'
3341 && TREE_CODE (arg1) == COMPOUND_EXPR)
3342 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg1, 0),
3343 fold (build (code, type, arg0, TREE_OPERAND (arg1, 1))));
3345 switch (code)
3347 case INTEGER_CST:
3348 case REAL_CST:
3349 case STRING_CST:
3350 case COMPLEX_CST:
3351 case CONSTRUCTOR:
3352 return t;
3354 case CONST_DECL:
3355 return fold (DECL_INITIAL (t));
3357 case NOP_EXPR:
3358 case FLOAT_EXPR:
3359 case CONVERT_EXPR:
3360 case FIX_TRUNC_EXPR:
3361 /* Other kinds of FIX are not handled properly by fold_convert. */
3363 if (TREE_TYPE (TREE_OPERAND (t, 0)) == TREE_TYPE (t))
3364 return TREE_OPERAND (t, 0);
3366 /* Handle cases of two conversions in a row. */
3367 if (TREE_CODE (TREE_OPERAND (t, 0)) == NOP_EXPR
3368 || TREE_CODE (TREE_OPERAND (t, 0)) == CONVERT_EXPR)
3370 tree inside_type = TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0));
3371 tree inter_type = TREE_TYPE (TREE_OPERAND (t, 0));
3372 tree final_type = TREE_TYPE (t);
3373 int inside_int = INTEGRAL_TYPE_P (inside_type);
3374 int inside_ptr = POINTER_TYPE_P (inside_type);
3375 int inside_float = FLOAT_TYPE_P (inside_type);
3376 int inside_prec = TYPE_PRECISION (inside_type);
3377 int inside_unsignedp = TREE_UNSIGNED (inside_type);
3378 int inter_int = INTEGRAL_TYPE_P (inter_type);
3379 int inter_ptr = POINTER_TYPE_P (inter_type);
3380 int inter_float = FLOAT_TYPE_P (inter_type);
3381 int inter_prec = TYPE_PRECISION (inter_type);
3382 int inter_unsignedp = TREE_UNSIGNED (inter_type);
3383 int final_int = INTEGRAL_TYPE_P (final_type);
3384 int final_ptr = POINTER_TYPE_P (final_type);
3385 int final_float = FLOAT_TYPE_P (final_type);
3386 int final_prec = TYPE_PRECISION (final_type);
3387 int final_unsignedp = TREE_UNSIGNED (final_type);
3389 /* In addition to the cases of two conversions in a row
3390 handled below, if we are converting something to its own
3391 type via an object of identical or wider precision, neither
3392 conversion is needed. */
3393 if (inside_type == final_type
3394 && ((inter_int && final_int) || (inter_float && final_float))
3395 && inter_prec >= final_prec)
3396 return TREE_OPERAND (TREE_OPERAND (t, 0), 0);
3398 /* Likewise, if the intermediate and final types are either both
3399 float or both integer, we don't need the middle conversion if
3400 it is wider than the final type and doesn't change the signedness
3401 (for integers). Avoid this if the final type is a pointer
3402 since then we sometimes need the inner conversion. */
3403 if ((((inter_int || inter_ptr) && (inside_int || inside_ptr))
3404 || (inter_float && inside_float))
3405 && inter_prec >= inside_prec
3406 && (inter_float || inter_unsignedp == inside_unsignedp)
3407 && ! final_ptr)
3408 return convert (final_type, TREE_OPERAND (TREE_OPERAND (t, 0), 0));
3410 /* Two conversions in a row are not needed unless:
3411 - some conversion is floating-point (overstrict for now), or
3412 - the intermediate type is narrower than both initial and
3413 final, or
3414 - the intermediate type and innermost type differ in signedness,
3415 and the outermost type is wider than the intermediate, or
3416 - the initial type is a pointer type and the precisions of the
3417 intermediate and final types differ, or
3418 - the final type is a pointer type and the precisions of the
3419 initial and intermediate types differ. */
3420 if (! inside_float && ! inter_float && ! final_float
3421 && (inter_prec > inside_prec || inter_prec > final_prec)
3422 && ! (inside_int && inter_int
3423 && inter_unsignedp != inside_unsignedp
3424 && inter_prec < final_prec)
3425 && ((inter_unsignedp && inter_prec > inside_prec)
3426 == (final_unsignedp && final_prec > inter_prec))
3427 && ! (inside_ptr && inter_prec != final_prec)
3428 && ! (final_ptr && inside_prec != inter_prec))
3429 return convert (final_type, TREE_OPERAND (TREE_OPERAND (t, 0), 0));
3432 if (TREE_CODE (TREE_OPERAND (t, 0)) == MODIFY_EXPR
3433 && TREE_CONSTANT (TREE_OPERAND (TREE_OPERAND (t, 0), 1))
3434 /* Detect assigning a bitfield. */
3435 && !(TREE_CODE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)) == COMPONENT_REF
3436 && DECL_BIT_FIELD (TREE_OPERAND (TREE_OPERAND (TREE_OPERAND (t, 0), 0), 1))))
3438 /* Don't leave an assignment inside a conversion
3439 unless assigning a bitfield. */
3440 tree prev = TREE_OPERAND (t, 0);
3441 TREE_OPERAND (t, 0) = TREE_OPERAND (prev, 1);
3442 /* First do the assignment, then return converted constant. */
3443 t = build (COMPOUND_EXPR, TREE_TYPE (t), prev, fold (t));
3444 TREE_USED (t) = 1;
3445 return t;
3447 if (!wins)
3449 TREE_CONSTANT (t) = TREE_CONSTANT (arg0);
3450 return t;
3452 return fold_convert (t, arg0);
3454 #if 0 /* This loses on &"foo"[0]. */
3455 case ARRAY_REF:
3457 int i;
3459 /* Fold an expression like: "foo"[2] */
3460 if (TREE_CODE (arg0) == STRING_CST
3461 && TREE_CODE (arg1) == INTEGER_CST
3462 && !TREE_INT_CST_HIGH (arg1)
3463 && (i = TREE_INT_CST_LOW (arg1)) < TREE_STRING_LENGTH (arg0))
3465 t = build_int_2 (TREE_STRING_POINTER (arg0)[i], 0);
3466 TREE_TYPE (t) = TREE_TYPE (TREE_TYPE (arg0));
3467 force_fit_type (t, 0);
3470 return t;
3471 #endif /* 0 */
3473 case COMPONENT_REF:
3474 if (TREE_CODE (arg0) == CONSTRUCTOR)
3476 tree m = purpose_member (arg1, CONSTRUCTOR_ELTS (arg0));
3477 if (m)
3478 t = TREE_VALUE (m);
3480 return t;
3482 case RANGE_EXPR:
3483 TREE_CONSTANT (t) = wins;
3484 return t;
3486 case NEGATE_EXPR:
3487 if (wins)
3489 if (TREE_CODE (arg0) == INTEGER_CST)
3491 HOST_WIDE_INT low, high;
3492 int overflow = neg_double (TREE_INT_CST_LOW (arg0),
3493 TREE_INT_CST_HIGH (arg0),
3494 &low, &high);
3495 t = build_int_2 (low, high);
3496 TREE_TYPE (t) = type;
3497 TREE_OVERFLOW (t)
3498 = (TREE_OVERFLOW (arg0)
3499 | force_fit_type (t, overflow));
3500 TREE_CONSTANT_OVERFLOW (t)
3501 = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg0);
3503 else if (TREE_CODE (arg0) == REAL_CST)
3504 t = build_real (type, REAL_VALUE_NEGATE (TREE_REAL_CST (arg0)));
3505 TREE_TYPE (t) = type;
3507 else if (TREE_CODE (arg0) == NEGATE_EXPR)
3508 return TREE_OPERAND (arg0, 0);
3510 /* Convert - (a - b) to (b - a) for non-floating-point. */
3511 else if (TREE_CODE (arg0) == MINUS_EXPR && ! FLOAT_TYPE_P (type))
3512 return build (MINUS_EXPR, type, TREE_OPERAND (arg0, 1),
3513 TREE_OPERAND (arg0, 0));
3515 return t;
3517 case ABS_EXPR:
3518 if (wins)
3520 if (TREE_CODE (arg0) == INTEGER_CST)
3522 if (! TREE_UNSIGNED (type)
3523 && TREE_INT_CST_HIGH (arg0) < 0)
3525 HOST_WIDE_INT low, high;
3526 int overflow = neg_double (TREE_INT_CST_LOW (arg0),
3527 TREE_INT_CST_HIGH (arg0),
3528 &low, &high);
3529 t = build_int_2 (low, high);
3530 TREE_TYPE (t) = type;
3531 TREE_OVERFLOW (t)
3532 = (TREE_OVERFLOW (arg0)
3533 | force_fit_type (t, overflow));
3534 TREE_CONSTANT_OVERFLOW (t)
3535 = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg0);
3538 else if (TREE_CODE (arg0) == REAL_CST)
3540 if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (arg0)))
3541 t = build_real (type,
3542 REAL_VALUE_NEGATE (TREE_REAL_CST (arg0)));
3544 TREE_TYPE (t) = type;
3546 else if (TREE_CODE (arg0) == ABS_EXPR || TREE_CODE (arg0) == NEGATE_EXPR)
3547 return build1 (ABS_EXPR, type, TREE_OPERAND (arg0, 0));
3548 return t;
3550 case CONJ_EXPR:
3551 if (TREE_CODE (TREE_TYPE (arg0)) != COMPLEX_TYPE)
3552 return arg0;
3553 else if (TREE_CODE (arg0) == COMPLEX_EXPR)
3554 return build (COMPLEX_EXPR, TREE_TYPE (arg0),
3555 TREE_OPERAND (arg0, 0),
3556 fold (build1 (NEGATE_EXPR,
3557 TREE_TYPE (TREE_TYPE (arg0)),
3558 TREE_OPERAND (arg0, 1))));
3559 else if (TREE_CODE (arg0) == COMPLEX_CST)
3560 return build_complex (TREE_OPERAND (arg0, 0),
3561 fold (build1 (NEGATE_EXPR,
3562 TREE_TYPE (TREE_TYPE (arg0)),
3563 TREE_OPERAND (arg0, 1))));
3564 else if (TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MINUS_EXPR)
3565 return fold (build (TREE_CODE (arg0), type,
3566 fold (build1 (CONJ_EXPR, type,
3567 TREE_OPERAND (arg0, 0))),
3568 fold (build1 (CONJ_EXPR,
3569 type, TREE_OPERAND (arg0, 1)))));
3570 else if (TREE_CODE (arg0) == CONJ_EXPR)
3571 return TREE_OPERAND (arg0, 0);
3572 return t;
3574 case BIT_NOT_EXPR:
3575 if (wins)
3577 if (TREE_CODE (arg0) == INTEGER_CST)
3578 t = build_int_2 (~ TREE_INT_CST_LOW (arg0),
3579 ~ TREE_INT_CST_HIGH (arg0));
3580 TREE_TYPE (t) = type;
3581 force_fit_type (t, 0);
3582 TREE_OVERFLOW (t) = TREE_OVERFLOW (arg0);
3583 TREE_CONSTANT_OVERFLOW (t) = TREE_CONSTANT_OVERFLOW (arg0);
3585 else if (TREE_CODE (arg0) == BIT_NOT_EXPR)
3586 return TREE_OPERAND (arg0, 0);
3587 return t;
3589 case PLUS_EXPR:
3590 /* A + (-B) -> A - B */
3591 if (TREE_CODE (arg1) == NEGATE_EXPR)
3592 return fold (build (MINUS_EXPR, type, arg0, TREE_OPERAND (arg1, 0)));
3593 else if (! FLOAT_TYPE_P (type))
3595 if (integer_zerop (arg1))
3596 return non_lvalue (convert (type, arg0));
3598 /* If we are adding two BIT_AND_EXPR's, both of which are and'ing
3599 with a constant, and the two constants have no bits in common,
3600 we should treat this as a BIT_IOR_EXPR since this may produce more
3601 simplifications. */
3602 if (TREE_CODE (arg0) == BIT_AND_EXPR
3603 && TREE_CODE (arg1) == BIT_AND_EXPR
3604 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
3605 && TREE_CODE (TREE_OPERAND (arg1, 1)) == INTEGER_CST
3606 && integer_zerop (const_binop (BIT_AND_EXPR,
3607 TREE_OPERAND (arg0, 1),
3608 TREE_OPERAND (arg1, 1), 0)))
3610 code = BIT_IOR_EXPR;
3611 goto bit_ior;
3614 /* (A * C) + (B * C) -> (A+B) * C. Since we are most concerned
3615 about the case where C is a constant, just try one of the
3616 four possibilities. */
3618 if (TREE_CODE (arg0) == MULT_EXPR && TREE_CODE (arg1) == MULT_EXPR
3619 && operand_equal_p (TREE_OPERAND (arg0, 1),
3620 TREE_OPERAND (arg1, 1), 0))
3621 return fold (build (MULT_EXPR, type,
3622 fold (build (PLUS_EXPR, type,
3623 TREE_OPERAND (arg0, 0),
3624 TREE_OPERAND (arg1, 0))),
3625 TREE_OPERAND (arg0, 1)));
3627 /* In IEEE floating point, x+0 may not equal x. */
3628 else if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3629 || flag_fast_math)
3630 && real_zerop (arg1))
3631 return non_lvalue (convert (type, arg0));
3632 associate:
3633 /* In most languages, can't associate operations on floats
3634 through parentheses. Rather than remember where the parentheses
3635 were, we don't associate floats at all. It shouldn't matter much.
3636 However, associating multiplications is only very slightly
3637 inaccurate, so do that if -ffast-math is specified. */
3638 if (FLOAT_TYPE_P (type)
3639 && ! (flag_fast_math && code == MULT_EXPR))
3640 goto binary;
3642 /* The varsign == -1 cases happen only for addition and subtraction.
3643 It says that the arg that was split was really CON minus VAR.
3644 The rest of the code applies to all associative operations. */
3645 if (!wins)
3647 tree var, con;
3648 int varsign;
3650 if (split_tree (arg0, code, &var, &con, &varsign))
3652 if (varsign == -1)
3654 /* EXPR is (CON-VAR) +- ARG1. */
3655 /* If it is + and VAR==ARG1, return just CONST. */
3656 if (code == PLUS_EXPR && operand_equal_p (var, arg1, 0))
3657 return convert (TREE_TYPE (t), con);
3659 /* If ARG0 is a constant, don't change things around;
3660 instead keep all the constant computations together. */
3662 if (TREE_CONSTANT (arg0))
3663 return t;
3665 /* Otherwise return (CON +- ARG1) - VAR. */
3666 TREE_SET_CODE (t, MINUS_EXPR);
3667 TREE_OPERAND (t, 1) = var;
3668 TREE_OPERAND (t, 0)
3669 = fold (build (code, TREE_TYPE (t), con, arg1));
3671 else
3673 /* EXPR is (VAR+CON) +- ARG1. */
3674 /* If it is - and VAR==ARG1, return just CONST. */
3675 if (code == MINUS_EXPR && operand_equal_p (var, arg1, 0))
3676 return convert (TREE_TYPE (t), con);
3678 /* If ARG0 is a constant, don't change things around;
3679 instead keep all the constant computations together. */
3681 if (TREE_CONSTANT (arg0))
3682 return t;
3684 /* Otherwise return VAR +- (ARG1 +- CON). */
3685 TREE_OPERAND (t, 1) = tem
3686 = fold (build (code, TREE_TYPE (t), arg1, con));
3687 TREE_OPERAND (t, 0) = var;
3688 if (integer_zerop (tem)
3689 && (code == PLUS_EXPR || code == MINUS_EXPR))
3690 return convert (type, var);
3691 /* If we have x +/- (c - d) [c an explicit integer]
3692 change it to x -/+ (d - c) since if d is relocatable
3693 then the latter can be a single immediate insn
3694 and the former cannot. */
3695 if (TREE_CODE (tem) == MINUS_EXPR
3696 && TREE_CODE (TREE_OPERAND (tem, 0)) == INTEGER_CST)
3698 tree tem1 = TREE_OPERAND (tem, 1);
3699 TREE_OPERAND (tem, 1) = TREE_OPERAND (tem, 0);
3700 TREE_OPERAND (tem, 0) = tem1;
3701 TREE_SET_CODE (t,
3702 (code == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR));
3705 return t;
3708 if (split_tree (arg1, code, &var, &con, &varsign))
3710 if (TREE_CONSTANT (arg1))
3711 return t;
3713 if (varsign == -1)
3714 TREE_SET_CODE (t,
3715 (code == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR));
3717 /* EXPR is ARG0 +- (CON +- VAR). */
3718 if (TREE_CODE (t) == MINUS_EXPR
3719 && operand_equal_p (var, arg0, 0))
3721 /* If VAR and ARG0 cancel, return just CON or -CON. */
3722 if (code == PLUS_EXPR)
3723 return convert (TREE_TYPE (t), con);
3724 return fold (build1 (NEGATE_EXPR, TREE_TYPE (t),
3725 convert (TREE_TYPE (t), con)));
3728 TREE_OPERAND (t, 0)
3729 = fold (build (code, TREE_TYPE (t), arg0, con));
3730 TREE_OPERAND (t, 1) = var;
3731 if (integer_zerop (TREE_OPERAND (t, 0))
3732 && TREE_CODE (t) == PLUS_EXPR)
3733 return convert (TREE_TYPE (t), var);
3734 return t;
3737 binary:
3738 #if defined (REAL_IS_NOT_DOUBLE) && ! defined (REAL_ARITHMETIC)
3739 if (TREE_CODE (arg1) == REAL_CST)
3740 return t;
3741 #endif /* REAL_IS_NOT_DOUBLE, and no REAL_ARITHMETIC */
3742 if (wins)
3743 t1 = const_binop (code, arg0, arg1, 0);
3744 if (t1 != NULL_TREE)
3746 /* The return value should always have
3747 the same type as the original expression. */
3748 TREE_TYPE (t1) = TREE_TYPE (t);
3749 return t1;
3751 return t;
3753 case MINUS_EXPR:
3754 if (! FLOAT_TYPE_P (type))
3756 if (! wins && integer_zerop (arg0))
3757 return build1 (NEGATE_EXPR, type, arg1);
3758 if (integer_zerop (arg1))
3759 return non_lvalue (convert (type, arg0));
3761 /* (A * C) - (B * C) -> (A-B) * C. Since we are most concerned
3762 about the case where C is a constant, just try one of the
3763 four possibilities. */
3765 if (TREE_CODE (arg0) == MULT_EXPR && TREE_CODE (arg1) == MULT_EXPR
3766 && operand_equal_p (TREE_OPERAND (arg0, 1),
3767 TREE_OPERAND (arg1, 1), 0))
3768 return fold (build (MULT_EXPR, type,
3769 fold (build (MINUS_EXPR, type,
3770 TREE_OPERAND (arg0, 0),
3771 TREE_OPERAND (arg1, 0))),
3772 TREE_OPERAND (arg0, 1)));
3774 /* Convert A - (-B) to A + B. */
3775 else if (TREE_CODE (arg1) == NEGATE_EXPR)
3776 return fold (build (PLUS_EXPR, type, arg0, TREE_OPERAND (arg1, 0)));
3778 else if (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3779 || flag_fast_math)
3781 /* Except with IEEE floating point, 0-x equals -x. */
3782 if (! wins && real_zerop (arg0))
3783 return build1 (NEGATE_EXPR, type, arg1);
3784 /* Except with IEEE floating point, x-0 equals x. */
3785 if (real_zerop (arg1))
3786 return non_lvalue (convert (type, arg0));
3789 /* Fold &x - &x. This can happen from &x.foo - &x.
3790 This is unsafe for certain floats even in non-IEEE formats.
3791 In IEEE, it is unsafe because it does wrong for NaNs.
3792 Also note that operand_equal_p is always false if an operand
3793 is volatile. */
3795 if ((! FLOAT_TYPE_P (type) || flag_fast_math)
3796 && operand_equal_p (arg0, arg1, 0))
3797 return convert (type, integer_zero_node);
3799 goto associate;
3801 case MULT_EXPR:
3802 if (! FLOAT_TYPE_P (type))
3804 if (integer_zerop (arg1))
3805 return omit_one_operand (type, arg1, arg0);
3806 if (integer_onep (arg1))
3807 return non_lvalue (convert (type, arg0));
3809 /* ((A / C) * C) is A if the division is an
3810 EXACT_DIV_EXPR. Since C is normally a constant,
3811 just check for one of the four possibilities. */
3813 if (TREE_CODE (arg0) == EXACT_DIV_EXPR
3814 && operand_equal_p (TREE_OPERAND (arg0, 1), arg1, 0))
3815 return TREE_OPERAND (arg0, 0);
3817 /* (a * (1 << b)) is (a << b) */
3818 if (TREE_CODE (arg1) == LSHIFT_EXPR
3819 && integer_onep (TREE_OPERAND (arg1, 0)))
3820 return fold (build (LSHIFT_EXPR, type, arg0,
3821 TREE_OPERAND (arg1, 1)));
3822 if (TREE_CODE (arg0) == LSHIFT_EXPR
3823 && integer_onep (TREE_OPERAND (arg0, 0)))
3824 return fold (build (LSHIFT_EXPR, type, arg1,
3825 TREE_OPERAND (arg0, 1)));
3827 else
3829 /* x*0 is 0, except for IEEE floating point. */
3830 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3831 || flag_fast_math)
3832 && real_zerop (arg1))
3833 return omit_one_operand (type, arg1, arg0);
3834 /* In IEEE floating point, x*1 is not equivalent to x for snans.
3835 However, ANSI says we can drop signals,
3836 so we can do this anyway. */
3837 if (real_onep (arg1))
3838 return non_lvalue (convert (type, arg0));
3839 /* x*2 is x+x */
3840 if (! wins && real_twop (arg1))
3842 tree arg = save_expr (arg0);
3843 return build (PLUS_EXPR, type, arg, arg);
3846 goto associate;
3848 case BIT_IOR_EXPR:
3849 bit_ior:
3850 if (integer_all_onesp (arg1))
3851 return omit_one_operand (type, arg1, arg0);
3852 if (integer_zerop (arg1))
3853 return non_lvalue (convert (type, arg0));
3854 t1 = distribute_bit_expr (code, type, arg0, arg1);
3855 if (t1 != NULL_TREE)
3856 return t1;
3858 /* (a << C1) | (a >> C2) if A is unsigned and C1+C2 is the size of A
3859 is a rotate of A by C1 bits. */
3861 if ((TREE_CODE (arg0) == RSHIFT_EXPR
3862 || TREE_CODE (arg0) == LSHIFT_EXPR)
3863 && (TREE_CODE (arg1) == RSHIFT_EXPR
3864 || TREE_CODE (arg1) == LSHIFT_EXPR)
3865 && TREE_CODE (arg0) != TREE_CODE (arg1)
3866 && operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1,0), 0)
3867 && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg0, 0)))
3868 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
3869 && TREE_CODE (TREE_OPERAND (arg1, 1)) == INTEGER_CST
3870 && TREE_INT_CST_HIGH (TREE_OPERAND (arg0, 1)) == 0
3871 && TREE_INT_CST_HIGH (TREE_OPERAND (arg1, 1)) == 0
3872 && ((TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1))
3873 + TREE_INT_CST_LOW (TREE_OPERAND (arg1, 1)))
3874 == TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0)))))
3875 return build (LROTATE_EXPR, type, TREE_OPERAND (arg0, 0),
3876 TREE_CODE (arg0) == LSHIFT_EXPR
3877 ? TREE_OPERAND (arg0, 1) : TREE_OPERAND (arg1, 1));
3879 goto associate;
3881 case BIT_XOR_EXPR:
3882 if (integer_zerop (arg1))
3883 return non_lvalue (convert (type, arg0));
3884 if (integer_all_onesp (arg1))
3885 return fold (build1 (BIT_NOT_EXPR, type, arg0));
3886 goto associate;
3888 case BIT_AND_EXPR:
3889 bit_and:
3890 if (integer_all_onesp (arg1))
3891 return non_lvalue (convert (type, arg0));
3892 if (integer_zerop (arg1))
3893 return omit_one_operand (type, arg1, arg0);
3894 t1 = distribute_bit_expr (code, type, arg0, arg1);
3895 if (t1 != NULL_TREE)
3896 return t1;
3897 /* Simplify ((int)c & 0x377) into (int)c, if c is unsigned char. */
3898 if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == NOP_EXPR
3899 && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg1, 0))))
3901 int prec = TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg1, 0)));
3902 if (prec < BITS_PER_WORD && prec < HOST_BITS_PER_WIDE_INT
3903 && (~TREE_INT_CST_LOW (arg0)
3904 & (((HOST_WIDE_INT) 1 << prec) - 1)) == 0)
3905 return build1 (NOP_EXPR, type, TREE_OPERAND (arg1, 0));
3907 if (TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (arg0) == NOP_EXPR
3908 && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg0, 0))))
3910 int prec = TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0)));
3911 if (prec < BITS_PER_WORD && prec < HOST_BITS_PER_WIDE_INT
3912 && (~TREE_INT_CST_LOW (arg1)
3913 & (((HOST_WIDE_INT) 1 << prec) - 1)) == 0)
3914 return build1 (NOP_EXPR, type, TREE_OPERAND (arg0, 0));
3916 goto associate;
3918 case BIT_ANDTC_EXPR:
3919 if (integer_all_onesp (arg0))
3920 return non_lvalue (convert (type, arg1));
3921 if (integer_zerop (arg0))
3922 return omit_one_operand (type, arg0, arg1);
3923 if (TREE_CODE (arg1) == INTEGER_CST)
3925 arg1 = fold (build1 (BIT_NOT_EXPR, type, arg1));
3926 code = BIT_AND_EXPR;
3927 goto bit_and;
3929 goto binary;
3931 case RDIV_EXPR:
3932 /* In most cases, do nothing with a divide by zero. */
3933 #if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3934 #ifndef REAL_INFINITY
3935 if (TREE_CODE (arg1) == REAL_CST && real_zerop (arg1))
3936 return t;
3937 #endif
3938 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
3940 /* In IEEE floating point, x/1 is not equivalent to x for snans.
3941 However, ANSI says we can drop signals, so we can do this anyway. */
3942 if (real_onep (arg1))
3943 return non_lvalue (convert (type, arg0));
3945 /* If ARG1 is a constant, we can convert this to a multiply by the
3946 reciprocal. This does not have the same rounding properties,
3947 so only do this if -ffast-math. We can actually always safely
3948 do it if ARG1 is a power of two, but it's hard to tell if it is
3949 or not in a portable manner. */
3950 if (TREE_CODE (arg1) == REAL_CST && flag_fast_math
3951 && 0 != (tem = const_binop (code, build_real (type, dconst1),
3952 arg1, 0)))
3953 return fold (build (MULT_EXPR, type, arg0, tem));
3955 goto binary;
3957 case TRUNC_DIV_EXPR:
3958 case ROUND_DIV_EXPR:
3959 case FLOOR_DIV_EXPR:
3960 case CEIL_DIV_EXPR:
3961 case EXACT_DIV_EXPR:
3962 if (integer_onep (arg1))
3963 return non_lvalue (convert (type, arg0));
3964 if (integer_zerop (arg1))
3965 return t;
3967 /* If we have ((a / C1) / C2) where both division are the same type, try
3968 to simplify. First see if C1 * C2 overflows or not. */
3969 if (TREE_CODE (arg0) == code && TREE_CODE (arg1) == INTEGER_CST
3970 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST)
3972 tree new_divisor;
3974 new_divisor = const_binop (MULT_EXPR, TREE_OPERAND (arg0, 1), arg1, 0);
3975 tem = const_binop (FLOOR_DIV_EXPR, new_divisor, arg1, 0);
3977 if (TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1)) == TREE_INT_CST_LOW (tem)
3978 && TREE_INT_CST_HIGH (TREE_OPERAND (arg0, 1)) == TREE_INT_CST_HIGH (tem))
3980 /* If no overflow, divide by C1*C2. */
3981 return fold (build (code, type, TREE_OPERAND (arg0, 0), new_divisor));
3985 /* Look for ((a * C1) / C3) or (((a * C1) + C2) / C3),
3986 where C1 % C3 == 0 or C3 % C1 == 0. We can simplify these
3987 expressions, which often appear in the offsets or sizes of
3988 objects with a varying size. Only deal with positive divisors
3989 and multiplicands. If C2 is negative, we must have C2 % C3 == 0.
3991 Look for NOPs and SAVE_EXPRs inside. */
3993 if (TREE_CODE (arg1) == INTEGER_CST
3994 && tree_int_cst_sgn (arg1) >= 0)
3996 int have_save_expr = 0;
3997 tree c2 = integer_zero_node;
3998 tree xarg0 = arg0;
4000 if (TREE_CODE (xarg0) == SAVE_EXPR)
4001 have_save_expr = 1, xarg0 = TREE_OPERAND (xarg0, 0);
4003 STRIP_NOPS (xarg0);
4005 if (TREE_CODE (xarg0) == PLUS_EXPR
4006 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST)
4007 c2 = TREE_OPERAND (xarg0, 1), xarg0 = TREE_OPERAND (xarg0, 0);
4008 else if (TREE_CODE (xarg0) == MINUS_EXPR
4009 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST
4010 /* If we are doing this computation unsigned, the negate
4011 is incorrect. */
4012 && ! TREE_UNSIGNED (type))
4014 c2 = fold (build1 (NEGATE_EXPR, type, TREE_OPERAND (xarg0, 1)));
4015 xarg0 = TREE_OPERAND (xarg0, 0);
4018 if (TREE_CODE (xarg0) == SAVE_EXPR)
4019 have_save_expr = 1, xarg0 = TREE_OPERAND (xarg0, 0);
4021 STRIP_NOPS (xarg0);
4023 if (TREE_CODE (xarg0) == MULT_EXPR
4024 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST
4025 && tree_int_cst_sgn (TREE_OPERAND (xarg0, 1)) >= 0
4026 && (integer_zerop (const_binop (TRUNC_MOD_EXPR,
4027 TREE_OPERAND (xarg0, 1), arg1, 1))
4028 || integer_zerop (const_binop (TRUNC_MOD_EXPR, arg1,
4029 TREE_OPERAND (xarg0, 1), 1)))
4030 && (tree_int_cst_sgn (c2) >= 0
4031 || integer_zerop (const_binop (TRUNC_MOD_EXPR, c2,
4032 arg1, 1))))
4034 tree outer_div = integer_one_node;
4035 tree c1 = TREE_OPERAND (xarg0, 1);
4036 tree c3 = arg1;
4038 /* If C3 > C1, set them equal and do a divide by
4039 C3/C1 at the end of the operation. */
4040 if (tree_int_cst_lt (c1, c3))
4041 outer_div = const_binop (code, c3, c1, 0), c3 = c1;
4043 /* The result is A * (C1/C3) + (C2/C3). */
4044 t = fold (build (PLUS_EXPR, type,
4045 fold (build (MULT_EXPR, type,
4046 TREE_OPERAND (xarg0, 0),
4047 const_binop (code, c1, c3, 1))),
4048 const_binop (code, c2, c3, 1)));
4050 if (! integer_onep (outer_div))
4051 t = fold (build (code, type, t, convert (type, outer_div)));
4053 if (have_save_expr)
4054 t = save_expr (t);
4056 return t;
4060 goto binary;
4062 case CEIL_MOD_EXPR:
4063 case FLOOR_MOD_EXPR:
4064 case ROUND_MOD_EXPR:
4065 case TRUNC_MOD_EXPR:
4066 if (integer_onep (arg1))
4067 return omit_one_operand (type, integer_zero_node, arg0);
4068 if (integer_zerop (arg1))
4069 return t;
4071 /* Look for ((a * C1) % C3) or (((a * C1) + C2) % C3),
4072 where C1 % C3 == 0. Handle similarly to the division case,
4073 but don't bother with SAVE_EXPRs. */
4075 if (TREE_CODE (arg1) == INTEGER_CST
4076 && ! integer_zerop (arg1))
4078 tree c2 = integer_zero_node;
4079 tree xarg0 = arg0;
4081 if (TREE_CODE (xarg0) == PLUS_EXPR
4082 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST)
4083 c2 = TREE_OPERAND (xarg0, 1), xarg0 = TREE_OPERAND (xarg0, 0);
4084 else if (TREE_CODE (xarg0) == MINUS_EXPR
4085 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST
4086 && ! TREE_UNSIGNED (type))
4088 c2 = fold (build1 (NEGATE_EXPR, type, TREE_OPERAND (xarg0, 1)));
4089 xarg0 = TREE_OPERAND (xarg0, 0);
4092 STRIP_NOPS (xarg0);
4094 if (TREE_CODE (xarg0) == MULT_EXPR
4095 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST
4096 && integer_zerop (const_binop (TRUNC_MOD_EXPR,
4097 TREE_OPERAND (xarg0, 1),
4098 arg1, 1))
4099 && tree_int_cst_sgn (c2) >= 0)
4100 /* The result is (C2%C3). */
4101 return omit_one_operand (type, const_binop (code, c2, arg1, 1),
4102 TREE_OPERAND (xarg0, 0));
4105 goto binary;
4107 case LSHIFT_EXPR:
4108 case RSHIFT_EXPR:
4109 case LROTATE_EXPR:
4110 case RROTATE_EXPR:
4111 if (integer_zerop (arg1))
4112 return non_lvalue (convert (type, arg0));
4113 /* Since negative shift count is not well-defined,
4114 don't try to compute it in the compiler. */
4115 if (tree_int_cst_sgn (arg1) < 0)
4116 return t;
4117 goto binary;
4119 case MIN_EXPR:
4120 if (operand_equal_p (arg0, arg1, 0))
4121 return arg0;
4122 if (INTEGRAL_TYPE_P (type)
4123 && operand_equal_p (arg1, TYPE_MIN_VALUE (type), 1))
4124 return omit_one_operand (type, arg1, arg0);
4125 goto associate;
4127 case MAX_EXPR:
4128 if (operand_equal_p (arg0, arg1, 0))
4129 return arg0;
4130 if (INTEGRAL_TYPE_P (type)
4131 && operand_equal_p (arg1, TYPE_MAX_VALUE (type), 1))
4132 return omit_one_operand (type, arg1, arg0);
4133 goto associate;
4135 case TRUTH_NOT_EXPR:
4136 /* Note that the operand of this must be an int
4137 and its values must be 0 or 1.
4138 ("true" is a fixed value perhaps depending on the language,
4139 but we don't handle values other than 1 correctly yet.) */
4140 return invert_truthvalue (arg0);
4142 case TRUTH_ANDIF_EXPR:
4143 /* Note that the operands of this must be ints
4144 and their values must be 0 or 1.
4145 ("true" is a fixed value perhaps depending on the language.) */
4146 /* If first arg is constant zero, return it. */
4147 if (integer_zerop (arg0))
4148 return arg0;
4149 case TRUTH_AND_EXPR:
4150 /* If either arg is constant true, drop it. */
4151 if (TREE_CODE (arg0) == INTEGER_CST && ! integer_zerop (arg0))
4152 return non_lvalue (arg1);
4153 if (TREE_CODE (arg1) == INTEGER_CST && ! integer_zerop (arg1))
4154 return non_lvalue (arg0);
4155 /* If second arg is constant zero, result is zero, but first arg
4156 must be evaluated. */
4157 if (integer_zerop (arg1))
4158 return omit_one_operand (type, arg1, arg0);
4160 truth_andor:
4161 /* We only do these simplifications if we are optimizing. */
4162 if (!optimize)
4163 return t;
4165 /* Check for things like (A || B) && (A || C). We can convert this
4166 to A || (B && C). Note that either operator can be any of the four
4167 truth and/or operations and the transformation will still be
4168 valid. Also note that we only care about order for the
4169 ANDIF and ORIF operators. */
4170 if (TREE_CODE (arg0) == TREE_CODE (arg1)
4171 && (TREE_CODE (arg0) == TRUTH_ANDIF_EXPR
4172 || TREE_CODE (arg0) == TRUTH_ORIF_EXPR
4173 || TREE_CODE (arg0) == TRUTH_AND_EXPR
4174 || TREE_CODE (arg0) == TRUTH_OR_EXPR))
4176 tree a00 = TREE_OPERAND (arg0, 0);
4177 tree a01 = TREE_OPERAND (arg0, 1);
4178 tree a10 = TREE_OPERAND (arg1, 0);
4179 tree a11 = TREE_OPERAND (arg1, 1);
4180 int commutative = ((TREE_CODE (arg0) == TRUTH_OR_EXPR
4181 || TREE_CODE (arg0) == TRUTH_AND_EXPR)
4182 && (code == TRUTH_AND_EXPR
4183 || code == TRUTH_OR_EXPR));
4185 if (operand_equal_p (a00, a10, 0))
4186 return fold (build (TREE_CODE (arg0), type, a00,
4187 fold (build (code, type, a01, a11))));
4188 else if (commutative && operand_equal_p (a00, a11, 0))
4189 return fold (build (TREE_CODE (arg0), type, a00,
4190 fold (build (code, type, a01, a10))));
4191 else if (commutative && operand_equal_p (a01, a10, 0))
4192 return fold (build (TREE_CODE (arg0), type, a01,
4193 fold (build (code, type, a00, a11))));
4195 /* This case is tricky because we must either have commutative
4196 operators or else A10 must not have side-effects. */
4198 else if ((commutative || ! TREE_SIDE_EFFECTS (a10))
4199 && operand_equal_p (a01, a11, 0))
4200 return fold (build (TREE_CODE (arg0), type,
4201 fold (build (code, type, a00, a10)),
4202 a01));
4205 /* Check for the possibility of merging component references. If our
4206 lhs is another similar operation, try to merge its rhs with our
4207 rhs. Then try to merge our lhs and rhs. */
4208 if (TREE_CODE (arg0) == code
4209 && 0 != (tem = fold_truthop (code, type,
4210 TREE_OPERAND (arg0, 1), arg1)))
4211 return fold (build (code, type, TREE_OPERAND (arg0, 0), tem));
4213 if ((tem = fold_truthop (code, type, arg0, arg1)) != 0)
4214 return tem;
4216 return t;
4218 case TRUTH_ORIF_EXPR:
4219 /* Note that the operands of this must be ints
4220 and their values must be 0 or true.
4221 ("true" is a fixed value perhaps depending on the language.) */
4222 /* If first arg is constant true, return it. */
4223 if (TREE_CODE (arg0) == INTEGER_CST && ! integer_zerop (arg0))
4224 return arg0;
4225 case TRUTH_OR_EXPR:
4226 /* If either arg is constant zero, drop it. */
4227 if (TREE_CODE (arg0) == INTEGER_CST && integer_zerop (arg0))
4228 return non_lvalue (arg1);
4229 if (TREE_CODE (arg1) == INTEGER_CST && integer_zerop (arg1))
4230 return non_lvalue (arg0);
4231 /* If second arg is constant true, result is true, but we must
4232 evaluate first arg. */
4233 if (TREE_CODE (arg1) == INTEGER_CST && ! integer_zerop (arg1))
4234 return omit_one_operand (type, arg1, arg0);
4235 goto truth_andor;
4237 case TRUTH_XOR_EXPR:
4238 /* If either arg is constant zero, drop it. */
4239 if (integer_zerop (arg0))
4240 return non_lvalue (arg1);
4241 if (integer_zerop (arg1))
4242 return non_lvalue (arg0);
4243 /* If either arg is constant true, this is a logical inversion. */
4244 if (integer_onep (arg0))
4245 return non_lvalue (invert_truthvalue (arg1));
4246 if (integer_onep (arg1))
4247 return non_lvalue (invert_truthvalue (arg0));
4248 return t;
4250 case EQ_EXPR:
4251 case NE_EXPR:
4252 case LT_EXPR:
4253 case GT_EXPR:
4254 case LE_EXPR:
4255 case GE_EXPR:
4256 /* If one arg is a constant integer, put it last. */
4257 if (TREE_CODE (arg0) == INTEGER_CST
4258 && TREE_CODE (arg1) != INTEGER_CST)
4260 TREE_OPERAND (t, 0) = arg1;
4261 TREE_OPERAND (t, 1) = arg0;
4262 arg0 = TREE_OPERAND (t, 0);
4263 arg1 = TREE_OPERAND (t, 1);
4264 code = swap_tree_comparison (code);
4265 TREE_SET_CODE (t, code);
4268 /* Convert foo++ == CONST into ++foo == CONST + INCR.
4269 First, see if one arg is constant; find the constant arg
4270 and the other one. */
4272 tree constop = 0, varop;
4273 tree *constoploc;
4275 if (TREE_CONSTANT (arg1))
4276 constoploc = &TREE_OPERAND (t, 1), constop = arg1, varop = arg0;
4277 if (TREE_CONSTANT (arg0))
4278 constoploc = &TREE_OPERAND (t, 0), constop = arg0, varop = arg1;
4280 if (constop && TREE_CODE (varop) == POSTINCREMENT_EXPR)
4282 /* This optimization is invalid for ordered comparisons
4283 if CONST+INCR overflows or if foo+incr might overflow.
4284 This optimization is invalid for floating point due to rounding.
4285 For pointer types we assume overflow doesn't happen. */
4286 if (TREE_CODE (TREE_TYPE (varop)) == POINTER_TYPE
4287 || (! FLOAT_TYPE_P (TREE_TYPE (varop))
4288 && (code == EQ_EXPR || code == NE_EXPR)))
4290 tree newconst
4291 = fold (build (PLUS_EXPR, TREE_TYPE (varop),
4292 constop, TREE_OPERAND (varop, 1)));
4293 TREE_SET_CODE (varop, PREINCREMENT_EXPR);
4294 *constoploc = newconst;
4295 return t;
4298 else if (constop && TREE_CODE (varop) == POSTDECREMENT_EXPR)
4300 if (TREE_CODE (TREE_TYPE (varop)) == POINTER_TYPE
4301 || (! FLOAT_TYPE_P (TREE_TYPE (varop))
4302 && (code == EQ_EXPR || code == NE_EXPR)))
4304 tree newconst
4305 = fold (build (MINUS_EXPR, TREE_TYPE (varop),
4306 constop, TREE_OPERAND (varop, 1)));
4307 TREE_SET_CODE (varop, PREDECREMENT_EXPR);
4308 *constoploc = newconst;
4309 return t;
4314 /* Change X >= CST to X > (CST - 1) if CST is positive. */
4315 if (TREE_CODE (arg1) == INTEGER_CST
4316 && TREE_CODE (arg0) != INTEGER_CST
4317 && tree_int_cst_sgn (arg1) > 0)
4319 switch (TREE_CODE (t))
4321 case GE_EXPR:
4322 code = GT_EXPR;
4323 TREE_SET_CODE (t, code);
4324 arg1 = const_binop (MINUS_EXPR, arg1, integer_one_node, 0);
4325 TREE_OPERAND (t, 1) = arg1;
4326 break;
4328 case LT_EXPR:
4329 code = LE_EXPR;
4330 TREE_SET_CODE (t, code);
4331 arg1 = const_binop (MINUS_EXPR, arg1, integer_one_node, 0);
4332 TREE_OPERAND (t, 1) = arg1;
4336 /* If this is an EQ or NE comparison with zero and ARG0 is
4337 (1 << foo) & bar, convert it to (bar >> foo) & 1. Both require
4338 two operations, but the latter can be done in one less insn
4339 on machines that have only two-operand insns or on which a
4340 constant cannot be the first operand. */
4341 if (integer_zerop (arg1) && (code == EQ_EXPR || code == NE_EXPR)
4342 && TREE_CODE (arg0) == BIT_AND_EXPR)
4344 if (TREE_CODE (TREE_OPERAND (arg0, 0)) == LSHIFT_EXPR
4345 && integer_onep (TREE_OPERAND (TREE_OPERAND (arg0, 0), 0)))
4346 return
4347 fold (build (code, type,
4348 build (BIT_AND_EXPR, TREE_TYPE (arg0),
4349 build (RSHIFT_EXPR,
4350 TREE_TYPE (TREE_OPERAND (arg0, 0)),
4351 TREE_OPERAND (arg0, 1),
4352 TREE_OPERAND (TREE_OPERAND (arg0, 0), 1)),
4353 convert (TREE_TYPE (arg0),
4354 integer_one_node)),
4355 arg1));
4356 else if (TREE_CODE (TREE_OPERAND (arg0, 1)) == LSHIFT_EXPR
4357 && integer_onep (TREE_OPERAND (TREE_OPERAND (arg0, 1), 0)))
4358 return
4359 fold (build (code, type,
4360 build (BIT_AND_EXPR, TREE_TYPE (arg0),
4361 build (RSHIFT_EXPR,
4362 TREE_TYPE (TREE_OPERAND (arg0, 1)),
4363 TREE_OPERAND (arg0, 0),
4364 TREE_OPERAND (TREE_OPERAND (arg0, 1), 1)),
4365 convert (TREE_TYPE (arg0),
4366 integer_one_node)),
4367 arg1));
4370 /* If this is an NE or EQ comparison of zero against the result of a
4371 signed MOD operation whose second operand is a power of 2, make
4372 the MOD operation unsigned since it is simpler and equivalent. */
4373 if ((code == NE_EXPR || code == EQ_EXPR)
4374 && integer_zerop (arg1)
4375 && ! TREE_UNSIGNED (TREE_TYPE (arg0))
4376 && (TREE_CODE (arg0) == TRUNC_MOD_EXPR
4377 || TREE_CODE (arg0) == CEIL_MOD_EXPR
4378 || TREE_CODE (arg0) == FLOOR_MOD_EXPR
4379 || TREE_CODE (arg0) == ROUND_MOD_EXPR)
4380 && integer_pow2p (TREE_OPERAND (arg0, 1)))
4382 tree newtype = unsigned_type (TREE_TYPE (arg0));
4383 tree newmod = build (TREE_CODE (arg0), newtype,
4384 convert (newtype, TREE_OPERAND (arg0, 0)),
4385 convert (newtype, TREE_OPERAND (arg0, 1)));
4387 return build (code, type, newmod, convert (newtype, arg1));
4390 /* If this is an NE comparison of zero with an AND of one, remove the
4391 comparison since the AND will give the correct value. */
4392 if (code == NE_EXPR && integer_zerop (arg1)
4393 && TREE_CODE (arg0) == BIT_AND_EXPR
4394 && integer_onep (TREE_OPERAND (arg0, 1)))
4395 return convert (type, arg0);
4397 /* If we have (A & C) == C where C is a power of 2, convert this into
4398 (A & C) != 0. Similarly for NE_EXPR. */
4399 if ((code == EQ_EXPR || code == NE_EXPR)
4400 && TREE_CODE (arg0) == BIT_AND_EXPR
4401 && integer_pow2p (TREE_OPERAND (arg0, 1))
4402 && operand_equal_p (TREE_OPERAND (arg0, 1), arg1, 0))
4403 return build (code == EQ_EXPR ? NE_EXPR : EQ_EXPR, type,
4404 arg0, integer_zero_node);
4406 /* If X is unsigned, convert X < (1 << Y) into X >> Y == 0
4407 and similarly for >= into !=. */
4408 if ((code == LT_EXPR || code == GE_EXPR)
4409 && TREE_UNSIGNED (TREE_TYPE (arg0))
4410 && TREE_CODE (arg1) == LSHIFT_EXPR
4411 && integer_onep (TREE_OPERAND (arg1, 0)))
4412 return build (code == LT_EXPR ? EQ_EXPR : NE_EXPR, type,
4413 build (RSHIFT_EXPR, TREE_TYPE (arg0), arg0,
4414 TREE_OPERAND (arg1, 1)),
4415 convert (TREE_TYPE (arg0), integer_zero_node));
4417 else if ((code == LT_EXPR || code == GE_EXPR)
4418 && TREE_UNSIGNED (TREE_TYPE (arg0))
4419 && (TREE_CODE (arg1) == NOP_EXPR
4420 || TREE_CODE (arg1) == CONVERT_EXPR)
4421 && TREE_CODE (TREE_OPERAND (arg1, 0)) == LSHIFT_EXPR
4422 && integer_onep (TREE_OPERAND (TREE_OPERAND (arg1, 0), 0)))
4423 return
4424 build (code == LT_EXPR ? EQ_EXPR : NE_EXPR, type,
4425 convert (TREE_TYPE (arg0),
4426 build (RSHIFT_EXPR, TREE_TYPE (arg0), arg0,
4427 TREE_OPERAND (TREE_OPERAND (arg1, 0), 1))),
4428 convert (TREE_TYPE (arg0), integer_zero_node));
4430 /* Simplify comparison of something with itself. (For IEEE
4431 floating-point, we can only do some of these simplifications.) */
4432 if (operand_equal_p (arg0, arg1, 0))
4434 switch (code)
4436 case EQ_EXPR:
4437 case GE_EXPR:
4438 case LE_EXPR:
4439 if (INTEGRAL_TYPE_P (TREE_TYPE (arg0)))
4441 t = build_int_2 (1, 0);
4442 TREE_TYPE (t) = type;
4443 return t;
4445 code = EQ_EXPR;
4446 TREE_SET_CODE (t, code);
4447 break;
4449 case NE_EXPR:
4450 /* For NE, we can only do this simplification if integer. */
4451 if (! INTEGRAL_TYPE_P (TREE_TYPE (arg0)))
4452 break;
4453 /* ... fall through ... */
4454 case GT_EXPR:
4455 case LT_EXPR:
4456 t = build_int_2 (0, 0);
4457 TREE_TYPE (t) = type;
4458 return t;
4462 /* An unsigned comparison against 0 can be simplified. */
4463 if (integer_zerop (arg1)
4464 && (INTEGRAL_TYPE_P (TREE_TYPE (arg1))
4465 || TREE_CODE (TREE_TYPE (arg1)) == POINTER_TYPE)
4466 && TREE_UNSIGNED (TREE_TYPE (arg1)))
4468 switch (TREE_CODE (t))
4470 case GT_EXPR:
4471 code = NE_EXPR;
4472 TREE_SET_CODE (t, NE_EXPR);
4473 break;
4474 case LE_EXPR:
4475 code = EQ_EXPR;
4476 TREE_SET_CODE (t, EQ_EXPR);
4477 break;
4478 case GE_EXPR:
4479 return omit_one_operand (type,
4480 convert (type, integer_one_node),
4481 arg0);
4482 case LT_EXPR:
4483 return omit_one_operand (type,
4484 convert (type, integer_zero_node),
4485 arg0);
4489 /* If we are comparing an expression that just has comparisons
4490 of two integer values, arithmetic expressions of those comparisons,
4491 and constants, we can simplify it. There are only three cases
4492 to check: the two values can either be equal, the first can be
4493 greater, or the second can be greater. Fold the expression for
4494 those three values. Since each value must be 0 or 1, we have
4495 eight possibilities, each of which corresponds to the constant 0
4496 or 1 or one of the six possible comparisons.
4498 This handles common cases like (a > b) == 0 but also handles
4499 expressions like ((x > y) - (y > x)) > 0, which supposedly
4500 occur in macroized code. */
4502 if (TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (arg0) != INTEGER_CST)
4504 tree cval1 = 0, cval2 = 0;
4505 int save_p = 0;
4507 if (twoval_comparison_p (arg0, &cval1, &cval2, &save_p)
4508 /* Don't handle degenerate cases here; they should already
4509 have been handled anyway. */
4510 && cval1 != 0 && cval2 != 0
4511 && ! (TREE_CONSTANT (cval1) && TREE_CONSTANT (cval2))
4512 && TREE_TYPE (cval1) == TREE_TYPE (cval2)
4513 && INTEGRAL_TYPE_P (TREE_TYPE (cval1))
4514 && ! operand_equal_p (TYPE_MIN_VALUE (TREE_TYPE (cval1)),
4515 TYPE_MAX_VALUE (TREE_TYPE (cval2)), 0))
4517 tree maxval = TYPE_MAX_VALUE (TREE_TYPE (cval1));
4518 tree minval = TYPE_MIN_VALUE (TREE_TYPE (cval1));
4520 /* We can't just pass T to eval_subst in case cval1 or cval2
4521 was the same as ARG1. */
4523 tree high_result
4524 = fold (build (code, type,
4525 eval_subst (arg0, cval1, maxval, cval2, minval),
4526 arg1));
4527 tree equal_result
4528 = fold (build (code, type,
4529 eval_subst (arg0, cval1, maxval, cval2, maxval),
4530 arg1));
4531 tree low_result
4532 = fold (build (code, type,
4533 eval_subst (arg0, cval1, minval, cval2, maxval),
4534 arg1));
4536 /* All three of these results should be 0 or 1. Confirm they
4537 are. Then use those values to select the proper code
4538 to use. */
4540 if ((integer_zerop (high_result)
4541 || integer_onep (high_result))
4542 && (integer_zerop (equal_result)
4543 || integer_onep (equal_result))
4544 && (integer_zerop (low_result)
4545 || integer_onep (low_result)))
4547 /* Make a 3-bit mask with the high-order bit being the
4548 value for `>', the next for '=', and the low for '<'. */
4549 switch ((integer_onep (high_result) * 4)
4550 + (integer_onep (equal_result) * 2)
4551 + integer_onep (low_result))
4553 case 0:
4554 /* Always false. */
4555 return omit_one_operand (type, integer_zero_node, arg0);
4556 case 1:
4557 code = LT_EXPR;
4558 break;
4559 case 2:
4560 code = EQ_EXPR;
4561 break;
4562 case 3:
4563 code = LE_EXPR;
4564 break;
4565 case 4:
4566 code = GT_EXPR;
4567 break;
4568 case 5:
4569 code = NE_EXPR;
4570 break;
4571 case 6:
4572 code = GE_EXPR;
4573 break;
4574 case 7:
4575 /* Always true. */
4576 return omit_one_operand (type, integer_one_node, arg0);
4579 t = build (code, type, cval1, cval2);
4580 if (save_p)
4581 return save_expr (t);
4582 else
4583 return fold (t);
4588 /* If this is a comparison of a field, we may be able to simplify it. */
4589 if ((TREE_CODE (arg0) == COMPONENT_REF
4590 || TREE_CODE (arg0) == BIT_FIELD_REF)
4591 && (code == EQ_EXPR || code == NE_EXPR)
4592 /* Handle the constant case even without -O
4593 to make sure the warnings are given. */
4594 && (optimize || TREE_CODE (arg1) == INTEGER_CST))
4596 t1 = optimize_bit_field_compare (code, type, arg0, arg1);
4597 return t1 ? t1 : t;
4600 /* If this is a comparison of complex values and either or both
4601 sides are a COMPLEX_EXPR, it is best to split up the comparisons
4602 and join them with a TRUTH_ANDIF_EXPR or TRUTH_ORIF_EXPR. This
4603 may prevent needless evaluations. */
4604 if ((code == EQ_EXPR || code == NE_EXPR)
4605 && TREE_CODE (TREE_TYPE (arg0)) == COMPLEX_TYPE
4606 && (TREE_CODE (arg0) == COMPLEX_EXPR
4607 || TREE_CODE (arg1) == COMPLEX_EXPR))
4609 tree subtype = TREE_TYPE (TREE_TYPE (arg0));
4610 tree real0 = fold (build1 (REALPART_EXPR, subtype, arg0));
4611 tree imag0 = fold (build1 (IMAGPART_EXPR, subtype, arg0));
4612 tree real1 = fold (build1 (REALPART_EXPR, subtype, arg1));
4613 tree imag1 = fold (build1 (IMAGPART_EXPR, subtype, arg1));
4615 return fold (build ((code == EQ_EXPR ? TRUTH_ANDIF_EXPR
4616 : TRUTH_ORIF_EXPR),
4617 type,
4618 fold (build (code, type, real0, real1)),
4619 fold (build (code, type, imag0, imag1))));
4622 /* From here on, the only cases we handle are when the result is
4623 known to be a constant.
4625 To compute GT, swap the arguments and do LT.
4626 To compute GE, do LT and invert the result.
4627 To compute LE, swap the arguments, do LT and invert the result.
4628 To compute NE, do EQ and invert the result.
4630 Therefore, the code below must handle only EQ and LT. */
4632 if (code == LE_EXPR || code == GT_EXPR)
4634 tem = arg0, arg0 = arg1, arg1 = tem;
4635 code = swap_tree_comparison (code);
4638 /* Note that it is safe to invert for real values here because we
4639 will check below in the one case that it matters. */
4641 invert = 0;
4642 if (code == NE_EXPR || code == GE_EXPR)
4644 invert = 1;
4645 code = invert_tree_comparison (code);
4648 /* Compute a result for LT or EQ if args permit;
4649 otherwise return T. */
4650 if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST)
4652 if (code == EQ_EXPR)
4653 t1 = build_int_2 ((TREE_INT_CST_LOW (arg0)
4654 == TREE_INT_CST_LOW (arg1))
4655 && (TREE_INT_CST_HIGH (arg0)
4656 == TREE_INT_CST_HIGH (arg1)),
4658 else
4659 t1 = build_int_2 ((TREE_UNSIGNED (TREE_TYPE (arg0))
4660 ? INT_CST_LT_UNSIGNED (arg0, arg1)
4661 : INT_CST_LT (arg0, arg1)),
4665 /* Assume a nonexplicit constant cannot equal an explicit one,
4666 since such code would be undefined anyway.
4667 Exception: on sysvr4, using #pragma weak,
4668 a label can come out as 0. */
4669 else if (TREE_CODE (arg1) == INTEGER_CST
4670 && !integer_zerop (arg1)
4671 && TREE_CONSTANT (arg0)
4672 && TREE_CODE (arg0) == ADDR_EXPR
4673 && code == EQ_EXPR)
4674 t1 = build_int_2 (0, 0);
4676 /* Two real constants can be compared explicitly. */
4677 else if (TREE_CODE (arg0) == REAL_CST && TREE_CODE (arg1) == REAL_CST)
4679 /* If either operand is a NaN, the result is false with two
4680 exceptions: First, an NE_EXPR is true on NaNs, but that case
4681 is already handled correctly since we will be inverting the
4682 result for NE_EXPR. Second, if we had inverted a LE_EXPR
4683 or a GE_EXPR into a LT_EXPR, we must return true so that it
4684 will be inverted into false. */
4686 if (REAL_VALUE_ISNAN (TREE_REAL_CST (arg0))
4687 || REAL_VALUE_ISNAN (TREE_REAL_CST (arg1)))
4688 t1 = build_int_2 (invert && code == LT_EXPR, 0);
4690 else if (code == EQ_EXPR)
4691 t1 = build_int_2 (REAL_VALUES_EQUAL (TREE_REAL_CST (arg0),
4692 TREE_REAL_CST (arg1)),
4694 else
4695 t1 = build_int_2 (REAL_VALUES_LESS (TREE_REAL_CST (arg0),
4696 TREE_REAL_CST (arg1)),
4700 if (t1 == NULL_TREE)
4701 return t;
4703 if (invert)
4704 TREE_INT_CST_LOW (t1) ^= 1;
4706 TREE_TYPE (t1) = type;
4707 return t1;
4709 case COND_EXPR:
4710 /* Pedantic ANSI C says that a conditional expression is never an lvalue,
4711 so all simple results must be passed through pedantic_non_lvalue. */
4712 if (TREE_CODE (arg0) == INTEGER_CST)
4713 return pedantic_non_lvalue
4714 (TREE_OPERAND (t, (integer_zerop (arg0) ? 2 : 1)));
4715 else if (operand_equal_p (arg1, TREE_OPERAND (expr, 2), 0))
4716 return pedantic_omit_one_operand (type, arg1, arg0);
4718 /* If the second operand is zero, invert the comparison and swap
4719 the second and third operands. Likewise if the second operand
4720 is constant and the third is not or if the third operand is
4721 equivalent to the first operand of the comparison. */
4723 if (integer_zerop (arg1)
4724 || (TREE_CONSTANT (arg1) && ! TREE_CONSTANT (TREE_OPERAND (t, 2)))
4725 || (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<'
4726 && operand_equal_for_comparison_p (TREE_OPERAND (arg0, 0),
4727 TREE_OPERAND (t, 2),
4728 TREE_OPERAND (arg0, 1))))
4730 /* See if this can be inverted. If it can't, possibly because
4731 it was a floating-point inequality comparison, don't do
4732 anything. */
4733 tem = invert_truthvalue (arg0);
4735 if (TREE_CODE (tem) != TRUTH_NOT_EXPR)
4737 arg0 = TREE_OPERAND (t, 0) = tem;
4738 arg1 = TREE_OPERAND (t, 2);
4739 TREE_OPERAND (t, 2) = TREE_OPERAND (t, 1);
4740 TREE_OPERAND (t, 1) = arg1;
4741 STRIP_NOPS (arg1);
4745 /* If we have A op B ? A : C, we may be able to convert this to a
4746 simpler expression, depending on the operation and the values
4747 of B and C. IEEE floating point prevents this though,
4748 because A or B might be -0.0 or a NaN. */
4750 if (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<'
4751 && (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
4752 || ! FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (arg0, 0)))
4753 || flag_fast_math)
4754 && operand_equal_for_comparison_p (TREE_OPERAND (arg0, 0),
4755 arg1, TREE_OPERAND (arg0, 1)))
4757 tree arg2 = TREE_OPERAND (t, 2);
4758 enum tree_code comp_code = TREE_CODE (arg0);
4760 STRIP_NOPS (arg2);
4762 /* If we have A op 0 ? A : -A, this is A, -A, abs (A), or abs (-A),
4763 depending on the comparison operation. */
4764 if ((FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (arg0, 1)))
4765 ? real_zerop (TREE_OPERAND (arg0, 1))
4766 : integer_zerop (TREE_OPERAND (arg0, 1)))
4767 && TREE_CODE (arg2) == NEGATE_EXPR
4768 && operand_equal_p (TREE_OPERAND (arg2, 0), arg1, 0))
4769 switch (comp_code)
4771 case EQ_EXPR:
4772 return pedantic_non_lvalue
4773 (fold (build1 (NEGATE_EXPR, type, arg1)));
4774 case NE_EXPR:
4775 return pedantic_non_lvalue (convert (type, arg1));
4776 case GE_EXPR:
4777 case GT_EXPR:
4778 return pedantic_non_lvalue
4779 (fold (build1 (ABS_EXPR, type, arg1)));
4780 case LE_EXPR:
4781 case LT_EXPR:
4782 return pedantic_non_lvalue
4783 (fold (build1 (NEGATE_EXPR, type,
4784 fold (build1 (ABS_EXPR, type, arg1)))));
4787 /* If this is A != 0 ? A : 0, this is simply A. For ==, it is
4788 always zero. */
4790 if (integer_zerop (TREE_OPERAND (arg0, 1)) && integer_zerop (arg2))
4792 if (comp_code == NE_EXPR)
4793 return pedantic_non_lvalue (convert (type, arg1));
4794 else if (comp_code == EQ_EXPR)
4795 return pedantic_non_lvalue (convert (type, integer_zero_node));
4798 /* If this is A op B ? A : B, this is either A, B, min (A, B),
4799 or max (A, B), depending on the operation. */
4801 if (operand_equal_for_comparison_p (TREE_OPERAND (arg0, 1),
4802 arg2, TREE_OPERAND (arg0, 0)))
4804 tree comp_op0 = TREE_OPERAND (arg0, 0);
4805 tree comp_op1 = TREE_OPERAND (arg0, 1);
4806 tree comp_type = TREE_TYPE (comp_op0);
4808 switch (comp_code)
4810 case EQ_EXPR:
4811 return pedantic_non_lvalue (convert (type, arg2));
4812 case NE_EXPR:
4813 return pedantic_non_lvalue (convert (type, arg1));
4814 case LE_EXPR:
4815 case LT_EXPR:
4816 return pedantic_non_lvalue
4817 (convert (type, (fold (build (MIN_EXPR, comp_type,
4818 comp_op0, comp_op1)))));
4819 case GE_EXPR:
4820 case GT_EXPR:
4821 return pedantic_non_lvalue
4822 (convert (type, fold (build (MAX_EXPR, comp_type,
4823 comp_op0, comp_op1))));
4827 /* If this is A op C1 ? A : C2 with C1 and C2 constant integers,
4828 we might still be able to simplify this. For example,
4829 if C1 is one less or one more than C2, this might have started
4830 out as a MIN or MAX and been transformed by this function.
4831 Only good for INTEGER_TYPEs, because we need TYPE_MAX_VALUE. */
4833 if (INTEGRAL_TYPE_P (type)
4834 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
4835 && TREE_CODE (arg2) == INTEGER_CST)
4836 switch (comp_code)
4838 case EQ_EXPR:
4839 /* We can replace A with C1 in this case. */
4840 arg1 = TREE_OPERAND (t, 1)
4841 = convert (type, TREE_OPERAND (arg0, 1));
4842 break;
4844 case LT_EXPR:
4845 /* If C1 is C2 + 1, this is min(A, C2). */
4846 if (! operand_equal_p (arg2, TYPE_MAX_VALUE (type), 1)
4847 && operand_equal_p (TREE_OPERAND (arg0, 1),
4848 const_binop (PLUS_EXPR, arg2,
4849 integer_one_node, 0), 1))
4850 return pedantic_non_lvalue
4851 (fold (build (MIN_EXPR, type, arg1, arg2)));
4852 break;
4854 case LE_EXPR:
4855 /* If C1 is C2 - 1, this is min(A, C2). */
4856 if (! operand_equal_p (arg2, TYPE_MIN_VALUE (type), 1)
4857 && operand_equal_p (TREE_OPERAND (arg0, 1),
4858 const_binop (MINUS_EXPR, arg2,
4859 integer_one_node, 0), 1))
4860 return pedantic_non_lvalue
4861 (fold (build (MIN_EXPR, type, arg1, arg2)));
4862 break;
4864 case GT_EXPR:
4865 /* If C1 is C2 - 1, this is max(A, C2). */
4866 if (! operand_equal_p (arg2, TYPE_MIN_VALUE (type), 1)
4867 && operand_equal_p (TREE_OPERAND (arg0, 1),
4868 const_binop (MINUS_EXPR, arg2,
4869 integer_one_node, 0), 1))
4870 return pedantic_non_lvalue
4871 (fold (build (MAX_EXPR, type, arg1, arg2)));
4872 break;
4874 case GE_EXPR:
4875 /* If C1 is C2 + 1, this is max(A, C2). */
4876 if (! operand_equal_p (arg2, TYPE_MAX_VALUE (type), 1)
4877 && operand_equal_p (TREE_OPERAND (arg0, 1),
4878 const_binop (PLUS_EXPR, arg2,
4879 integer_one_node, 0), 1))
4880 return pedantic_non_lvalue
4881 (fold (build (MAX_EXPR, type, arg1, arg2)));
4882 break;
4886 /* If the second operand is simpler than the third, swap them
4887 since that produces better jump optimization results. */
4888 if ((TREE_CONSTANT (arg1) || TREE_CODE_CLASS (TREE_CODE (arg1)) == 'd'
4889 || TREE_CODE (arg1) == SAVE_EXPR)
4890 && ! (TREE_CONSTANT (TREE_OPERAND (t, 2))
4891 || TREE_CODE_CLASS (TREE_CODE (TREE_OPERAND (t, 2))) == 'd'
4892 || TREE_CODE (TREE_OPERAND (t, 2)) == SAVE_EXPR))
4894 /* See if this can be inverted. If it can't, possibly because
4895 it was a floating-point inequality comparison, don't do
4896 anything. */
4897 tem = invert_truthvalue (arg0);
4899 if (TREE_CODE (tem) != TRUTH_NOT_EXPR)
4901 arg0 = TREE_OPERAND (t, 0) = tem;
4902 arg1 = TREE_OPERAND (t, 2);
4903 TREE_OPERAND (t, 2) = TREE_OPERAND (t, 1);
4904 TREE_OPERAND (t, 1) = arg1;
4905 STRIP_NOPS (arg1);
4909 /* Convert A ? 1 : 0 to simply A. */
4910 if (integer_onep (TREE_OPERAND (t, 1))
4911 && integer_zerop (TREE_OPERAND (t, 2))
4912 /* If we try to convert TREE_OPERAND (t, 0) to our type, the
4913 call to fold will try to move the conversion inside
4914 a COND, which will recurse. In that case, the COND_EXPR
4915 is probably the best choice, so leave it alone. */
4916 && type == TREE_TYPE (arg0))
4917 return pedantic_non_lvalue (arg0);
4919 /* Look for expressions of the form A & 2 ? 2 : 0. The result of this
4920 operation is simply A & 2. */
4922 if (integer_zerop (TREE_OPERAND (t, 2))
4923 && TREE_CODE (arg0) == NE_EXPR
4924 && integer_zerop (TREE_OPERAND (arg0, 1))
4925 && integer_pow2p (arg1)
4926 && TREE_CODE (TREE_OPERAND (arg0, 0)) == BIT_AND_EXPR
4927 && operand_equal_p (TREE_OPERAND (TREE_OPERAND (arg0, 0), 1),
4928 arg1, 1))
4929 return pedantic_non_lvalue (convert (type, TREE_OPERAND (arg0, 0)));
4931 return t;
4933 case COMPOUND_EXPR:
4934 /* When pedantic, a compound expression can be neither an lvalue
4935 nor an integer constant expression. */
4936 if (TREE_SIDE_EFFECTS (arg0) || pedantic)
4937 return t;
4938 /* Don't let (0, 0) be null pointer constant. */
4939 if (integer_zerop (arg1))
4940 return non_lvalue (arg1);
4941 return arg1;
4943 case COMPLEX_EXPR:
4944 if (wins)
4945 return build_complex (arg0, arg1);
4946 return t;
4948 case REALPART_EXPR:
4949 if (TREE_CODE (TREE_TYPE (arg0)) != COMPLEX_TYPE)
4950 return t;
4951 else if (TREE_CODE (arg0) == COMPLEX_EXPR)
4952 return omit_one_operand (type, TREE_OPERAND (arg0, 0),
4953 TREE_OPERAND (arg0, 1));
4954 else if (TREE_CODE (arg0) == COMPLEX_CST)
4955 return TREE_REALPART (arg0);
4956 else if (TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MINUS_EXPR)
4957 return fold (build (TREE_CODE (arg0), type,
4958 fold (build1 (REALPART_EXPR, type,
4959 TREE_OPERAND (arg0, 0))),
4960 fold (build1 (REALPART_EXPR,
4961 type, TREE_OPERAND (arg0, 1)))));
4962 return t;
4964 case IMAGPART_EXPR:
4965 if (TREE_CODE (TREE_TYPE (arg0)) != COMPLEX_TYPE)
4966 return convert (type, integer_zero_node);
4967 else if (TREE_CODE (arg0) == COMPLEX_EXPR)
4968 return omit_one_operand (type, TREE_OPERAND (arg0, 1),
4969 TREE_OPERAND (arg0, 0));
4970 else if (TREE_CODE (arg0) == COMPLEX_CST)
4971 return TREE_IMAGPART (arg0);
4972 else if (TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MINUS_EXPR)
4973 return fold (build (TREE_CODE (arg0), type,
4974 fold (build1 (IMAGPART_EXPR, type,
4975 TREE_OPERAND (arg0, 0))),
4976 fold (build1 (IMAGPART_EXPR, type,
4977 TREE_OPERAND (arg0, 1)))));
4978 return t;
4980 default:
4981 return t;
4982 } /* switch (code) */