1 /* Fold a constant sub-tree into a single node for C-compiler
2 Copyright (C) 1987, 88, 92-98, 1999 Free Software Foundation, Inc.
4 This file is part of GNU CC.
6 GNU CC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2, or (at your option)
11 GNU CC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GNU CC; see the file COPYING. If not, write to
18 the Free Software Foundation, 59 Temple Place - Suite 330,
19 Boston, MA 02111-1307, USA. */
21 /*@@ This file should be rewritten to use an arbitrary precision
22 @@ representation for "struct tree_int_cst" and "struct tree_real_cst".
23 @@ Perhaps the routines could also be used for bc/dc, and made a lib.
24 @@ The routines that translate from the ap rep should
25 @@ warn if precision et. al. is lost.
26 @@ This would also make life easier when this technology is used
27 @@ for cross-compilers. */
30 /* The entry points in this file are fold, size_int_wide, size_binop
33 fold takes a tree as argument and returns a simplified tree.
35 size_binop takes a tree code for an arithmetic operation
36 and two operands that are trees, and produces a tree for the
37 result, assuming the type comes from `sizetype'.
39 size_int takes an integer value, and creates a tree constant
40 with type from `sizetype'.
42 force_fit_type takes a constant and prior overflow indicator, and
43 forces the value to fit the type. It returns an overflow indicator. */
53 static void encode
PROTO((HOST_WIDE_INT
*,
54 HOST_WIDE_INT
, HOST_WIDE_INT
));
55 static void decode
PROTO((HOST_WIDE_INT
*,
56 HOST_WIDE_INT
*, HOST_WIDE_INT
*));
57 int div_and_round_double
PROTO((enum tree_code
, int, HOST_WIDE_INT
,
58 HOST_WIDE_INT
, HOST_WIDE_INT
,
59 HOST_WIDE_INT
, HOST_WIDE_INT
*,
60 HOST_WIDE_INT
*, HOST_WIDE_INT
*,
62 static int split_tree
PROTO((tree
, enum tree_code
, tree
*,
64 static tree int_const_binop
PROTO((enum tree_code
, tree
, tree
, int, int));
65 static tree const_binop
PROTO((enum tree_code
, tree
, tree
, int));
66 static tree fold_convert
PROTO((tree
, tree
));
67 static enum tree_code invert_tree_comparison
PROTO((enum tree_code
));
68 static enum tree_code swap_tree_comparison
PROTO((enum tree_code
));
69 static int truth_value_p
PROTO((enum tree_code
));
70 static int operand_equal_for_comparison_p
PROTO((tree
, tree
, tree
));
71 static int twoval_comparison_p
PROTO((tree
, tree
*, tree
*, int *));
72 static tree eval_subst
PROTO((tree
, tree
, tree
, tree
, tree
));
73 static tree omit_one_operand
PROTO((tree
, tree
, tree
));
74 static tree pedantic_omit_one_operand
PROTO((tree
, tree
, tree
));
75 static tree distribute_bit_expr
PROTO((enum tree_code
, tree
, tree
, tree
));
76 static tree make_bit_field_ref
PROTO((tree
, tree
, int, int, int));
77 static tree optimize_bit_field_compare
PROTO((enum tree_code
, tree
,
79 static tree decode_field_reference
PROTO((tree
, int *, int *,
80 enum machine_mode
*, int *,
81 int *, tree
*, tree
*));
82 static int all_ones_mask_p
PROTO((tree
, int));
83 static int simple_operand_p
PROTO((tree
));
84 static tree range_binop
PROTO((enum tree_code
, tree
, tree
, int,
86 static tree make_range
PROTO((tree
, int *, tree
*, tree
*));
87 static tree build_range_check
PROTO((tree
, tree
, int, tree
, tree
));
88 static int merge_ranges
PROTO((int *, tree
*, tree
*, int, tree
, tree
,
90 static tree fold_range_test
PROTO((tree
));
91 static tree unextend
PROTO((tree
, int, int, tree
));
92 static tree fold_truthop
PROTO((enum tree_code
, tree
, tree
, tree
));
93 static tree strip_compound_expr
PROTO((tree
, tree
));
94 static int multiple_of_p
PROTO((tree
, tree
, tree
));
95 static tree constant_boolean_node
PROTO((int, tree
));
96 static int count_cond
PROTO((tree
, int));
97 static void const_binop_1
PROTO((PTR
));
98 static void fold_convert_1
PROTO((PTR
));
101 #define BRANCH_COST 1
104 /* Suppose A1 + B1 = SUM1, using 2's complement arithmetic ignoring overflow.
105 Suppose A, B and SUM have the same respective signs as A1, B1, and SUM1.
106 Then this yields nonzero if overflow occurred during the addition.
107 Overflow occurs if A and B have the same sign, but A and SUM differ in sign.
108 Use `^' to test whether signs differ, and `< 0' to isolate the sign. */
109 #define overflow_sum_sign(a, b, sum) ((~((a) ^ (b)) & ((a) ^ (sum))) < 0)
111 /* To do constant folding on INTEGER_CST nodes requires two-word arithmetic.
112 We do that by representing the two-word integer in 4 words, with only
113 HOST_BITS_PER_WIDE_INT/2 bits stored in each word, as a positive number. */
116 ((x) & (((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT/2)) - 1))
117 #define HIGHPART(x) \
118 ((unsigned HOST_WIDE_INT) (x) >> HOST_BITS_PER_WIDE_INT/2)
119 #define BASE ((unsigned HOST_WIDE_INT) 1 << HOST_BITS_PER_WIDE_INT/2)
121 /* Unpack a two-word integer into 4 words.
122 LOW and HI are the integer, as two `HOST_WIDE_INT' pieces.
123 WORDS points to the array of HOST_WIDE_INTs. */
126 encode (words
, low
, hi
)
127 HOST_WIDE_INT
*words
;
128 HOST_WIDE_INT low
, hi
;
130 words
[0] = LOWPART (low
);
131 words
[1] = HIGHPART (low
);
132 words
[2] = LOWPART (hi
);
133 words
[3] = HIGHPART (hi
);
136 /* Pack an array of 4 words into a two-word integer.
137 WORDS points to the array of words.
138 The integer is stored into *LOW and *HI as two `HOST_WIDE_INT' pieces. */
141 decode (words
, low
, hi
)
142 HOST_WIDE_INT
*words
;
143 HOST_WIDE_INT
*low
, *hi
;
145 *low
= words
[0] | words
[1] * BASE
;
146 *hi
= words
[2] | words
[3] * BASE
;
149 /* Make the integer constant T valid for its type
150 by setting to 0 or 1 all the bits in the constant
151 that don't belong in the type.
152 Yield 1 if a signed overflow occurs, 0 otherwise.
153 If OVERFLOW is nonzero, a signed overflow has already occurred
154 in calculating T, so propagate it.
156 Make the real constant T valid for its type by calling CHECK_FLOAT_VALUE,
160 force_fit_type (t
, overflow
)
164 HOST_WIDE_INT low
, high
;
167 if (TREE_CODE (t
) == REAL_CST
)
169 #ifdef CHECK_FLOAT_VALUE
170 CHECK_FLOAT_VALUE (TYPE_MODE (TREE_TYPE (t
)), TREE_REAL_CST (t
),
176 else if (TREE_CODE (t
) != INTEGER_CST
)
179 low
= TREE_INT_CST_LOW (t
);
180 high
= TREE_INT_CST_HIGH (t
);
182 if (POINTER_TYPE_P (TREE_TYPE (t
)))
185 prec
= TYPE_PRECISION (TREE_TYPE (t
));
187 /* First clear all bits that are beyond the type's precision. */
189 if (prec
== 2 * HOST_BITS_PER_WIDE_INT
)
191 else if (prec
> HOST_BITS_PER_WIDE_INT
)
193 TREE_INT_CST_HIGH (t
)
194 &= ~((HOST_WIDE_INT
) (-1) << (prec
- HOST_BITS_PER_WIDE_INT
));
198 TREE_INT_CST_HIGH (t
) = 0;
199 if (prec
< HOST_BITS_PER_WIDE_INT
)
200 TREE_INT_CST_LOW (t
) &= ~((HOST_WIDE_INT
) (-1) << prec
);
203 /* Unsigned types do not suffer sign extension or overflow. */
204 if (TREE_UNSIGNED (TREE_TYPE (t
)))
207 /* If the value's sign bit is set, extend the sign. */
208 if (prec
!= 2 * HOST_BITS_PER_WIDE_INT
209 && (prec
> HOST_BITS_PER_WIDE_INT
210 ? (TREE_INT_CST_HIGH (t
)
211 & ((HOST_WIDE_INT
) 1 << (prec
- HOST_BITS_PER_WIDE_INT
- 1)))
212 : TREE_INT_CST_LOW (t
) & ((HOST_WIDE_INT
) 1 << (prec
- 1))))
214 /* Value is negative:
215 set to 1 all the bits that are outside this type's precision. */
216 if (prec
> HOST_BITS_PER_WIDE_INT
)
218 TREE_INT_CST_HIGH (t
)
219 |= ((HOST_WIDE_INT
) (-1) << (prec
- HOST_BITS_PER_WIDE_INT
));
223 TREE_INT_CST_HIGH (t
) = -1;
224 if (prec
< HOST_BITS_PER_WIDE_INT
)
225 TREE_INT_CST_LOW (t
) |= ((HOST_WIDE_INT
) (-1) << prec
);
229 /* Yield nonzero if signed overflow occurred. */
231 ((overflow
| (low
^ TREE_INT_CST_LOW (t
)) | (high
^ TREE_INT_CST_HIGH (t
)))
235 /* Add two doubleword integers with doubleword result.
236 Each argument is given as two `HOST_WIDE_INT' pieces.
237 One argument is L1 and H1; the other, L2 and H2.
238 The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */
241 add_double (l1
, h1
, l2
, h2
, lv
, hv
)
242 HOST_WIDE_INT l1
, h1
, l2
, h2
;
243 HOST_WIDE_INT
*lv
, *hv
;
248 h
= h1
+ h2
+ ((unsigned HOST_WIDE_INT
) l
< l1
);
252 return overflow_sum_sign (h1
, h2
, h
);
255 /* Negate a doubleword integer with doubleword result.
256 Return nonzero if the operation overflows, assuming it's signed.
257 The argument is given as two `HOST_WIDE_INT' pieces in L1 and H1.
258 The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */
261 neg_double (l1
, h1
, lv
, hv
)
262 HOST_WIDE_INT l1
, h1
;
263 HOST_WIDE_INT
*lv
, *hv
;
269 return (*hv
& h1
) < 0;
279 /* Multiply two doubleword integers with doubleword result.
280 Return nonzero if the operation overflows, assuming it's signed.
281 Each argument is given as two `HOST_WIDE_INT' pieces.
282 One argument is L1 and H1; the other, L2 and H2.
283 The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */
286 mul_double (l1
, h1
, l2
, h2
, lv
, hv
)
287 HOST_WIDE_INT l1
, h1
, l2
, h2
;
288 HOST_WIDE_INT
*lv
, *hv
;
290 HOST_WIDE_INT arg1
[4];
291 HOST_WIDE_INT arg2
[4];
292 HOST_WIDE_INT prod
[4 * 2];
293 register unsigned HOST_WIDE_INT carry
;
294 register int i
, j
, k
;
295 HOST_WIDE_INT toplow
, tophigh
, neglow
, neghigh
;
297 encode (arg1
, l1
, h1
);
298 encode (arg2
, l2
, h2
);
300 bzero ((char *) prod
, sizeof prod
);
302 for (i
= 0; i
< 4; i
++)
305 for (j
= 0; j
< 4; j
++)
308 /* This product is <= 0xFFFE0001, the sum <= 0xFFFF0000. */
309 carry
+= arg1
[i
] * arg2
[j
];
310 /* Since prod[p] < 0xFFFF, this sum <= 0xFFFFFFFF. */
312 prod
[k
] = LOWPART (carry
);
313 carry
= HIGHPART (carry
);
318 decode (prod
, lv
, hv
); /* This ignores prod[4] through prod[4*2-1] */
320 /* Check for overflow by calculating the top half of the answer in full;
321 it should agree with the low half's sign bit. */
322 decode (prod
+4, &toplow
, &tophigh
);
325 neg_double (l2
, h2
, &neglow
, &neghigh
);
326 add_double (neglow
, neghigh
, toplow
, tophigh
, &toplow
, &tophigh
);
330 neg_double (l1
, h1
, &neglow
, &neghigh
);
331 add_double (neglow
, neghigh
, toplow
, tophigh
, &toplow
, &tophigh
);
333 return (*hv
< 0 ? ~(toplow
& tophigh
) : toplow
| tophigh
) != 0;
336 /* Shift the doubleword integer in L1, H1 left by COUNT places
337 keeping only PREC bits of result.
338 Shift right if COUNT is negative.
339 ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
340 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
343 lshift_double (l1
, h1
, count
, prec
, lv
, hv
, arith
)
344 HOST_WIDE_INT l1
, h1
, count
;
346 HOST_WIDE_INT
*lv
, *hv
;
351 rshift_double (l1
, h1
, - count
, prec
, lv
, hv
, arith
);
355 #ifdef SHIFT_COUNT_TRUNCATED
356 if (SHIFT_COUNT_TRUNCATED
)
360 if (count
>= HOST_BITS_PER_WIDE_INT
)
362 *hv
= (unsigned HOST_WIDE_INT
) l1
<< (count
- HOST_BITS_PER_WIDE_INT
);
367 *hv
= (((unsigned HOST_WIDE_INT
) h1
<< count
)
368 | ((unsigned HOST_WIDE_INT
) l1
>> (HOST_BITS_PER_WIDE_INT
- count
- 1) >> 1));
369 *lv
= (unsigned HOST_WIDE_INT
) l1
<< count
;
373 /* Shift the doubleword integer in L1, H1 right by COUNT places
374 keeping only PREC bits of result. COUNT must be positive.
375 ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
376 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
379 rshift_double (l1
, h1
, count
, prec
, lv
, hv
, arith
)
380 HOST_WIDE_INT l1
, h1
, count
;
381 int prec ATTRIBUTE_UNUSED
;
382 HOST_WIDE_INT
*lv
, *hv
;
385 unsigned HOST_WIDE_INT signmask
;
387 ? -((unsigned HOST_WIDE_INT
) h1
>> (HOST_BITS_PER_WIDE_INT
- 1))
390 #ifdef SHIFT_COUNT_TRUNCATED
391 if (SHIFT_COUNT_TRUNCATED
)
395 if (count
>= HOST_BITS_PER_WIDE_INT
)
398 *lv
= ((signmask
<< (2 * HOST_BITS_PER_WIDE_INT
- count
- 1) << 1)
399 | ((unsigned HOST_WIDE_INT
) h1
>> (count
- HOST_BITS_PER_WIDE_INT
)));
403 *lv
= (((unsigned HOST_WIDE_INT
) l1
>> count
)
404 | ((unsigned HOST_WIDE_INT
) h1
<< (HOST_BITS_PER_WIDE_INT
- count
- 1) << 1));
405 *hv
= ((signmask
<< (HOST_BITS_PER_WIDE_INT
- count
))
406 | ((unsigned HOST_WIDE_INT
) h1
>> count
));
410 /* Rotate the doubleword integer in L1, H1 left by COUNT places
411 keeping only PREC bits of result.
412 Rotate right if COUNT is negative.
413 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
416 lrotate_double (l1
, h1
, count
, prec
, lv
, hv
)
417 HOST_WIDE_INT l1
, h1
, count
;
419 HOST_WIDE_INT
*lv
, *hv
;
421 HOST_WIDE_INT s1l
, s1h
, s2l
, s2h
;
427 lshift_double (l1
, h1
, count
, prec
, &s1l
, &s1h
, 0);
428 rshift_double (l1
, h1
, prec
- count
, prec
, &s2l
, &s2h
, 0);
433 /* Rotate the doubleword integer in L1, H1 left by COUNT places
434 keeping only PREC bits of result. COUNT must be positive.
435 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
438 rrotate_double (l1
, h1
, count
, prec
, lv
, hv
)
439 HOST_WIDE_INT l1
, h1
, count
;
441 HOST_WIDE_INT
*lv
, *hv
;
443 HOST_WIDE_INT s1l
, s1h
, s2l
, s2h
;
449 rshift_double (l1
, h1
, count
, prec
, &s1l
, &s1h
, 0);
450 lshift_double (l1
, h1
, prec
- count
, prec
, &s2l
, &s2h
, 0);
455 /* Divide doubleword integer LNUM, HNUM by doubleword integer LDEN, HDEN
456 for a quotient (stored in *LQUO, *HQUO) and remainder (in *LREM, *HREM).
457 CODE is a tree code for a kind of division, one of
458 TRUNC_DIV_EXPR, FLOOR_DIV_EXPR, CEIL_DIV_EXPR, ROUND_DIV_EXPR
460 It controls how the quotient is rounded to a integer.
461 Return nonzero if the operation overflows.
462 UNS nonzero says do unsigned division. */
465 div_and_round_double (code
, uns
,
466 lnum_orig
, hnum_orig
, lden_orig
, hden_orig
,
467 lquo
, hquo
, lrem
, hrem
)
470 HOST_WIDE_INT lnum_orig
, hnum_orig
; /* num == numerator == dividend */
471 HOST_WIDE_INT lden_orig
, hden_orig
; /* den == denominator == divisor */
472 HOST_WIDE_INT
*lquo
, *hquo
, *lrem
, *hrem
;
475 HOST_WIDE_INT num
[4 + 1]; /* extra element for scaling. */
476 HOST_WIDE_INT den
[4], quo
[4];
478 unsigned HOST_WIDE_INT work
;
479 register unsigned HOST_WIDE_INT carry
= 0;
480 HOST_WIDE_INT lnum
= lnum_orig
;
481 HOST_WIDE_INT hnum
= hnum_orig
;
482 HOST_WIDE_INT lden
= lden_orig
;
483 HOST_WIDE_INT hden
= hden_orig
;
486 if ((hden
== 0) && (lden
== 0))
487 overflow
= 1, lden
= 1;
489 /* calculate quotient sign and convert operands to unsigned. */
495 /* (minimum integer) / (-1) is the only overflow case. */
496 if (neg_double (lnum
, hnum
, &lnum
, &hnum
) && (lden
& hden
) == -1)
502 neg_double (lden
, hden
, &lden
, &hden
);
506 if (hnum
== 0 && hden
== 0)
507 { /* single precision */
509 /* This unsigned division rounds toward zero. */
510 *lquo
= lnum
/ (unsigned HOST_WIDE_INT
) lden
;
515 { /* trivial case: dividend < divisor */
516 /* hden != 0 already checked. */
523 bzero ((char *) quo
, sizeof quo
);
525 bzero ((char *) num
, sizeof num
); /* to zero 9th element */
526 bzero ((char *) den
, sizeof den
);
528 encode (num
, lnum
, hnum
);
529 encode (den
, lden
, hden
);
531 /* Special code for when the divisor < BASE. */
532 if (hden
== 0 && lden
< (HOST_WIDE_INT
) BASE
)
534 /* hnum != 0 already checked. */
535 for (i
= 4 - 1; i
>= 0; i
--)
537 work
= num
[i
] + carry
* BASE
;
538 quo
[i
] = work
/ (unsigned HOST_WIDE_INT
) lden
;
539 carry
= work
% (unsigned HOST_WIDE_INT
) lden
;
544 /* Full double precision division,
545 with thanks to Don Knuth's "Seminumerical Algorithms". */
546 int num_hi_sig
, den_hi_sig
;
547 unsigned HOST_WIDE_INT quo_est
, scale
;
549 /* Find the highest non-zero divisor digit. */
550 for (i
= 4 - 1; ; i
--)
556 /* Insure that the first digit of the divisor is at least BASE/2.
557 This is required by the quotient digit estimation algorithm. */
559 scale
= BASE
/ (den
[den_hi_sig
] + 1);
560 if (scale
> 1) { /* scale divisor and dividend */
562 for (i
= 0; i
<= 4 - 1; i
++) {
563 work
= (num
[i
] * scale
) + carry
;
564 num
[i
] = LOWPART (work
);
565 carry
= HIGHPART (work
);
568 for (i
= 0; i
<= 4 - 1; i
++) {
569 work
= (den
[i
] * scale
) + carry
;
570 den
[i
] = LOWPART (work
);
571 carry
= HIGHPART (work
);
572 if (den
[i
] != 0) den_hi_sig
= i
;
579 for (i
= num_hi_sig
- den_hi_sig
- 1; i
>= 0; i
--) {
580 /* guess the next quotient digit, quo_est, by dividing the first
581 two remaining dividend digits by the high order quotient digit.
582 quo_est is never low and is at most 2 high. */
583 unsigned HOST_WIDE_INT tmp
;
585 num_hi_sig
= i
+ den_hi_sig
+ 1;
586 work
= num
[num_hi_sig
] * BASE
+ num
[num_hi_sig
- 1];
587 if (num
[num_hi_sig
] != den
[den_hi_sig
])
588 quo_est
= work
/ den
[den_hi_sig
];
592 /* refine quo_est so it's usually correct, and at most one high. */
593 tmp
= work
- quo_est
* den
[den_hi_sig
];
595 && den
[den_hi_sig
- 1] * quo_est
> (tmp
* BASE
+ num
[num_hi_sig
- 2]))
598 /* Try QUO_EST as the quotient digit, by multiplying the
599 divisor by QUO_EST and subtracting from the remaining dividend.
600 Keep in mind that QUO_EST is the I - 1st digit. */
603 for (j
= 0; j
<= den_hi_sig
; j
++)
605 work
= quo_est
* den
[j
] + carry
;
606 carry
= HIGHPART (work
);
607 work
= num
[i
+ j
] - LOWPART (work
);
608 num
[i
+ j
] = LOWPART (work
);
609 carry
+= HIGHPART (work
) != 0;
612 /* if quo_est was high by one, then num[i] went negative and
613 we need to correct things. */
615 if (num
[num_hi_sig
] < carry
)
618 carry
= 0; /* add divisor back in */
619 for (j
= 0; j
<= den_hi_sig
; j
++)
621 work
= num
[i
+ j
] + den
[j
] + carry
;
622 carry
= HIGHPART (work
);
623 num
[i
+ j
] = LOWPART (work
);
625 num
[num_hi_sig
] += carry
;
628 /* store the quotient digit. */
633 decode (quo
, lquo
, hquo
);
636 /* if result is negative, make it so. */
638 neg_double (*lquo
, *hquo
, lquo
, hquo
);
640 /* compute trial remainder: rem = num - (quo * den) */
641 mul_double (*lquo
, *hquo
, lden_orig
, hden_orig
, lrem
, hrem
);
642 neg_double (*lrem
, *hrem
, lrem
, hrem
);
643 add_double (lnum_orig
, hnum_orig
, *lrem
, *hrem
, lrem
, hrem
);
648 case TRUNC_MOD_EXPR
: /* round toward zero */
649 case EXACT_DIV_EXPR
: /* for this one, it shouldn't matter */
653 case FLOOR_MOD_EXPR
: /* round toward negative infinity */
654 if (quo_neg
&& (*lrem
!= 0 || *hrem
!= 0)) /* ratio < 0 && rem != 0 */
657 add_double (*lquo
, *hquo
, (HOST_WIDE_INT
) -1, (HOST_WIDE_INT
) -1,
660 else return overflow
;
664 case CEIL_MOD_EXPR
: /* round toward positive infinity */
665 if (!quo_neg
&& (*lrem
!= 0 || *hrem
!= 0)) /* ratio > 0 && rem != 0 */
667 add_double (*lquo
, *hquo
, (HOST_WIDE_INT
) 1, (HOST_WIDE_INT
) 0,
670 else return overflow
;
674 case ROUND_MOD_EXPR
: /* round to closest integer */
676 HOST_WIDE_INT labs_rem
= *lrem
, habs_rem
= *hrem
;
677 HOST_WIDE_INT labs_den
= lden
, habs_den
= hden
, ltwice
, htwice
;
679 /* get absolute values */
680 if (*hrem
< 0) neg_double (*lrem
, *hrem
, &labs_rem
, &habs_rem
);
681 if (hden
< 0) neg_double (lden
, hden
, &labs_den
, &habs_den
);
683 /* if (2 * abs (lrem) >= abs (lden)) */
684 mul_double ((HOST_WIDE_INT
) 2, (HOST_WIDE_INT
) 0,
685 labs_rem
, habs_rem
, <wice
, &htwice
);
686 if (((unsigned HOST_WIDE_INT
) habs_den
687 < (unsigned HOST_WIDE_INT
) htwice
)
688 || (((unsigned HOST_WIDE_INT
) habs_den
689 == (unsigned HOST_WIDE_INT
) htwice
)
690 && ((HOST_WIDE_INT
unsigned) labs_den
691 < (unsigned HOST_WIDE_INT
) ltwice
)))
695 add_double (*lquo
, *hquo
,
696 (HOST_WIDE_INT
) -1, (HOST_WIDE_INT
) -1, lquo
, hquo
);
699 add_double (*lquo
, *hquo
, (HOST_WIDE_INT
) 1, (HOST_WIDE_INT
) 0,
702 else return overflow
;
710 /* compute true remainder: rem = num - (quo * den) */
711 mul_double (*lquo
, *hquo
, lden_orig
, hden_orig
, lrem
, hrem
);
712 neg_double (*lrem
, *hrem
, lrem
, hrem
);
713 add_double (lnum_orig
, hnum_orig
, *lrem
, *hrem
, lrem
, hrem
);
717 #ifndef REAL_ARITHMETIC
718 /* Effectively truncate a real value to represent the nearest possible value
719 in a narrower mode. The result is actually represented in the same data
720 type as the argument, but its value is usually different.
722 A trap may occur during the FP operations and it is the responsibility
723 of the calling function to have a handler established. */
726 real_value_truncate (mode
, arg
)
727 enum machine_mode mode
;
730 return REAL_VALUE_TRUNCATE (mode
, arg
);
733 #if TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
735 /* Check for infinity in an IEEE double precision number. */
741 /* The IEEE 64-bit double format. */
746 unsigned exponent
: 11;
747 unsigned mantissa1
: 20;
752 unsigned mantissa1
: 20;
753 unsigned exponent
: 11;
759 if (u
.big_endian
.sign
== 1)
762 return (u
.big_endian
.exponent
== 2047
763 && u
.big_endian
.mantissa1
== 0
764 && u
.big_endian
.mantissa2
== 0);
769 return (u
.little_endian
.exponent
== 2047
770 && u
.little_endian
.mantissa1
== 0
771 && u
.little_endian
.mantissa2
== 0);
775 /* Check whether an IEEE double precision number is a NaN. */
781 /* The IEEE 64-bit double format. */
786 unsigned exponent
: 11;
787 unsigned mantissa1
: 20;
792 unsigned mantissa1
: 20;
793 unsigned exponent
: 11;
799 if (u
.big_endian
.sign
== 1)
802 return (u
.big_endian
.exponent
== 2047
803 && (u
.big_endian
.mantissa1
!= 0
804 || u
.big_endian
.mantissa2
!= 0));
809 return (u
.little_endian
.exponent
== 2047
810 && (u
.little_endian
.mantissa1
!= 0
811 || u
.little_endian
.mantissa2
!= 0));
815 /* Check for a negative IEEE double precision number. */
821 /* The IEEE 64-bit double format. */
826 unsigned exponent
: 11;
827 unsigned mantissa1
: 20;
832 unsigned mantissa1
: 20;
833 unsigned exponent
: 11;
839 if (u
.big_endian
.sign
== 1)
842 return u
.big_endian
.sign
;
847 return u
.little_endian
.sign
;
850 #else /* Target not IEEE */
852 /* Let's assume other float formats don't have infinity.
853 (This can be overridden by redefining REAL_VALUE_ISINF.) */
861 /* Let's assume other float formats don't have NaNs.
862 (This can be overridden by redefining REAL_VALUE_ISNAN.) */
870 /* Let's assume other float formats don't have minus zero.
871 (This can be overridden by redefining REAL_VALUE_NEGATIVE.) */
878 #endif /* Target not IEEE */
880 /* Try to change R into its exact multiplicative inverse in machine mode
881 MODE. Return nonzero function value if successful. */
884 exact_real_inverse (mode
, r
)
885 enum machine_mode mode
;
896 /* Usually disable if bounds checks are not reliable. */
897 if ((HOST_FLOAT_FORMAT
!= TARGET_FLOAT_FORMAT
) && !flag_pretend_float
)
900 /* Set array index to the less significant bits in the unions, depending
901 on the endian-ness of the host doubles.
902 Disable if insufficient information on the data structure. */
903 #if HOST_FLOAT_FORMAT == UNKNOWN_FLOAT_FORMAT
906 #if HOST_FLOAT_FORMAT == VAX_FLOAT_FORMAT
909 #if HOST_FLOAT_FORMAT == IBM_FLOAT_FORMAT
912 #define K (2 * HOST_FLOAT_WORDS_BIG_ENDIAN)
917 if (setjmp (float_error
))
919 /* Don't do the optimization if there was an arithmetic error. */
921 set_float_handler (NULL_PTR
);
924 set_float_handler (float_error
);
926 /* Domain check the argument. */
932 if (REAL_VALUE_ISINF (x
.d
) || REAL_VALUE_ISNAN (x
.d
))
936 /* Compute the reciprocal and check for numerical exactness.
937 It is unnecessary to check all the significand bits to determine
938 whether X is a power of 2. If X is not, then it is impossible for
939 the bottom half significand of both X and 1/X to be all zero bits.
940 Hence we ignore the data structure of the top half and examine only
941 the low order bits of the two significands. */
943 if (x
.i
[K
] != 0 || x
.i
[K
+ 1] != 0 || t
.i
[K
] != 0 || t
.i
[K
+ 1] != 0)
946 /* Truncate to the required mode and range-check the result. */
947 y
.d
= REAL_VALUE_TRUNCATE (mode
, t
.d
);
948 #ifdef CHECK_FLOAT_VALUE
950 if (CHECK_FLOAT_VALUE (mode
, y
.d
, i
))
954 /* Fail if truncation changed the value. */
955 if (y
.d
!= t
.d
|| y
.d
== 0.0)
959 if (REAL_VALUE_ISINF (y
.d
) || REAL_VALUE_ISNAN (y
.d
))
963 /* Output the reciprocal and return success flag. */
964 set_float_handler (NULL_PTR
);
970 /* Convert C9X hexadecimal floating point string constant S. Return
971 real value type in mode MODE. This function uses the host computer's
972 fp arithmetic when there is no REAL_ARITHMETIC. */
975 real_hex_to_f (s
, mode
)
977 enum machine_mode mode
;
981 unsigned HOST_WIDE_INT low
, high
;
982 int frexpon
, expon
, shcount
, nrmcount
, k
;
983 int sign
, expsign
, decpt
, isfloat
, isldouble
, gotp
, lost
;
993 while (*p
== ' ' || *p
== '\t')
996 /* Sign, if any, comes first. */
1004 /* The string is supposed to start with 0x or 0X . */
1008 if (*p
== 'x' || *p
== 'X')
1021 lost
= 0; /* Nonzero low order bits shifted out and discarded. */
1022 frexpon
= 0; /* Bits after the decimal point. */
1023 expon
= 0; /* Value of exponent. */
1024 decpt
= 0; /* How many decimal points. */
1025 gotp
= 0; /* How many P's. */
1027 while ((c
= *p
) != '\0')
1029 if ((c
>= '0' && c
<= '9') || (c
>= 'A' && c
<= 'F')
1030 || (c
>= 'a' && c
<= 'f'))
1040 if ((high
& 0xf0000000) == 0)
1042 high
= (high
<< 4) + ((low
>> 28) & 15);
1043 low
= (low
<< 4) + k
;
1050 /* Record nonzero lost bits. */
1062 else if (c
== 'p' || c
== 'P')
1066 /* Sign of exponent. */
1072 /* Value of exponent.
1073 The exponent field is a decimal integer. */
1076 k
= (*p
++ & 0x7f) - '0';
1077 expon
= 10 * expon
+ k
;
1080 /* F suffix is ambiguous in the significand part
1081 so it must appear after the decimal exponent field. */
1082 if (*p
== 'f' || *p
== 'F')
1089 else if (c
== 'l' || c
== 'L')
1098 /* Abort if last character read was not legitimate. */
1100 if ((c
!= '\0' && c
!= ' ' && c
!= '\n' && c
!= '\r') || (decpt
> 1))
1102 /* There must be either one decimal point or one p. */
1103 if (decpt
== 0 && gotp
== 0)
1106 if ((high
== 0) && (low
== 0))
1119 /* Leave a high guard bit for carry-out. */
1120 if ((high
& 0x80000000) != 0)
1123 low
= (low
>> 1) | (high
<< 31);
1127 if ((high
& 0xffff8000) == 0)
1129 high
= (high
<< 16) + ((low
>> 16) & 0xffff);
1133 while ((high
& 0xc0000000) == 0)
1135 high
= (high
<< 1) + ((low
>> 31) & 1);
1139 if (isfloat
|| GET_MODE_SIZE(mode
) == UNITS_PER_WORD
)
1141 /* Keep 24 bits precision, bits 0x7fffff80.
1142 Rounding bit is 0x40. */
1143 lost
= lost
| low
| (high
& 0x3f);
1147 if ((high
& 0x80) || lost
)
1154 /* We need real.c to do long double formats, so here default
1155 to double precision. */
1156 #if HOST_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
1158 Keep 53 bits precision, bits 0x7fffffff fffffc00.
1159 Rounding bit is low word 0x200. */
1160 lost
= lost
| (low
& 0x1ff);
1163 if ((low
& 0x400) || lost
)
1165 low
= (low
+ 0x200) & 0xfffffc00;
1172 /* Assume it's a VAX with 56-bit significand,
1173 bits 0x7fffffff ffffff80. */
1174 lost
= lost
| (low
& 0x7f);
1177 if ((low
& 0x80) || lost
)
1179 low
= (low
+ 0x40) & 0xffffff80;
1188 ip
= REAL_VALUE_LDEXP (ip
, 32) + (double) low
;
1189 /* Apply shifts and exponent value as power of 2. */
1190 ip
= REAL_VALUE_LDEXP (ip
, expon
- (nrmcount
+ frexpon
));
1197 #endif /* no REAL_ARITHMETIC */
1199 /* Split a tree IN into a constant and a variable part
1200 that could be combined with CODE to make IN.
1201 CODE must be a commutative arithmetic operation.
1202 Store the constant part into *CONP and the variable in &VARP.
1203 Return 1 if this was done; zero means the tree IN did not decompose
1206 If CODE is PLUS_EXPR we also split trees that use MINUS_EXPR.
1207 Therefore, we must tell the caller whether the variable part
1208 was subtracted. We do this by storing 1 or -1 into *VARSIGNP.
1209 The value stored is the coefficient for the variable term.
1210 The constant term we return should always be added;
1211 we negate it if necessary. */
1214 split_tree (in
, code
, varp
, conp
, varsignp
)
1216 enum tree_code code
;
1220 register tree outtype
= TREE_TYPE (in
);
1224 /* Strip any conversions that don't change the machine mode. */
1225 while ((TREE_CODE (in
) == NOP_EXPR
1226 || TREE_CODE (in
) == CONVERT_EXPR
)
1227 && (TYPE_MODE (TREE_TYPE (in
))
1228 == TYPE_MODE (TREE_TYPE (TREE_OPERAND (in
, 0)))))
1229 in
= TREE_OPERAND (in
, 0);
1231 if (TREE_CODE (in
) == code
1232 || (! FLOAT_TYPE_P (TREE_TYPE (in
))
1233 /* We can associate addition and subtraction together
1234 (even though the C standard doesn't say so)
1235 for integers because the value is not affected.
1236 For reals, the value might be affected, so we can't. */
1237 && ((code
== PLUS_EXPR
&& TREE_CODE (in
) == MINUS_EXPR
)
1238 || (code
== MINUS_EXPR
&& TREE_CODE (in
) == PLUS_EXPR
))))
1240 enum tree_code code
= TREE_CODE (TREE_OPERAND (in
, 0));
1241 if (code
== INTEGER_CST
)
1243 *conp
= TREE_OPERAND (in
, 0);
1244 *varp
= TREE_OPERAND (in
, 1);
1245 if (TYPE_MODE (TREE_TYPE (*varp
)) != TYPE_MODE (outtype
)
1246 && TREE_TYPE (*varp
) != outtype
)
1247 *varp
= convert (outtype
, *varp
);
1248 *varsignp
= (TREE_CODE (in
) == MINUS_EXPR
) ? -1 : 1;
1251 if (TREE_CONSTANT (TREE_OPERAND (in
, 1)))
1253 *conp
= TREE_OPERAND (in
, 1);
1254 *varp
= TREE_OPERAND (in
, 0);
1256 if (TYPE_MODE (TREE_TYPE (*varp
)) != TYPE_MODE (outtype
)
1257 && TREE_TYPE (*varp
) != outtype
)
1258 *varp
= convert (outtype
, *varp
);
1259 if (TREE_CODE (in
) == MINUS_EXPR
)
1261 /* If operation is subtraction and constant is second,
1262 must negate it to get an additive constant.
1263 And this cannot be done unless it is a manifest constant.
1264 It could also be the address of a static variable.
1265 We cannot negate that, so give up. */
1266 if (TREE_CODE (*conp
) == INTEGER_CST
)
1267 /* Subtracting from integer_zero_node loses for long long. */
1268 *conp
= fold (build1 (NEGATE_EXPR
, TREE_TYPE (*conp
), *conp
));
1274 if (TREE_CONSTANT (TREE_OPERAND (in
, 0)))
1276 *conp
= TREE_OPERAND (in
, 0);
1277 *varp
= TREE_OPERAND (in
, 1);
1278 if (TYPE_MODE (TREE_TYPE (*varp
)) != TYPE_MODE (outtype
)
1279 && TREE_TYPE (*varp
) != outtype
)
1280 *varp
= convert (outtype
, *varp
);
1281 *varsignp
= (TREE_CODE (in
) == MINUS_EXPR
) ? -1 : 1;
1288 /* Combine two integer constants ARG1 and ARG2 under operation CODE
1289 to produce a new constant.
1291 If NOTRUNC is nonzero, do not truncate the result to fit the data type.
1292 If FORSIZE is nonzero, compute overflow for unsigned types. */
1295 int_const_binop (code
, arg1
, arg2
, notrunc
, forsize
)
1296 enum tree_code code
;
1297 register tree arg1
, arg2
;
1298 int notrunc
, forsize
;
1300 HOST_WIDE_INT int1l
, int1h
, int2l
, int2h
;
1301 HOST_WIDE_INT low
, hi
;
1302 HOST_WIDE_INT garbagel
, garbageh
;
1304 int uns
= TREE_UNSIGNED (TREE_TYPE (arg1
));
1306 int no_overflow
= 0;
1308 int1l
= TREE_INT_CST_LOW (arg1
);
1309 int1h
= TREE_INT_CST_HIGH (arg1
);
1310 int2l
= TREE_INT_CST_LOW (arg2
);
1311 int2h
= TREE_INT_CST_HIGH (arg2
);
1316 low
= int1l
| int2l
, hi
= int1h
| int2h
;
1320 low
= int1l
^ int2l
, hi
= int1h
^ int2h
;
1324 low
= int1l
& int2l
, hi
= int1h
& int2h
;
1327 case BIT_ANDTC_EXPR
:
1328 low
= int1l
& ~int2l
, hi
= int1h
& ~int2h
;
1334 /* It's unclear from the C standard whether shifts can overflow.
1335 The following code ignores overflow; perhaps a C standard
1336 interpretation ruling is needed. */
1337 lshift_double (int1l
, int1h
, int2l
,
1338 TYPE_PRECISION (TREE_TYPE (arg1
)),
1347 lrotate_double (int1l
, int1h
, int2l
,
1348 TYPE_PRECISION (TREE_TYPE (arg1
)),
1353 overflow
= add_double (int1l
, int1h
, int2l
, int2h
, &low
, &hi
);
1357 neg_double (int2l
, int2h
, &low
, &hi
);
1358 add_double (int1l
, int1h
, low
, hi
, &low
, &hi
);
1359 overflow
= overflow_sum_sign (hi
, int2h
, int1h
);
1363 overflow
= mul_double (int1l
, int1h
, int2l
, int2h
, &low
, &hi
);
1366 case TRUNC_DIV_EXPR
:
1367 case FLOOR_DIV_EXPR
: case CEIL_DIV_EXPR
:
1368 case EXACT_DIV_EXPR
:
1369 /* This is a shortcut for a common special case. */
1370 if (int2h
== 0 && int2l
> 0
1371 && ! TREE_CONSTANT_OVERFLOW (arg1
)
1372 && ! TREE_CONSTANT_OVERFLOW (arg2
)
1373 && int1h
== 0 && int1l
>= 0)
1375 if (code
== CEIL_DIV_EXPR
)
1377 low
= int1l
/ int2l
, hi
= 0;
1381 /* ... fall through ... */
1383 case ROUND_DIV_EXPR
:
1384 if (int2h
== 0 && int2l
== 1)
1386 low
= int1l
, hi
= int1h
;
1389 if (int1l
== int2l
&& int1h
== int2h
1390 && ! (int1l
== 0 && int1h
== 0))
1395 overflow
= div_and_round_double (code
, uns
,
1396 int1l
, int1h
, int2l
, int2h
,
1397 &low
, &hi
, &garbagel
, &garbageh
);
1400 case TRUNC_MOD_EXPR
:
1401 case FLOOR_MOD_EXPR
: case CEIL_MOD_EXPR
:
1402 /* This is a shortcut for a common special case. */
1403 if (int2h
== 0 && int2l
> 0
1404 && ! TREE_CONSTANT_OVERFLOW (arg1
)
1405 && ! TREE_CONSTANT_OVERFLOW (arg2
)
1406 && int1h
== 0 && int1l
>= 0)
1408 if (code
== CEIL_MOD_EXPR
)
1410 low
= int1l
% int2l
, hi
= 0;
1414 /* ... fall through ... */
1416 case ROUND_MOD_EXPR
:
1417 overflow
= div_and_round_double (code
, uns
,
1418 int1l
, int1h
, int2l
, int2h
,
1419 &garbagel
, &garbageh
, &low
, &hi
);
1426 low
= (((unsigned HOST_WIDE_INT
) int1h
1427 < (unsigned HOST_WIDE_INT
) int2h
)
1428 || (((unsigned HOST_WIDE_INT
) int1h
1429 == (unsigned HOST_WIDE_INT
) int2h
)
1430 && ((unsigned HOST_WIDE_INT
) int1l
1431 < (unsigned HOST_WIDE_INT
) int2l
)));
1435 low
= ((int1h
< int2h
)
1436 || ((int1h
== int2h
)
1437 && ((unsigned HOST_WIDE_INT
) int1l
1438 < (unsigned HOST_WIDE_INT
) int2l
)));
1440 if (low
== (code
== MIN_EXPR
))
1441 low
= int1l
, hi
= int1h
;
1443 low
= int2l
, hi
= int2h
;
1450 if (TREE_TYPE (arg1
) == sizetype
&& hi
== 0
1452 && (TYPE_MAX_VALUE (sizetype
) == NULL
1453 || low
<= TREE_INT_CST_LOW (TYPE_MAX_VALUE (sizetype
)))
1455 && ! TREE_OVERFLOW (arg1
) && ! TREE_OVERFLOW (arg2
))
1459 t
= build_int_2 (low
, hi
);
1460 TREE_TYPE (t
) = TREE_TYPE (arg1
);
1464 = ((notrunc
? (!uns
|| forsize
) && overflow
1465 : force_fit_type (t
, (!uns
|| forsize
) && overflow
) && ! no_overflow
)
1466 | TREE_OVERFLOW (arg1
)
1467 | TREE_OVERFLOW (arg2
));
1468 /* If we're doing a size calculation, unsigned arithmetic does overflow.
1469 So check if force_fit_type truncated the value. */
1471 && ! TREE_OVERFLOW (t
)
1472 && (TREE_INT_CST_HIGH (t
) != hi
1473 || TREE_INT_CST_LOW (t
) != low
))
1474 TREE_OVERFLOW (t
) = 1;
1475 TREE_CONSTANT_OVERFLOW (t
) = (TREE_OVERFLOW (t
)
1476 | TREE_CONSTANT_OVERFLOW (arg1
)
1477 | TREE_CONSTANT_OVERFLOW (arg2
));
1485 REAL_VALUE_TYPE d1
, d2
;
1486 enum tree_code code
;
1492 const_binop_1 (data
)
1495 struct cb_args
* args
= (struct cb_args
*) data
;
1496 REAL_VALUE_TYPE value
;
1498 #ifdef REAL_ARITHMETIC
1499 REAL_ARITHMETIC (value
, args
->code
, args
->d1
, args
->d2
);
1504 value
= args
->d1
+ args
->d2
;
1508 value
= args
->d1
- args
->d2
;
1512 value
= args
->d1
* args
->d2
;
1516 #ifndef REAL_INFINITY
1521 value
= args
->d1
/ args
->d2
;
1525 value
= MIN (args
->d1
, args
->d2
);
1529 value
= MAX (args
->d1
, args
->d2
);
1535 #endif /* no REAL_ARITHMETIC */
1537 build_real (TREE_TYPE (args
->arg1
),
1538 real_value_truncate (TYPE_MODE (TREE_TYPE (args
->arg1
)),
1542 /* Combine two constants ARG1 and ARG2 under operation CODE
1543 to produce a new constant.
1544 We assume ARG1 and ARG2 have the same data type,
1545 or at least are the same kind of constant and the same machine mode.
1547 If NOTRUNC is nonzero, do not truncate the result to fit the data type. */
1550 const_binop (code
, arg1
, arg2
, notrunc
)
1551 enum tree_code code
;
1552 register tree arg1
, arg2
;
1555 STRIP_NOPS (arg1
); STRIP_NOPS (arg2
);
1557 if (TREE_CODE (arg1
) == INTEGER_CST
)
1558 return int_const_binop (code
, arg1
, arg2
, notrunc
, 0);
1560 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1561 if (TREE_CODE (arg1
) == REAL_CST
)
1567 struct cb_args args
;
1569 d1
= TREE_REAL_CST (arg1
);
1570 d2
= TREE_REAL_CST (arg2
);
1572 /* If either operand is a NaN, just return it. Otherwise, set up
1573 for floating-point trap; we return an overflow. */
1574 if (REAL_VALUE_ISNAN (d1
))
1576 else if (REAL_VALUE_ISNAN (d2
))
1579 /* Setup input for const_binop_1() */
1585 if (do_float_handler (const_binop_1
, (PTR
) &args
))
1587 /* Receive output from const_binop_1() */
1592 /* We got an exception from const_binop_1() */
1593 t
= copy_node (arg1
);
1598 = (force_fit_type (t
, overflow
)
1599 | TREE_OVERFLOW (arg1
) | TREE_OVERFLOW (arg2
));
1600 TREE_CONSTANT_OVERFLOW (t
)
1602 | TREE_CONSTANT_OVERFLOW (arg1
)
1603 | TREE_CONSTANT_OVERFLOW (arg2
);
1606 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
1607 if (TREE_CODE (arg1
) == COMPLEX_CST
)
1609 register tree type
= TREE_TYPE (arg1
);
1610 register tree r1
= TREE_REALPART (arg1
);
1611 register tree i1
= TREE_IMAGPART (arg1
);
1612 register tree r2
= TREE_REALPART (arg2
);
1613 register tree i2
= TREE_IMAGPART (arg2
);
1619 t
= build_complex (type
,
1620 const_binop (PLUS_EXPR
, r1
, r2
, notrunc
),
1621 const_binop (PLUS_EXPR
, i1
, i2
, notrunc
));
1625 t
= build_complex (type
,
1626 const_binop (MINUS_EXPR
, r1
, r2
, notrunc
),
1627 const_binop (MINUS_EXPR
, i1
, i2
, notrunc
));
1631 t
= build_complex (type
,
1632 const_binop (MINUS_EXPR
,
1633 const_binop (MULT_EXPR
,
1635 const_binop (MULT_EXPR
,
1638 const_binop (PLUS_EXPR
,
1639 const_binop (MULT_EXPR
,
1641 const_binop (MULT_EXPR
,
1648 register tree magsquared
1649 = const_binop (PLUS_EXPR
,
1650 const_binop (MULT_EXPR
, r2
, r2
, notrunc
),
1651 const_binop (MULT_EXPR
, i2
, i2
, notrunc
),
1654 t
= build_complex (type
,
1656 (INTEGRAL_TYPE_P (TREE_TYPE (r1
))
1657 ? TRUNC_DIV_EXPR
: RDIV_EXPR
,
1658 const_binop (PLUS_EXPR
,
1659 const_binop (MULT_EXPR
, r1
, r2
,
1661 const_binop (MULT_EXPR
, i1
, i2
,
1664 magsquared
, notrunc
),
1666 (INTEGRAL_TYPE_P (TREE_TYPE (r1
))
1667 ? TRUNC_DIV_EXPR
: RDIV_EXPR
,
1668 const_binop (MINUS_EXPR
,
1669 const_binop (MULT_EXPR
, i1
, r2
,
1671 const_binop (MULT_EXPR
, r1
, i2
,
1674 magsquared
, notrunc
));
1686 /* Return an INTEGER_CST with value V . The type is determined by bit_p:
1687 if it is zero, the type is taken from sizetype; if it is one, the type
1688 is taken from bitsizetype. */
1691 size_int_wide (number
, high
, bit_p
)
1692 unsigned HOST_WIDE_INT number
, high
;
1696 /* Type-size nodes already made for small sizes. */
1697 static tree size_table
[2*HOST_BITS_PER_WIDE_INT
+ 1][2];
1699 if (number
< 2*HOST_BITS_PER_WIDE_INT
+ 1 && ! high
1700 && size_table
[number
][bit_p
] != 0)
1701 return size_table
[number
][bit_p
];
1702 if (number
< 2*HOST_BITS_PER_WIDE_INT
+ 1 && ! high
)
1704 push_obstacks_nochange ();
1705 /* Make this a permanent node. */
1706 end_temporary_allocation ();
1707 t
= build_int_2 (number
, 0);
1708 TREE_TYPE (t
) = bit_p
? bitsizetype
: sizetype
;
1709 size_table
[number
][bit_p
] = t
;
1714 t
= build_int_2 (number
, high
);
1715 TREE_TYPE (t
) = bit_p
? bitsizetype
: sizetype
;
1716 TREE_OVERFLOW (t
) = TREE_CONSTANT_OVERFLOW (t
) = force_fit_type (t
, 0);
1721 /* Combine operands OP1 and OP2 with arithmetic operation CODE.
1722 CODE is a tree code. Data type is taken from `sizetype',
1723 If the operands are constant, so is the result. */
1726 size_binop (code
, arg0
, arg1
)
1727 enum tree_code code
;
1730 /* Handle the special case of two integer constants faster. */
1731 if (TREE_CODE (arg0
) == INTEGER_CST
&& TREE_CODE (arg1
) == INTEGER_CST
)
1733 /* And some specific cases even faster than that. */
1734 if (code
== PLUS_EXPR
&& integer_zerop (arg0
))
1736 else if ((code
== MINUS_EXPR
|| code
== PLUS_EXPR
)
1737 && integer_zerop (arg1
))
1739 else if (code
== MULT_EXPR
&& integer_onep (arg0
))
1742 /* Handle general case of two integer constants. */
1743 return int_const_binop (code
, arg0
, arg1
, 0, 1);
1746 if (arg0
== error_mark_node
|| arg1
== error_mark_node
)
1747 return error_mark_node
;
1749 return fold (build (code
, sizetype
, arg0
, arg1
));
1752 /* Combine operands OP1 and OP2 with arithmetic operation CODE.
1753 CODE is a tree code. Data type is taken from `ssizetype',
1754 If the operands are constant, so is the result. */
1757 ssize_binop (code
, arg0
, arg1
)
1758 enum tree_code code
;
1761 /* Handle the special case of two integer constants faster. */
1762 if (TREE_CODE (arg0
) == INTEGER_CST
&& TREE_CODE (arg1
) == INTEGER_CST
)
1764 /* And some specific cases even faster than that. */
1765 if (code
== PLUS_EXPR
&& integer_zerop (arg0
))
1767 else if ((code
== MINUS_EXPR
|| code
== PLUS_EXPR
)
1768 && integer_zerop (arg1
))
1770 else if (code
== MULT_EXPR
&& integer_onep (arg0
))
1773 /* Handle general case of two integer constants. We convert
1774 arg0 to ssizetype because int_const_binop uses its type for the
1776 arg0
= convert (ssizetype
, arg0
);
1777 return int_const_binop (code
, arg0
, arg1
, 0, 0);
1780 if (arg0
== error_mark_node
|| arg1
== error_mark_node
)
1781 return error_mark_node
;
1783 return fold (build (code
, ssizetype
, arg0
, arg1
));
1795 fold_convert_1 (data
)
1798 struct fc_args
* args
= (struct fc_args
*) data
;
1800 args
->t
= build_real (args
->type
,
1801 real_value_truncate (TYPE_MODE (args
->type
),
1802 TREE_REAL_CST (args
->arg1
)));
1805 /* Given T, a tree representing type conversion of ARG1, a constant,
1806 return a constant tree representing the result of conversion. */
1809 fold_convert (t
, arg1
)
1813 register tree type
= TREE_TYPE (t
);
1816 if (POINTER_TYPE_P (type
) || INTEGRAL_TYPE_P (type
))
1818 if (TREE_CODE (arg1
) == INTEGER_CST
)
1820 /* If we would build a constant wider than GCC supports,
1821 leave the conversion unfolded. */
1822 if (TYPE_PRECISION (type
) > 2 * HOST_BITS_PER_WIDE_INT
)
1825 /* Given an integer constant, make new constant with new type,
1826 appropriately sign-extended or truncated. */
1827 t
= build_int_2 (TREE_INT_CST_LOW (arg1
),
1828 TREE_INT_CST_HIGH (arg1
));
1829 TREE_TYPE (t
) = type
;
1830 /* Indicate an overflow if (1) ARG1 already overflowed,
1831 or (2) force_fit_type indicates an overflow.
1832 Tell force_fit_type that an overflow has already occurred
1833 if ARG1 is a too-large unsigned value and T is signed.
1834 But don't indicate an overflow if converting a pointer. */
1836 = ((force_fit_type (t
,
1837 (TREE_INT_CST_HIGH (arg1
) < 0
1838 && (TREE_UNSIGNED (type
)
1839 < TREE_UNSIGNED (TREE_TYPE (arg1
)))))
1840 && ! POINTER_TYPE_P (TREE_TYPE (arg1
)))
1841 || TREE_OVERFLOW (arg1
));
1842 TREE_CONSTANT_OVERFLOW (t
)
1843 = TREE_OVERFLOW (t
) | TREE_CONSTANT_OVERFLOW (arg1
);
1845 #if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1846 else if (TREE_CODE (arg1
) == REAL_CST
)
1848 /* Don't initialize these, use assignments.
1849 Initialized local aggregates don't work on old compilers. */
1853 tree type1
= TREE_TYPE (arg1
);
1856 x
= TREE_REAL_CST (arg1
);
1857 l
= real_value_from_int_cst (type1
, TYPE_MIN_VALUE (type
));
1859 no_upper_bound
= (TYPE_MAX_VALUE (type
) == NULL
);
1860 if (!no_upper_bound
)
1861 u
= real_value_from_int_cst (type1
, TYPE_MAX_VALUE (type
));
1863 /* See if X will be in range after truncation towards 0.
1864 To compensate for truncation, move the bounds away from 0,
1865 but reject if X exactly equals the adjusted bounds. */
1866 #ifdef REAL_ARITHMETIC
1867 REAL_ARITHMETIC (l
, MINUS_EXPR
, l
, dconst1
);
1868 if (!no_upper_bound
)
1869 REAL_ARITHMETIC (u
, PLUS_EXPR
, u
, dconst1
);
1872 if (!no_upper_bound
)
1875 /* If X is a NaN, use zero instead and show we have an overflow.
1876 Otherwise, range check. */
1877 if (REAL_VALUE_ISNAN (x
))
1878 overflow
= 1, x
= dconst0
;
1879 else if (! (REAL_VALUES_LESS (l
, x
)
1881 && REAL_VALUES_LESS (x
, u
)))
1884 #ifndef REAL_ARITHMETIC
1886 HOST_WIDE_INT low
, high
;
1887 HOST_WIDE_INT half_word
1888 = (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
/ 2);
1893 high
= (HOST_WIDE_INT
) (x
/ half_word
/ half_word
);
1894 x
-= (REAL_VALUE_TYPE
) high
* half_word
* half_word
;
1895 if (x
>= (REAL_VALUE_TYPE
) half_word
* half_word
/ 2)
1897 low
= x
- (REAL_VALUE_TYPE
) half_word
* half_word
/ 2;
1898 low
|= (HOST_WIDE_INT
) -1 << (HOST_BITS_PER_WIDE_INT
- 1);
1901 low
= (HOST_WIDE_INT
) x
;
1902 if (TREE_REAL_CST (arg1
) < 0)
1903 neg_double (low
, high
, &low
, &high
);
1904 t
= build_int_2 (low
, high
);
1908 HOST_WIDE_INT low
, high
;
1909 REAL_VALUE_TO_INT (&low
, &high
, x
);
1910 t
= build_int_2 (low
, high
);
1913 TREE_TYPE (t
) = type
;
1915 = TREE_OVERFLOW (arg1
) | force_fit_type (t
, overflow
);
1916 TREE_CONSTANT_OVERFLOW (t
)
1917 = TREE_OVERFLOW (t
) | TREE_CONSTANT_OVERFLOW (arg1
);
1919 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
1920 TREE_TYPE (t
) = type
;
1922 else if (TREE_CODE (type
) == REAL_TYPE
)
1924 #if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1925 if (TREE_CODE (arg1
) == INTEGER_CST
)
1926 return build_real_from_int_cst (type
, arg1
);
1927 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
1928 if (TREE_CODE (arg1
) == REAL_CST
)
1930 struct fc_args args
;
1932 if (REAL_VALUE_ISNAN (TREE_REAL_CST (arg1
)))
1935 TREE_TYPE (arg1
) = type
;
1939 /* Setup input for fold_convert_1() */
1943 if (do_float_handler (fold_convert_1
, (PTR
) &args
))
1945 /* Receive output from fold_convert_1() */
1950 /* We got an exception from fold_convert_1() */
1952 t
= copy_node (arg1
);
1956 = TREE_OVERFLOW (arg1
) | force_fit_type (t
, overflow
);
1957 TREE_CONSTANT_OVERFLOW (t
)
1958 = TREE_OVERFLOW (t
) | TREE_CONSTANT_OVERFLOW (arg1
);
1962 TREE_CONSTANT (t
) = 1;
1966 /* Return an expr equal to X but certainly not valid as an lvalue. */
1974 /* These things are certainly not lvalues. */
1975 if (TREE_CODE (x
) == NON_LVALUE_EXPR
1976 || TREE_CODE (x
) == INTEGER_CST
1977 || TREE_CODE (x
) == REAL_CST
1978 || TREE_CODE (x
) == STRING_CST
1979 || TREE_CODE (x
) == ADDR_EXPR
)
1982 result
= build1 (NON_LVALUE_EXPR
, TREE_TYPE (x
), x
);
1983 TREE_CONSTANT (result
) = TREE_CONSTANT (x
);
1987 /* Nonzero means lvalues are limited to those valid in pedantic ANSI C.
1988 Zero means allow extended lvalues. */
1990 int pedantic_lvalues
;
1992 /* When pedantic, return an expr equal to X but certainly not valid as a
1993 pedantic lvalue. Otherwise, return X. */
1996 pedantic_non_lvalue (x
)
1999 if (pedantic_lvalues
)
2000 return non_lvalue (x
);
2005 /* Given a tree comparison code, return the code that is the logical inverse
2006 of the given code. It is not safe to do this for floating-point
2007 comparisons, except for NE_EXPR and EQ_EXPR. */
2009 static enum tree_code
2010 invert_tree_comparison (code
)
2011 enum tree_code code
;
2032 /* Similar, but return the comparison that results if the operands are
2033 swapped. This is safe for floating-point. */
2035 static enum tree_code
2036 swap_tree_comparison (code
)
2037 enum tree_code code
;
2057 /* Return nonzero if CODE is a tree code that represents a truth value. */
2060 truth_value_p (code
)
2061 enum tree_code code
;
2063 return (TREE_CODE_CLASS (code
) == '<'
2064 || code
== TRUTH_AND_EXPR
|| code
== TRUTH_ANDIF_EXPR
2065 || code
== TRUTH_OR_EXPR
|| code
== TRUTH_ORIF_EXPR
2066 || code
== TRUTH_XOR_EXPR
|| code
== TRUTH_NOT_EXPR
);
2069 /* Return nonzero if two operands are necessarily equal.
2070 If ONLY_CONST is non-zero, only return non-zero for constants.
2071 This function tests whether the operands are indistinguishable;
2072 it does not test whether they are equal using C's == operation.
2073 The distinction is important for IEEE floating point, because
2074 (1) -0.0 and 0.0 are distinguishable, but -0.0==0.0, and
2075 (2) two NaNs may be indistinguishable, but NaN!=NaN. */
2078 operand_equal_p (arg0
, arg1
, only_const
)
2082 /* If both types don't have the same signedness, then we can't consider
2083 them equal. We must check this before the STRIP_NOPS calls
2084 because they may change the signedness of the arguments. */
2085 if (TREE_UNSIGNED (TREE_TYPE (arg0
)) != TREE_UNSIGNED (TREE_TYPE (arg1
)))
2091 if (TREE_CODE (arg0
) != TREE_CODE (arg1
)
2092 /* This is needed for conversions and for COMPONENT_REF.
2093 Might as well play it safe and always test this. */
2094 || TYPE_MODE (TREE_TYPE (arg0
)) != TYPE_MODE (TREE_TYPE (arg1
)))
2097 /* If ARG0 and ARG1 are the same SAVE_EXPR, they are necessarily equal.
2098 We don't care about side effects in that case because the SAVE_EXPR
2099 takes care of that for us. In all other cases, two expressions are
2100 equal if they have no side effects. If we have two identical
2101 expressions with side effects that should be treated the same due
2102 to the only side effects being identical SAVE_EXPR's, that will
2103 be detected in the recursive calls below. */
2104 if (arg0
== arg1
&& ! only_const
2105 && (TREE_CODE (arg0
) == SAVE_EXPR
2106 || (! TREE_SIDE_EFFECTS (arg0
) && ! TREE_SIDE_EFFECTS (arg1
))))
2109 /* Next handle constant cases, those for which we can return 1 even
2110 if ONLY_CONST is set. */
2111 if (TREE_CONSTANT (arg0
) && TREE_CONSTANT (arg1
))
2112 switch (TREE_CODE (arg0
))
2115 return (! TREE_CONSTANT_OVERFLOW (arg0
)
2116 && ! TREE_CONSTANT_OVERFLOW (arg1
)
2117 && TREE_INT_CST_LOW (arg0
) == TREE_INT_CST_LOW (arg1
)
2118 && TREE_INT_CST_HIGH (arg0
) == TREE_INT_CST_HIGH (arg1
));
2121 return (! TREE_CONSTANT_OVERFLOW (arg0
)
2122 && ! TREE_CONSTANT_OVERFLOW (arg1
)
2123 && REAL_VALUES_IDENTICAL (TREE_REAL_CST (arg0
),
2124 TREE_REAL_CST (arg1
)));
2127 return (operand_equal_p (TREE_REALPART (arg0
), TREE_REALPART (arg1
),
2129 && operand_equal_p (TREE_IMAGPART (arg0
), TREE_IMAGPART (arg1
),
2133 return (TREE_STRING_LENGTH (arg0
) == TREE_STRING_LENGTH (arg1
)
2134 && ! strncmp (TREE_STRING_POINTER (arg0
),
2135 TREE_STRING_POINTER (arg1
),
2136 TREE_STRING_LENGTH (arg0
)));
2139 return operand_equal_p (TREE_OPERAND (arg0
, 0), TREE_OPERAND (arg1
, 0),
2148 switch (TREE_CODE_CLASS (TREE_CODE (arg0
)))
2151 /* Two conversions are equal only if signedness and modes match. */
2152 if ((TREE_CODE (arg0
) == NOP_EXPR
|| TREE_CODE (arg0
) == CONVERT_EXPR
)
2153 && (TREE_UNSIGNED (TREE_TYPE (arg0
))
2154 != TREE_UNSIGNED (TREE_TYPE (arg1
))))
2157 return operand_equal_p (TREE_OPERAND (arg0
, 0),
2158 TREE_OPERAND (arg1
, 0), 0);
2162 if (operand_equal_p (TREE_OPERAND (arg0
, 0), TREE_OPERAND (arg1
, 0), 0)
2163 && operand_equal_p (TREE_OPERAND (arg0
, 1), TREE_OPERAND (arg1
, 1),
2167 /* For commutative ops, allow the other order. */
2168 return ((TREE_CODE (arg0
) == PLUS_EXPR
|| TREE_CODE (arg0
) == MULT_EXPR
2169 || TREE_CODE (arg0
) == MIN_EXPR
|| TREE_CODE (arg0
) == MAX_EXPR
2170 || TREE_CODE (arg0
) == BIT_IOR_EXPR
2171 || TREE_CODE (arg0
) == BIT_XOR_EXPR
2172 || TREE_CODE (arg0
) == BIT_AND_EXPR
2173 || TREE_CODE (arg0
) == NE_EXPR
|| TREE_CODE (arg0
) == EQ_EXPR
)
2174 && operand_equal_p (TREE_OPERAND (arg0
, 0),
2175 TREE_OPERAND (arg1
, 1), 0)
2176 && operand_equal_p (TREE_OPERAND (arg0
, 1),
2177 TREE_OPERAND (arg1
, 0), 0));
2180 switch (TREE_CODE (arg0
))
2183 return operand_equal_p (TREE_OPERAND (arg0
, 0),
2184 TREE_OPERAND (arg1
, 0), 0);
2188 return (operand_equal_p (TREE_OPERAND (arg0
, 0),
2189 TREE_OPERAND (arg1
, 0), 0)
2190 && operand_equal_p (TREE_OPERAND (arg0
, 1),
2191 TREE_OPERAND (arg1
, 1), 0));
2194 return (operand_equal_p (TREE_OPERAND (arg0
, 0),
2195 TREE_OPERAND (arg1
, 0), 0)
2196 && operand_equal_p (TREE_OPERAND (arg0
, 1),
2197 TREE_OPERAND (arg1
, 1), 0)
2198 && operand_equal_p (TREE_OPERAND (arg0
, 2),
2199 TREE_OPERAND (arg1
, 2), 0));
2205 if (TREE_CODE (arg0
) == RTL_EXPR
)
2206 return rtx_equal_p (RTL_EXPR_RTL (arg0
), RTL_EXPR_RTL (arg1
));
2214 /* Similar to operand_equal_p, but see if ARG0 might have been made by
2215 shorten_compare from ARG1 when ARG1 was being compared with OTHER.
2217 When in doubt, return 0. */
2220 operand_equal_for_comparison_p (arg0
, arg1
, other
)
2224 int unsignedp1
, unsignedpo
;
2225 tree primarg0
, primarg1
, primother
;
2226 unsigned correct_width
;
2228 if (operand_equal_p (arg0
, arg1
, 0))
2231 if (! INTEGRAL_TYPE_P (TREE_TYPE (arg0
))
2232 || ! INTEGRAL_TYPE_P (TREE_TYPE (arg1
)))
2235 /* Discard any conversions that don't change the modes of ARG0 and ARG1
2236 and see if the inner values are the same. This removes any
2237 signedness comparison, which doesn't matter here. */
2238 primarg0
= arg0
, primarg1
= arg1
;
2239 STRIP_NOPS (primarg0
); STRIP_NOPS (primarg1
);
2240 if (operand_equal_p (primarg0
, primarg1
, 0))
2243 /* Duplicate what shorten_compare does to ARG1 and see if that gives the
2244 actual comparison operand, ARG0.
2246 First throw away any conversions to wider types
2247 already present in the operands. */
2249 primarg1
= get_narrower (arg1
, &unsignedp1
);
2250 primother
= get_narrower (other
, &unsignedpo
);
2252 correct_width
= TYPE_PRECISION (TREE_TYPE (arg1
));
2253 if (unsignedp1
== unsignedpo
2254 && TYPE_PRECISION (TREE_TYPE (primarg1
)) < correct_width
2255 && TYPE_PRECISION (TREE_TYPE (primother
)) < correct_width
)
2257 tree type
= TREE_TYPE (arg0
);
2259 /* Make sure shorter operand is extended the right way
2260 to match the longer operand. */
2261 primarg1
= convert (signed_or_unsigned_type (unsignedp1
,
2262 TREE_TYPE (primarg1
)),
2265 if (operand_equal_p (arg0
, convert (type
, primarg1
), 0))
2272 /* See if ARG is an expression that is either a comparison or is performing
2273 arithmetic on comparisons. The comparisons must only be comparing
2274 two different values, which will be stored in *CVAL1 and *CVAL2; if
2275 they are non-zero it means that some operands have already been found.
2276 No variables may be used anywhere else in the expression except in the
2277 comparisons. If SAVE_P is true it means we removed a SAVE_EXPR around
2278 the expression and save_expr needs to be called with CVAL1 and CVAL2.
2280 If this is true, return 1. Otherwise, return zero. */
2283 twoval_comparison_p (arg
, cval1
, cval2
, save_p
)
2285 tree
*cval1
, *cval2
;
2288 enum tree_code code
= TREE_CODE (arg
);
2289 char class = TREE_CODE_CLASS (code
);
2291 /* We can handle some of the 'e' cases here. */
2292 if (class == 'e' && code
== TRUTH_NOT_EXPR
)
2294 else if (class == 'e'
2295 && (code
== TRUTH_ANDIF_EXPR
|| code
== TRUTH_ORIF_EXPR
2296 || code
== COMPOUND_EXPR
))
2299 /* ??? Disable this since the SAVE_EXPR might already be in use outside
2300 the expression. There may be no way to make this work, but it needs
2301 to be looked at again for 2.6. */
2303 else if (class == 'e' && code
== SAVE_EXPR
&& SAVE_EXPR_RTL (arg
) == 0)
2305 /* If we've already found a CVAL1 or CVAL2, this expression is
2306 two complex to handle. */
2307 if (*cval1
|| *cval2
)
2318 return twoval_comparison_p (TREE_OPERAND (arg
, 0), cval1
, cval2
, save_p
);
2321 return (twoval_comparison_p (TREE_OPERAND (arg
, 0), cval1
, cval2
, save_p
)
2322 && twoval_comparison_p (TREE_OPERAND (arg
, 1),
2323 cval1
, cval2
, save_p
));
2329 if (code
== COND_EXPR
)
2330 return (twoval_comparison_p (TREE_OPERAND (arg
, 0),
2331 cval1
, cval2
, save_p
)
2332 && twoval_comparison_p (TREE_OPERAND (arg
, 1),
2333 cval1
, cval2
, save_p
)
2334 && twoval_comparison_p (TREE_OPERAND (arg
, 2),
2335 cval1
, cval2
, save_p
));
2339 /* First see if we can handle the first operand, then the second. For
2340 the second operand, we know *CVAL1 can't be zero. It must be that
2341 one side of the comparison is each of the values; test for the
2342 case where this isn't true by failing if the two operands
2345 if (operand_equal_p (TREE_OPERAND (arg
, 0),
2346 TREE_OPERAND (arg
, 1), 0))
2350 *cval1
= TREE_OPERAND (arg
, 0);
2351 else if (operand_equal_p (*cval1
, TREE_OPERAND (arg
, 0), 0))
2353 else if (*cval2
== 0)
2354 *cval2
= TREE_OPERAND (arg
, 0);
2355 else if (operand_equal_p (*cval2
, TREE_OPERAND (arg
, 0), 0))
2360 if (operand_equal_p (*cval1
, TREE_OPERAND (arg
, 1), 0))
2362 else if (*cval2
== 0)
2363 *cval2
= TREE_OPERAND (arg
, 1);
2364 else if (operand_equal_p (*cval2
, TREE_OPERAND (arg
, 1), 0))
/* ARG is a tree that is known to contain just arithmetic operations and
   comparisons.  Evaluate the operations in the tree substituting NEW0 for
   any occurrence of OLD0 as an operand of a comparison and likewise for
   NEW1 and OLD1.  */

static tree
eval_subst (arg, old0, new0, old1, new1)
     tree arg;
     tree old0, new0, old1, new1;
{
  tree type = TREE_TYPE (arg);
  enum tree_code code = TREE_CODE (arg);
  char class = TREE_CODE_CLASS (code);

  /* We can handle some of the 'e' cases here.  */
  if (class == 'e' && code == TRUTH_NOT_EXPR)
    class = '1';
  else if (class == 'e'
	   && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR))
    class = '2';

  switch (class)
    {
    case '1':
      return fold (build1 (code, type,
			   eval_subst (TREE_OPERAND (arg, 0),
				       old0, new0, old1, new1)));

    case '2':
      return fold (build (code, type,
			  eval_subst (TREE_OPERAND (arg, 0),
				      old0, new0, old1, new1),
			  eval_subst (TREE_OPERAND (arg, 1),
				      old0, new0, old1, new1)));

    case 'e':
      switch (code)
	{
	case SAVE_EXPR:
	  return eval_subst (TREE_OPERAND (arg, 0), old0, new0, old1, new1);

	case COMPOUND_EXPR:
	  return eval_subst (TREE_OPERAND (arg, 1), old0, new0, old1, new1);

	case COND_EXPR:
	  return fold (build (code, type,
			      eval_subst (TREE_OPERAND (arg, 0),
					  old0, new0, old1, new1),
			      eval_subst (TREE_OPERAND (arg, 1),
					  old0, new0, old1, new1),
			      eval_subst (TREE_OPERAND (arg, 2),
					  old0, new0, old1, new1)));
	default:
	  break;
	}
      /* fall through - ??? */

    case '<':
      {
	tree arg0 = TREE_OPERAND (arg, 0);
	tree arg1 = TREE_OPERAND (arg, 1);

	/* We need to check both for exact equality and tree equality.  The
	   former will be true if the operand has a side-effect.  In that
	   case, we know the operand occurred exactly once.  */

	if (arg0 == old0 || operand_equal_p (arg0, old0, 0))
	  arg0 = new0;
	else if (arg0 == old1 || operand_equal_p (arg0, old1, 0))
	  arg0 = new1;

	if (arg1 == old0 || operand_equal_p (arg1, old0, 0))
	  arg1 = new0;
	else if (arg1 == old1 || operand_equal_p (arg1, old1, 0))
	  arg1 = new1;

	return fold (build (code, type, arg0, arg1));
      }

    default:
      return arg;
    }
}
/* Return a tree for the case when the result of an expression is RESULT
   converted to TYPE and OMITTED was previously an operand of the expression
   but is now not needed (e.g., we folded OMITTED * 0).

   If OMITTED has side effects, we must evaluate it.  Otherwise, just do
   the conversion of RESULT to TYPE.  */

static tree
omit_one_operand (type, result, omitted)
     tree type, result, omitted;
{
  tree t = convert (type, result);

  if (TREE_SIDE_EFFECTS (omitted))
    return build (COMPOUND_EXPR, type, omitted, t);

  return non_lvalue (t);
}

/* Similar, but call pedantic_non_lvalue instead of non_lvalue.  */

static tree
pedantic_omit_one_operand (type, result, omitted)
     tree type, result, omitted;
{
  tree t = convert (type, result);

  if (TREE_SIDE_EFFECTS (omitted))
    return build (COMPOUND_EXPR, type, omitted, t);

  return pedantic_non_lvalue (t);
}
/* Return a simplified tree node for the truth-negation of ARG.  This
   never alters ARG itself.  We assume that ARG is an operation that
   returns a truth value (0 or 1).  */

tree
invert_truthvalue (arg)
     tree arg;
{
  tree type = TREE_TYPE (arg);
  enum tree_code code = TREE_CODE (arg);

  if (code == ERROR_MARK)
    return arg;

  /* If this is a comparison, we can simply invert it, except for
     floating-point non-equality comparisons, in which case we just
     enclose a TRUTH_NOT_EXPR around what we have.  */

  if (TREE_CODE_CLASS (code) == '<')
    {
      if (FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
	  && !flag_fast_math && code != NE_EXPR && code != EQ_EXPR)
	return build1 (TRUTH_NOT_EXPR, type, arg);
      else
	return build (invert_tree_comparison (code), type,
		      TREE_OPERAND (arg, 0), TREE_OPERAND (arg, 1));
    }

  switch (code)
    {
    case INTEGER_CST:
      return convert (type, build_int_2 (TREE_INT_CST_LOW (arg) == 0
					 && TREE_INT_CST_HIGH (arg) == 0, 0));

    case TRUTH_AND_EXPR:
      return build (TRUTH_OR_EXPR, type,
		    invert_truthvalue (TREE_OPERAND (arg, 0)),
		    invert_truthvalue (TREE_OPERAND (arg, 1)));

    case TRUTH_OR_EXPR:
      return build (TRUTH_AND_EXPR, type,
		    invert_truthvalue (TREE_OPERAND (arg, 0)),
		    invert_truthvalue (TREE_OPERAND (arg, 1)));

    case TRUTH_XOR_EXPR:
      /* Here we can invert either operand.  We invert the first operand
	 unless the second operand is a TRUTH_NOT_EXPR in which case our
	 result is the XOR of the first operand with the inside of the
	 negation of the second operand.  */

      if (TREE_CODE (TREE_OPERAND (arg, 1)) == TRUTH_NOT_EXPR)
	return build (TRUTH_XOR_EXPR, type, TREE_OPERAND (arg, 0),
		      TREE_OPERAND (TREE_OPERAND (arg, 1), 0));
      else
	return build (TRUTH_XOR_EXPR, type,
		      invert_truthvalue (TREE_OPERAND (arg, 0)),
		      TREE_OPERAND (arg, 1));

    case TRUTH_ANDIF_EXPR:
      return build (TRUTH_ORIF_EXPR, type,
		    invert_truthvalue (TREE_OPERAND (arg, 0)),
		    invert_truthvalue (TREE_OPERAND (arg, 1)));

    case TRUTH_ORIF_EXPR:
      return build (TRUTH_ANDIF_EXPR, type,
		    invert_truthvalue (TREE_OPERAND (arg, 0)),
		    invert_truthvalue (TREE_OPERAND (arg, 1)));

    case TRUTH_NOT_EXPR:
      return TREE_OPERAND (arg, 0);

    case COND_EXPR:
      return build (COND_EXPR, type, TREE_OPERAND (arg, 0),
		    invert_truthvalue (TREE_OPERAND (arg, 1)),
		    invert_truthvalue (TREE_OPERAND (arg, 2)));

    case COMPOUND_EXPR:
      return build (COMPOUND_EXPR, type, TREE_OPERAND (arg, 0),
		    invert_truthvalue (TREE_OPERAND (arg, 1)));

    case NON_LVALUE_EXPR:
      return invert_truthvalue (TREE_OPERAND (arg, 0));

    case NOP_EXPR:
    case CONVERT_EXPR:
    case FLOAT_EXPR:
      return build1 (TREE_CODE (arg), type,
		     invert_truthvalue (TREE_OPERAND (arg, 0)));

    case BIT_AND_EXPR:
      if (!integer_onep (TREE_OPERAND (arg, 1)))
	break;
      return build (EQ_EXPR, type, arg, convert (type, integer_zero_node));

    case SAVE_EXPR:
      return build1 (TRUTH_NOT_EXPR, type, arg);

    case CLEANUP_POINT_EXPR:
      return build1 (CLEANUP_POINT_EXPR, type,
		     invert_truthvalue (TREE_OPERAND (arg, 0)));

    default:
      break;
    }
  if (TREE_CODE (TREE_TYPE (arg)) != BOOLEAN_TYPE)
    abort ();
  return build1 (TRUTH_NOT_EXPR, type, arg);
}
/* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both
   operands are another bit-wise operation with a common input.  If so,
   distribute the bit operations to save an operation and possibly two if
   constants are involved.  For example, convert
	(A | B) & (A | C) into A | (B & C)
   Further simplification will occur if B and C are constants.

   If this optimization cannot be done, 0 will be returned.  */

static tree
distribute_bit_expr (code, type, arg0, arg1)
     enum tree_code code;
     tree type;
     tree arg0, arg1;
{
  tree common;
  tree left, right;

  if (TREE_CODE (arg0) != TREE_CODE (arg1)
      || TREE_CODE (arg0) == code
      || (TREE_CODE (arg0) != BIT_AND_EXPR
	  && TREE_CODE (arg0) != BIT_IOR_EXPR))
    return 0;

  if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0), 0))
    {
      common = TREE_OPERAND (arg0, 0);
      left = TREE_OPERAND (arg0, 1);
      right = TREE_OPERAND (arg1, 1);
    }
  else if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 1), 0))
    {
      common = TREE_OPERAND (arg0, 0);
      left = TREE_OPERAND (arg0, 1);
      right = TREE_OPERAND (arg1, 0);
    }
  else if (operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 0), 0))
    {
      common = TREE_OPERAND (arg0, 1);
      left = TREE_OPERAND (arg0, 0);
      right = TREE_OPERAND (arg1, 1);
    }
  else if (operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 1), 0))
    {
      common = TREE_OPERAND (arg0, 1);
      left = TREE_OPERAND (arg0, 0);
      right = TREE_OPERAND (arg1, 0);
    }
  else
    return 0;

  return fold (build (TREE_CODE (arg0), type, common,
		      fold (build (code, type, left, right))));
}
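
/* Editor's note: an illustrative sketch (not part of the original file)
   of the distributive identity the function above relies on, stated over
   plain unsigned integers.  Guarded by #if 0 so it is never compiled.  */
#if 0
#include <assert.h>

static void
distribute_bit_expr_example (void)
{
  unsigned a = 0x0f, b = 0x30, c = 0x21;

  /* (A | B) & (A | C) == A | (B & C); the rewrite trades two ORs and
     an AND for one OR and one AND, and B & C may fold if constant.  */
  assert (((a | b) & (a | c)) == (a | (b & c)));
  /* The dual form handled by the same code: (A & B) | (A & C).  */
  assert (((a & b) | (a & c)) == (a & (b | c)));
}
#endif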
/* Return a BIT_FIELD_REF of type TYPE to refer to BITSIZE bits of INNER
   starting at BITPOS.  The field is unsigned if UNSIGNEDP is non-zero.  */

static tree
make_bit_field_ref (inner, type, bitsize, bitpos, unsignedp)
     tree inner;
     tree type;
     int bitsize, bitpos;
     int unsignedp;
{
  tree result = build (BIT_FIELD_REF, type, inner,
		       size_int (bitsize), bitsize_int (bitpos, 0L));

  TREE_UNSIGNED (result) = unsignedp;

  return result;
}
/* Optimize a bit-field compare.

   There are two cases:  First is a compare against a constant and the
   second is a comparison of two items where the fields are at the same
   bit position relative to the start of a chunk (byte, halfword, word)
   large enough to contain it.  In these cases we can avoid the shift
   implicit in bitfield extractions.

   For constants, we emit a compare of the shifted constant with the
   BIT_AND_EXPR of a mask and a byte, halfword, or word of the operand being
   compared.  For two fields at the same position, we do the ANDs with the
   similar mask and compare the result of the ANDs.

   CODE is the comparison code, known to be either NE_EXPR or EQ_EXPR.
   COMPARE_TYPE is the type of the comparison, and LHS and RHS
   are the left and right operands of the comparison, respectively.

   If the optimization described above can be done, we return the resulting
   tree.  Otherwise we return zero.  */

static tree
optimize_bit_field_compare (code, compare_type, lhs, rhs)
     enum tree_code code;
     tree compare_type;
     tree lhs, rhs;
{
  int lbitpos, lbitsize, rbitpos, rbitsize;
  int lnbitpos, lnbitsize, rnbitpos = 0, rnbitsize = 0;
  tree type = TREE_TYPE (lhs);
  tree signed_type, unsigned_type;
  int const_p = TREE_CODE (rhs) == INTEGER_CST;
  enum machine_mode lmode, rmode, lnmode, rnmode = VOIDmode;
  int lunsignedp, runsignedp;
  int lvolatilep = 0, rvolatilep = 0;
  int alignment;
  tree linner, rinner = NULL_TREE;
  tree mask;
  tree offset;

  /* Get all the information about the extractions being done.  If the bit size
     is the same as the size of the underlying object, we aren't doing an
     extraction at all and so can do nothing.  */
  linner = get_inner_reference (lhs, &lbitsize, &lbitpos, &offset, &lmode,
				&lunsignedp, &lvolatilep, &alignment);
  if (linner == lhs || lbitsize == GET_MODE_BITSIZE (lmode) || lbitsize < 0
      || offset != 0)
    return 0;

  if (!const_p)
    {
      /* If this is not a constant, we can only do something if bit positions,
	 sizes, and signedness are the same.  */
      rinner = get_inner_reference (rhs, &rbitsize, &rbitpos, &offset, &rmode,
				    &runsignedp, &rvolatilep, &alignment);

      if (rinner == rhs || lbitpos != rbitpos || lbitsize != rbitsize
	  || lunsignedp != runsignedp || offset != 0)
	return 0;
    }

  /* See if we can find a mode to refer to this field.  We should be able to,
     but fail if we can't.  */
  lnmode = get_best_mode (lbitsize, lbitpos,
			  TYPE_ALIGN (TREE_TYPE (linner)), word_mode,
			  lvolatilep);
  if (lnmode == VOIDmode)
    return 0;

  /* Set signed and unsigned types of the precision of this mode for the
     shifts below.  */
  signed_type = type_for_mode (lnmode, 0);
  unsigned_type = type_for_mode (lnmode, 1);

  if (! const_p)
    {
      rnmode = get_best_mode (rbitsize, rbitpos,
			      TYPE_ALIGN (TREE_TYPE (rinner)), word_mode,
			      rvolatilep);
      if (rnmode == VOIDmode)
	return 0;
    }

  /* Compute the bit position and size for the new reference and our offset
     within it.  If the new reference is the same size as the original, we
     won't optimize anything, so return zero.  */
  lnbitsize = GET_MODE_BITSIZE (lnmode);
  lnbitpos = lbitpos & ~ (lnbitsize - 1);
  lbitpos -= lnbitpos;
  if (lnbitsize == lbitsize)
    return 0;

  if (! const_p)
    {
      rnbitsize = GET_MODE_BITSIZE (rnmode);
      rnbitpos = rbitpos & ~ (rnbitsize - 1);
      rbitpos -= rnbitpos;
      if (rnbitsize == rbitsize)
	return 0;
    }

  if (BYTES_BIG_ENDIAN)
    lbitpos = lnbitsize - lbitsize - lbitpos;

  /* Make the mask to be used against the extracted field.  */
  mask = build_int_2 (~0, ~0);
  TREE_TYPE (mask) = unsigned_type;
  force_fit_type (mask, 0);
  mask = convert (unsigned_type, mask);
  mask = const_binop (LSHIFT_EXPR, mask, size_int (lnbitsize - lbitsize), 0);
  mask = const_binop (RSHIFT_EXPR, mask,
		      size_int (lnbitsize - lbitsize - lbitpos), 0);

  if (! const_p)
    /* If not comparing with constant, just rework the comparison
       and return.  */
    return build (code, compare_type,
		  build (BIT_AND_EXPR, unsigned_type,
			 make_bit_field_ref (linner, unsigned_type,
					     lnbitsize, lnbitpos, 1),
			 mask),
		  build (BIT_AND_EXPR, unsigned_type,
			 make_bit_field_ref (rinner, unsigned_type,
					     rnbitsize, rnbitpos, 1),
			 mask));

  /* Otherwise, we are handling the constant case.  See if the constant is too
     big for the field.  Warn and return a tree for 0 (false) if so.  We do
     this not only for its own sake, but to avoid having to test for this
     error case below.  If we didn't, we might generate wrong code.

     For unsigned fields, the constant shifted right by the field length should
     be all zero.  For signed fields, the high-order bits should agree with
     the sign bit.  */

  if (lunsignedp)
    {
      if (! integer_zerop (const_binop (RSHIFT_EXPR,
					convert (unsigned_type, rhs),
					size_int (lbitsize), 0)))
	{
	  warning ("comparison is always %d due to width of bitfield",
		   code == NE_EXPR);
	  return convert (compare_type,
			  (code == NE_EXPR
			   ? integer_one_node : integer_zero_node));
	}
    }
  else
    {
      tree tem = const_binop (RSHIFT_EXPR, convert (signed_type, rhs),
			      size_int (lbitsize - 1), 0);
      if (! integer_zerop (tem) && ! integer_all_onesp (tem))
	{
	  warning ("comparison is always %d due to width of bitfield",
		   code == NE_EXPR);
	  return convert (compare_type,
			  (code == NE_EXPR
			   ? integer_one_node : integer_zero_node));
	}
    }

  /* Single-bit compares should always be against zero.  */
  if (lbitsize == 1 && ! integer_zerop (rhs))
    {
      code = code == EQ_EXPR ? NE_EXPR : EQ_EXPR;
      rhs = convert (type, integer_zero_node);
    }

  /* Make a new bitfield reference, shift the constant over the
     appropriate number of bits and mask it with the computed mask
     (in case this was a signed field).  If we changed it, make a new one.  */
  lhs = make_bit_field_ref (linner, unsigned_type, lnbitsize, lnbitpos, 1);
  if (lvolatilep)
    {
      TREE_SIDE_EFFECTS (lhs) = 1;
      TREE_THIS_VOLATILE (lhs) = 1;
    }

  rhs = fold (const_binop (BIT_AND_EXPR,
			   const_binop (LSHIFT_EXPR,
					convert (unsigned_type, rhs),
					size_int (lbitpos), 0),
			   mask, 0));

  return build (code, compare_type,
		build (BIT_AND_EXPR, unsigned_type, lhs, mask),
		rhs);
}
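
/* Editor's note: an illustrative sketch (not part of the original file)
   of the constant case above, modelled with plain unsigned arithmetic.
   The field position, width, and values are made up for the example.  */
#if 0
#include <assert.h>

static void
bit_field_compare_example (void)
{
  unsigned word = 0x5a;                         /* the containing chunk */
  unsigned bitpos = 2, bitsize = 3;             /* hypothetical field */
  unsigned mask = ((1u << bitsize) - 1) << bitpos;
  unsigned field = (word >> bitpos) & ((1u << bitsize) - 1);
  unsigned c = 6;                               /* constant operand */

  /* FIELD == C can be tested as (WORD & MASK) == (C << BITPOS): the
     shift implicit in the extraction moves to the constant, where it
     folds away at compile time.  */
  assert ((field == c) == ((word & mask) == (c << bitpos)));
}
#endif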
/* Subroutine for fold_truthop: decode a field reference.

   If EXP is a comparison reference, we return the innermost reference.

   *PBITSIZE is set to the number of bits in the reference, *PBITPOS is
   set to the starting bit number.

   If the innermost field can be completely contained in a mode-sized
   unit, *PMODE is set to that mode.  Otherwise, it is set to VOIDmode.

   *PVOLATILEP is set to 1 if any expression encountered is volatile;
   otherwise it is not changed.

   *PUNSIGNEDP is set to the signedness of the field.

   *PMASK is set to the mask used.  This is either contained in a
   BIT_AND_EXPR or derived from the width of the field.

   *PAND_MASK is set to the mask found in a BIT_AND_EXPR, if any.

   Return 0 if this is not a component reference or is one that we can't
   do anything with.  */

static tree
decode_field_reference (exp, pbitsize, pbitpos, pmode, punsignedp,
			pvolatilep, pmask, pand_mask)
     tree exp;
     int *pbitsize, *pbitpos;
     enum machine_mode *pmode;
     int *punsignedp, *pvolatilep;
     tree *pmask;
     tree *pand_mask;
{
  tree and_mask = 0;
  tree mask, inner, offset;
  tree unsigned_type;
  int precision;
  int alignment;

  /* All the optimizations using this function assume integer fields.
     There are problems with FP fields since the type_for_size call
     below can fail for, e.g., XFmode.  */
  if (! INTEGRAL_TYPE_P (TREE_TYPE (exp)))
    return 0;

  STRIP_NOPS (exp);

  if (TREE_CODE (exp) == BIT_AND_EXPR)
    {
      and_mask = TREE_OPERAND (exp, 1);
      exp = TREE_OPERAND (exp, 0);
      STRIP_NOPS (exp); STRIP_NOPS (and_mask);
      if (TREE_CODE (and_mask) != INTEGER_CST)
	return 0;
    }

  inner = get_inner_reference (exp, pbitsize, pbitpos, &offset, pmode,
			       punsignedp, pvolatilep, &alignment);
  if ((inner == exp && and_mask == 0)
      || *pbitsize < 0 || offset != 0)
    return 0;

  /* Compute the mask to access the bitfield.  */
  unsigned_type = type_for_size (*pbitsize, 1);
  precision = TYPE_PRECISION (unsigned_type);

  mask = build_int_2 (~0, ~0);
  TREE_TYPE (mask) = unsigned_type;
  force_fit_type (mask, 0);
  mask = const_binop (LSHIFT_EXPR, mask, size_int (precision - *pbitsize), 0);
  mask = const_binop (RSHIFT_EXPR, mask, size_int (precision - *pbitsize), 0);

  /* Merge it with the mask we found in the BIT_AND_EXPR, if any.  */
  if (and_mask != 0)
    mask = fold (build (BIT_AND_EXPR, unsigned_type,
			convert (unsigned_type, and_mask), mask));

  *pmask = mask;
  *pand_mask = and_mask;
  return inner;
}
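
/* Editor's note: an illustrative sketch (not part of the original file)
   of the mask computation above: on an unsigned type, shifting all-ones
   left and then right by PRECISION - BITSIZE leaves exactly BITSIZE
   low-order ones.  The widths here are made up for the example.  */
#if 0
#include <assert.h>

static void
field_mask_example (void)
{
  unsigned precision = 32, bitsize = 6;
  unsigned mask = ~0u;

  mask = mask << (precision - bitsize);   /* the LSHIFT_EXPR step */
  mask = mask >> (precision - bitsize);   /* the RSHIFT_EXPR step */
  assert (mask == 0x3f);                  /* six low-order ones */
}
#endif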
/* Return non-zero if MASK represents a mask of SIZE ones in the low-order
   bits of type TYPE.  */

static int
all_ones_mask_p (mask, size)
     tree mask;
     int size;
{
  tree type = TREE_TYPE (mask);
  int precision = TYPE_PRECISION (type);
  tree tmask;

  tmask = build_int_2 (~0, ~0);
  TREE_TYPE (tmask) = signed_type (type);
  force_fit_type (tmask, 0);
  return
    tree_int_cst_equal (mask,
			const_binop (RSHIFT_EXPR,
				     const_binop (LSHIFT_EXPR, tmask,
						  size_int (precision - size),
						  0),
				     size_int (precision - size), 0));
}
/* Subroutine for fold_truthop: determine if an operand is simple enough
   to be evaluated unconditionally.  */

static int
simple_operand_p (exp)
     tree exp;
{
  /* Strip any conversions that don't change the machine mode.  */
  while ((TREE_CODE (exp) == NOP_EXPR
	  || TREE_CODE (exp) == CONVERT_EXPR)
	 && (TYPE_MODE (TREE_TYPE (exp))
	     == TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)))))
    exp = TREE_OPERAND (exp, 0);

  return (TREE_CODE_CLASS (TREE_CODE (exp)) == 'c'
	  || (TREE_CODE_CLASS (TREE_CODE (exp)) == 'd'
	      && ! TREE_ADDRESSABLE (exp)
	      && ! TREE_THIS_VOLATILE (exp)
	      && ! DECL_NONLOCAL (exp)
	      /* Don't regard global variables as simple.  They may be
		 allocated in ways unknown to the compiler (shared memory,
		 #pragma weak, etc).  */
	      && ! TREE_PUBLIC (exp)
	      && ! DECL_EXTERNAL (exp)
	      /* Loading a static variable is unduly expensive, but global
		 registers aren't expensive.  */
	      && (! TREE_STATIC (exp) || DECL_REGISTER (exp))));
}
/* The following functions are subroutines to fold_range_test and allow it to
   try to change a logical combination of comparisons into a range test.

   For example, both
	X == 2 || X == 3 || X == 4 || X == 5
   and
	X >= 2 && X <= 5
   are converted to
	(unsigned) (X - 2) <= 3

   We describe each set of comparisons as being either inside or outside
   a range, using a variable named like IN_P, and then describe the
   range with a lower and upper bound.  If one of the bounds is omitted,
   it represents either the highest or lowest value of the type.

   In the comments below, we represent a range by two numbers in brackets
   preceded by a "+" to designate being inside that range, or a "-" to
   designate being outside that range, so the condition can be inverted by
   flipping the prefix.  An omitted bound is represented by a "-".  For
   example, "- [-, 10]" means being outside the range starting at the lowest
   possible value and ending at 10, in other words, being greater than 10.
   The range "+ [-, -]" is always true and hence the range "- [-, -]" is
   always false.

   We set up things so that the missing bounds are handled in a consistent
   manner so neither a missing bound nor "true" and "false" need to be
   handled using a special case.  */
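
/* Editor's note: an illustrative sketch (not part of the original file)
   of the range-test transformation described above, checked exhaustively
   over a small interval.  */
#if 0
#include <assert.h>

static void
range_test_example (void)
{
  int x;

  for (x = -10; x <= 10; x++)
    /* X >= 2 && X <= 5 becomes one unsigned comparison: subtracting the
       low bound makes every value below it wrap to a huge unsigned
       number, so a single <= test covers both bounds.  */
    assert ((x >= 2 && x <= 5) == ((unsigned) (x - 2) <= 3));
}
#endif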
/* Return the result of applying CODE to ARG0 and ARG1, but handle the case
   of ARG0 and/or ARG1 being omitted, meaning an unlimited range.  UPPER0_P
   and UPPER1_P are nonzero if the respective argument is an upper bound
   and zero for a lower.  TYPE, if nonzero, is the type of the result; it
   must be specified for a comparison.  ARG1 will be converted to ARG0's
   type if both are specified.  */

static tree
range_binop (code, type, arg0, upper0_p, arg1, upper1_p)
     enum tree_code code;
     tree type;
     tree arg0, arg1;
     int upper0_p, upper1_p;
{
  tree tem;
  int result;
  int sgn0, sgn1;

  /* If neither arg represents infinity, do the normal operation.
     Else, if not a comparison, return infinity.  Else handle the special
     comparison rules.  Note that most of the cases below won't occur, but
     are handled for consistency.  */

  if (arg0 != 0 && arg1 != 0)
    {
      tem = fold (build (code, type != 0 ? type : TREE_TYPE (arg0),
			 arg0, convert (TREE_TYPE (arg0), arg1)));
      STRIP_NOPS (tem);
      return TREE_CODE (tem) == INTEGER_CST ? tem : 0;
    }

  if (TREE_CODE_CLASS (code) != '<')
    return 0;

  /* Set SGN[01] to -1 if ARG[01] is a lower bound, 1 for upper, and 0
     for neither.  In real maths, we cannot assume open ended ranges are
     the same.  But, this is computer arithmetic, where numbers are finite.
     We can therefore make the transformation of any unbounded range with
     the value Z, Z being greater than any representable number.  This permits
     us to treat unbounded ranges as equal.  */
  sgn0 = arg0 != 0 ? 0 : (upper0_p ? 1 : -1);
  sgn1 = arg1 != 0 ? 0 : (upper1_p ? 1 : -1);

  switch (code)
    {
    case EQ_EXPR:
      result = sgn0 == sgn1;
      break;
    case NE_EXPR:
      result = sgn0 != sgn1;
      break;
    case LT_EXPR:
      result = sgn0 < sgn1;
      break;
    case LE_EXPR:
      result = sgn0 <= sgn1;
      break;
    case GT_EXPR:
      result = sgn0 > sgn1;
      break;
    case GE_EXPR:
      result = sgn0 >= sgn1;
      break;
    default:
      abort ();
    }

  return convert (type, result ? integer_one_node : integer_zero_node);
}
/* Given EXP, a logical expression, set the range it is testing into
   variables denoted by PIN_P, PLOW, and PHIGH.  Return the expression
   actually being tested.  *PLOW and *PHIGH will be made the same type
   as the returned expression.  If EXP is not a comparison, we will most
   likely not be returning a useful value and range.  */

static tree
make_range (exp, pin_p, plow, phigh)
     tree exp;
     int *pin_p;
     tree *plow, *phigh;
{
  enum tree_code code;
  tree arg0, arg1, type = NULL_TREE;
  tree orig_type = NULL_TREE;
  int in_p, n_in_p;
  tree low, high, n_low, n_high;

  /* Start with simply saying "EXP != 0" and then look at the code of EXP
     and see if we can refine the range.  Some of the cases below may not
     happen, but it doesn't seem worth worrying about this.  We "continue"
     the outer loop when we've changed something; otherwise we "break"
     the switch, which will "break" the while.  */

  in_p = 0, low = high = convert (TREE_TYPE (exp), integer_zero_node);

  while (1)
    {
      code = TREE_CODE (exp);

      if (IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code)))
	{
	  arg0 = TREE_OPERAND (exp, 0);
	  if (TREE_CODE_CLASS (code) == '<'
	      || TREE_CODE_CLASS (code) == '1'
	      || TREE_CODE_CLASS (code) == '2')
	    type = TREE_TYPE (arg0);
	  if (TREE_CODE_CLASS (code) == '2'
	      || TREE_CODE_CLASS (code) == '<'
	      || (TREE_CODE_CLASS (code) == 'e'
		  && tree_code_length[(int) code] > 1))
	    arg1 = TREE_OPERAND (exp, 1);
	}

      /* Set ORIG_TYPE as soon as TYPE is non-null so that we do not
	 lose a cast by accident.  */
      if (type != NULL_TREE && orig_type == NULL_TREE)
	orig_type = type;

      switch (code)
	{
	case TRUTH_NOT_EXPR:
	  in_p = ! in_p, exp = arg0;
	  continue;

	case EQ_EXPR: case NE_EXPR:
	case LT_EXPR: case LE_EXPR: case GE_EXPR: case GT_EXPR:
	  /* We can only do something if the range is testing for zero
	     and if the second operand is an integer constant.  Note that
	     saying something is "in" the range we make is done by
	     complementing IN_P since it will set in the initial case of
	     being not equal to zero; "out" is leaving it alone.  */
	  if (low == 0 || high == 0
	      || ! integer_zerop (low) || ! integer_zerop (high)
	      || TREE_CODE (arg1) != INTEGER_CST)
	    break;

	  switch (code)
	    {
	    case NE_EXPR:  /* - [c, c]  */
	      low = high = arg1;
	      break;
	    case EQ_EXPR:  /* + [c, c]  */
	      in_p = ! in_p, low = high = arg1;
	      break;
	    case GT_EXPR:  /* - [-, c] */
	      low = 0, high = arg1;
	      break;
	    case GE_EXPR:  /* + [c, -] */
	      in_p = ! in_p, low = arg1, high = 0;
	      break;
	    case LT_EXPR:  /* - [c, -] */
	      low = arg1, high = 0;
	      break;
	    case LE_EXPR:  /* + [-, c] */
	      in_p = ! in_p, low = 0, high = arg1;
	      break;
	    default:
	      abort ();
	    }

	  exp = arg0;

	  /* If this is an unsigned comparison, we also know that EXP is
	     greater than or equal to zero.  We base the range tests we make
	     on that fact, so we record it here so we can parse existing
	     range tests.  */
	  if (TREE_UNSIGNED (type) && (low == 0 || high == 0))
	    {
	      if (! merge_ranges (&n_in_p, &n_low, &n_high, in_p, low, high,
				  1, convert (type, integer_zero_node),
				  NULL_TREE))
		break;

	      in_p = n_in_p, low = n_low, high = n_high;

	      /* If the high bound is missing, reverse the range so it
		 goes from zero to the low bound minus 1.  */
	      if (high == 0)
		{
		  in_p = ! in_p;
		  high = range_binop (MINUS_EXPR, NULL_TREE, low, 0,
				      integer_one_node, 0);
		  low = convert (type, integer_zero_node);
		}
	    }
	  continue;

	case NEGATE_EXPR:
	  /* (-x) IN [a,b] -> x in [-b, -a]  */
	  n_low = range_binop (MINUS_EXPR, type,
			       convert (type, integer_zero_node), 0, high, 1);
	  n_high = range_binop (MINUS_EXPR, type,
				convert (type, integer_zero_node), 0, low, 0);
	  low = n_low, high = n_high;
	  exp = arg0;
	  continue;

	case BIT_NOT_EXPR:
	  /* ~ X -> -X - 1  */
	  exp = build (MINUS_EXPR, type, build1 (NEGATE_EXPR, type, arg0),
		       convert (type, integer_one_node));
	  continue;

	case PLUS_EXPR:  case MINUS_EXPR:
	  if (TREE_CODE (arg1) != INTEGER_CST)
	    break;

	  /* If EXP is signed, any overflow in the computation is undefined,
	     so we don't worry about it so long as our computations on
	     the bounds don't overflow.  For unsigned, overflow is defined
	     and this is exactly the right thing.  */
	  n_low = range_binop (code == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR,
			       type, low, 0, arg1, 0);
	  n_high = range_binop (code == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR,
				type, high, 1, arg1, 0);
	  if ((n_low != 0 && TREE_OVERFLOW (n_low))
	      || (n_high != 0 && TREE_OVERFLOW (n_high)))
	    break;

	  /* Check for an unsigned range which has wrapped around the maximum
	     value thus making n_high < n_low, and normalize it.  */
	  if (n_low && n_high && tree_int_cst_lt (n_high, n_low))
	    {
	      low = range_binop (PLUS_EXPR, type, n_high, 0,
				 integer_one_node, 0);
	      high = range_binop (MINUS_EXPR, type, n_low, 0,
				  integer_one_node, 0);
	      in_p = ! in_p;
	    }
	  else
	    low = n_low, high = n_high;

	  exp = arg0;
	  continue;

	case NOP_EXPR:  case NON_LVALUE_EXPR:  case CONVERT_EXPR:
	  if (TYPE_PRECISION (type) > TYPE_PRECISION (orig_type))
	    break;

	  if (! INTEGRAL_TYPE_P (type)
	      || (low != 0 && ! int_fits_type_p (low, type))
	      || (high != 0 && ! int_fits_type_p (high, type)))
	    break;

	  n_low = low, n_high = high;

	  if (n_low != 0)
	    n_low = convert (type, n_low);

	  if (n_high != 0)
	    n_high = convert (type, n_high);

	  /* If we're converting from an unsigned to a signed type,
	     we will be doing the comparison as unsigned.  The tests above
	     have already verified that LOW and HIGH are both positive.

	     So we have to make sure that the original unsigned value will
	     be interpreted as positive.  */
	  if (TREE_UNSIGNED (type) && ! TREE_UNSIGNED (TREE_TYPE (exp)))
	    {
	      tree equiv_type = type_for_mode (TYPE_MODE (type), 1);
	      tree high_positive;

	      /* A range without an upper bound is, naturally, unbounded.
		 Since convert would have cropped a very large value, use
		 the max value for the destination type.  */
	      if (TYPE_MAX_VALUE (equiv_type))
		high_positive = TYPE_MAX_VALUE (equiv_type);
	      else
		high_positive = TYPE_MAX_VALUE (type);

	      high_positive = fold (build (RSHIFT_EXPR, type,
					   convert (type, high_positive),
					   convert (type, integer_one_node)));

	      /* If the low bound is specified, "and" the range with the
		 range for which the original unsigned value will be
		 positive.  */
	      if (low != 0)
		{
		  if (! merge_ranges (&n_in_p, &n_low, &n_high,
				      1, n_low, n_high,
				      1, convert (type, integer_zero_node),
				      high_positive))
		    break;

		  in_p = (n_in_p == in_p);
		}
	      else
		{
		  /* Otherwise, "or" the range with the range of the input
		     that will be interpreted as negative.  */
		  if (! merge_ranges (&n_in_p, &n_low, &n_high,
				      0, n_low, n_high,
				      1, convert (type, integer_zero_node),
				      high_positive))
		    break;

		  in_p = (in_p != n_in_p);
		}
	    }

	  exp = arg0;
	  low = n_low, high = n_high;
	  continue;

	default:
	  break;
	}

      break;
    }

  /* If EXP is a constant, we can evaluate whether this is true or false.  */
  if (TREE_CODE (exp) == INTEGER_CST)
    {
      in_p = in_p == (integer_onep (range_binop (GE_EXPR, integer_type_node,
						 exp, 0, low, 0))
		      && integer_onep (range_binop (LE_EXPR, integer_type_node,
						    exp, 1, high, 1)));
      low = high = 0;
      exp = 0;
    }

  *pin_p = in_p, *plow = low, *phigh = high;
  return exp;
}
/* Given a range, LOW, HIGH, and IN_P, an expression, EXP, and a result
   type, TYPE, return an expression to test if EXP is in (or out of, depending
   on IN_P) the range.  */

static tree
build_range_check (type, exp, in_p, low, high)
     tree type;
     tree exp;
     int in_p;
     tree low, high;
{
  tree etype = TREE_TYPE (exp);
  tree utype, value;

  if (! in_p
      && (0 != (value = build_range_check (type, exp, 1, low, high))))
    return invert_truthvalue (value);

  else if (low == 0 && high == 0)
    return convert (type, integer_one_node);

  else if (low == 0)
    return fold (build (LE_EXPR, type, exp, high));

  else if (high == 0)
    return fold (build (GE_EXPR, type, exp, low));

  else if (operand_equal_p (low, high, 0))
    return fold (build (EQ_EXPR, type, exp, low));

  else if (TREE_UNSIGNED (etype) && integer_zerop (low))
    return build_range_check (type, exp, 1, 0, high);

  else if (integer_zerop (low))
    {
      utype = unsigned_type (etype);
      return build_range_check (type, convert (utype, exp), 1, 0,
				convert (utype, high));
    }

  else if (0 != (value = const_binop (MINUS_EXPR, high, low, 0))
	   && ! TREE_OVERFLOW (value))
    return build_range_check (type,
			      fold (build (MINUS_EXPR, etype, exp, low)),
			      1, convert (etype, integer_zero_node), value);
  else
    return 0;
}
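
/* Editor's note: an illustrative sketch (not part of the original file)
   of two of the reductions above: a one-point range becomes an equality
   test, and a general range is shifted down to start at zero and then
   handled as a single unsigned comparison.  */
#if 0
#include <assert.h>

static void
build_range_check_example (void)
{
  int x;

  for (x = -100; x <= 100; x++)
    {
      /* [7, 7] collapses to EQ_EXPR.  */
      assert ((x >= 7 && x <= 7) == (x == 7));
      /* [-3, 12] is rewritten via MINUS_EXPR as [0, 15] of X + 3.  */
      assert ((x >= -3 && x <= 12) == ((unsigned) (x + 3) <= 15));
    }
}
#endif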
/* Given two ranges, see if we can merge them into one.  Return 1 if we
   can, 0 if we can't.  Set the output range into the specified parameters.  */

static int
merge_ranges (pin_p, plow, phigh, in0_p, low0, high0, in1_p, low1, high1)
     int *pin_p;
     tree *plow, *phigh;
     int in0_p, in1_p;
     tree low0, high0, low1, high1;
{
  int no_overlap;
  int subset;
  int temp;
  tree tem;
  int in_p;
  tree low, high;
  int lowequal = ((low0 == 0 && low1 == 0)
		  || integer_onep (range_binop (EQ_EXPR, integer_type_node,
						low0, 0, low1, 0)));
  int highequal = ((high0 == 0 && high1 == 0)
		   || integer_onep (range_binop (EQ_EXPR, integer_type_node,
						 high0, 1, high1, 1)));

  /* Make range 0 be the range that starts first, or ends last if they
     start at the same value.  Swap them if it isn't.  */
  if (integer_onep (range_binop (GT_EXPR, integer_type_node,
				 low0, 0, low1, 0))
      || (lowequal
	  && integer_onep (range_binop (GT_EXPR, integer_type_node,
					high1, 1, high0, 1))))
    {
      temp = in0_p, in0_p = in1_p, in1_p = temp;
      tem = low0, low0 = low1, low1 = tem;
      tem = high0, high0 = high1, high1 = tem;
    }

  /* Now flag two cases, whether the ranges are disjoint or whether the
     second range is totally subsumed in the first.  Note that the tests
     below are simplified by the ones above.  */
  no_overlap = integer_onep (range_binop (LT_EXPR, integer_type_node,
					  high0, 1, low1, 0));
  subset = integer_onep (range_binop (LE_EXPR, integer_type_node,
				      high1, 1, high0, 1));

  /* We now have four cases, depending on whether we are including or
     excluding the two ranges.  */
  if (in0_p && in1_p)
    {
      /* If they don't overlap, the result is false.  If the second range
	 is a subset it is the result.  Otherwise, the range is from the start
	 of the second to the end of the first.  */
      if (no_overlap)
	in_p = 0, low = high = 0;
      else if (subset)
	in_p = 1, low = low1, high = high1;
      else
	in_p = 1, low = low1, high = high0;
    }

  else if (in0_p && ! in1_p)
    {
      /* If they don't overlap, the result is the first range.  If they are
	 equal, the result is false.  If the second range is a subset of the
	 first, and the ranges begin at the same place, we go from just after
	 the end of the first range to the end of the second.  If the second
	 range is not a subset of the first, or if it is a subset and both
	 ranges end at the same place, the range starts at the start of the
	 first range and ends just before the second range.
	 Otherwise, we can't describe this as a single range.  */
      if (no_overlap)
	in_p = 1, low = low0, high = high0;
      else if (lowequal && highequal)
	in_p = 0, low = high = 0;
      else if (subset && lowequal)
	{
	  in_p = 1, high = high0;
	  low = range_binop (PLUS_EXPR, NULL_TREE, high1, 0,
			     integer_one_node, 0);
	}
      else if (! subset || highequal)
	{
	  in_p = 1, low = low0;
	  high = range_binop (MINUS_EXPR, NULL_TREE, low1, 0,
			      integer_one_node, 0);
	}
      else
	return 0;
    }

  else if (! in0_p && in1_p)
    {
      /* If they don't overlap, the result is the second range.  If the second
	 is a subset of the first, the result is false.  Otherwise,
	 the range starts just after the first range and ends at the
	 end of the second.  */
      if (no_overlap)
	in_p = 1, low = low1, high = high1;
      else if (subset)
	in_p = 0, low = high = 0;
      else
	{
	  in_p = 1, high = high1;
	  low = range_binop (PLUS_EXPR, NULL_TREE, high0, 1,
			     integer_one_node, 0);
	}
    }

  else
    {
      /* The case where we are excluding both ranges.  Here the complex case
	 is if they don't overlap.  In that case, the only time we have a
	 range is if they are adjacent.  If the second is a subset of the
	 first, the result is the first.  Otherwise, the range to exclude
	 starts at the beginning of the first range and ends at the end of the
	 second.  */
      if (no_overlap)
	{
	  if (integer_onep (range_binop (EQ_EXPR, integer_type_node,
					 range_binop (PLUS_EXPR, NULL_TREE,
						      high0, 1,
						      integer_one_node, 1),
					 1, low1, 0)))
	    in_p = 0, low = low0, high = high1;
	  else
	    return 0;
	}
      else if (subset)
	in_p = 0, low = low0, high = high0;
      else
	in_p = 0, low = low0, high = high1;
    }

  *pin_p = in_p, *plow = low, *phigh = high;
  return 1;
}
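
/* Editor's note: an illustrative sketch (not part of the original file)
   of two merges the function above performs, checked exhaustively over
   a small interval.  */
#if 0
#include <assert.h>

static void
merge_ranges_example (void)
{
  int x;

  for (x = -20; x <= 20; x++)
    {
      /* Both ranges included: + [2, 10] with + [5, 15] gives + [5, 10].  */
      assert (((x >= 2 && x <= 10) && (x >= 5 && x <= 15))
	      == (x >= 5 && x <= 10));
      /* Adjacent ranges combine: + [2, 5] or + [6, 9] gives + [2, 9].  */
      assert (((x >= 2 && x <= 5) || (x >= 6 && x <= 9))
	      == (x >= 2 && x <= 9));
    }
}
#endif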
/* EXP is some logical combination of boolean tests.  See if we can
   merge it into some range test.  Return the new tree if so.  */

static tree
fold_range_test (exp)
     tree exp;
{
  int or_op = (TREE_CODE (exp) == TRUTH_ORIF_EXPR
	       || TREE_CODE (exp) == TRUTH_OR_EXPR);
  int in0_p, in1_p, in_p;
  tree low0, low1, low, high0, high1, high;
  tree lhs = make_range (TREE_OPERAND (exp, 0), &in0_p, &low0, &high0);
  tree rhs = make_range (TREE_OPERAND (exp, 1), &in1_p, &low1, &high1);
  tree tem;

  /* If this is an OR operation, invert both sides; we will invert
     again at the end.  */
  if (or_op)
    in0_p = ! in0_p, in1_p = ! in1_p;

  /* If both expressions are the same, if we can merge the ranges, and we
     can build the range test, return it or it inverted.  If one of the
     ranges is always true or always false, consider it to be the same
     expression as the other.  */
  if ((lhs == 0 || rhs == 0 || operand_equal_p (lhs, rhs, 0))
      && merge_ranges (&in_p, &low, &high, in0_p, low0, high0,
		       in1_p, low1, high1)
      && 0 != (tem = (build_range_check (TREE_TYPE (exp),
					 lhs != 0 ? lhs
					 : rhs != 0 ? rhs : integer_zero_node,
					 in_p, low, high))))
    return or_op ? invert_truthvalue (tem) : tem;

  /* On machines where the branch cost is expensive, if this is a
     short-circuited branch and the underlying object on both sides
     is the same, make a non-short-circuit operation.  */
  else if (BRANCH_COST >= 2
	   && (TREE_CODE (exp) == TRUTH_ANDIF_EXPR
	       || TREE_CODE (exp) == TRUTH_ORIF_EXPR)
	   && operand_equal_p (lhs, rhs, 0))
    {
      /* If simple enough, just rewrite.  Otherwise, make a SAVE_EXPR
	 unless we are at top level or LHS contains a PLACEHOLDER_EXPR, in
	 which cases we can't do this.  */
      if (simple_operand_p (lhs))
	return build (TREE_CODE (exp) == TRUTH_ANDIF_EXPR
		      ? TRUTH_AND_EXPR : TRUTH_OR_EXPR,
		      TREE_TYPE (exp), TREE_OPERAND (exp, 0),
		      TREE_OPERAND (exp, 1));

      else if (current_function_decl != 0
	       && ! contains_placeholder_p (lhs))
	{
	  tree common = save_expr (lhs);

	  if (0 != (lhs = build_range_check (TREE_TYPE (exp), common,
					     or_op ? ! in0_p : in0_p,
					     low0, high0))
	      && (0 != (rhs = build_range_check (TREE_TYPE (exp), common,
						 or_op ? ! in1_p : in1_p,
						 low1, high1))))
	    return build (TREE_CODE (exp) == TRUTH_ANDIF_EXPR
			  ? TRUTH_AND_EXPR : TRUTH_OR_EXPR,
			  TREE_TYPE (exp), lhs, rhs);
	}
    }

  return 0;
}
/* Subroutine for fold_truthop: C is an INTEGER_CST interpreted as a P
   bit value.  Arrange things so the extra bits will be set to zero if and
   only if C is sign-extended to its full width.  If MASK is nonzero,
   it is an INTEGER_CST that should be AND'ed with the extra bits.  */

static tree
unextend (c, p, unsignedp, mask)
     tree c;
     int p;
     int unsignedp;
     tree mask;
{
  tree type = TREE_TYPE (c);
  int modesize = GET_MODE_BITSIZE (TYPE_MODE (type));
  tree temp;

  if (p == modesize || unsignedp)
    return c;

  /* We work by getting just the sign bit into the low-order bit, then
     into the high-order bit, then sign-extend.  We then XOR that value
     with C.  */
  temp = const_binop (RSHIFT_EXPR, c, size_int (p - 1), 0);
  temp = const_binop (BIT_AND_EXPR, temp, size_int (1), 0);

  /* We must use a signed type in order to get an arithmetic right shift.
     However, we must also avoid introducing accidental overflows, so that
     a subsequent call to integer_zerop will work.  Hence we must
     do the type conversion here.  At this point, the constant is either
     zero or one, and the conversion to a signed type can never overflow.
     We could get an overflow if this conversion is done anywhere else.  */
  if (TREE_UNSIGNED (type))
    temp = convert (signed_type (type), temp);

  temp = const_binop (LSHIFT_EXPR, temp, size_int (modesize - 1), 0);
  temp = const_binop (RSHIFT_EXPR, temp, size_int (modesize - p - 1), 0);
  if (mask != 0)
    temp = const_binop (BIT_AND_EXPR, temp, convert (TREE_TYPE (c), mask), 0);
  /* If necessary, convert the type back to match the type of C.  */
  if (TREE_UNSIGNED (type))
    temp = convert (type, temp);

  return convert (type, const_binop (BIT_XOR_EXPR, c, temp, 0));
}
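
/* Editor's note: an illustrative sketch (not part of the original file)
   of the sign-extension trick above, using host ints and assuming
   two's complement with arithmetic right shifts, as the constant
   folder's own double-word arithmetic provides.  */
#if 0
#include <assert.h>

static void
unextend_example (void)
{
  int p = 4, modesize = 32;          /* field width and mode width */
  int c = 0xa;                       /* 1010: the field's sign bit is set */
  int temp = (c >> (p - 1)) & 1;     /* sign bit into the low-order bit */

  temp = (int) ((unsigned) temp << (modesize - 1));  /* into the high bit */
  temp >>= modesize - p - 1;         /* arithmetic shift: ones above bit P-1 */

  /* XORing C with TEMP sets the extra bits exactly when the sign bit was
     set, i.e. 0xa sign-extends to -6 instead of zero-extending to 10.  */
  assert ((c ^ temp) == -6);
}
#endif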
/* Find ways of folding logical expressions of LHS and RHS:
   Try to merge two comparisons to the same innermost item.
   Look for range tests like "ch >= '0' && ch <= '9'".
   Look for combinations of simple terms on machines with expensive branches
   and evaluate the RHS unconditionally.

   For example, if we have p->a == 2 && p->b == 4 and we can make an
   object large enough to span both A and B, we can do this with a comparison
   against the object ANDed with a mask.

   If we have p->a == q->a && p->b == q->b, we may be able to use bit masking
   operations to do this with one comparison.

   We check for both normal comparisons and the BIT_AND_EXPRs made by this
   function and the one above.

   CODE is the logical operation being done.  It can be TRUTH_ANDIF_EXPR,
   TRUTH_AND_EXPR, TRUTH_ORIF_EXPR, or TRUTH_OR_EXPR.

   TRUTH_TYPE is the type of the logical operand and LHS and RHS are its
   two operands.

   We return the simplified tree or 0 if no optimization is possible.  */

static tree
fold_truthop (code, truth_type, lhs, rhs)
     enum tree_code code;
     tree truth_type, lhs, rhs;
{
  /* If this is the "or" of two comparisons, we can do something if
     the comparisons are NE_EXPR.  If this is the "and", we can do something
     if the comparisons are EQ_EXPR.  I.e.,
	(a->b == 2 && a->c == 4) can become (a->new == NEW).

     WANTED_CODE is this operation code.  For single bit fields, we can
     convert EQ_EXPR to NE_EXPR so we need not reject the "wrong"
     comparison for one-bit fields.  */

  enum tree_code wanted_code;
  enum tree_code lcode, rcode;
  tree ll_arg, lr_arg, rl_arg, rr_arg;
  tree ll_inner, lr_inner, rl_inner, rr_inner;
  int ll_bitsize, ll_bitpos, lr_bitsize, lr_bitpos;
  int rl_bitsize, rl_bitpos, rr_bitsize, rr_bitpos;
  int xll_bitpos, xlr_bitpos, xrl_bitpos, xrr_bitpos;
  int lnbitsize, lnbitpos, rnbitsize, rnbitpos;
  int ll_unsignedp, lr_unsignedp, rl_unsignedp, rr_unsignedp;
  enum machine_mode ll_mode, lr_mode, rl_mode, rr_mode;
  enum machine_mode lnmode, rnmode;
  tree ll_mask, lr_mask, rl_mask, rr_mask;
  tree ll_and_mask, lr_and_mask, rl_and_mask, rr_and_mask;
  tree l_const, r_const;
  tree lntype, rntype, result;
  int first_bit, end_bit;
  int volatilep;

  /* Start by getting the comparison codes.  Fail if anything is volatile.
     If one operand is a BIT_AND_EXPR with the constant one, treat it as if
     it were surrounded with a NE_EXPR.  */

  if (TREE_SIDE_EFFECTS (lhs) || TREE_SIDE_EFFECTS (rhs))
    return 0;

  lcode = TREE_CODE (lhs);
  rcode = TREE_CODE (rhs);

  if (lcode == BIT_AND_EXPR && integer_onep (TREE_OPERAND (lhs, 1)))
    lcode = NE_EXPR, lhs = build (NE_EXPR, truth_type, lhs, integer_zero_node);

  if (rcode == BIT_AND_EXPR && integer_onep (TREE_OPERAND (rhs, 1)))
    rcode = NE_EXPR, rhs = build (NE_EXPR, truth_type, rhs, integer_zero_node);

  if (TREE_CODE_CLASS (lcode) != '<' || TREE_CODE_CLASS (rcode) != '<')
    return 0;

  code = ((code == TRUTH_AND_EXPR || code == TRUTH_ANDIF_EXPR)
	  ? TRUTH_AND_EXPR : TRUTH_OR_EXPR);

  ll_arg = TREE_OPERAND (lhs, 0);
  lr_arg = TREE_OPERAND (lhs, 1);
  rl_arg = TREE_OPERAND (rhs, 0);
  rr_arg = TREE_OPERAND (rhs, 1);

  /* If the RHS can be evaluated unconditionally and its operands are
     simple, it wins to evaluate the RHS unconditionally on machines
     with expensive branches.  In this case, this isn't a comparison
     that can be merged.  */

  /* @@ I'm not sure it wins on the m88110 to do this if the comparisons
     are with zero (tmw).  */

  if (BRANCH_COST >= 2
      && INTEGRAL_TYPE_P (TREE_TYPE (rhs))
      && simple_operand_p (rl_arg)
      && simple_operand_p (rr_arg))
    return build (code, truth_type, lhs, rhs);

  /* See if the comparisons can be merged.  Then get all the parameters for
     each side.  */

  if ((lcode != EQ_EXPR && lcode != NE_EXPR)
      || (rcode != EQ_EXPR && rcode != NE_EXPR))
    return 0;

  volatilep = 0;
  ll_inner = decode_field_reference (ll_arg,
				     &ll_bitsize, &ll_bitpos, &ll_mode,
				     &ll_unsignedp, &volatilep, &ll_mask,
				     &ll_and_mask);
  lr_inner = decode_field_reference (lr_arg,
				     &lr_bitsize, &lr_bitpos, &lr_mode,
				     &lr_unsignedp, &volatilep, &lr_mask,
				     &lr_and_mask);
  rl_inner = decode_field_reference (rl_arg,
				     &rl_bitsize, &rl_bitpos, &rl_mode,
				     &rl_unsignedp, &volatilep, &rl_mask,
				     &rl_and_mask);
  rr_inner = decode_field_reference (rr_arg,
				     &rr_bitsize, &rr_bitpos, &rr_mode,
				     &rr_unsignedp, &volatilep, &rr_mask,
				     &rr_and_mask);

  /* It must be true that the inner operation on the lhs of each
     comparison must be the same if we are to be able to do anything.
     Then see if we have constants.  If not, the same must be true for
     the rhs's.  */
  if (volatilep || ll_inner == 0 || rl_inner == 0
      || ! operand_equal_p (ll_inner, rl_inner, 0))
    return 0;

  if (TREE_CODE (lr_arg) == INTEGER_CST
      && TREE_CODE (rr_arg) == INTEGER_CST)
    l_const = lr_arg, r_const = rr_arg;
  else if (lr_inner == 0 || rr_inner == 0
	   || ! operand_equal_p (lr_inner, rr_inner, 0))
    return 0;
  else
    l_const = r_const = 0;

  /* If either comparison code is not correct for our logical operation,
     fail.  However, we can convert a one-bit comparison against zero into
     the opposite comparison against that bit being set in the field.  */

  wanted_code = (code == TRUTH_AND_EXPR ? EQ_EXPR : NE_EXPR);
  if (lcode != wanted_code)
    {
      if (l_const && integer_zerop (l_const) && integer_pow2p (ll_mask))
	{
	  if (ll_unsignedp || tree_log2 (ll_mask) + 1 < ll_bitsize)
	    l_const = ll_mask;
	  else
	    /* Since ll_arg is a single bit bit mask, we can sign extend
	       it appropriately with a NEGATE_EXPR.
	       l_const is made a signed value here, but since for l_const != NULL
	       lr_unsignedp is not used, we don't need to clear the latter.  */
	    l_const = fold (build1 (NEGATE_EXPR, TREE_TYPE (ll_arg),
				    convert (TREE_TYPE (ll_arg), ll_mask)));
	}
      else
	return 0;
    }

  if (rcode != wanted_code)
    {
      if (r_const && integer_zerop (r_const) && integer_pow2p (rl_mask))
	{
	  if (rl_unsignedp || tree_log2 (rl_mask) + 1 < rl_bitsize)
	    r_const = rl_mask;
	  else
	    /* This is analogous to the code for l_const above.  */
	    r_const = fold (build1 (NEGATE_EXPR, TREE_TYPE (rl_arg),
				    convert (TREE_TYPE (rl_arg), rl_mask)));
	}
      else
	return 0;
    }

  /* See if we can find a mode that contains both fields being compared on
     the left.  If we can't, fail.  Otherwise, update all constants and masks
     to be relative to a field of that size.  */
  first_bit = MIN (ll_bitpos, rl_bitpos);
  end_bit = MAX (ll_bitpos + ll_bitsize, rl_bitpos + rl_bitsize);
  lnmode = get_best_mode (end_bit - first_bit, first_bit,
			  TYPE_ALIGN (TREE_TYPE (ll_inner)), word_mode,
			  volatilep);
  if (lnmode == VOIDmode)
    return 0;

  lnbitsize = GET_MODE_BITSIZE (lnmode);
  lnbitpos = first_bit & ~ (lnbitsize - 1);
  lntype = type_for_size (lnbitsize, 1);
  xll_bitpos = ll_bitpos - lnbitpos, xrl_bitpos = rl_bitpos - lnbitpos;

  if (BYTES_BIG_ENDIAN)
    {
      xll_bitpos = lnbitsize - xll_bitpos - ll_bitsize;
      xrl_bitpos = lnbitsize - xrl_bitpos - rl_bitsize;
    }

  ll_mask = const_binop (LSHIFT_EXPR, convert (lntype, ll_mask),
			 size_int (xll_bitpos), 0);
  rl_mask = const_binop (LSHIFT_EXPR, convert (lntype, rl_mask),
			 size_int (xrl_bitpos), 0);

  if (l_const)
    {
      l_const = convert (lntype, l_const);
      l_const = unextend (l_const, ll_bitsize, ll_unsignedp, ll_and_mask);
      l_const = const_binop (LSHIFT_EXPR, l_const, size_int (xll_bitpos), 0);
      if (! integer_zerop (const_binop (BIT_AND_EXPR, l_const,
					fold (build1 (BIT_NOT_EXPR,
						      lntype, ll_mask)),
					0)))
	{
	  warning ("comparison is always %d", wanted_code == NE_EXPR);

	  return convert (truth_type,
			  wanted_code == NE_EXPR
			  ? integer_one_node : integer_zero_node);
	}
    }
  if (r_const)
    {
      r_const = convert (lntype, r_const);
      r_const = unextend (r_const, rl_bitsize, rl_unsignedp, rl_and_mask);
      r_const = const_binop (LSHIFT_EXPR, r_const, size_int (xrl_bitpos), 0);
      if (! integer_zerop (const_binop (BIT_AND_EXPR, r_const,
					fold (build1 (BIT_NOT_EXPR,
						      lntype, rl_mask)),
					0)))
	{
	  warning ("comparison is always %d", wanted_code == NE_EXPR);

	  return convert (truth_type,
			  wanted_code == NE_EXPR
			  ? integer_one_node : integer_zero_node);
	}
    }

  /* If the right sides are not constant, do the same for it.  Also,
     disallow this optimization if a size or signedness mismatch occurs
     between the left and right sides.  */
  if (l_const == 0)
    {
      if (ll_bitsize != lr_bitsize || rl_bitsize != rr_bitsize
	  || ll_unsignedp != lr_unsignedp || rl_unsignedp != rr_unsignedp
	  /* Make sure the two fields on the right
	     correspond to the left without being swapped.  */
	  || ll_bitpos - rl_bitpos != lr_bitpos - rr_bitpos)
	return 0;

      first_bit = MIN (lr_bitpos, rr_bitpos);
      end_bit = MAX (lr_bitpos + lr_bitsize, rr_bitpos + rr_bitsize);
      rnmode = get_best_mode (end_bit - first_bit, first_bit,
			      TYPE_ALIGN (TREE_TYPE (lr_inner)), word_mode,
			      volatilep);
      if (rnmode == VOIDmode)
	return 0;

      rnbitsize = GET_MODE_BITSIZE (rnmode);
      rnbitpos = first_bit & ~ (rnbitsize - 1);
      rntype = type_for_size (rnbitsize, 1);
      xlr_bitpos = lr_bitpos - rnbitpos, xrr_bitpos = rr_bitpos - rnbitpos;

      if (BYTES_BIG_ENDIAN)
	{
	  xlr_bitpos = rnbitsize - xlr_bitpos - lr_bitsize;
	  xrr_bitpos = rnbitsize - xrr_bitpos - rr_bitsize;
	}

      lr_mask = const_binop (LSHIFT_EXPR, convert (rntype, lr_mask),
			     size_int (xlr_bitpos), 0);
      rr_mask = const_binop (LSHIFT_EXPR, convert (rntype, rr_mask),
			     size_int (xrr_bitpos), 0);

      /* Make a mask that corresponds to both fields being compared.
	 Do this for both items being compared.  If the operands are the
	 same size and the bits being compared are in the same position
	 then we can do this by masking both and comparing the masked
	 results.  */
      ll_mask = const_binop (BIT_IOR_EXPR, ll_mask, rl_mask, 0);
      lr_mask = const_binop (BIT_IOR_EXPR, lr_mask, rr_mask, 0);
      if (lnbitsize == rnbitsize && xll_bitpos == xlr_bitpos)
	{
	  lhs = make_bit_field_ref (ll_inner, lntype, lnbitsize, lnbitpos,
				    ll_unsignedp || rl_unsignedp);
	  if (! all_ones_mask_p (ll_mask, lnbitsize))
	    lhs = build (BIT_AND_EXPR, lntype, lhs, ll_mask);

	  rhs = make_bit_field_ref (lr_inner, rntype, rnbitsize, rnbitpos,
				    lr_unsignedp || rr_unsignedp);
	  if (! all_ones_mask_p (lr_mask, rnbitsize))
	    rhs = build (BIT_AND_EXPR, rntype, rhs, lr_mask);

	  return build (wanted_code, truth_type, lhs, rhs);
	}

      /* There is still another way we can do something:  If both pairs of
	 fields being compared are adjacent, we may be able to make a wider
	 field containing them both.

	 Note that we still must mask the lhs/rhs expressions.  Furthermore,
	 the mask must be shifted to account for the shift done by
	 make_bit_field_ref.  */
      if ((ll_bitsize + ll_bitpos == rl_bitpos
	   && lr_bitsize + lr_bitpos == rr_bitpos)
	  || (ll_bitpos == rl_bitpos + rl_bitsize
	      && lr_bitpos == rr_bitpos + rr_bitsize))
	{
	  tree type;

	  lhs = make_bit_field_ref (ll_inner, lntype, ll_bitsize + rl_bitsize,
				    MIN (ll_bitpos, rl_bitpos), ll_unsignedp);
	  rhs = make_bit_field_ref (lr_inner, rntype, lr_bitsize + rr_bitsize,
				    MIN (lr_bitpos, rr_bitpos), lr_unsignedp);

	  ll_mask = const_binop (RSHIFT_EXPR, ll_mask,
				 size_int (MIN (xll_bitpos, xrl_bitpos)), 0);
	  lr_mask = const_binop (RSHIFT_EXPR, lr_mask,
				 size_int (MIN (xlr_bitpos, xrr_bitpos)), 0);

	  /* Convert to the smaller type before masking out unwanted bits.  */
	  type = lntype;
	  if (lntype != rntype)
	    {
	      if (lnbitsize > rnbitsize)
		{
		  lhs = convert (rntype, lhs);
		  ll_mask = convert (rntype, ll_mask);
		  type = rntype;
		}
	      else if (lnbitsize < rnbitsize)
		{
		  rhs = convert (lntype, rhs);
		  lr_mask = convert (lntype, lr_mask);
		  type = lntype;
		}
	    }

	  if (! all_ones_mask_p (ll_mask, ll_bitsize + rl_bitsize))
	    lhs = build (BIT_AND_EXPR, type, lhs, ll_mask);

	  if (! all_ones_mask_p (lr_mask, lr_bitsize + rr_bitsize))
	    rhs = build (BIT_AND_EXPR, type, rhs, lr_mask);

	  return build (wanted_code, truth_type, lhs, rhs);
	}

      return 0;
    }

  /* Handle the case of comparisons with constants.  If there is something in
     common between the masks, those bits of the constants must be the same.
     If not, the condition is always false.  Test for this to avoid generating
     incorrect code below.  */
  result = const_binop (BIT_AND_EXPR, ll_mask, rl_mask, 0);
  if (! integer_zerop (result)
      && simple_cst_equal (const_binop (BIT_AND_EXPR, result, l_const, 0),
			   const_binop (BIT_AND_EXPR, result, r_const, 0)) != 1)
    {
      if (wanted_code == NE_EXPR)
	{
	  warning ("`or' of unmatched not-equal tests is always 1");
	  return convert (truth_type, integer_one_node);
	}
      else
	{
	  warning ("`and' of mutually exclusive equal-tests is always 0");
	  return convert (truth_type, integer_zero_node);
	}
    }

  /* Construct the expression we will return.  First get the component
     reference we will make.  Unless the mask is all ones the width of
     that field, perform the mask operation.  Then compare with the
     merged constant.  */
  result = make_bit_field_ref (ll_inner, lntype, lnbitsize, lnbitpos,
			       ll_unsignedp || rl_unsignedp);

  ll_mask = const_binop (BIT_IOR_EXPR, ll_mask, rl_mask, 0);
  if (! all_ones_mask_p (ll_mask, lnbitsize))
    result = build (BIT_AND_EXPR, lntype, result, ll_mask);

  return build (wanted_code, truth_type, result,
		const_binop (BIT_IOR_EXPR, l_const, r_const, 0));
}
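
/* Editor's note: an illustrative sketch (not part of the original file)
   of the transformation above at the source level, assuming the two
   char fields pack into one two-byte word with no padding.  */
#if 0
#include <assert.h>
#include <string.h>

struct pair { unsigned char a, b; };

static void
fold_truthop_example (void)
{
  struct pair p = { 2, 4 };
  struct pair want = { 2, 4 };
  unsigned short word, merged;

  memcpy (&word, &p, sizeof word);
  memcpy (&merged, &want, sizeof merged);

  /* p.a == 2 && p.b == 4 merges into one comparison of the containing
     word; no mask is needed here because the fields fill it.  */
  assert ((p.a == 2 && p.b == 4) == (word == merged));
}
#endif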
/* If T contains a COMPOUND_EXPR which was inserted merely to evaluate
   S, a SAVE_EXPR, return the expression actually being evaluated.   Note
   that we may sometimes modify the tree.  */

static tree
strip_compound_expr (t, s)
     tree t;
     tree s;
{
  enum tree_code code = TREE_CODE (t);

  /* See if this is the COMPOUND_EXPR we want to eliminate.  */
  if (code == COMPOUND_EXPR && TREE_CODE (TREE_OPERAND (t, 0)) == CONVERT_EXPR
      && TREE_OPERAND (TREE_OPERAND (t, 0), 0) == s)
    return TREE_OPERAND (t, 1);

  /* See if this is a COND_EXPR or a simple arithmetic operator.  We
     don't bother handling any other types.  */
  else if (code == COND_EXPR)
    {
      TREE_OPERAND (t, 0) = strip_compound_expr (TREE_OPERAND (t, 0), s);
      TREE_OPERAND (t, 1) = strip_compound_expr (TREE_OPERAND (t, 1), s);
      TREE_OPERAND (t, 2) = strip_compound_expr (TREE_OPERAND (t, 2), s);
    }
  else if (TREE_CODE_CLASS (code) == '1')
    TREE_OPERAND (t, 0) = strip_compound_expr (TREE_OPERAND (t, 0), s);
  else if (TREE_CODE_CLASS (code) == '<'
	   || TREE_CODE_CLASS (code) == '2')
    {
      TREE_OPERAND (t, 0) = strip_compound_expr (TREE_OPERAND (t, 0), s);
      TREE_OPERAND (t, 1) = strip_compound_expr (TREE_OPERAND (t, 1), s);
    }

  return t;
}
/* Return a node which has the indicated constant VALUE (either 0 or
   1), and is of the indicated TYPE.  */

static tree
constant_boolean_node (value, type)
     int value;
     tree type;
{
  if (type == integer_type_node)
    return value ? integer_one_node : integer_zero_node;
  else if (TREE_CODE (type) == BOOLEAN_TYPE)
    return truthvalue_conversion (value ? integer_one_node :
				  integer_zero_node);
  else
    {
      tree t = build_int_2 (value, 0);

      TREE_TYPE (t) = type;
      return t;
    }
}
/* Utility function for the following routine, to see how complex a nesting of
   COND_EXPRs can be.  EXPR is the expression and LIMIT is a count beyond which
   we don't care (to avoid spending too much time on complex expressions.).  */

static int
count_cond (expr, lim)
     tree expr;
     int lim;
{
  int true, false;

  if (TREE_CODE (expr) != COND_EXPR)
    return 0;
  else if (lim <= 0)
    return 0;

  true = count_cond (TREE_OPERAND (expr, 1), lim - 1);
  false = count_cond (TREE_OPERAND (expr, 2), lim - 1 - true);
  return MIN (lim, 1 + true + false);
}
4126 /* Perform constant folding and related simplification of EXPR.
4127 The related simplifications include x*1 => x, x*0 => 0, etc.,
4128 and application of the associative law.
4129 NOP_EXPR conversions may be removed freely (as long as we
4130 are careful not to change the C type of the overall expression)
4131 We cannot simplify through a CONVERT_EXPR, FIX_EXPR or FLOAT_EXPR,
4132 but we can constant-fold them if they have constant operands. */
4138 register tree t
= expr
;
4139 tree t1
= NULL_TREE
;
4141 tree type
= TREE_TYPE (expr
);
4142 register tree arg0
= NULL_TREE
, arg1
= NULL_TREE
;
4143 register enum tree_code code
= TREE_CODE (t
);
4147 /* WINS will be nonzero when the switch is done
4148 if all operands are constant. */
4152 /* Don't try to process an RTL_EXPR since its operands aren't trees.
4153 Likewise for a SAVE_EXPR that's already been evaluated. */
4154 if (code
== RTL_EXPR
|| (code
== SAVE_EXPR
&& SAVE_EXPR_RTL (t
)) != 0)
4157 /* Return right away if already constant. */
4158 if (TREE_CONSTANT (t
))
4160 if (code
== CONST_DECL
)
4161 return DECL_INITIAL (t
);
4165 #ifdef MAX_INTEGER_COMPUTATION_MODE
4166 check_max_integer_computation_mode (expr
);
4169 kind
= TREE_CODE_CLASS (code
);
4170 if (code
== NOP_EXPR
|| code
== FLOAT_EXPR
|| code
== CONVERT_EXPR
)
4174 /* Special case for conversion ops that can have fixed point args. */
4175 arg0
= TREE_OPERAND (t
, 0);
4177 /* Don't use STRIP_NOPS, because signedness of argument type matters. */
4179 STRIP_TYPE_NOPS (arg0
);
4181 if (arg0
!= 0 && TREE_CODE (arg0
) == COMPLEX_CST
)
4182 subop
= TREE_REALPART (arg0
);
4186 if (subop
!= 0 && TREE_CODE (subop
) != INTEGER_CST
4187 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
4188 && TREE_CODE (subop
) != REAL_CST
4189 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
4191 /* Note that TREE_CONSTANT isn't enough:
4192 static var addresses are constant but we can't
4193 do arithmetic on them. */
4196 else if (kind
== 'e' || kind
== '<'
4197 || kind
== '1' || kind
== '2' || kind
== 'r')
4199 register int len
= tree_code_length
[(int) code
];
4201 for (i
= 0; i
< len
; i
++)
4203 tree op
= TREE_OPERAND (t
, i
);
4207 continue; /* Valid for CALL_EXPR, at least. */
4209 if (kind
== '<' || code
== RSHIFT_EXPR
)
4211 /* Signedness matters here. Perhaps we can refine this
4213 STRIP_TYPE_NOPS (op
);
4217 /* Strip any conversions that don't change the mode. */
4221 if (TREE_CODE (op
) == COMPLEX_CST
)
4222 subop
= TREE_REALPART (op
);
4226 if (TREE_CODE (subop
) != INTEGER_CST
4227 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
4228 && TREE_CODE (subop
) != REAL_CST
4229 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
4231 /* Note that TREE_CONSTANT isn't enough:
4232 static var addresses are constant but we can't
4233 do arithmetic on them. */
4243 /* If this is a commutative operation, and ARG0 is a constant, move it
4244 to ARG1 to reduce the number of tests below. */
4245 if ((code
== PLUS_EXPR
|| code
== MULT_EXPR
|| code
== MIN_EXPR
4246 || code
== MAX_EXPR
|| code
== BIT_IOR_EXPR
|| code
== BIT_XOR_EXPR
4247 || code
== BIT_AND_EXPR
)
4248 && (TREE_CODE (arg0
) == INTEGER_CST
|| TREE_CODE (arg0
) == REAL_CST
))
4250 tem
= arg0
; arg0
= arg1
; arg1
= tem
;
4252 tem
= TREE_OPERAND (t
, 0); TREE_OPERAND (t
, 0) = TREE_OPERAND (t
, 1);
4253 TREE_OPERAND (t
, 1) = tem
;
4256 /* Now WINS is set as described above,
4257 ARG0 is the first operand of EXPR,
4258 and ARG1 is the second operand (if it has more than one operand).
4260 First check for cases where an arithmetic operation is applied to a
4261 compound, conditional, or comparison operation. Push the arithmetic
4262 operation inside the compound or conditional to see if any folding
4263 can then be done. Convert comparison to conditional for this purpose.
4264 The also optimizes non-constant cases that used to be done in
4267 Before we do that, see if this is a BIT_AND_EXPR or a BIT_OR_EXPR,
4268 one of the operands is a comparison and the other is a comparison, a
4269 BIT_AND_EXPR with the constant 1, or a truth value. In that case, the
4270 code below would make the expression more complex. Change it to a
4271 TRUTH_{AND,OR}_EXPR. Likewise, convert a similar NE_EXPR to
4272 TRUTH_XOR_EXPR and an EQ_EXPR to the inversion of a TRUTH_XOR_EXPR. */
4274 if ((code
== BIT_AND_EXPR
|| code
== BIT_IOR_EXPR
4275 || code
== EQ_EXPR
|| code
== NE_EXPR
)
4276 && ((truth_value_p (TREE_CODE (arg0
))
4277 && (truth_value_p (TREE_CODE (arg1
))
4278 || (TREE_CODE (arg1
) == BIT_AND_EXPR
4279 && integer_onep (TREE_OPERAND (arg1
, 1)))))
4280 || (truth_value_p (TREE_CODE (arg1
))
4281 && (truth_value_p (TREE_CODE (arg0
))
4282 || (TREE_CODE (arg0
) == BIT_AND_EXPR
4283 && integer_onep (TREE_OPERAND (arg0
, 1)))))))
4285 t
= fold (build (code
== BIT_AND_EXPR
? TRUTH_AND_EXPR
4286 : code
== BIT_IOR_EXPR
? TRUTH_OR_EXPR
4290 if (code
== EQ_EXPR
)
4291 t
= invert_truthvalue (t
);
4296 if (TREE_CODE_CLASS (code
) == '1')
4298 if (TREE_CODE (arg0
) == COMPOUND_EXPR
)
4299 return build (COMPOUND_EXPR
, type
, TREE_OPERAND (arg0
, 0),
4300 fold (build1 (code
, type
, TREE_OPERAND (arg0
, 1))));
4301 else if (TREE_CODE (arg0
) == COND_EXPR
)
4303 t
= fold (build (COND_EXPR
, type
, TREE_OPERAND (arg0
, 0),
4304 fold (build1 (code
, type
, TREE_OPERAND (arg0
, 1))),
4305 fold (build1 (code
, type
, TREE_OPERAND (arg0
, 2)))));
4307 /* If this was a conversion, and all we did was to move into
4308 inside the COND_EXPR, bring it back out. But leave it if
4309 it is a conversion from integer to integer and the
4310 result precision is no wider than a word since such a
4311 conversion is cheap and may be optimized away by combine,
4312 while it couldn't if it were outside the COND_EXPR. Then return
4313 so we don't get into an infinite recursion loop taking the
4314 conversion out and then back in. */
4316 if ((code
== NOP_EXPR
|| code
== CONVERT_EXPR
4317 || code
== NON_LVALUE_EXPR
)
4318 && TREE_CODE (t
) == COND_EXPR
4319 && TREE_CODE (TREE_OPERAND (t
, 1)) == code
4320 && TREE_CODE (TREE_OPERAND (t
, 2)) == code
4321 && (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t
, 1), 0))
4322 == TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t
, 2), 0)))
4323 && ! (INTEGRAL_TYPE_P (TREE_TYPE (t
))
4324 && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t
, 1), 0)))
4325 && TYPE_PRECISION (TREE_TYPE (t
)) <= BITS_PER_WORD
))
4326 t
= build1 (code
, type
,
4328 TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t
, 1), 0)),
4329 TREE_OPERAND (t
, 0),
4330 TREE_OPERAND (TREE_OPERAND (t
, 1), 0),
4331 TREE_OPERAND (TREE_OPERAND (t
, 2), 0)));
4334 else if (TREE_CODE_CLASS (TREE_CODE (arg0
)) == '<')
4335 return fold (build (COND_EXPR
, type
, arg0
,
4336 fold (build1 (code
, type
, integer_one_node
)),
4337 fold (build1 (code
, type
, integer_zero_node
))));
4339 else if (TREE_CODE_CLASS (code
) == '2'
4340 || TREE_CODE_CLASS (code
) == '<')
4342 if (TREE_CODE (arg1
) == COMPOUND_EXPR
)
4343 return build (COMPOUND_EXPR
, type
, TREE_OPERAND (arg1
, 0),
4344 fold (build (code
, type
,
4345 arg0
, TREE_OPERAND (arg1
, 1))));
4346 else if ((TREE_CODE (arg1
) == COND_EXPR
4347 || (TREE_CODE_CLASS (TREE_CODE (arg1
)) == '<'
4348 && TREE_CODE_CLASS (code
) != '<'))
4349 && (TREE_CODE (arg0
) != COND_EXPR
4350 || count_cond (arg0
, 25) + count_cond (arg1
, 25) <= 25)
4351 && (! TREE_SIDE_EFFECTS (arg0
)
4352 || (current_function_decl
!= 0
4353 && ! contains_placeholder_p (arg0
))))
4355 tree test
, true_value
, false_value
;
4356 tree lhs
= 0, rhs
= 0;
4358 if (TREE_CODE (arg1
) == COND_EXPR
)
4360 test
= TREE_OPERAND (arg1
, 0);
4361 true_value
= TREE_OPERAND (arg1
, 1);
4362 false_value
= TREE_OPERAND (arg1
, 2);
4366 tree testtype
= TREE_TYPE (arg1
);
4368 true_value
= convert (testtype
, integer_one_node
);
4369 false_value
= convert (testtype
, integer_zero_node
);
4372 /* If ARG0 is complex we want to make sure we only evaluate
4373 it once. Though this is only required if it is volatile, it
4374 might be more efficient even if it is not. However, if we
4375 succeed in folding one part to a constant, we do not need
4376 to make this SAVE_EXPR. Since we do this optimization
4377 primarily to see if we do end up with constant and this
4378 SAVE_EXPR interferes with later optimizations, suppressing
4379 it when we can is important.
4381 If we are not in a function, we can't make a SAVE_EXPR, so don't
4382 try to do so. Don't try to see if the result is a constant
4383 if an arm is a COND_EXPR since we get exponential behavior
4386 if (TREE_CODE (arg0
) != SAVE_EXPR
&& ! TREE_CONSTANT (arg0
)
4387 && current_function_decl
!= 0
4388 && ((TREE_CODE (arg0
) != VAR_DECL
4389 && TREE_CODE (arg0
) != PARM_DECL
)
4390 || TREE_SIDE_EFFECTS (arg0
)))
4392 if (TREE_CODE (true_value
) != COND_EXPR
)
4393 lhs
= fold (build (code
, type
, arg0
, true_value
));
4395 if (TREE_CODE (false_value
) != COND_EXPR
)
4396 rhs
= fold (build (code
, type
, arg0
, false_value
));
4398 if ((lhs
== 0 || ! TREE_CONSTANT (lhs
))
4399 && (rhs
== 0 || !TREE_CONSTANT (rhs
)))
4400 arg0
= save_expr (arg0
), lhs
= rhs
= 0;
4404 lhs
= fold (build (code
, type
, arg0
, true_value
));
4406 rhs
= fold (build (code
, type
, arg0
, false_value
));
4408 test
= fold (build (COND_EXPR
, type
, test
, lhs
, rhs
));
4410 if (TREE_CODE (arg0
) == SAVE_EXPR
)
4411 return build (COMPOUND_EXPR
, type
,
4412 convert (void_type_node
, arg0
),
4413 strip_compound_expr (test
, arg0
));
4415 return convert (type
, test
);
4418 else if (TREE_CODE (arg0
) == COMPOUND_EXPR
)
4419 return build (COMPOUND_EXPR
, type
, TREE_OPERAND (arg0
, 0),
4420 fold (build (code
, type
, TREE_OPERAND (arg0
, 1), arg1
)));
4421 else if ((TREE_CODE (arg0
) == COND_EXPR
4422 || (TREE_CODE_CLASS (TREE_CODE (arg0
)) == '<'
4423 && TREE_CODE_CLASS (code
) != '<'))
4424 && (TREE_CODE (arg1
) != COND_EXPR
4425 || count_cond (arg0
, 25) + count_cond (arg1
, 25) <= 25)
4426 && (! TREE_SIDE_EFFECTS (arg1
)
4427 || (current_function_decl
!= 0
4428 && ! contains_placeholder_p (arg1
))))
4430 tree test
, true_value
, false_value
;
4431 tree lhs
= 0, rhs
= 0;
4433 if (TREE_CODE (arg0
) == COND_EXPR
)
4435 test
= TREE_OPERAND (arg0
, 0);
4436 true_value
= TREE_OPERAND (arg0
, 1);
4437 false_value
= TREE_OPERAND (arg0
, 2);
4441 tree testtype
= TREE_TYPE (arg0
);
4443 true_value
= convert (testtype
, integer_one_node
);
4444 false_value
= convert (testtype
, integer_zero_node
);
4447 if (TREE_CODE (arg1
) != SAVE_EXPR
&& ! TREE_CONSTANT (arg0
)
4448 && current_function_decl
!= 0
4449 && ((TREE_CODE (arg1
) != VAR_DECL
4450 && TREE_CODE (arg1
) != PARM_DECL
)
4451 || TREE_SIDE_EFFECTS (arg1
)))
4453 if (TREE_CODE (true_value
) != COND_EXPR
)
4454 lhs
= fold (build (code
, type
, true_value
, arg1
));
4456 if (TREE_CODE (false_value
) != COND_EXPR
)
4457 rhs
= fold (build (code
, type
, false_value
, arg1
));
4459 if ((lhs
== 0 || ! TREE_CONSTANT (lhs
))
4460 && (rhs
== 0 || !TREE_CONSTANT (rhs
)))
4461 arg1
= save_expr (arg1
), lhs
= rhs
= 0;
4465 lhs
= fold (build (code
, type
, true_value
, arg1
));
4468 rhs
= fold (build (code
, type
, false_value
, arg1
));
4470 test
= fold (build (COND_EXPR
, type
, test
, lhs
, rhs
));
4471 if (TREE_CODE (arg1
) == SAVE_EXPR
)
4472 return build (COMPOUND_EXPR
, type
,
4473 convert (void_type_node
, arg1
),
4474 strip_compound_expr (test
, arg1
));
4476 return convert (type
, test
);
4479 else if (TREE_CODE_CLASS (code
) == '<'
4480 && TREE_CODE (arg0
) == COMPOUND_EXPR
)
4481 return build (COMPOUND_EXPR
, type
, TREE_OPERAND (arg0
, 0),
4482 fold (build (code
, type
, TREE_OPERAND (arg0
, 1), arg1
)));
4483 else if (TREE_CODE_CLASS (code
) == '<'
4484 && TREE_CODE (arg1
) == COMPOUND_EXPR
)
4485 return build (COMPOUND_EXPR
, type
, TREE_OPERAND (arg1
, 0),
4486 fold (build (code
, type
, arg0
, TREE_OPERAND (arg1
, 1))));
4498 return fold (DECL_INITIAL (t
));
4503 case FIX_TRUNC_EXPR
:
4504 /* Other kinds of FIX are not handled properly by fold_convert. */
4506 if (TREE_TYPE (TREE_OPERAND (t
, 0)) == TREE_TYPE (t
))
4507 return TREE_OPERAND (t
, 0);
4509 /* Handle cases of two conversions in a row. */
4510 if (TREE_CODE (TREE_OPERAND (t
, 0)) == NOP_EXPR
4511 || TREE_CODE (TREE_OPERAND (t
, 0)) == CONVERT_EXPR
)
4513 tree inside_type
= TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t
, 0), 0));
4514 tree inter_type
= TREE_TYPE (TREE_OPERAND (t
, 0));
4515 tree final_type
= TREE_TYPE (t
);
4516 int inside_int
= INTEGRAL_TYPE_P (inside_type
);
4517 int inside_ptr
= POINTER_TYPE_P (inside_type
);
4518 int inside_float
= FLOAT_TYPE_P (inside_type
);
4519 int inside_prec
= TYPE_PRECISION (inside_type
);
4520 int inside_unsignedp
= TREE_UNSIGNED (inside_type
);
4521 int inter_int
= INTEGRAL_TYPE_P (inter_type
);
4522 int inter_ptr
= POINTER_TYPE_P (inter_type
);
4523 int inter_float
= FLOAT_TYPE_P (inter_type
);
4524 int inter_prec
= TYPE_PRECISION (inter_type
);
4525 int inter_unsignedp
= TREE_UNSIGNED (inter_type
);
4526 int final_int
= INTEGRAL_TYPE_P (final_type
);
4527 int final_ptr
= POINTER_TYPE_P (final_type
);
4528 int final_float
= FLOAT_TYPE_P (final_type
);
4529 int final_prec
= TYPE_PRECISION (final_type
);
4530 int final_unsignedp
= TREE_UNSIGNED (final_type
);
4532 /* In addition to the cases of two conversions in a row
4533 handled below, if we are converting something to its own
4534 type via an object of identical or wider precision, neither
4535 conversion is needed. */
4536 if (inside_type
== final_type
4537 && ((inter_int
&& final_int
) || (inter_float
&& final_float
))
4538 && inter_prec
>= final_prec
)
4539 return TREE_OPERAND (TREE_OPERAND (t
, 0), 0);
4541 /* Likewise, if the intermediate and final types are either both
4542 float or both integer, we don't need the middle conversion if
4543 it is wider than the final type and doesn't change the signedness
4544 (for integers). Avoid this if the final type is a pointer
4545 since then we sometimes need the inner conversion. Likewise if
4546 the outer has a precision not equal to the size of its mode. */
4547 if ((((inter_int
|| inter_ptr
) && (inside_int
|| inside_ptr
))
4548 || (inter_float
&& inside_float
))
4549 && inter_prec
>= inside_prec
4550 && (inter_float
|| inter_unsignedp
== inside_unsignedp
)
4551 && ! (final_prec
!= GET_MODE_BITSIZE (TYPE_MODE (final_type
))
4552 && TYPE_MODE (final_type
) == TYPE_MODE (inter_type
))
4554 return convert (final_type
, TREE_OPERAND (TREE_OPERAND (t
, 0), 0));
4556 /* If we have a sign-extension of a zero-extended value, we can
4557 replace that by a single zero-extension. */
4558 if (inside_int
&& inter_int
&& final_int
4559 && inside_prec
< inter_prec
&& inter_prec
< final_prec
4560 && inside_unsignedp
&& !inter_unsignedp
)
4561 return convert (final_type
, TREE_OPERAND (TREE_OPERAND (t
, 0), 0));
4563 /* Two conversions in a row are not needed unless:
4564 - some conversion is floating-point (overstrict for now), or
4565 - the intermediate type is narrower than both initial and
4567 - the intermediate type and innermost type differ in signedness,
4568 and the outermost type is wider than the intermediate, or
4569 - the initial type is a pointer type and the precisions of the
4570 intermediate and final types differ, or
4571 - the final type is a pointer type and the precisions of the
4572 initial and intermediate types differ. */
4573 if (! inside_float
&& ! inter_float
&& ! final_float
4574 && (inter_prec
> inside_prec
|| inter_prec
> final_prec
)
4575 && ! (inside_int
&& inter_int
4576 && inter_unsignedp
!= inside_unsignedp
4577 && inter_prec
< final_prec
)
4578 && ((inter_unsignedp
&& inter_prec
> inside_prec
)
4579 == (final_unsignedp
&& final_prec
> inter_prec
))
4580 && ! (inside_ptr
&& inter_prec
!= final_prec
)
4581 && ! (final_ptr
&& inside_prec
!= inter_prec
)
4582 && ! (final_prec
!= GET_MODE_BITSIZE (TYPE_MODE (final_type
))
4583 && TYPE_MODE (final_type
) == TYPE_MODE (inter_type
))
4585 return convert (final_type
, TREE_OPERAND (TREE_OPERAND (t
, 0), 0));
4588 if (TREE_CODE (TREE_OPERAND (t
, 0)) == MODIFY_EXPR
4589 && TREE_CONSTANT (TREE_OPERAND (TREE_OPERAND (t
, 0), 1))
4590 /* Detect assigning a bitfield. */
4591 && !(TREE_CODE (TREE_OPERAND (TREE_OPERAND (t
, 0), 0)) == COMPONENT_REF
4592 && DECL_BIT_FIELD (TREE_OPERAND (TREE_OPERAND (TREE_OPERAND (t
, 0), 0), 1))))
4594 /* Don't leave an assignment inside a conversion
4595 unless assigning a bitfield. */
4596 tree prev
= TREE_OPERAND (t
, 0);
4597 TREE_OPERAND (t
, 0) = TREE_OPERAND (prev
, 1);
4598 /* First do the assignment, then return converted constant. */
4599 t
= build (COMPOUND_EXPR
, TREE_TYPE (t
), prev
, fold (t
));
4605 TREE_CONSTANT (t
) = TREE_CONSTANT (arg0
);
4608 return fold_convert (t
, arg0
);
4610 #if 0 /* This loses on &"foo"[0]. */
4615 /* Fold an expression like: "foo"[2] */
4616 if (TREE_CODE (arg0
) == STRING_CST
4617 && TREE_CODE (arg1
) == INTEGER_CST
4618 && !TREE_INT_CST_HIGH (arg1
)
4619 && (i
= TREE_INT_CST_LOW (arg1
)) < TREE_STRING_LENGTH (arg0
))
4621 t
= build_int_2 (TREE_STRING_POINTER (arg0
)[i
], 0);
4622 TREE_TYPE (t
) = TREE_TYPE (TREE_TYPE (arg0
));
4623 force_fit_type (t
, 0);
4630 if (TREE_CODE (arg0
) == CONSTRUCTOR
)
4632 tree m
= purpose_member (arg1
, CONSTRUCTOR_ELTS (arg0
));
4639 TREE_CONSTANT (t
) = wins
;
4645 if (TREE_CODE (arg0
) == INTEGER_CST
)
4647 HOST_WIDE_INT low
, high
;
4648 int overflow
= neg_double (TREE_INT_CST_LOW (arg0
),
4649 TREE_INT_CST_HIGH (arg0
),
4651 t
= build_int_2 (low
, high
);
4652 TREE_TYPE (t
) = type
;
4654 = (TREE_OVERFLOW (arg0
)
4655 | force_fit_type (t
, overflow
&& !TREE_UNSIGNED (type
)));
4656 TREE_CONSTANT_OVERFLOW (t
)
4657 = TREE_OVERFLOW (t
) | TREE_CONSTANT_OVERFLOW (arg0
);
4659 else if (TREE_CODE (arg0
) == REAL_CST
)
4660 t
= build_real (type
, REAL_VALUE_NEGATE (TREE_REAL_CST (arg0
)));
4662 else if (TREE_CODE (arg0
) == NEGATE_EXPR
)
4663 return TREE_OPERAND (arg0
, 0);
4665 /* Convert - (a - b) to (b - a) for non-floating-point. */
4666 else if (TREE_CODE (arg0
) == MINUS_EXPR
&& ! FLOAT_TYPE_P (type
))
4667 return build (MINUS_EXPR
, type
, TREE_OPERAND (arg0
, 1),
4668 TREE_OPERAND (arg0
, 0));
4675 if (TREE_CODE (arg0
) == INTEGER_CST
)
4677 if (! TREE_UNSIGNED (type
)
4678 && TREE_INT_CST_HIGH (arg0
) < 0)
4680 HOST_WIDE_INT low
, high
;
4681 int overflow
= neg_double (TREE_INT_CST_LOW (arg0
),
4682 TREE_INT_CST_HIGH (arg0
),
4684 t
= build_int_2 (low
, high
);
4685 TREE_TYPE (t
) = type
;
4687 = (TREE_OVERFLOW (arg0
)
4688 | force_fit_type (t
, overflow
));
4689 TREE_CONSTANT_OVERFLOW (t
)
4690 = TREE_OVERFLOW (t
) | TREE_CONSTANT_OVERFLOW (arg0
);
4693 else if (TREE_CODE (arg0
) == REAL_CST
)
4695 if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (arg0
)))
4696 t
= build_real (type
,
4697 REAL_VALUE_NEGATE (TREE_REAL_CST (arg0
)));
4700 else if (TREE_CODE (arg0
) == ABS_EXPR
|| TREE_CODE (arg0
) == NEGATE_EXPR
)
4701 return build1 (ABS_EXPR
, type
, TREE_OPERAND (arg0
, 0));
4705 if (TREE_CODE (TREE_TYPE (arg0
)) != COMPLEX_TYPE
)
4707 else if (TREE_CODE (arg0
) == COMPLEX_EXPR
)
4708 return build (COMPLEX_EXPR
, TREE_TYPE (arg0
),
4709 TREE_OPERAND (arg0
, 0),
4710 fold (build1 (NEGATE_EXPR
,
4711 TREE_TYPE (TREE_TYPE (arg0
)),
4712 TREE_OPERAND (arg0
, 1))));
4713 else if (TREE_CODE (arg0
) == COMPLEX_CST
)
4714 return build_complex (type
, TREE_OPERAND (arg0
, 0),
4715 fold (build1 (NEGATE_EXPR
,
4716 TREE_TYPE (TREE_TYPE (arg0
)),
4717 TREE_OPERAND (arg0
, 1))));
4718 else if (TREE_CODE (arg0
) == PLUS_EXPR
|| TREE_CODE (arg0
) == MINUS_EXPR
)
4719 return fold (build (TREE_CODE (arg0
), type
,
4720 fold (build1 (CONJ_EXPR
, type
,
4721 TREE_OPERAND (arg0
, 0))),
4722 fold (build1 (CONJ_EXPR
,
4723 type
, TREE_OPERAND (arg0
, 1)))));
4724 else if (TREE_CODE (arg0
) == CONJ_EXPR
)
4725 return TREE_OPERAND (arg0
, 0);
4731 t
= build_int_2 (~ TREE_INT_CST_LOW (arg0
),
4732 ~ TREE_INT_CST_HIGH (arg0
));
4733 TREE_TYPE (t
) = type
;
4734 force_fit_type (t
, 0);
4735 TREE_OVERFLOW (t
) = TREE_OVERFLOW (arg0
);
4736 TREE_CONSTANT_OVERFLOW (t
) = TREE_CONSTANT_OVERFLOW (arg0
);
4738 else if (TREE_CODE (arg0
) == BIT_NOT_EXPR
)
4739 return TREE_OPERAND (arg0
, 0);
4743 /* A + (-B) -> A - B */
4744 if (TREE_CODE (arg1
) == NEGATE_EXPR
)
4745 return fold (build (MINUS_EXPR
, type
, arg0
, TREE_OPERAND (arg1
, 0)));
4746 else if (! FLOAT_TYPE_P (type
))
4748 if (integer_zerop (arg1
))
4749 return non_lvalue (convert (type
, arg0
));
4751 /* If we are adding two BIT_AND_EXPR's, both of which are and'ing
4752 with a constant, and the two constants have no bits in common,
4753 we should treat this as a BIT_IOR_EXPR since this may produce more
4755 if (TREE_CODE (arg0
) == BIT_AND_EXPR
4756 && TREE_CODE (arg1
) == BIT_AND_EXPR
4757 && TREE_CODE (TREE_OPERAND (arg0
, 1)) == INTEGER_CST
4758 && TREE_CODE (TREE_OPERAND (arg1
, 1)) == INTEGER_CST
4759 && integer_zerop (const_binop (BIT_AND_EXPR
,
4760 TREE_OPERAND (arg0
, 1),
4761 TREE_OPERAND (arg1
, 1), 0)))
4763 code
= BIT_IOR_EXPR
;
4767 if (TREE_CODE (arg0
) == MULT_EXPR
&& TREE_CODE (arg1
) == MULT_EXPR
)
4769 tree arg00
, arg01
, arg10
, arg11
;
4770 tree alt0
, alt1
, same
;
4772 /* (A * C) + (B * C) -> (A+B) * C.
4773 We are most concerned about the case where C is a constant,
4774 but other combinations show up during loop reduction. Since
4775 it is not difficult, try all four possibilities. */
4777 arg00
= TREE_OPERAND (arg0
, 0);
4778 arg01
= TREE_OPERAND (arg0
, 1);
4779 arg10
= TREE_OPERAND (arg1
, 0);
4780 arg11
= TREE_OPERAND (arg1
, 1);
4783 if (operand_equal_p (arg01
, arg11
, 0))
4784 same
= arg01
, alt0
= arg00
, alt1
= arg10
;
4785 else if (operand_equal_p (arg00
, arg10
, 0))
4786 same
= arg00
, alt0
= arg01
, alt1
= arg11
;
4787 else if (operand_equal_p (arg00
, arg11
, 0))
4788 same
= arg00
, alt0
= arg01
, alt1
= arg10
;
4789 else if (operand_equal_p (arg01
, arg10
, 0))
4790 same
= arg01
, alt0
= arg00
, alt1
= arg11
;
4793 return fold (build (MULT_EXPR
, type
,
4794 fold (build (PLUS_EXPR
, type
, alt0
, alt1
)),
4798 /* In IEEE floating point, x+0 may not equal x. */
4799 else if ((TARGET_FLOAT_FORMAT
!= IEEE_FLOAT_FORMAT
4801 && real_zerop (arg1
))
4802 return non_lvalue (convert (type
, arg0
));
4804 /* In most languages, can't associate operations on floats
4805 through parentheses. Rather than remember where the parentheses
4806 were, we don't associate floats at all. It shouldn't matter much.
4807 However, associating multiplications is only very slightly
4808 inaccurate, so do that if -ffast-math is specified. */
4809 if (FLOAT_TYPE_P (type
)
4810 && ! (flag_fast_math
&& code
== MULT_EXPR
))
4813 /* The varsign == -1 cases happen only for addition and subtraction.
4814 It says that the arg that was split was really CON minus VAR.
4815 The rest of the code applies to all associative operations. */
4821 if (split_tree (arg0
, code
, &var
, &con
, &varsign
))
4825 /* EXPR is (CON-VAR) +- ARG1. */
4826 /* If it is + and VAR==ARG1, return just CONST. */
4827 if (code
== PLUS_EXPR
&& operand_equal_p (var
, arg1
, 0))
4828 return convert (TREE_TYPE (t
), con
);
4830 /* If ARG0 is a constant, don't change things around;
4831 instead keep all the constant computations together. */
4833 if (TREE_CONSTANT (arg0
))
4836 /* Otherwise return (CON +- ARG1) - VAR. */
4837 t
= build (MINUS_EXPR
, type
,
4838 fold (build (code
, type
, con
, arg1
)), var
);
4842 /* EXPR is (VAR+CON) +- ARG1. */
4843 /* If it is - and VAR==ARG1, return just CONST. */
4844 if (code
== MINUS_EXPR
&& operand_equal_p (var
, arg1
, 0))
4845 return convert (TREE_TYPE (t
), con
);
4847 /* If ARG0 is a constant, don't change things around;
4848 instead keep all the constant computations together. */
4850 if (TREE_CONSTANT (arg0
))
4853 /* Otherwise return VAR +- (ARG1 +- CON). */
4854 tem
= fold (build (code
, type
, arg1
, con
));
4855 t
= build (code
, type
, var
, tem
);
4857 if (integer_zerop (tem
)
4858 && (code
== PLUS_EXPR
|| code
== MINUS_EXPR
))
4859 return convert (type
, var
);
4860 /* If we have x +/- (c - d) [c an explicit integer]
4861 change it to x -/+ (d - c) since if d is relocatable
4862 then the latter can be a single immediate insn
4863 and the former cannot. */
4864 if (TREE_CODE (tem
) == MINUS_EXPR
4865 && TREE_CODE (TREE_OPERAND (tem
, 0)) == INTEGER_CST
)
4867 tree tem1
= TREE_OPERAND (tem
, 1);
4868 TREE_OPERAND (tem
, 1) = TREE_OPERAND (tem
, 0);
4869 TREE_OPERAND (tem
, 0) = tem1
;
4871 (code
== PLUS_EXPR
? MINUS_EXPR
: PLUS_EXPR
));
4877 if (split_tree (arg1
, code
, &var
, &con
, &varsign
))
4879 if (TREE_CONSTANT (arg1
))
4884 (code
== PLUS_EXPR
? MINUS_EXPR
: PLUS_EXPR
));
4886 /* EXPR is ARG0 +- (CON +- VAR). */
4887 if (TREE_CODE (t
) == MINUS_EXPR
4888 && operand_equal_p (var
, arg0
, 0))
4890 /* If VAR and ARG0 cancel, return just CON or -CON. */
4891 if (code
== PLUS_EXPR
)
4892 return convert (TREE_TYPE (t
), con
);
4893 return fold (build1 (NEGATE_EXPR
, TREE_TYPE (t
),
4894 convert (TREE_TYPE (t
), con
)));
4897 t
= build (TREE_CODE (t
), type
,
4898 fold (build (code
, TREE_TYPE (t
), arg0
, con
)), var
);
4900 if (integer_zerop (TREE_OPERAND (t
, 0))
4901 && TREE_CODE (t
) == PLUS_EXPR
)
4902 return convert (TREE_TYPE (t
), var
);
4907 #if defined (REAL_IS_NOT_DOUBLE) && ! defined (REAL_ARITHMETIC)
4908 if (TREE_CODE (arg1
) == REAL_CST
)
4910 #endif /* REAL_IS_NOT_DOUBLE, and no REAL_ARITHMETIC */
4912 t1
= const_binop (code
, arg0
, arg1
, 0);
4913 if (t1
!= NULL_TREE
)
4915 /* The return value should always have
4916 the same type as the original expression. */
4917 if (TREE_TYPE (t1
) != TREE_TYPE (t
))
4918 t1
= convert (TREE_TYPE (t
), t1
);
4925 if (! FLOAT_TYPE_P (type
))
4927 if (! wins
&& integer_zerop (arg0
))
4928 return build1 (NEGATE_EXPR
, type
, arg1
);
4929 if (integer_zerop (arg1
))
4930 return non_lvalue (convert (type
, arg0
));
4932 /* (A * C) - (B * C) -> (A-B) * C. Since we are most concerned
4933 about the case where C is a constant, just try one of the
4934 four possibilities. */
4936 if (TREE_CODE (arg0
) == MULT_EXPR
&& TREE_CODE (arg1
) == MULT_EXPR
4937 && operand_equal_p (TREE_OPERAND (arg0
, 1),
4938 TREE_OPERAND (arg1
, 1), 0))
4939 return fold (build (MULT_EXPR
, type
,
4940 fold (build (MINUS_EXPR
, type
,
4941 TREE_OPERAND (arg0
, 0),
4942 TREE_OPERAND (arg1
, 0))),
4943 TREE_OPERAND (arg0
, 1)));
4945 /* Convert A - (-B) to A + B. */
4946 else if (TREE_CODE (arg1
) == NEGATE_EXPR
)
4947 return fold (build (PLUS_EXPR
, type
, arg0
, TREE_OPERAND (arg1
, 0)));
4949 else if (TARGET_FLOAT_FORMAT
!= IEEE_FLOAT_FORMAT
4952 /* Except with IEEE floating point, 0-x equals -x. */
4953 if (! wins
&& real_zerop (arg0
))
4954 return build1 (NEGATE_EXPR
, type
, arg1
);
4955 /* Except with IEEE floating point, x-0 equals x. */
4956 if (real_zerop (arg1
))
4957 return non_lvalue (convert (type
, arg0
));
4960 /* Fold &x - &x. This can happen from &x.foo - &x.
4961 This is unsafe for certain floats even in non-IEEE formats.
4962 In IEEE, it is unsafe because it does wrong for NaNs.
4963 Also note that operand_equal_p is always false if an operand
4966 if ((! FLOAT_TYPE_P (type
) || flag_fast_math
)
4967 && operand_equal_p (arg0
, arg1
, 0))
4968 return convert (type
, integer_zero_node
);
4973 if (! FLOAT_TYPE_P (type
))
4975 if (integer_zerop (arg1
))
4976 return omit_one_operand (type
, arg1
, arg0
);
4977 if (integer_onep (arg1
))
4978 return non_lvalue (convert (type
, arg0
));
4980 /* ((A / C) * C) is A if the division is an
4981 EXACT_DIV_EXPR. Since C is normally a constant,
4982 just check for one of the four possibilities. */
4984 if (TREE_CODE (arg0
) == EXACT_DIV_EXPR
4985 && operand_equal_p (TREE_OPERAND (arg0
, 1), arg1
, 0))
4986 return TREE_OPERAND (arg0
, 0);
4988 /* (a * (1 << b)) is (a << b) */
4989 if (TREE_CODE (arg1
) == LSHIFT_EXPR
4990 && integer_onep (TREE_OPERAND (arg1
, 0)))
4991 return fold (build (LSHIFT_EXPR
, type
, arg0
,
4992 TREE_OPERAND (arg1
, 1)));
4993 if (TREE_CODE (arg0
) == LSHIFT_EXPR
4994 && integer_onep (TREE_OPERAND (arg0
, 0)))
4995 return fold (build (LSHIFT_EXPR
, type
, arg1
,
4996 TREE_OPERAND (arg0
, 1)));
5000 /* x*0 is 0, except for IEEE floating point. */
5001 if ((TARGET_FLOAT_FORMAT
!= IEEE_FLOAT_FORMAT
5003 && real_zerop (arg1
))
5004 return omit_one_operand (type
, arg1
, arg0
);
5005 /* In IEEE floating point, x*1 is not equivalent to x for snans.
5006 However, ANSI says we can drop signals,
5007 so we can do this anyway. */
5008 if (real_onep (arg1
))
5009 return non_lvalue (convert (type
, arg0
));
5011 if (! wins
&& real_twop (arg1
) && current_function_decl
!= 0
5012 && ! contains_placeholder_p (arg0
))
5014 tree arg
= save_expr (arg0
);
5015 return build (PLUS_EXPR
, type
, arg
, arg
);
5023 register enum tree_code code0
, code1
;
5025 if (integer_all_onesp (arg1
))
5026 return omit_one_operand (type
, arg1
, arg0
);
5027 if (integer_zerop (arg1
))
5028 return non_lvalue (convert (type
, arg0
));
5029 t1
= distribute_bit_expr (code
, type
, arg0
, arg1
);
5030 if (t1
!= NULL_TREE
)
5033 /* (A << C1) | (A >> C2) if A is unsigned and C1+C2 is the size of A
5034 is a rotate of A by C1 bits. */
5035 /* (A << B) | (A >> (Z - B)) if A is unsigned and Z is the size of A
5036 is a rotate of A by B bits. */
5038 code0
= TREE_CODE (arg0
);
5039 code1
= TREE_CODE (arg1
);
5040 if (((code0
== RSHIFT_EXPR
&& code1
== LSHIFT_EXPR
)
5041 || (code1
== RSHIFT_EXPR
&& code0
== LSHIFT_EXPR
))
5042 && operand_equal_p (TREE_OPERAND (arg0
, 0), TREE_OPERAND (arg1
,0), 0)
5043 && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg0
, 0))))
5045 register tree tree01
, tree11
;
5046 register enum tree_code code01
, code11
;
5048 tree01
= TREE_OPERAND (arg0
, 1);
5049 tree11
= TREE_OPERAND (arg1
, 1);
5050 STRIP_NOPS (tree01
);
5051 STRIP_NOPS (tree11
);
5052 code01
= TREE_CODE (tree01
);
5053 code11
= TREE_CODE (tree11
);
5054 if (code01
== INTEGER_CST
5055 && code11
== INTEGER_CST
5056 && TREE_INT_CST_HIGH (tree01
) == 0
5057 && TREE_INT_CST_HIGH (tree11
) == 0
5058 && ((TREE_INT_CST_LOW (tree01
) + TREE_INT_CST_LOW (tree11
))
5059 == TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0
, 0)))))
5060 return build (LROTATE_EXPR
, type
, TREE_OPERAND (arg0
, 0),
5061 code0
== LSHIFT_EXPR
? tree01
: tree11
);
5062 else if (code11
== MINUS_EXPR
)
5064 tree tree110
, tree111
;
5065 tree110
= TREE_OPERAND (tree11
, 0);
5066 tree111
= TREE_OPERAND (tree11
, 1);
5067 STRIP_NOPS (tree110
);
5068 STRIP_NOPS (tree111
);
5069 if (TREE_CODE (tree110
) == INTEGER_CST
5070 && TREE_INT_CST_HIGH (tree110
) == 0
5071 && (TREE_INT_CST_LOW (tree110
)
5072 == TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0
, 0))))
5073 && operand_equal_p (tree01
, tree111
, 0))
5074 return build ((code0
== LSHIFT_EXPR
5077 type
, TREE_OPERAND (arg0
, 0), tree01
);
5079 else if (code01
== MINUS_EXPR
)
5081 tree tree010
, tree011
;
5082 tree010
= TREE_OPERAND (tree01
, 0);
5083 tree011
= TREE_OPERAND (tree01
, 1);
5084 STRIP_NOPS (tree010
);
5085 STRIP_NOPS (tree011
);
5086 if (TREE_CODE (tree010
) == INTEGER_CST
5087 && TREE_INT_CST_HIGH (tree010
) == 0
5088 && (TREE_INT_CST_LOW (tree010
)
5089 == TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0
, 0))))
5090 && operand_equal_p (tree11
, tree011
, 0))
5091 return build ((code0
!= LSHIFT_EXPR
5094 type
, TREE_OPERAND (arg0
, 0), tree11
);
5102 if (integer_zerop (arg1
))
5103 return non_lvalue (convert (type
, arg0
));
5104 if (integer_all_onesp (arg1
))
5105 return fold (build1 (BIT_NOT_EXPR
, type
, arg0
));
5110 if (integer_all_onesp (arg1
))
5111 return non_lvalue (convert (type
, arg0
));
5112 if (integer_zerop (arg1
))
5113 return omit_one_operand (type
, arg1
, arg0
);
5114 t1
= distribute_bit_expr (code
, type
, arg0
, arg1
);
5115 if (t1
!= NULL_TREE
)
5117 /* Simplify ((int)c & 0x377) into (int)c, if c is unsigned char. */
5118 if (TREE_CODE (arg0
) == INTEGER_CST
&& TREE_CODE (arg1
) == NOP_EXPR
5119 && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg1
, 0))))
5121 int prec
= TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg1
, 0)));
5122 if (prec
< BITS_PER_WORD
&& prec
< HOST_BITS_PER_WIDE_INT
5123 && (~TREE_INT_CST_LOW (arg0
)
5124 & (((HOST_WIDE_INT
) 1 << prec
) - 1)) == 0)
5125 return build1 (NOP_EXPR
, type
, TREE_OPERAND (arg1
, 0));
5127 if (TREE_CODE (arg1
) == INTEGER_CST
&& TREE_CODE (arg0
) == NOP_EXPR
5128 && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg0
, 0))))
5130 int prec
= TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0
, 0)));
5131 if (prec
< BITS_PER_WORD
&& prec
< HOST_BITS_PER_WIDE_INT
5132 && (~TREE_INT_CST_LOW (arg1
)
5133 & (((HOST_WIDE_INT
) 1 << prec
) - 1)) == 0)
5134 return build1 (NOP_EXPR
, type
, TREE_OPERAND (arg0
, 0));
5138 case BIT_ANDTC_EXPR
:
5139 if (integer_all_onesp (arg0
))
5140 return non_lvalue (convert (type
, arg1
));
5141 if (integer_zerop (arg0
))
5142 return omit_one_operand (type
, arg0
, arg1
);
5143 if (TREE_CODE (arg1
) == INTEGER_CST
)
5145 arg1
= fold (build1 (BIT_NOT_EXPR
, type
, arg1
));
5146 code
= BIT_AND_EXPR
;
5152 /* In most cases, do nothing with a divide by zero. */
5153 #if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
5154 #ifndef REAL_INFINITY
5155 if (TREE_CODE (arg1
) == REAL_CST
&& real_zerop (arg1
))
5158 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
5160 /* In IEEE floating point, x/1 is not equivalent to x for snans.
5161 However, ANSI says we can drop signals, so we can do this anyway. */
5162 if (real_onep (arg1
))
5163 return non_lvalue (convert (type
, arg0
));
5165 /* If ARG1 is a constant, we can convert this to a multiply by the
5166 reciprocal. This does not have the same rounding properties,
5167 so only do this if -ffast-math. We can actually always safely
5168 do it if ARG1 is a power of two, but it's hard to tell if it is
5169 or not in a portable manner. */
5170 if (TREE_CODE (arg1
) == REAL_CST
)
5173 && 0 != (tem
= const_binop (code
, build_real (type
, dconst1
),
5175 return fold (build (MULT_EXPR
, type
, arg0
, tem
));
5176 /* Find the reciprocal if optimizing and the result is exact. */
5180 r
= TREE_REAL_CST (arg1
);
5181 if (exact_real_inverse (TYPE_MODE(TREE_TYPE(arg0
)), &r
))
5183 tem
= build_real (type
, r
);
5184 return fold (build (MULT_EXPR
, type
, arg0
, tem
));
5190 case TRUNC_DIV_EXPR
:
5191 case ROUND_DIV_EXPR
:
5192 case FLOOR_DIV_EXPR
:
5194 case EXACT_DIV_EXPR
:
5195 if (integer_onep (arg1
))
5196 return non_lvalue (convert (type
, arg0
));
5197 if (integer_zerop (arg1
))
5200 /* If arg0 is a multiple of arg1, then rewrite to the fastest div
5201 operation, EXACT_DIV_EXPR.
5203 Note that only CEIL_DIV_EXPR and FLOOR_DIV_EXPR are rewritten now.
5204 At one time others generated faster code, it's not clear if they do
5205 after the last round to changes to the DIV code in expmed.c. */
5206 if ((code
== CEIL_DIV_EXPR
|| code
== FLOOR_DIV_EXPR
)
5207 && multiple_of_p (type
, arg0
, arg1
))
5208 return fold (build (EXACT_DIV_EXPR
, type
, arg0
, arg1
));
5210 /* If we have ((a / C1) / C2) where both division are the same type, try
5211 to simplify. First see if C1 * C2 overflows or not. */
5212 if (TREE_CODE (arg0
) == code
&& TREE_CODE (arg1
) == INTEGER_CST
5213 && TREE_CODE (TREE_OPERAND (arg0
, 1)) == INTEGER_CST
)
5217 new_divisor
= const_binop (MULT_EXPR
, TREE_OPERAND (arg0
, 1), arg1
, 0);
5218 tem
= const_binop (FLOOR_DIV_EXPR
, new_divisor
, arg1
, 0);
5220 if (TREE_INT_CST_LOW (TREE_OPERAND (arg0
, 1)) == TREE_INT_CST_LOW (tem
)
5221 && TREE_INT_CST_HIGH (TREE_OPERAND (arg0
, 1)) == TREE_INT_CST_HIGH (tem
))
5223 /* If no overflow, divide by C1*C2. */
5224 return fold (build (code
, type
, TREE_OPERAND (arg0
, 0), new_divisor
));
5228 /* Look for ((a * C1) / C3) or (((a * C1) + C2) / C3),
5229 where C1 % C3 == 0 or C3 % C1 == 0. We can simplify these
5230 expressions, which often appear in the offsets or sizes of
5231 objects with a varying size. Only deal with positive divisors
5232 and multiplicands. If C2 is negative, we must have C2 % C3 == 0.
5234 Look for NOPs and SAVE_EXPRs inside. */
5236 if (TREE_CODE (arg1
) == INTEGER_CST
5237 && tree_int_cst_sgn (arg1
) >= 0)
5239 int have_save_expr
= 0;
5240 tree c2
= integer_zero_node
;
5243 if (TREE_CODE (xarg0
) == SAVE_EXPR
&& SAVE_EXPR_RTL (xarg0
) == 0)
5244 have_save_expr
= 1, xarg0
= TREE_OPERAND (xarg0
, 0);
5248 /* Look inside the dividend and simplify using EXACT_DIV_EXPR
5250 if (TREE_CODE (xarg0
) == MULT_EXPR
5251 && multiple_of_p (type
, TREE_OPERAND (xarg0
, 0), arg1
))
5255 t
= fold (build (MULT_EXPR
, type
,
5256 fold (build (EXACT_DIV_EXPR
, type
,
5257 TREE_OPERAND (xarg0
, 0), arg1
)),
5258 TREE_OPERAND (xarg0
, 1)));
5265 if (TREE_CODE (xarg0
) == MULT_EXPR
5266 && multiple_of_p (type
, TREE_OPERAND (xarg0
, 1), arg1
))
5270 t
= fold (build (MULT_EXPR
, type
,
5271 fold (build (EXACT_DIV_EXPR
, type
,
5272 TREE_OPERAND (xarg0
, 1), arg1
)),
5273 TREE_OPERAND (xarg0
, 0)));
5279 if (TREE_CODE (xarg0
) == PLUS_EXPR
5280 && TREE_CODE (TREE_OPERAND (xarg0
, 1)) == INTEGER_CST
)
5281 c2
= TREE_OPERAND (xarg0
, 1), xarg0
= TREE_OPERAND (xarg0
, 0);
5282 else if (TREE_CODE (xarg0
) == MINUS_EXPR
5283 && TREE_CODE (TREE_OPERAND (xarg0
, 1)) == INTEGER_CST
5284 /* If we are doing this computation unsigned, the negate
5286 && ! TREE_UNSIGNED (type
))
5288 c2
= fold (build1 (NEGATE_EXPR
, type
, TREE_OPERAND (xarg0
, 1)));
5289 xarg0
= TREE_OPERAND (xarg0
, 0);
5292 if (TREE_CODE (xarg0
) == SAVE_EXPR
&& SAVE_EXPR_RTL (xarg0
) == 0)
5293 have_save_expr
= 1, xarg0
= TREE_OPERAND (xarg0
, 0);
5297 if (TREE_CODE (xarg0
) == MULT_EXPR
5298 && TREE_CODE (TREE_OPERAND (xarg0
, 1)) == INTEGER_CST
5299 && tree_int_cst_sgn (TREE_OPERAND (xarg0
, 1)) >= 0
5300 && (integer_zerop (const_binop (TRUNC_MOD_EXPR
,
5301 TREE_OPERAND (xarg0
, 1), arg1
, 1))
5302 || integer_zerop (const_binop (TRUNC_MOD_EXPR
, arg1
,
5303 TREE_OPERAND (xarg0
, 1), 1)))
5304 && (tree_int_cst_sgn (c2
) >= 0
5305 || integer_zerop (const_binop (TRUNC_MOD_EXPR
, c2
,
5308 tree outer_div
= integer_one_node
;
5309 tree c1
= TREE_OPERAND (xarg0
, 1);
5312 /* If C3 > C1, set them equal and do a divide by
5313 C3/C1 at the end of the operation. */
5314 if (tree_int_cst_lt (c1
, c3
))
5315 outer_div
= const_binop (code
, c3
, c1
, 0), c3
= c1
;
5317 /* The result is A * (C1/C3) + (C2/C3). */
5318 t
= fold (build (PLUS_EXPR
, type
,
5319 fold (build (MULT_EXPR
, type
,
5320 TREE_OPERAND (xarg0
, 0),
5321 const_binop (code
, c1
, c3
, 1))),
5322 const_binop (code
, c2
, c3
, 1)));
5324 if (! integer_onep (outer_div
))
5325 t
= fold (build (code
, type
, t
, convert (type
, outer_div
)));
5337 case FLOOR_MOD_EXPR
:
5338 case ROUND_MOD_EXPR
:
5339 case TRUNC_MOD_EXPR
:
5340 if (integer_onep (arg1
))
5341 return omit_one_operand (type
, integer_zero_node
, arg0
);
5342 if (integer_zerop (arg1
))
5345 /* Look for ((a * C1) % C3) or (((a * C1) + C2) % C3),
5346 where C1 % C3 == 0. Handle similarly to the division case,
5347 but don't bother with SAVE_EXPRs. */
5349 if (TREE_CODE (arg1
) == INTEGER_CST
5350 && ! integer_zerop (arg1
))
5352 tree c2
= integer_zero_node
;
5355 if (TREE_CODE (xarg0
) == PLUS_EXPR
5356 && TREE_CODE (TREE_OPERAND (xarg0
, 1)) == INTEGER_CST
)
5357 c2
= TREE_OPERAND (xarg0
, 1), xarg0
= TREE_OPERAND (xarg0
, 0);
5358 else if (TREE_CODE (xarg0
) == MINUS_EXPR
5359 && TREE_CODE (TREE_OPERAND (xarg0
, 1)) == INTEGER_CST
5360 && ! TREE_UNSIGNED (type
))
5362 c2
= fold (build1 (NEGATE_EXPR
, type
, TREE_OPERAND (xarg0
, 1)));
5363 xarg0
= TREE_OPERAND (xarg0
, 0);
5368 if (TREE_CODE (xarg0
) == MULT_EXPR
5369 && TREE_CODE (TREE_OPERAND (xarg0
, 1)) == INTEGER_CST
5370 && integer_zerop (const_binop (TRUNC_MOD_EXPR
,
5371 TREE_OPERAND (xarg0
, 1),
5373 && tree_int_cst_sgn (c2
) >= 0)
5374 /* The result is (C2%C3). */
5375 return omit_one_operand (type
, const_binop (code
, c2
, arg1
, 1),
5376 TREE_OPERAND (xarg0
, 0));
5385 if (integer_zerop (arg1
))
5386 return non_lvalue (convert (type
, arg0
));
5387 /* Since negative shift count is not well-defined,
5388 don't try to compute it in the compiler. */
5389 if (TREE_CODE (arg1
) == INTEGER_CST
&& tree_int_cst_sgn (arg1
) < 0)
5391 /* Rewrite an LROTATE_EXPR by a constant into an
5392 RROTATE_EXPR by a new constant. */
5393 if (code
== LROTATE_EXPR
&& TREE_CODE (arg1
) == INTEGER_CST
)
5395 TREE_SET_CODE (t
, RROTATE_EXPR
);
5396 code
= RROTATE_EXPR
;
5397 TREE_OPERAND (t
, 1) = arg1
5400 convert (TREE_TYPE (arg1
),
5401 build_int_2 (GET_MODE_BITSIZE (TYPE_MODE (type
)), 0)),
5403 if (tree_int_cst_sgn (arg1
) < 0)
5407 /* If we have a rotate of a bit operation with the rotate count and
5408 the second operand of the bit operation both constant,
5409 permute the two operations. */
5410 if (code
== RROTATE_EXPR
&& TREE_CODE (arg1
) == INTEGER_CST
5411 && (TREE_CODE (arg0
) == BIT_AND_EXPR
5412 || TREE_CODE (arg0
) == BIT_ANDTC_EXPR
5413 || TREE_CODE (arg0
) == BIT_IOR_EXPR
5414 || TREE_CODE (arg0
) == BIT_XOR_EXPR
)
5415 && TREE_CODE (TREE_OPERAND (arg0
, 1)) == INTEGER_CST
)
5416 return fold (build (TREE_CODE (arg0
), type
,
5417 fold (build (code
, type
,
5418 TREE_OPERAND (arg0
, 0), arg1
)),
5419 fold (build (code
, type
,
5420 TREE_OPERAND (arg0
, 1), arg1
))));
5422 /* Two consecutive rotates adding up to the width of the mode can
5424 if (code
== RROTATE_EXPR
&& TREE_CODE (arg1
) == INTEGER_CST
5425 && TREE_CODE (arg0
) == RROTATE_EXPR
5426 && TREE_CODE (TREE_OPERAND (arg0
, 1)) == INTEGER_CST
5427 && TREE_INT_CST_HIGH (arg1
) == 0
5428 && TREE_INT_CST_HIGH (TREE_OPERAND (arg0
, 1)) == 0
5429 && ((TREE_INT_CST_LOW (arg1
)
5430 + TREE_INT_CST_LOW (TREE_OPERAND (arg0
, 1)))
5431 == GET_MODE_BITSIZE (TYPE_MODE (type
))))
5432 return TREE_OPERAND (arg0
, 0);
5437 if (operand_equal_p (arg0
, arg1
, 0))
5439 if (INTEGRAL_TYPE_P (type
)
5440 && operand_equal_p (arg1
, TYPE_MIN_VALUE (type
), 1))
5441 return omit_one_operand (type
, arg1
, arg0
);
5445 if (operand_equal_p (arg0
, arg1
, 0))
5447 if (INTEGRAL_TYPE_P (type
)
5448 && TYPE_MAX_VALUE (type
)
5449 && operand_equal_p (arg1
, TYPE_MAX_VALUE (type
), 1))
5450 return omit_one_operand (type
, arg1
, arg0
);
5453 case TRUTH_NOT_EXPR
:
5454 /* Note that the operand of this must be an int
5455 and its values must be 0 or 1.
5456 ("true" is a fixed value perhaps depending on the language,
5457 but we don't handle values other than 1 correctly yet.) */
5458 tem
= invert_truthvalue (arg0
);
5459 /* Avoid infinite recursion. */
5460 if (TREE_CODE (tem
) == TRUTH_NOT_EXPR
)
5462 return convert (type
, tem
);
5464 case TRUTH_ANDIF_EXPR
:
5465 /* Note that the operands of this must be ints
5466 and their values must be 0 or 1.
5467 ("true" is a fixed value perhaps depending on the language.) */
5468 /* If first arg is constant zero, return it. */
5469 if (integer_zerop (arg0
))
5471 case TRUTH_AND_EXPR
:
5472 /* If either arg is constant true, drop it. */
5473 if (TREE_CODE (arg0
) == INTEGER_CST
&& ! integer_zerop (arg0
))
5474 return non_lvalue (arg1
);
5475 if (TREE_CODE (arg1
) == INTEGER_CST
&& ! integer_zerop (arg1
))
5476 return non_lvalue (arg0
);
5477 /* If second arg is constant zero, result is zero, but first arg
5478 must be evaluated. */
5479 if (integer_zerop (arg1
))
5480 return omit_one_operand (type
, arg1
, arg0
);
5481 /* Likewise for first arg, but note that only the TRUTH_AND_EXPR
5482 case will be handled here. */
5483 if (integer_zerop (arg0
))
5484 return omit_one_operand (type
, arg0
, arg1
);
5487 /* We only do these simplifications if we are optimizing. */
5491 /* Check for things like (A || B) && (A || C). We can convert this
5492 to A || (B && C). Note that either operator can be any of the four
5493 truth and/or operations and the transformation will still be
5494 valid. Also note that we only care about order for the
5495 ANDIF and ORIF operators. If B contains side effects, this
5496 might change the truth-value of A. */
5497 if (TREE_CODE (arg0
) == TREE_CODE (arg1
)
5498 && (TREE_CODE (arg0
) == TRUTH_ANDIF_EXPR
5499 || TREE_CODE (arg0
) == TRUTH_ORIF_EXPR
5500 || TREE_CODE (arg0
) == TRUTH_AND_EXPR
5501 || TREE_CODE (arg0
) == TRUTH_OR_EXPR
)
5502 && ! TREE_SIDE_EFFECTS (TREE_OPERAND (arg0
, 1)))
5504 tree a00
= TREE_OPERAND (arg0
, 0);
5505 tree a01
= TREE_OPERAND (arg0
, 1);
5506 tree a10
= TREE_OPERAND (arg1
, 0);
5507 tree a11
= TREE_OPERAND (arg1
, 1);
5508 int commutative
= ((TREE_CODE (arg0
) == TRUTH_OR_EXPR
5509 || TREE_CODE (arg0
) == TRUTH_AND_EXPR
)
5510 && (code
== TRUTH_AND_EXPR
5511 || code
== TRUTH_OR_EXPR
));
5513 if (operand_equal_p (a00
, a10
, 0))
5514 return fold (build (TREE_CODE (arg0
), type
, a00
,
5515 fold (build (code
, type
, a01
, a11
))));
5516 else if (commutative
&& operand_equal_p (a00
, a11
, 0))
5517 return fold (build (TREE_CODE (arg0
), type
, a00
,
5518 fold (build (code
, type
, a01
, a10
))));
5519 else if (commutative
&& operand_equal_p (a01
, a10
, 0))
5520 return fold (build (TREE_CODE (arg0
), type
, a01
,
5521 fold (build (code
, type
, a00
, a11
))));
5523 /* This case if tricky because we must either have commutative
5524 operators or else A10 must not have side-effects. */
5526 else if ((commutative
|| ! TREE_SIDE_EFFECTS (a10
))
5527 && operand_equal_p (a01
, a11
, 0))
5528 return fold (build (TREE_CODE (arg0
), type
,
5529 fold (build (code
, type
, a00
, a10
)),
5533 /* See if we can build a range comparison. */
5534 if (0 != (tem
= fold_range_test (t
)))
5537 /* Check for the possibility of merging component references. If our
5538 lhs is another similar operation, try to merge its rhs with our
5539 rhs. Then try to merge our lhs and rhs. */
5540 if (TREE_CODE (arg0
) == code
5541 && 0 != (tem
= fold_truthop (code
, type
,
5542 TREE_OPERAND (arg0
, 1), arg1
)))
5543 return fold (build (code
, type
, TREE_OPERAND (arg0
, 0), tem
));
5545 if ((tem
= fold_truthop (code
, type
, arg0
, arg1
)) != 0)
5550 case TRUTH_ORIF_EXPR
:
5551 /* Note that the operands of this must be ints
5552 and their values must be 0 or true.
5553 ("true" is a fixed value perhaps depending on the language.) */
5554 /* If first arg is constant true, return it. */
5555 if (TREE_CODE (arg0
) == INTEGER_CST
&& ! integer_zerop (arg0
))
5558 /* If either arg is constant zero, drop it. */
5559 if (TREE_CODE (arg0
) == INTEGER_CST
&& integer_zerop (arg0
))
5560 return non_lvalue (arg1
);
5561 if (TREE_CODE (arg1
) == INTEGER_CST
&& integer_zerop (arg1
))
5562 return non_lvalue (arg0
);
5563 /* If second arg is constant true, result is true, but we must
5564 evaluate first arg. */
5565 if (TREE_CODE (arg1
) == INTEGER_CST
&& ! integer_zerop (arg1
))
5566 return omit_one_operand (type
, arg1
, arg0
);
5567 /* Likewise for first arg, but note this only occurs here for
5569 if (TREE_CODE (arg0
) == INTEGER_CST
&& ! integer_zerop (arg0
))
5570 return omit_one_operand (type
, arg0
, arg1
);
5573 case TRUTH_XOR_EXPR
:
5574 /* If either arg is constant zero, drop it. */
5575 if (integer_zerop (arg0
))
5576 return non_lvalue (arg1
);
5577 if (integer_zerop (arg1
))
5578 return non_lvalue (arg0
);
5579 /* If either arg is constant true, this is a logical inversion. */
5580 if (integer_onep (arg0
))
5581 return non_lvalue (invert_truthvalue (arg1
));
5582 if (integer_onep (arg1
))
5583 return non_lvalue (invert_truthvalue (arg0
));
5592 /* If one arg is a constant integer, put it last. */
5593 if (TREE_CODE (arg0
) == INTEGER_CST
5594 && TREE_CODE (arg1
) != INTEGER_CST
)
5596 TREE_OPERAND (t
, 0) = arg1
;
5597 TREE_OPERAND (t
, 1) = arg0
;
5598 arg0
= TREE_OPERAND (t
, 0);
5599 arg1
= TREE_OPERAND (t
, 1);
5600 code
= swap_tree_comparison (code
);
5601 TREE_SET_CODE (t
, code
);
5604 /* Convert foo++ == CONST into ++foo == CONST + INCR.
5605 First, see if one arg is constant; find the constant arg
5606 and the other one. */
5608 tree constop
= 0, varop
= NULL_TREE
;
5609 int constopnum
= -1;
5611 if (TREE_CONSTANT (arg1
))
5612 constopnum
= 1, constop
= arg1
, varop
= arg0
;
5613 if (TREE_CONSTANT (arg0
))
5614 constopnum
= 0, constop
= arg0
, varop
= arg1
;
5616 if (constop
&& TREE_CODE (varop
) == POSTINCREMENT_EXPR
)
5618 /* This optimization is invalid for ordered comparisons
5619 if CONST+INCR overflows or if foo+incr might overflow.
5620 This optimization is invalid for floating point due to rounding.
5621 For pointer types we assume overflow doesn't happen. */
5622 if (POINTER_TYPE_P (TREE_TYPE (varop
))
5623 || (! FLOAT_TYPE_P (TREE_TYPE (varop
))
5624 && (code
== EQ_EXPR
|| code
== NE_EXPR
)))
5627 = fold (build (PLUS_EXPR
, TREE_TYPE (varop
),
5628 constop
, TREE_OPERAND (varop
, 1)));
5629 TREE_SET_CODE (varop
, PREINCREMENT_EXPR
);
5631 /* If VAROP is a reference to a bitfield, we must mask
5632 the constant by the width of the field. */
5633 if (TREE_CODE (TREE_OPERAND (varop
, 0)) == COMPONENT_REF
5634 && DECL_BIT_FIELD(TREE_OPERAND
5635 (TREE_OPERAND (varop
, 0), 1)))
5638 = TREE_INT_CST_LOW (DECL_SIZE
5640 (TREE_OPERAND (varop
, 0), 1)));
5641 tree mask
, unsigned_type
;
5643 tree folded_compare
;
5645 /* First check whether the comparison would come out
5646 always the same. If we don't do that we would
5647 change the meaning with the masking. */
5648 if (constopnum
== 0)
5649 folded_compare
= fold (build (code
, type
, constop
,
5650 TREE_OPERAND (varop
, 0)));
5652 folded_compare
= fold (build (code
, type
,
5653 TREE_OPERAND (varop
, 0),
5655 if (integer_zerop (folded_compare
)
5656 || integer_onep (folded_compare
))
5657 return omit_one_operand (type
, folded_compare
, varop
);
5659 unsigned_type
= type_for_size (size
, 1);
5660 precision
= TYPE_PRECISION (unsigned_type
);
5661 mask
= build_int_2 (~0, ~0);
5662 TREE_TYPE (mask
) = unsigned_type
;
5663 force_fit_type (mask
, 0);
5664 mask
= const_binop (RSHIFT_EXPR
, mask
,
5665 size_int (precision
- size
), 0);
5666 newconst
= fold (build (BIT_AND_EXPR
,
5667 TREE_TYPE (varop
), newconst
,
5668 convert (TREE_TYPE (varop
),
5673 t
= build (code
, type
, TREE_OPERAND (t
, 0),
5674 TREE_OPERAND (t
, 1));
5675 TREE_OPERAND (t
, constopnum
) = newconst
;
5679 else if (constop
&& TREE_CODE (varop
) == POSTDECREMENT_EXPR
)
5681 if (POINTER_TYPE_P (TREE_TYPE (varop
))
5682 || (! FLOAT_TYPE_P (TREE_TYPE (varop
))
5683 && (code
== EQ_EXPR
|| code
== NE_EXPR
)))
5686 = fold (build (MINUS_EXPR
, TREE_TYPE (varop
),
5687 constop
, TREE_OPERAND (varop
, 1)));
5688 TREE_SET_CODE (varop
, PREDECREMENT_EXPR
);
5690 if (TREE_CODE (TREE_OPERAND (varop
, 0)) == COMPONENT_REF
5691 && DECL_BIT_FIELD(TREE_OPERAND
5692 (TREE_OPERAND (varop
, 0), 1)))
5695 = TREE_INT_CST_LOW (DECL_SIZE
5697 (TREE_OPERAND (varop
, 0), 1)));
5698 tree mask
, unsigned_type
;
5700 tree folded_compare
;
5702 if (constopnum
== 0)
5703 folded_compare
= fold (build (code
, type
, constop
,
5704 TREE_OPERAND (varop
, 0)));
5706 folded_compare
= fold (build (code
, type
,
5707 TREE_OPERAND (varop
, 0),
5709 if (integer_zerop (folded_compare
)
5710 || integer_onep (folded_compare
))
5711 return omit_one_operand (type
, folded_compare
, varop
);
5713 unsigned_type
= type_for_size (size
, 1);
5714 precision
= TYPE_PRECISION (unsigned_type
);
5715 mask
= build_int_2 (~0, ~0);
5716 TREE_TYPE (mask
) = TREE_TYPE (varop
);
5717 force_fit_type (mask
, 0);
5718 mask
= const_binop (RSHIFT_EXPR
, mask
,
5719 size_int (precision
- size
), 0);
5720 newconst
= fold (build (BIT_AND_EXPR
,
5721 TREE_TYPE (varop
), newconst
,
5722 convert (TREE_TYPE (varop
),
5727 t
= build (code
, type
, TREE_OPERAND (t
, 0),
5728 TREE_OPERAND (t
, 1));
5729 TREE_OPERAND (t
, constopnum
) = newconst
;
5735 /* Change X >= CST to X > (CST - 1) if CST is positive. */
5736 if (TREE_CODE (arg1
) == INTEGER_CST
5737 && TREE_CODE (arg0
) != INTEGER_CST
5738 && tree_int_cst_sgn (arg1
) > 0)
5740 switch (TREE_CODE (t
))
5744 arg1
= const_binop (MINUS_EXPR
, arg1
, integer_one_node
, 0);
5745 t
= build (code
, type
, TREE_OPERAND (t
, 0), arg1
);
5750 arg1
= const_binop (MINUS_EXPR
, arg1
, integer_one_node
, 0);
5751 t
= build (code
, type
, TREE_OPERAND (t
, 0), arg1
);
5759 /* If this is an EQ or NE comparison with zero and ARG0 is
5760 (1 << foo) & bar, convert it to (bar >> foo) & 1. Both require
5761 two operations, but the latter can be done in one less insn
5762 on machines that have only two-operand insns or on which a
5763 constant cannot be the first operand. */
5764 if (integer_zerop (arg1
) && (code
== EQ_EXPR
|| code
== NE_EXPR
)
5765 && TREE_CODE (arg0
) == BIT_AND_EXPR
)
5767 if (TREE_CODE (TREE_OPERAND (arg0
, 0)) == LSHIFT_EXPR
5768 && integer_onep (TREE_OPERAND (TREE_OPERAND (arg0
, 0), 0)))
5770 fold (build (code
, type
,
5771 build (BIT_AND_EXPR
, TREE_TYPE (arg0
),
5773 TREE_TYPE (TREE_OPERAND (arg0
, 0)),
5774 TREE_OPERAND (arg0
, 1),
5775 TREE_OPERAND (TREE_OPERAND (arg0
, 0), 1)),
5776 convert (TREE_TYPE (arg0
),
5779 else if (TREE_CODE (TREE_OPERAND (arg0
, 1)) == LSHIFT_EXPR
5780 && integer_onep (TREE_OPERAND (TREE_OPERAND (arg0
, 1), 0)))
5782 fold (build (code
, type
,
5783 build (BIT_AND_EXPR
, TREE_TYPE (arg0
),
5785 TREE_TYPE (TREE_OPERAND (arg0
, 1)),
5786 TREE_OPERAND (arg0
, 0),
5787 TREE_OPERAND (TREE_OPERAND (arg0
, 1), 1)),
5788 convert (TREE_TYPE (arg0
),
5793 /* If this is an NE or EQ comparison of zero against the result of a
5794 signed MOD operation whose second operand is a power of 2, make
5795 the MOD operation unsigned since it is simpler and equivalent. */
5796 if ((code
== NE_EXPR
|| code
== EQ_EXPR
)
5797 && integer_zerop (arg1
)
5798 && ! TREE_UNSIGNED (TREE_TYPE (arg0
))
5799 && (TREE_CODE (arg0
) == TRUNC_MOD_EXPR
5800 || TREE_CODE (arg0
) == CEIL_MOD_EXPR
5801 || TREE_CODE (arg0
) == FLOOR_MOD_EXPR
5802 || TREE_CODE (arg0
) == ROUND_MOD_EXPR
)
5803 && integer_pow2p (TREE_OPERAND (arg0
, 1)))
5805 tree newtype
= unsigned_type (TREE_TYPE (arg0
));
5806 tree newmod
= build (TREE_CODE (arg0
), newtype
,
5807 convert (newtype
, TREE_OPERAND (arg0
, 0)),
5808 convert (newtype
, TREE_OPERAND (arg0
, 1)));
5810 return build (code
, type
, newmod
, convert (newtype
, arg1
));
5813 /* If this is an NE comparison of zero with an AND of one, remove the
5814 comparison since the AND will give the correct value. */
5815 if (code
== NE_EXPR
&& integer_zerop (arg1
)
5816 && TREE_CODE (arg0
) == BIT_AND_EXPR
5817 && integer_onep (TREE_OPERAND (arg0
, 1)))
5818 return convert (type
, arg0
);
5820 /* If we have (A & C) == C where C is a power of 2, convert this into
5821 (A & C) != 0. Similarly for NE_EXPR. */
5822 if ((code
== EQ_EXPR
|| code
== NE_EXPR
)
5823 && TREE_CODE (arg0
) == BIT_AND_EXPR
5824 && integer_pow2p (TREE_OPERAND (arg0
, 1))
5825 && operand_equal_p (TREE_OPERAND (arg0
, 1), arg1
, 0))
5826 return build (code
== EQ_EXPR
? NE_EXPR
: EQ_EXPR
, type
,
5827 arg0
, integer_zero_node
);
5829 /* If X is unsigned, convert X < (1 << Y) into X >> Y == 0
5830 and similarly for >= into !=. */
5831 if ((code
== LT_EXPR
|| code
== GE_EXPR
)
5832 && TREE_UNSIGNED (TREE_TYPE (arg0
))
5833 && TREE_CODE (arg1
) == LSHIFT_EXPR
5834 && integer_onep (TREE_OPERAND (arg1
, 0)))
5835 return build (code
== LT_EXPR
? EQ_EXPR
: NE_EXPR
, type
,
5836 build (RSHIFT_EXPR
, TREE_TYPE (arg0
), arg0
,
5837 TREE_OPERAND (arg1
, 1)),
5838 convert (TREE_TYPE (arg0
), integer_zero_node
));
5840 else if ((code
== LT_EXPR
|| code
== GE_EXPR
)
5841 && TREE_UNSIGNED (TREE_TYPE (arg0
))
5842 && (TREE_CODE (arg1
) == NOP_EXPR
5843 || TREE_CODE (arg1
) == CONVERT_EXPR
)
5844 && TREE_CODE (TREE_OPERAND (arg1
, 0)) == LSHIFT_EXPR
5845 && integer_onep (TREE_OPERAND (TREE_OPERAND (arg1
, 0), 0)))
5847 build (code
== LT_EXPR
? EQ_EXPR
: NE_EXPR
, type
,
5848 convert (TREE_TYPE (arg0
),
5849 build (RSHIFT_EXPR
, TREE_TYPE (arg0
), arg0
,
5850 TREE_OPERAND (TREE_OPERAND (arg1
, 0), 1))),
5851 convert (TREE_TYPE (arg0
), integer_zero_node
));
5853 /* Simplify comparison of something with itself. (For IEEE
5854 floating-point, we can only do some of these simplifications.) */
5855 if (operand_equal_p (arg0
, arg1
, 0))
5862 if (INTEGRAL_TYPE_P (TREE_TYPE (arg0
)))
5863 return constant_boolean_node (1, type
);
5865 TREE_SET_CODE (t
, code
);
5869 /* For NE, we can only do this simplification if integer. */
5870 if (! INTEGRAL_TYPE_P (TREE_TYPE (arg0
)))
5872 /* ... fall through ... */
5875 return constant_boolean_node (0, type
);
5881 /* An unsigned comparison against 0 can be simplified. */
5882 if (integer_zerop (arg1
)
5883 && (INTEGRAL_TYPE_P (TREE_TYPE (arg1
))
5884 || POINTER_TYPE_P (TREE_TYPE (arg1
)))
5885 && TREE_UNSIGNED (TREE_TYPE (arg1
)))
5887 switch (TREE_CODE (t
))
5891 TREE_SET_CODE (t
, NE_EXPR
);
5895 TREE_SET_CODE (t
, EQ_EXPR
);
5898 return omit_one_operand (type
,
5899 convert (type
, integer_one_node
),
5902 return omit_one_operand (type
,
5903 convert (type
, integer_zero_node
),
5910 /* An unsigned <= 0x7fffffff can be simplified. */
5912 int width
= TYPE_PRECISION (TREE_TYPE (arg1
));
5913 if (TREE_CODE (arg1
) == INTEGER_CST
5914 && ! TREE_CONSTANT_OVERFLOW (arg1
)
5915 && width
<= HOST_BITS_PER_WIDE_INT
5916 && TREE_INT_CST_LOW (arg1
) == ((HOST_WIDE_INT
) 1 << (width
- 1)) - 1
5917 && TREE_INT_CST_HIGH (arg1
) == 0
5918 && (INTEGRAL_TYPE_P (TREE_TYPE (arg1
))
5919 || POINTER_TYPE_P (TREE_TYPE (arg1
)))
5920 && TREE_UNSIGNED (TREE_TYPE (arg1
)))
5922 switch (TREE_CODE (t
))
5925 return fold (build (GE_EXPR
, type
,
5926 convert (signed_type (TREE_TYPE (arg0
)),
5928 convert (signed_type (TREE_TYPE (arg1
)),
5929 integer_zero_node
)));
5931 return fold (build (LT_EXPR
, type
,
5932 convert (signed_type (TREE_TYPE (arg0
)),
5934 convert (signed_type (TREE_TYPE (arg1
)),
5935 integer_zero_node
)));
5942 /* If we are comparing an expression that just has comparisons
5943 of two integer values, arithmetic expressions of those comparisons,
5944 and constants, we can simplify it. There are only three cases
5945 to check: the two values can either be equal, the first can be
5946 greater, or the second can be greater. Fold the expression for
5947 those three values. Since each value must be 0 or 1, we have
5948 eight possibilities, each of which corresponds to the constant 0
5949 or 1 or one of the six possible comparisons.
5951 This handles common cases like (a > b) == 0 but also handles
5952 expressions like ((x > y) - (y > x)) > 0, which supposedly
5953 occur in macroized code. */
5955 if (TREE_CODE (arg1
) == INTEGER_CST
&& TREE_CODE (arg0
) != INTEGER_CST
)
5957 tree cval1
= 0, cval2
= 0;
5960 if (twoval_comparison_p (arg0
, &cval1
, &cval2
, &save_p
)
5961 /* Don't handle degenerate cases here; they should already
5962 have been handled anyway. */
5963 && cval1
!= 0 && cval2
!= 0
5964 && ! (TREE_CONSTANT (cval1
) && TREE_CONSTANT (cval2
))
5965 && TREE_TYPE (cval1
) == TREE_TYPE (cval2
)
5966 && INTEGRAL_TYPE_P (TREE_TYPE (cval1
))
5967 && TYPE_MAX_VALUE (TREE_TYPE (cval1
))
5968 && TYPE_MAX_VALUE (TREE_TYPE (cval2
))
5969 && ! operand_equal_p (TYPE_MIN_VALUE (TREE_TYPE (cval1
)),
5970 TYPE_MAX_VALUE (TREE_TYPE (cval2
)), 0))
5972 tree maxval
= TYPE_MAX_VALUE (TREE_TYPE (cval1
));
5973 tree minval
= TYPE_MIN_VALUE (TREE_TYPE (cval1
));
5975 /* We can't just pass T to eval_subst in case cval1 or cval2
5976 was the same as ARG1. */
5979 = fold (build (code
, type
,
5980 eval_subst (arg0
, cval1
, maxval
, cval2
, minval
),
5983 = fold (build (code
, type
,
5984 eval_subst (arg0
, cval1
, maxval
, cval2
, maxval
),
5987 = fold (build (code
, type
,
5988 eval_subst (arg0
, cval1
, minval
, cval2
, maxval
),
5991 /* All three of these results should be 0 or 1. Confirm they
5992 are. Then use those values to select the proper code
5995 if ((integer_zerop (high_result
)
5996 || integer_onep (high_result
))
5997 && (integer_zerop (equal_result
)
5998 || integer_onep (equal_result
))
5999 && (integer_zerop (low_result
)
6000 || integer_onep (low_result
)))
6002 /* Make a 3-bit mask with the high-order bit being the
6003 value for `>', the next for '=', and the low for '<'. */
6004 switch ((integer_onep (high_result
) * 4)
6005 + (integer_onep (equal_result
) * 2)
6006 + integer_onep (low_result
))
6010 return omit_one_operand (type
, integer_zero_node
, arg0
);
6031 return omit_one_operand (type
, integer_one_node
, arg0
);
6034 t
= build (code
, type
, cval1
, cval2
);
6036 return save_expr (t
);
6043 /* If this is a comparison of a field, we may be able to simplify it. */
6044 if ((TREE_CODE (arg0
) == COMPONENT_REF
6045 || TREE_CODE (arg0
) == BIT_FIELD_REF
)
6046 && (code
== EQ_EXPR
|| code
== NE_EXPR
)
6047 /* Handle the constant case even without -O
6048 to make sure the warnings are given. */
6049 && (optimize
|| TREE_CODE (arg1
) == INTEGER_CST
))
6051 t1
= optimize_bit_field_compare (code
, type
, arg0
, arg1
);
6055 /* If this is a comparison of complex values and either or both sides
6056 are a COMPLEX_EXPR or COMPLEX_CST, it is best to split up the
6057 comparisons and join them with a TRUTH_ANDIF_EXPR or TRUTH_ORIF_EXPR.
6058 This may prevent needless evaluations. */
6059 if ((code
== EQ_EXPR
|| code
== NE_EXPR
)
6060 && TREE_CODE (TREE_TYPE (arg0
)) == COMPLEX_TYPE
6061 && (TREE_CODE (arg0
) == COMPLEX_EXPR
6062 || TREE_CODE (arg1
) == COMPLEX_EXPR
6063 || TREE_CODE (arg0
) == COMPLEX_CST
6064 || TREE_CODE (arg1
) == COMPLEX_CST
))
6066 tree subtype
= TREE_TYPE (TREE_TYPE (arg0
));
6067 tree real0
, imag0
, real1
, imag1
;
6069 arg0
= save_expr (arg0
);
6070 arg1
= save_expr (arg1
);
6071 real0
= fold (build1 (REALPART_EXPR
, subtype
, arg0
));
6072 imag0
= fold (build1 (IMAGPART_EXPR
, subtype
, arg0
));
6073 real1
= fold (build1 (REALPART_EXPR
, subtype
, arg1
));
6074 imag1
= fold (build1 (IMAGPART_EXPR
, subtype
, arg1
));
6076 return fold (build ((code
== EQ_EXPR
? TRUTH_ANDIF_EXPR
6079 fold (build (code
, type
, real0
, real1
)),
6080 fold (build (code
, type
, imag0
, imag1
))));
      /* From here on, the only cases we handle are when the result is
	 known to be a constant.

	 To compute GT, swap the arguments and do LT.
	 To compute GE, do LT and invert the result.
	 To compute LE, swap the arguments, do LT and invert the result.
	 To compute NE, do EQ and invert the result.

	 Therefore, the code below must handle only EQ and LT.  */

      if (code == LE_EXPR || code == GT_EXPR)
	{
	  tem = arg0, arg0 = arg1, arg1 = tem;
	  code = swap_tree_comparison (code);
	}

      /* Note that it is safe to invert for real values here because we
	 will check below in the one case that it matters.  */

      invert = 0;
      if (code == NE_EXPR || code == GE_EXPR)
	{
	  invert = 1;
	  code = invert_tree_comparison (code);
	}
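      /* E.g. `a >= b' has by now been reduced to `a < b' with INVERT
	 set; the constant computed below for LT is flipped at the
	 end.  */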
      /* Compute a result for LT or EQ if args permit;
	 otherwise return T.  */
      if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST)
	{
	  if (code == EQ_EXPR)
	    t1 = build_int_2 ((TREE_INT_CST_LOW (arg0)
			       == TREE_INT_CST_LOW (arg1))
			      && (TREE_INT_CST_HIGH (arg0)
				  == TREE_INT_CST_HIGH (arg1)),
			      0);
	  else
	    t1 = build_int_2 ((TREE_UNSIGNED (TREE_TYPE (arg0))
			       ? INT_CST_LT_UNSIGNED (arg0, arg1)
			       : INT_CST_LT (arg0, arg1)),
			      0);
	}
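      /* An INTEGER_CST stores its value as a HIGH/LOW pair of
	 HOST_WIDE_INTs, so equality compares both words, and the
	 INT_CST_LT* macros order the pair as a whole.  */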
#if 0 /* This is no longer useful, but breaks some real code.  */
      /* Assume a nonexplicit constant cannot equal an explicit one,
	 since such code would be undefined anyway.
	 Exception: on sysvr4, using #pragma weak,
	 a label can come out as 0.  */
      else if (TREE_CODE (arg1) == INTEGER_CST
	       && !integer_zerop (arg1)
	       && TREE_CONSTANT (arg0)
	       && TREE_CODE (arg0) == ADDR_EXPR
	       && code == EQ_EXPR)
	t1 = build_int_2 (0, 0);
#endif
      /* Two real constants can be compared explicitly.  */
      else if (TREE_CODE (arg0) == REAL_CST && TREE_CODE (arg1) == REAL_CST)
	{
	  /* If either operand is a NaN, the result is false with two
	     exceptions: First, an NE_EXPR is true on NaNs, but that case
	     is already handled correctly since we will be inverting the
	     result for NE_EXPR.  Second, if we had inverted a LE_EXPR
	     or a GE_EXPR into a LT_EXPR, we must return true so that it
	     will be inverted into false.  */

	  if (REAL_VALUE_ISNAN (TREE_REAL_CST (arg0))
	      || REAL_VALUE_ISNAN (TREE_REAL_CST (arg1)))
	    t1 = build_int_2 (invert && code == LT_EXPR, 0);

	  else if (code == EQ_EXPR)
	    t1 = build_int_2 (REAL_VALUES_EQUAL (TREE_REAL_CST (arg0),
						 TREE_REAL_CST (arg1)),
			      0);
	  else
	    t1 = build_int_2 (REAL_VALUES_LESS (TREE_REAL_CST (arg0),
						TREE_REAL_CST (arg1)),
			      0);
	}

      if (t1 == NULL_TREE)
	return t;

      if (invert)
	TREE_INT_CST_LOW (t1) ^= 1;

      TREE_TYPE (t1) = type;
      if (TREE_CODE (type) == BOOLEAN_TYPE)
	return truthvalue_conversion (t1);
      return t1;
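      /* The NaN handling above, by example: `x <= y' was reduced to
	 the inversion of `y < x', so the probe must report true for a
	 NaN operand precisely so the final inversion yields false, as
	 IEEE semantics require.  */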
    case COND_EXPR:
      /* Pedantic ANSI C says that a conditional expression is never an lvalue,
	 so all simple results must be passed through pedantic_non_lvalue.  */
      if (TREE_CODE (arg0) == INTEGER_CST)
	return pedantic_non_lvalue
	  (TREE_OPERAND (t, (integer_zerop (arg0) ? 2 : 1)));
      else if (operand_equal_p (arg1, TREE_OPERAND (expr, 2), 0))
	return pedantic_omit_one_operand (type, arg1, arg0);
      /* If the second operand is zero, invert the comparison and swap
	 the second and third operands.  Likewise if the second operand
	 is constant and the third is not or if the third operand is
	 equivalent to the first operand of the comparison.  */

      if (integer_zerop (arg1)
	  || (TREE_CONSTANT (arg1) && ! TREE_CONSTANT (TREE_OPERAND (t, 2)))
	  || (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<'
	      && operand_equal_for_comparison_p (TREE_OPERAND (arg0, 0),
						 TREE_OPERAND (t, 2),
						 TREE_OPERAND (arg0, 1))))
	{
	  /* See if this can be inverted.  If it can't, possibly because
	     it was a floating-point inequality comparison, don't do
	     anything.  */
	  tem = invert_truthvalue (arg0);

	  if (TREE_CODE (tem) != TRUTH_NOT_EXPR)
	    {
	      t = build (code, type, tem,
			 TREE_OPERAND (t, 2), TREE_OPERAND (t, 1));
	      arg0 = tem;
	      /* arg1 should be the first argument of the new T.  */
	      arg1 = TREE_OPERAND (t, 1);
	      STRIP_NOPS (arg1);
	    }
	}
      /* If we have A op B ? A : C, we may be able to convert this to a
	 simpler expression, depending on the operation and the values
	 of B and C.  IEEE floating point prevents this though,
	 because A or B might be -0.0 or a NaN.  */

      if (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<'
	  && (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
	      || ! FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (arg0, 0)))
	      || flag_fast_math)
	  && operand_equal_for_comparison_p (TREE_OPERAND (arg0, 0),
					     arg1, TREE_OPERAND (arg0, 1)))
	{
	  tree arg2 = TREE_OPERAND (t, 2);
	  enum tree_code comp_code = TREE_CODE (arg0);

	  STRIP_NOPS (arg2);

	  /* If we have A op 0 ? A : -A, this is A, -A, abs (A), or abs (-A),
	     depending on the comparison operation.  */
	  if ((FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (arg0, 1)))
	       ? real_zerop (TREE_OPERAND (arg0, 1))
	       : integer_zerop (TREE_OPERAND (arg0, 1)))
	      && TREE_CODE (arg2) == NEGATE_EXPR
	      && operand_equal_p (TREE_OPERAND (arg2, 0), arg1, 0))
	    switch (comp_code)
	      {
	      case EQ_EXPR:
		return pedantic_non_lvalue
		  (fold (build1 (NEGATE_EXPR, type, arg1)));
	      case NE_EXPR:
		return pedantic_non_lvalue (convert (type, arg1));
	      case GE_EXPR:
	      case GT_EXPR:
		if (TREE_UNSIGNED (TREE_TYPE (arg1)))
		  arg1 = convert (signed_type (TREE_TYPE (arg1)), arg1);
		return pedantic_non_lvalue
		  (convert (type, fold (build1 (ABS_EXPR,
						TREE_TYPE (arg1), arg1))));
	      case LE_EXPR:
	      case LT_EXPR:
		if (TREE_UNSIGNED (TREE_TYPE (arg1)))
		  arg1 = convert (signed_type (TREE_TYPE (arg1)), arg1);
		return pedantic_non_lvalue
		  (fold (build1 (NEGATE_EXPR, type,
				 convert (type,
					  fold (build1 (ABS_EXPR,
							TREE_TYPE (arg1),
							arg1))))));
	      default:
		abort ();
	      }

	  /* If this is A != 0 ? A : 0, this is simply A.  For ==, it is
	     always zero.  */

	  if (integer_zerop (TREE_OPERAND (arg0, 1)) && integer_zerop (arg2))
	    {
	      if (comp_code == NE_EXPR)
		return pedantic_non_lvalue (convert (type, arg1));
	      else if (comp_code == EQ_EXPR)
		return pedantic_non_lvalue (convert (type, integer_zero_node));
	    }
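	  /* E.g. `x > 0 ? x : -x' and `x >= 0 ? x : -x' both become
	     ABS_EXPR <x>, while `x < 0 ? x : -x' becomes -ABS_EXPR <x>;
	     an unsigned X is first converted to its signed counterpart,
	     where ABS_EXPR is meaningful.  */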
	  /* If this is A op B ? A : B, this is either A, B, min (A, B),
	     or max (A, B), depending on the operation.  */

	  if (operand_equal_for_comparison_p (TREE_OPERAND (arg0, 1),
					      arg2, TREE_OPERAND (arg0, 0)))
	    {
	      tree comp_op0 = TREE_OPERAND (arg0, 0);
	      tree comp_op1 = TREE_OPERAND (arg0, 1);
	      tree comp_type = TREE_TYPE (comp_op0);

	      switch (comp_code)
		{
		case EQ_EXPR:
		  return pedantic_non_lvalue (convert (type, arg2));
		case NE_EXPR:
		  return pedantic_non_lvalue (convert (type, arg1));
		case LE_EXPR:
		case LT_EXPR:
		  /* In C++ a ?: expression can be an lvalue, so put the
		     operand which will be used if they are equal first
		     so that we can convert this back to the
		     corresponding COND_EXPR.  */
		  return pedantic_non_lvalue
		    (convert (type, (fold (build (MIN_EXPR, comp_type,
						  (comp_code == LE_EXPR
						   ? comp_op0 : comp_op1),
						  (comp_code == LE_EXPR
						   ? comp_op1 : comp_op0))))));
		case GE_EXPR:
		case GT_EXPR:
		  return pedantic_non_lvalue
		    (convert (type, fold (build (MAX_EXPR, comp_type,
						 (comp_code == GE_EXPR
						  ? comp_op0 : comp_op1),
						 (comp_code == GE_EXPR
						  ? comp_op1 : comp_op0)))));
		default:
		  abort ();
		}
	    }
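	  /* E.g. `a < b ? a : b' folds to MIN_EXPR <a, b> and
	     `a > b ? a : b' to MAX_EXPR <a, b>; the EQ and NE cases
	     select the operand whose value is correct in either
	     outcome.  */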
	  /* If this is A op C1 ? A : C2 with C1 and C2 constant integers,
	     we might still be able to simplify this.  For example,
	     if C1 is one less or one more than C2, this might have started
	     out as a MIN or MAX and been transformed by this function.
	     Only good for INTEGER_TYPEs, because we need TYPE_MAX_VALUE.  */

	  if (INTEGRAL_TYPE_P (type)
	      && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
	      && TREE_CODE (arg2) == INTEGER_CST)
	    switch (comp_code)
	      {
	      case EQ_EXPR:
		/* We can replace A with C1 in this case.  */
		arg1 = convert (type, TREE_OPERAND (arg0, 1));
		t = build (code, type, TREE_OPERAND (t, 0), arg1,
			   TREE_OPERAND (t, 2));
		break;

	      case LT_EXPR:
		/* If C1 is C2 + 1, this is min(A, C2).  */
		if (! operand_equal_p (arg2, TYPE_MAX_VALUE (type), 1)
		    && operand_equal_p (TREE_OPERAND (arg0, 1),
					const_binop (PLUS_EXPR, arg2,
						     integer_one_node, 0), 1))
		  return pedantic_non_lvalue
		    (fold (build (MIN_EXPR, type, arg1, arg2)));
		break;

	      case LE_EXPR:
		/* If C1 is C2 - 1, this is min(A, C2).  */
		if (! operand_equal_p (arg2, TYPE_MIN_VALUE (type), 1)
		    && operand_equal_p (TREE_OPERAND (arg0, 1),
					const_binop (MINUS_EXPR, arg2,
						     integer_one_node, 0), 1))
		  return pedantic_non_lvalue
		    (fold (build (MIN_EXPR, type, arg1, arg2)));
		break;

	      case GT_EXPR:
		/* If C1 is C2 - 1, this is max(A, C2).  */
		if (! operand_equal_p (arg2, TYPE_MIN_VALUE (type), 1)
		    && operand_equal_p (TREE_OPERAND (arg0, 1),
					const_binop (MINUS_EXPR, arg2,
						     integer_one_node, 0), 1))
		  return pedantic_non_lvalue
		    (fold (build (MAX_EXPR, type, arg1, arg2)));
		break;

	      case GE_EXPR:
		/* If C1 is C2 + 1, this is max(A, C2).  */
		if (! operand_equal_p (arg2, TYPE_MAX_VALUE (type), 1)
		    && operand_equal_p (TREE_OPERAND (arg0, 1),
					const_binop (PLUS_EXPR, arg2,
						     integer_one_node, 0), 1))
		  return pedantic_non_lvalue
		    (fold (build (MAX_EXPR, type, arg1, arg2)));
		break;

	      default:
		break;
	      }
	}
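      /* E.g. `i < 3 ? i : 2' fits the LT_EXPR case above with C1 = 3
	 and C2 = 2 (C1 == C2 + 1), so it folds to MIN_EXPR <i, 2>,
	 recovering a MIN that an earlier transformation expanded.  */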
      /* If the second operand is simpler than the third, swap them
	 since that produces better jump optimization results.  */
      if ((TREE_CONSTANT (arg1) || TREE_CODE_CLASS (TREE_CODE (arg1)) == 'd'
	   || TREE_CODE (arg1) == SAVE_EXPR)
	  && ! (TREE_CONSTANT (TREE_OPERAND (t, 2))
		|| TREE_CODE_CLASS (TREE_CODE (TREE_OPERAND (t, 2))) == 'd'
		|| TREE_CODE (TREE_OPERAND (t, 2)) == SAVE_EXPR))
	{
	  /* See if this can be inverted.  If it can't, possibly because
	     it was a floating-point inequality comparison, don't do
	     anything.  */
	  tem = invert_truthvalue (arg0);

	  if (TREE_CODE (tem) != TRUTH_NOT_EXPR)
	    {
	      t = build (code, type, tem,
			 TREE_OPERAND (t, 2), TREE_OPERAND (t, 1));
	      arg0 = tem;
	      /* arg1 should be the first argument of the new T.  */
	      arg1 = TREE_OPERAND (t, 1);
	      STRIP_NOPS (arg1);
	    }
	}

      /* Convert A ? 1 : 0 to simply A.  */
      if (integer_onep (TREE_OPERAND (t, 1))
	  && integer_zerop (TREE_OPERAND (t, 2))
	  /* If we try to convert TREE_OPERAND (t, 0) to our type, the
	     call to fold will try to move the conversion inside
	     a COND, which will recurse.  In that case, the COND_EXPR
	     is probably the best choice, so leave it alone.  */
	  && type == TREE_TYPE (arg0))
	return pedantic_non_lvalue (arg0);
      /* Look for expressions of the form A & 2 ? 2 : 0.  The result of this
	 operation is simply A & 2.  */

      if (integer_zerop (TREE_OPERAND (t, 2))
	  && TREE_CODE (arg0) == NE_EXPR
	  && integer_zerop (TREE_OPERAND (arg0, 1))
	  && integer_pow2p (arg1)
	  && TREE_CODE (TREE_OPERAND (arg0, 0)) == BIT_AND_EXPR
	  && operand_equal_p (TREE_OPERAND (TREE_OPERAND (arg0, 0), 1),
			      arg1, 1))
	return pedantic_non_lvalue (convert (type, TREE_OPERAND (arg0, 0)));

      return t;

    case COMPOUND_EXPR:
      /* When pedantic, a compound expression can be neither an lvalue
	 nor an integer constant expression.  */
      if (TREE_SIDE_EFFECTS (arg0) || pedantic)
	return t;
      /* Don't let (0, 0) be null pointer constant.  */
      if (integer_zerop (arg1))
	return build1 (NOP_EXPR, TREE_TYPE (arg1), arg1);
      return arg1;

    case COMPLEX_EXPR:
      if (wins)
	return build_complex (type, arg0, arg1);
      return t;
    case REALPART_EXPR:
      if (TREE_CODE (TREE_TYPE (arg0)) != COMPLEX_TYPE)
	return t;
      else if (TREE_CODE (arg0) == COMPLEX_EXPR)
	return omit_one_operand (type, TREE_OPERAND (arg0, 0),
				 TREE_OPERAND (arg0, 1));
      else if (TREE_CODE (arg0) == COMPLEX_CST)
	return TREE_REALPART (arg0);
      else if (TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MINUS_EXPR)
	return fold (build (TREE_CODE (arg0), type,
			    fold (build1 (REALPART_EXPR, type,
					  TREE_OPERAND (arg0, 0))),
			    fold (build1 (REALPART_EXPR,
					  type, TREE_OPERAND (arg0, 1)))));
      return t;

    case IMAGPART_EXPR:
      if (TREE_CODE (TREE_TYPE (arg0)) != COMPLEX_TYPE)
	return convert (type, integer_zero_node);
      else if (TREE_CODE (arg0) == COMPLEX_EXPR)
	return omit_one_operand (type, TREE_OPERAND (arg0, 1),
				 TREE_OPERAND (arg0, 0));
      else if (TREE_CODE (arg0) == COMPLEX_CST)
	return TREE_IMAGPART (arg0);
      else if (TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MINUS_EXPR)
	return fold (build (TREE_CODE (arg0), type,
			    fold (build1 (IMAGPART_EXPR, type,
					  TREE_OPERAND (arg0, 0))),
			    fold (build1 (IMAGPART_EXPR, type,
					  TREE_OPERAND (arg0, 1)))));
      return t;
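      /* REALPART and IMAGPART distribute over addition and subtraction:
	 e.g. REALPART_EXPR <a + b> becomes
	 REALPART_EXPR <a> + REALPART_EXPR <b>.  */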
      /* Pull arithmetic ops out of the CLEANUP_POINT_EXPR where
	 appropriate.  */
    case CLEANUP_POINT_EXPR:
      if (! has_cleanups (arg0))
	return TREE_OPERAND (t, 0);

      {
	enum tree_code code0 = TREE_CODE (arg0);
	int kind0 = TREE_CODE_CLASS (code0);
	tree arg00 = TREE_OPERAND (arg0, 0);
	tree arg01;

	if (kind0 == '1' || code0 == TRUTH_NOT_EXPR)
	  return fold (build1 (code0, type,
			       fold (build1 (CLEANUP_POINT_EXPR,
					     TREE_TYPE (arg00), arg00))));

	if (kind0 == '<' || kind0 == '2'
	    || code0 == TRUTH_ANDIF_EXPR || code0 == TRUTH_ORIF_EXPR
	    || code0 == TRUTH_AND_EXPR || code0 == TRUTH_OR_EXPR
	    || code0 == TRUTH_XOR_EXPR)
	  {
	    arg01 = TREE_OPERAND (arg0, 1);

	    if (TREE_CONSTANT (arg00)
		|| ((code0 == TRUTH_ANDIF_EXPR || code0 == TRUTH_ORIF_EXPR)
		    && ! has_cleanups (arg00)))
	      return fold (build (code0, type, arg00,
				  fold (build1 (CLEANUP_POINT_EXPR,
						TREE_TYPE (arg01), arg01))));

	    if (TREE_CONSTANT (arg01))
	      return fold (build (code0, type,
				  fold (build1 (CLEANUP_POINT_EXPR,
						TREE_TYPE (arg00), arg00)),
				  arg01));
	  }

	return t;
      }

    default:
      return t;
    } /* switch (code) */
}
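/* Illustrative instance of the CLEANUP_POINT_EXPR case above:
   CLEANUP_POINT_EXPR <1 + X> is rewritten as 1 + CLEANUP_POINT_EXPR <X>,
   since the constant operand cannot need cleanups; this keeps cleanup
   points as narrow as possible.  */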
/* Determine if first argument is a multiple of second argument.
   Return 0 if it is not, or is not easily determined to so be.

   An example of the sort of thing we care about (at this point --
   this routine could surely be made more general, and expanded
   to do what the *_DIV_EXPR's fold() cases do now) is discovering
   that

     SAVE_EXPR (I) * SAVE_EXPR (J * 8)

   is a multiple of

     SAVE_EXPR (J * 8)

   when we know that the two `SAVE_EXPR (J * 8)' nodes are the
   same node (which means they will have the same value at run
   time, even though we don't know when they'll be assigned).

   This code also handles discovering that

     SAVE_EXPR (I) * SAVE_EXPR (J * 8)

   is a multiple of

     8

   (of course) so we don't have to worry about dealing with a
   possible remainder.

   Note that we _look_ inside a SAVE_EXPR only to determine
   how it was calculated; it is not safe for fold() to do much
   of anything else with the internals of a SAVE_EXPR, since
   fold() cannot know when it will be evaluated at run time.
   For example, the latter example above _cannot_ be implemented
   as

     SAVE_EXPR (I) * J

   or any variant thereof, since the value of J at evaluation time
   of the original SAVE_EXPR is not necessarily the same at the time
   the new expression is evaluated.  The only optimization of this
   sort that would be valid is changing

     SAVE_EXPR (I) * SAVE_EXPR (SAVE_EXPR (J) * 8)

   divided by 8 to

     SAVE_EXPR (I) * SAVE_EXPR (J)

   (where the same SAVE_EXPR (J) is used in the original and the
   transformed version).  */
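/* Illustrative trace: for TOP = SAVE_EXPR (J) * 8 and BOTTOM = 8,
   the MULT_EXPR case below succeeds because its second recursive
   call hits the operand_equal_p short-circuit (8 is trivially a
   multiple of 8).  */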
static int
multiple_of_p (type, top, bottom)
     tree type;
     tree top;
     tree bottom;
{
  if (operand_equal_p (top, bottom, 0))
    return 1;

  if (TREE_CODE (type) != INTEGER_TYPE)
    return 0;

  switch (TREE_CODE (top))
    {
    case MULT_EXPR:
      return (multiple_of_p (type, TREE_OPERAND (top, 0), bottom)
	      || multiple_of_p (type, TREE_OPERAND (top, 1), bottom));

    case PLUS_EXPR:
    case MINUS_EXPR:
      return (multiple_of_p (type, TREE_OPERAND (top, 0), bottom)
	      && multiple_of_p (type, TREE_OPERAND (top, 1), bottom));

    case NOP_EXPR:
      /* Punt if conversion from non-integral or wider integral type.  */
      if ((TREE_CODE (TREE_TYPE (TREE_OPERAND (top, 0))) != INTEGER_TYPE)
	  || (TYPE_PRECISION (type)
	      < TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (top, 0)))))
	return 0;

      /* Fall through.  */
    case SAVE_EXPR:
      return multiple_of_p (type, TREE_OPERAND (top, 0), bottom);

    case INTEGER_CST:
      if ((TREE_CODE (bottom) != INTEGER_CST)
	  || (tree_int_cst_sgn (top) < 0)
	  || (tree_int_cst_sgn (bottom) < 0))
	return 0;
      return integer_zerop (const_binop (TRUNC_MOD_EXPR,
					 top, bottom, 0));

    default:
      return 0;
    }
}