1 /* Match-and-simplify patterns for shared GENERIC and GIMPLE folding.
2 This file is consumed by genmatch which produces gimple-match.c
3 and generic-match.c from it.
5 Copyright (C) 2014-2017 Free Software Foundation, Inc.
6 Contributed by Richard Biener <rguenther@suse.de>
7 and Prathamesh Kulkarni <bilbotheelffriend@gmail.com>
9 This file is part of GCC.
11 GCC is free software; you can redistribute it and/or modify it under
12 the terms of the GNU General Public License as published by the Free
13 Software Foundation; either version 3, or (at your option) any later
14 version.
16 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
17 WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
19 for more details.
21 You should have received a copy of the GNU General Public License
22 along with GCC; see the file COPYING3. If not see
23 <http://www.gnu.org/licenses/>. */
26 /* Generic tree predicates we inherit.  */
27 (define_predicates
28 integer_onep integer_zerop integer_all_onesp integer_minus_onep
29 integer_each_onep integer_truep integer_nonzerop
30 real_zerop real_onep real_minus_onep
33 tree_expr_nonnegative_p
40 (define_operator_list tcc_comparison
41 lt le eq ne ge gt unordered ordered unlt unle ungt unge uneq ltgt)
42 (define_operator_list inverted_tcc_comparison
43 ge gt ne eq lt le ordered unordered ge gt le lt ltgt uneq)
44 (define_operator_list inverted_tcc_comparison_with_nans
45 unge ungt ne eq unlt unle ordered unordered ge gt le lt ltgt uneq)
46 (define_operator_list swapped_tcc_comparison
47 gt ge eq ne le lt unordered ordered ungt unge unlt unle uneq ltgt)
48 (define_operator_list simple_comparison lt le eq ne ge gt)
49 (define_operator_list swapped_simple_comparison gt ge eq ne le lt)
51 #include "cfn-operators.pd"
53 /* Define operand lists for math rounding functions {,i,l,ll}FN,
54 where the versions prefixed with "i" return an int, those prefixed with
55 "l" return a long and those prefixed with "ll" return a long long.
57 Also define operand lists:
59 X<FN>F for all float functions, in the order i, l, ll
60 X<FN> for all double functions, in the same order
61 X<FN>L for all long double functions, in the same order. */
62 #define DEFINE_INT_AND_FLOAT_ROUND_FN(FN) \
63 (define_operator_list X##FN##F BUILT_IN_I##FN##F \
64 BUILT_IN_L##FN##F \
65 BUILT_IN_LL##FN##F) \
66 (define_operator_list X##FN BUILT_IN_I##FN \
67 BUILT_IN_L##FN \
68 BUILT_IN_LL##FN) \
69 (define_operator_list X##FN##L BUILT_IN_I##FN##L \
70 BUILT_IN_L##FN##L \
71 BUILT_IN_LL##FN##L)
73 DEFINE_INT_AND_FLOAT_ROUND_FN (FLOOR)
74 DEFINE_INT_AND_FLOAT_ROUND_FN (CEIL)
75 DEFINE_INT_AND_FLOAT_ROUND_FN (ROUND)
76 DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
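/* For illustration, the FLOOR invocation above defines the operator lists
   XFLOORF = { BUILT_IN_IFLOORF, BUILT_IN_LFLOORF, BUILT_IN_LLFLOORF },
   XFLOOR = { BUILT_IN_IFLOOR, BUILT_IN_LFLOOR, BUILT_IN_LLFLOOR } and
   XFLOORL likewise for the long double variants, in the i/l/ll order
   documented above; CEIL, ROUND and RINT expand analogously.  */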
78 /* As opposed to convert?, this still creates a single pattern, so
79 it is not a suitable replacement for convert? in all cases. */
80 (match (nop_convert @0)
82 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))))
83 (match (nop_convert @0)
85 (if (VECTOR_TYPE_P (type) && VECTOR_TYPE_P (TREE_TYPE (@0))
86 && TYPE_VECTOR_SUBPARTS (type) == TYPE_VECTOR_SUBPARTS (TREE_TYPE (@0))
87 && tree_nop_conversion_p (TREE_TYPE (type), TREE_TYPE (TREE_TYPE (@0))))))
88 /* This one has to be last, or it shadows the others. */
89 (match (nop_convert @0)
92 /* Simplifications of operations with one constant operand and
93 simplifications to constants or single values. */
95 (for op (plus pointer_plus minus bit_ior bit_xor)
100 /* 0 +p index -> (type)index */
102 (pointer_plus integer_zerop @1)
103 (non_lvalue (convert @1)))
105 /* See if ARG1 is zero and X + ARG1 reduces to X.
106 Likewise if the operands are reversed. */
108 (plus:c @0 real_zerop@1)
109 (if (fold_real_zero_addition_p (type, @1, 0))
112 /* See if ARG1 is zero and X - ARG1 reduces to X. */
114 (minus @0 real_zerop@1)
115 (if (fold_real_zero_addition_p (type, @1, 1))
118 /* Simplify x - x.
119 This is unsafe for certain floats even in non-IEEE formats.
120 In IEEE, it is unsafe because it does wrong for NaNs.
121 Also note that operand_equal_p is always false if an operand
122 is volatile.  */
125 (if (!FLOAT_TYPE_P (type) || !HONOR_NANS (type))
126 { build_zero_cst (type); }))
129 (mult @0 integer_zerop@1)
132 /* Maybe fold x * 0 to 0. The expressions aren't the same
133 when x is NaN, since x * 0 is also NaN. Nor are they the
134 same in modes with signed zeros, since multiplying a
135 negative value by 0 gives -0, not +0. */
137 (mult @0 real_zerop@1)
138 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
141 /* In IEEE floating point, x*1 is not equivalent to x for snans.
142 Likewise for complex arithmetic with signed zeros. */
145 (if (!HONOR_SNANS (type)
146 && (!HONOR_SIGNED_ZEROS (type)
147 || !COMPLEX_FLOAT_TYPE_P (type)))
150 /* Transform x * -1.0 into -x. */
152 (mult @0 real_minus_onep)
153 (if (!HONOR_SNANS (type)
154 && (!HONOR_SIGNED_ZEROS (type)
155 || !COMPLEX_FLOAT_TYPE_P (type)))
158 (for cmp (gt ge lt le)
159 outp (convert convert negate negate)
160 outn (negate negate convert convert)
161 /* Transform (X > 0.0 ? 1.0 : -1.0) into copysign(1, X). */
162 /* Transform (X >= 0.0 ? 1.0 : -1.0) into copysign(1, X). */
163 /* Transform (X < 0.0 ? 1.0 : -1.0) into copysign(1,-X). */
164 /* Transform (X <= 0.0 ? 1.0 : -1.0) into copysign(1,-X). */
166 (cond (cmp @0 real_zerop) real_onep@1 real_minus_onep)
167 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type)
168 && types_match (type, TREE_TYPE (@0)))
170 (if (types_match (type, float_type_node))
171 (BUILT_IN_COPYSIGNF @1 (outp @0)))
172 (if (types_match (type, double_type_node))
173 (BUILT_IN_COPYSIGN @1 (outp @0)))
174 (if (types_match (type, long_double_type_node))
175 (BUILT_IN_COPYSIGNL @1 (outp @0))))))
176 /* Transform (X > 0.0 ? -1.0 : 1.0) into copysign(1,-X). */
177 /* Transform (X >= 0.0 ? -1.0 : 1.0) into copysign(1,-X). */
178 /* Transform (X < 0.0 ? -1.0 : 1.0) into copysign(1,X). */
179 /* Transform (X <= 0.0 ? -1.0 : 1.0) into copysign(1,X). */
181 (cond (cmp @0 real_zerop) real_minus_onep real_onep@1)
182 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type)
183 && types_match (type, TREE_TYPE (@0)))
185 (if (types_match (type, float_type_node))
186 (BUILT_IN_COPYSIGNF @1 (outn @0)))
187 (if (types_match (type, double_type_node))
188 (BUILT_IN_COPYSIGN @1 (outn @0)))
189 (if (types_match (type, long_double_type_node))
190 (BUILT_IN_COPYSIGNL @1 (outn @0)))))))
192 /* Transform X * copysign (1.0, X) into abs(X). */
194 (mult:c @0 (COPYSIGN real_onep @0))
195 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
198 /* Transform X * copysign (1.0, -X) into -abs(X). */
200 (mult:c @0 (COPYSIGN real_onep (negate @0)))
201 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
204 /* Transform copysign (CST, X) into copysign (ABS(CST), X). */
206 (COPYSIGN REAL_CST@0 @1)
207 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@0)))
208 (COPYSIGN (negate @0) @1)))
210 /* X * 1, X / 1 -> X. */
211 (for op (mult trunc_div ceil_div floor_div round_div exact_div)
216 /* (A / (1 << B)) -> (A >> B).
217 Only for unsigned A.  For signed A, this would not preserve rounding
218 toward zero.
219 For example: (-1 / ( 1 << B)) != -1 >> B. */
221 (trunc_div @0 (lshift integer_onep@1 @2))
222 (if ((TYPE_UNSIGNED (type) || tree_expr_nonnegative_p (@0))
223 && (!VECTOR_TYPE_P (type)
224 || target_supports_op_p (type, RSHIFT_EXPR, optab_vector)
225 || target_supports_op_p (type, RSHIFT_EXPR, optab_scalar)))
228 /* Preserve explicit divisions by 0: the C++ front-end wants to detect
229 undefined behavior in constexpr evaluation, and assuming that the division
230 traps enables better optimizations than these anyway. */
231 (for div (trunc_div ceil_div floor_div round_div exact_div)
232 /* 0 / X is always zero. */
234 (div integer_zerop@0 @1)
235 /* But not for 0 / 0 so that we can get the proper warnings and errors. */
236 (if (!integer_zerop (@1))
240 (div @0 integer_minus_onep@1)
241 (if (!TYPE_UNSIGNED (type))
246 /* But not for 0 / 0 so that we can get the proper warnings and errors.
247 And not for _Fract types where we can't build 1. */
248 (if (!integer_zerop (@0) && !ALL_FRACT_MODE_P (TYPE_MODE (type)))
249 { build_one_cst (type); }))
250 /* X / abs (X) is X < 0 ? -1 : 1. */
253 (if (INTEGRAL_TYPE_P (type)
254 && TYPE_OVERFLOW_UNDEFINED (type))
255 (cond (lt @0 { build_zero_cst (type); })
256 { build_minus_one_cst (type); } { build_one_cst (type); })))
259 (div:C @0 (negate @0))
260 (if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
261 && TYPE_OVERFLOW_UNDEFINED (type))
262 { build_minus_one_cst (type); })))
264 /* For unsigned integral types, FLOOR_DIV_EXPR is the same as
265 TRUNC_DIV_EXPR. Rewrite into the latter in this case. */
268 (if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
269 && TYPE_UNSIGNED (type))
272 /* Combine two successive divisions. Note that combining ceil_div
273 and floor_div is trickier and combining round_div even more so. */
274 (for div (trunc_div exact_div)
276 (div (div @0 INTEGER_CST@1) INTEGER_CST@2)
279 wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
280 TYPE_SIGN (type), &overflow_p);
283 (div @0 { wide_int_to_tree (type, mul); })
284 (if (TYPE_UNSIGNED (type)
285 || mul != wi::min_value (TYPE_PRECISION (type), SIGNED))
286 { build_zero_cst (type); })))))
288 /* Combine successive multiplications. Similar to above, but handling
289 overflow is different. */
291 (mult (mult @0 INTEGER_CST@1) INTEGER_CST@2)
294 wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
295 TYPE_SIGN (type), &overflow_p);
297 /* Skip folding on overflow: the only special case is @1 * @2 == -INT_MIN,
298 otherwise undefined overflow implies that @0 must be zero. */
299 (if (!overflow_p || TYPE_OVERFLOW_WRAPS (type))
300 (mult @0 { wide_int_to_tree (type, mul); }))))
302 /* Optimize A / A to 1.0 if we don't care about
303 NaNs or Infinities. */
306 (if (FLOAT_TYPE_P (type)
307 && ! HONOR_NANS (type)
308 && ! HONOR_INFINITIES (type))
309 { build_one_cst (type); }))
311 /* Optimize -A / A to -1.0 if we don't care about
312 NaNs or Infinities. */
314 (rdiv:C @0 (negate @0))
315 (if (FLOAT_TYPE_P (type)
316 && ! HONOR_NANS (type)
317 && ! HONOR_INFINITIES (type))
318 { build_minus_one_cst (type); }))
320 /* PR71078: x / abs(x) -> copysign (1.0, x) */
322 (rdiv:C (convert? @0) (convert? (abs @0)))
323 (if (SCALAR_FLOAT_TYPE_P (type)
324 && ! HONOR_NANS (type)
325 && ! HONOR_INFINITIES (type))
327 (if (types_match (type, float_type_node))
328 (BUILT_IN_COPYSIGNF { build_one_cst (type); } (convert @0)))
329 (if (types_match (type, double_type_node))
330 (BUILT_IN_COPYSIGN { build_one_cst (type); } (convert @0)))
331 (if (types_match (type, long_double_type_node))
332 (BUILT_IN_COPYSIGNL { build_one_cst (type); } (convert @0))))))
334 /* In IEEE floating point, x/1 is not equivalent to x for snans. */
337 (if (!HONOR_SNANS (type))
340 /* In IEEE floating point, x/-1 is not equivalent to -x for snans. */
342 (rdiv @0 real_minus_onep)
343 (if (!HONOR_SNANS (type))
346 (if (flag_reciprocal_math)
347 /* Convert (A/B)/C to A/(B*C). */
349 (rdiv (rdiv:s @0 @1) @2)
350 (rdiv @0 (mult @1 @2)))
352 /* Canonicalize x / (C1 * y) to (x * C2) / y. */
354 (rdiv @0 (mult:s @1 REAL_CST@2))
356 { tree tem = const_binop (RDIV_EXPR, type, build_one_cst (type), @2); }
358 (rdiv (mult @0 { tem; } ) @1))))
360 /* Convert A/(B/C) to (A/B)*C */
362 (rdiv @0 (rdiv:s @1 @2))
363 (mult (rdiv @0 @1) @2)))
365 /* Simplify x / (- y) to -x / y. */
367 (rdiv @0 (negate @1))
368 (rdiv (negate @0) @1))
370 /* Optimize (X & (-A)) / A where A is a power of 2, to X >> log2(A) */
371 (for div (trunc_div ceil_div floor_div round_div exact_div)
373 (div (convert? (bit_and @0 INTEGER_CST@1)) INTEGER_CST@2)
374 (if (integer_pow2p (@2)
375 && tree_int_cst_sgn (@2) > 0
376 && tree_nop_conversion_p (type, TREE_TYPE (@0))
377 && wi::to_wide (@2) + wi::to_wide (@1) == 0)
379 { build_int_cst (integer_type_node,
380 wi::exact_log2 (wi::to_wide (@2))); }))))
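/* For example, with A == 8 this turns (X & -8) / 8 into X >> 3: the
   bit_and already cleared the three low bits, so the division is exact
   and the right shift gives the same result.  */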
382 /* If ARG1 is a constant, we can convert this to a multiply by the
383 reciprocal. This does not have the same rounding properties,
384 so only do this if -freciprocal-math. We can actually
385 always safely do it if ARG1 is a power of two, but it's hard to
386 tell if it is or not in a portable manner. */
387 (for cst (REAL_CST COMPLEX_CST VECTOR_CST)
391 (if (flag_reciprocal_math
394 { tree tem = const_binop (RDIV_EXPR, type, build_one_cst (type), @1); }
396 (mult @0 { tem; } )))
397 (if (cst != COMPLEX_CST)
398 (with { tree inverse = exact_inverse (type, @1); }
400 (mult @0 { inverse; } ))))))))
402 (for mod (ceil_mod floor_mod round_mod trunc_mod)
403 /* 0 % X is always zero. */
405 (mod integer_zerop@0 @1)
406 /* But not for 0 % 0 so that we can get the proper warnings and errors. */
407 (if (!integer_zerop (@1))
409 /* X % 1 is always zero. */
411 (mod @0 integer_onep)
412 { build_zero_cst (type); })
413 /* X % -1 is zero. */
415 (mod @0 integer_minus_onep@1)
416 (if (!TYPE_UNSIGNED (type))
417 { build_zero_cst (type); }))
421 /* But not for 0 % 0 so that we can get the proper warnings and errors. */
422 (if (!integer_zerop (@0))
423 { build_zero_cst (type); }))
424 /* (X % Y) % Y is just X % Y. */
426 (mod (mod@2 @0 @1) @1)
428 /* From extract_muldiv_1: (X * C1) % C2 is zero if C1 is a multiple of C2. */
430 (mod (mult @0 INTEGER_CST@1) INTEGER_CST@2)
431 (if (ANY_INTEGRAL_TYPE_P (type)
432 && TYPE_OVERFLOW_UNDEFINED (type)
433 && wi::multiple_of_p (wi::to_wide (@1), wi::to_wide (@2),
435 { build_zero_cst (type); })))
437 /* X % -C is the same as X % C. */
439 (trunc_mod @0 INTEGER_CST@1)
440 (if (TYPE_SIGN (type) == SIGNED
441 && !TREE_OVERFLOW (@1)
442 && wi::neg_p (wi::to_wide (@1))
443 && !TYPE_OVERFLOW_TRAPS (type)
444 /* Avoid this transformation if C is INT_MIN, i.e. C == -C. */
445 && !sign_bit_p (@1, @1))
446 (trunc_mod @0 (negate @1))))
448 /* X % -Y is the same as X % Y. */
450 (trunc_mod @0 (convert? (negate @1)))
451 (if (INTEGRAL_TYPE_P (type)
452 && !TYPE_UNSIGNED (type)
453 && !TYPE_OVERFLOW_TRAPS (type)
454 && tree_nop_conversion_p (type, TREE_TYPE (@1))
455 /* Avoid this transformation if X might be INT_MIN or
456 Y might be -1, because we would then change valid
457 INT_MIN % -(-1) into invalid INT_MIN % -1. */
458 && (expr_not_equal_to (@0, wi::to_wide (TYPE_MIN_VALUE (type)))
459 || expr_not_equal_to (@1, wi::minus_one (TYPE_PRECISION
461 (trunc_mod @0 (convert @1))))
463 /* X - (X / Y) * Y is the same as X % Y. */
465 (minus (convert1? @0) (convert2? (mult:c (trunc_div @@0 @@1) @1)))
466 (if (INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
467 (convert (trunc_mod @0 @1))))
469 /* Optimize TRUNC_MOD_EXPR by a power of two into a BIT_AND_EXPR,
470 i.e. "X % C" into "X & (C - 1)", if X and C are positive.
471 Also optimize A % (C << N) where C is a power of 2,
472 to A & ((C << N) - 1). */
473 (match (power_of_two_cand @1)
475 (match (power_of_two_cand @1)
476 (lshift INTEGER_CST@1 @2))
477 (for mod (trunc_mod floor_mod)
479 (mod @0 (convert?@3 (power_of_two_cand@1 @2)))
480 (if ((TYPE_UNSIGNED (type)
481 || tree_expr_nonnegative_p (@0))
482 && tree_nop_conversion_p (type, TREE_TYPE (@3))
483 && integer_pow2p (@2) && tree_int_cst_sgn (@2) > 0)
484 (bit_and @0 (convert (minus @1 { build_int_cst (TREE_TYPE (@1), 1); }))))))
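/* For example, an unsigned X % 16 becomes X & 15 here, and
   X % (4 << N) becomes X & ((4 << N) - 1).  */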
486 /* Simplify (unsigned t * 2)/2 -> unsigned t & 0x7FFFFFFF. */
488 (trunc_div (mult @0 integer_pow2p@1) @1)
489 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
490 (bit_and @0 { wide_int_to_tree
491 (type, wi::mask (TYPE_PRECISION (type)
492 - wi::exact_log2 (wi::to_wide (@1)),
493 false, TYPE_PRECISION (type))); })))
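/* For example, for a 32-bit unsigned t, (t * 8) / 8 becomes
   t & 0x1fffffff: the mask keeps precision - log2 (8) = 29 low bits,
   i.e. exactly the bits that survive the multiplication.  */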
495 /* Simplify (unsigned t / 2) * 2 -> unsigned t & ~1. */
497 (mult (trunc_div @0 integer_pow2p@1) @1)
498 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
499 (bit_and @0 (negate @1))))
501 /* Simplify (t * 2) / 2 -> t.  */
502 (for div (trunc_div ceil_div floor_div round_div exact_div)
504 (div (mult @0 @1) @1)
505 (if (ANY_INTEGRAL_TYPE_P (type)
506 && TYPE_OVERFLOW_UNDEFINED (type))
510 /* Simplify cos(-x) and cos(|x|) -> cos(x). Similarly for cosh. */
515 /* Simplify pow(-x, y) and pow(|x|,y) -> pow(x,y) if y is an even integer. */
518 (pows (op @0) REAL_CST@1)
519 (with { HOST_WIDE_INT n; }
520 (if (real_isinteger (&TREE_REAL_CST (@1), &n) && (n & 1) == 0)
522 /* Likewise for powi. */
525 (pows (op @0) INTEGER_CST@1)
526 (if ((wi::to_wide (@1) & 1) == 0)
528 /* Strip negate and abs from both operands of hypot. */
536 /* copysign(-x, y) and copysign(abs(x), y) -> copysign(x, y). */
537 (for copysigns (COPYSIGN)
539 (copysigns (op @0) @1)
542 /* abs(x)*abs(x) -> x*x. Should be valid for all types. */
547 /* cos(copysign(x, y)) -> cos(x). Similarly for cosh. */
551 (coss (copysigns @0 @1))
554 /* pow(copysign(x, y), z) -> pow(x, z) if z is an even integer. */
558 (pows (copysigns @0 @2) REAL_CST@1)
559 (with { HOST_WIDE_INT n; }
560 (if (real_isinteger (&TREE_REAL_CST (@1), &n) && (n & 1) == 0)
562 /* Likewise for powi. */
566 (pows (copysigns @0 @2) INTEGER_CST@1)
567 (if ((wi::to_wide (@1) & 1) == 0)
572 /* hypot(copysign(x, y), z) -> hypot(x, z). */
574 (hypots (copysigns @0 @1) @2)
576 /* hypot(x, copysign(y, z)) -> hypot(x, y). */
578 (hypots @0 (copysigns @1 @2))
581 /* copysign(x, CST) -> [-]abs (x). */
582 (for copysigns (COPYSIGN)
584 (copysigns @0 REAL_CST@1)
585 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
589 /* copysign(copysign(x, y), z) -> copysign(x, z). */
590 (for copysigns (COPYSIGN)
592 (copysigns (copysigns @0 @1) @2)
595 /* copysign(x,y)*copysign(x,y) -> x*x. */
596 (for copysigns (COPYSIGN)
598 (mult (copysigns@2 @0 @1) @2)
601 /* ccos(-x) -> ccos(x). Similarly for ccosh. */
602 (for ccoss (CCOS CCOSH)
607 /* cabs(-x) and cos(conj(x)) -> cabs(x). */
608 (for ops (conj negate)
614 /* Fold (a * (1 << b)) into (a << b) */
616 (mult:c @0 (convert? (lshift integer_onep@1 @2)))
617 (if (! FLOAT_TYPE_P (type)
618 && tree_nop_conversion_p (type, TREE_TYPE (@1)))
621 /* Fold (1 << (C - x)) where C = precision(type) - 1
622 into ((1 << C) >> x). */
624 (lshift integer_onep@0 (minus@1 INTEGER_CST@2 @3))
625 (if (INTEGRAL_TYPE_P (type)
626 && wi::eq_p (wi::to_wide (@2), TYPE_PRECISION (type) - 1)
628 (if (TYPE_UNSIGNED (type))
629 (rshift (lshift @0 @2) @3)
631 { tree utype = unsigned_type_for (type); }
632 (convert (rshift (lshift (convert:utype @0) @2) @3))))))
634 /* Fold (C1/X)*C2 into (C1*C2)/X. */
636 (mult (rdiv@3 REAL_CST@0 @1) REAL_CST@2)
637 (if (flag_associative_math
640 { tree tem = const_binop (MULT_EXPR, type, @0, @2); }
642 (rdiv { tem; } @1)))))
644 /* Simplify ~X & X as zero. */
646 (bit_and:c (convert? @0) (convert? (bit_not @0)))
647 { build_zero_cst (type); })
649 /* PR71636: Transform x & ((1U << b) - 1) -> x & ~(~0U << b); */
651 (bit_and:c @0 (plus:s (lshift:s integer_onep @1) integer_minus_onep))
652 (if (TYPE_UNSIGNED (type))
653 (bit_and @0 (bit_not (lshift { build_all_ones_cst (type); } @1)))))
655 (for bitop (bit_and bit_ior)
657 /* PR35691: Transform
658 (x == 0 & y == 0) -> (x | typeof(x)(y)) == 0.
659 (x != 0 | y != 0) -> (x | typeof(x)(y)) != 0. */
661 (bitop (cmp @0 integer_zerop@2) (cmp @1 integer_zerop))
662 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
663 && INTEGRAL_TYPE_P (TREE_TYPE (@1))
664 && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
665 (cmp (bit_ior @0 (convert @1)) @2)))
666 /* Transform:
667 (x == -1 & y == -1) -> (x & typeof(x)(y)) == -1.
668 (x != -1 | y != -1) -> (x & typeof(x)(y)) != -1. */
670 (bitop (cmp @0 integer_all_onesp@2) (cmp @1 integer_all_onesp))
671 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
672 && INTEGRAL_TYPE_P (TREE_TYPE (@1))
673 && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
674 (cmp (bit_and @0 (convert @1)) @2))))
676 /* Fold (A & ~B) - (A & B) into (A ^ B) - B. */
678 (minus (bit_and:cs @0 (bit_not @1)) (bit_and:cs @0 @1))
679 (minus (bit_xor @0 @1) @1))
681 (minus (bit_and:s @0 INTEGER_CST@2) (bit_and:s @0 INTEGER_CST@1))
682 (if (~wi::to_wide (@2) == wi::to_wide (@1))
683 (minus (bit_xor @0 @1) @1)))
685 /* Fold (A & B) - (A & ~B) into B - (A ^ B). */
687 (minus (bit_and:cs @0 @1) (bit_and:cs @0 (bit_not @1)))
688 (minus @1 (bit_xor @0 @1)))
690 /* Simplify (X & ~Y) |^+ (~X & Y) -> X ^ Y. */
691 (for op (bit_ior bit_xor plus)
693 (op (bit_and:c @0 (bit_not @1)) (bit_and:c (bit_not @0) @1))
696 (op:c (bit_and @0 INTEGER_CST@2) (bit_and (bit_not @0) INTEGER_CST@1))
697 (if (~wi::to_wide (@2) == wi::to_wide (@1))
700 /* PR53979: Transform ((a ^ b) | a) -> (a | b) */
702 (bit_ior:c (bit_xor:c @0 @1) @0)
705 /* (a & ~b) | (a ^ b) --> a ^ b */
707 (bit_ior:c (bit_and:c @0 (bit_not @1)) (bit_xor:c@2 @0 @1))
710 /* (a & ~b) ^ ~a --> ~(a & b) */
712 (bit_xor:c (bit_and:cs @0 (bit_not @1)) (bit_not @0))
713 (bit_not (bit_and @0 @1)))
715 /* (a | b) & ~(a ^ b) --> a & b */
717 (bit_and:c (bit_ior @0 @1) (bit_not (bit_xor:c @0 @1)))
720 /* a | ~(a ^ b) --> a | ~b */
722 (bit_ior:c @0 (bit_not:s (bit_xor:c @0 @1)))
723 (bit_ior @0 (bit_not @1)))
725 /* (a | b) | (a &^ b) --> a | b */
726 (for op (bit_and bit_xor)
728 (bit_ior:c (bit_ior@2 @0 @1) (op:c @0 @1))
731 /* (a & b) | ~(a ^ b) --> ~(a ^ b) */
733 (bit_ior:c (bit_and:c @0 @1) (bit_not@2 (bit_xor @0 @1)))
736 /* ~(~a & b) --> a | ~b */
738 (bit_not (bit_and:cs (bit_not @0) @1))
739 (bit_ior @0 (bit_not @1)))
741 /* Simplify (~X & Y) to X ^ Y if we know that (X & ~Y) is 0. */
744 (bit_and (bit_not SSA_NAME@0) INTEGER_CST@1)
745 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
746 && wi::bit_and_not (get_nonzero_bits (@0), wi::to_wide (@1)) == 0)
750 /* X % Y is smaller than Y. */
753 (cmp (trunc_mod @0 @1) @1)
754 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
755 { constant_boolean_node (cmp == LT_EXPR, type); })))
758 (cmp @1 (trunc_mod @0 @1))
759 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
760 { constant_boolean_node (cmp == GT_EXPR, type); })))
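/* Hence for unsigned operands (x % y) < y folds to 1 and y <= (x % y)
   folds to 0; y == 0 need not be considered since x % 0 is undefined.  */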
764 (bit_ior @0 integer_all_onesp@1)
769 (bit_ior @0 integer_zerop)
774 (bit_and @0 integer_zerop@1)
780 (for op (bit_ior bit_xor plus)
782 (op:c (convert? @0) (convert? (bit_not @0)))
783 (convert { build_all_ones_cst (TREE_TYPE (@0)); })))
788 { build_zero_cst (type); })
790 /* Canonicalize X ^ ~0 to ~X. */
792 (bit_xor @0 integer_all_onesp@1)
797 (bit_and @0 integer_all_onesp)
800 /* x & x -> x, x | x -> x */
801 (for bitop (bit_and bit_ior)
806 /* x & C -> x if we know that x & ~C == 0. */
809 (bit_and SSA_NAME@0 INTEGER_CST@1)
810 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
811 && wi::bit_and_not (get_nonzero_bits (@0), wi::to_wide (@1)) == 0)
815 /* x + (x & 1) -> (x + 1) & ~1 */
817 (plus:c @0 (bit_and:s @0 integer_onep@1))
818 (bit_and (plus @0 @1) (bit_not @1)))
820 /* x & ~(x & y) -> x & ~y */
821 /* x | ~(x | y) -> x | ~y */
822 (for bitop (bit_and bit_ior)
824 (bitop:c @0 (bit_not (bitop:cs @0 @1)))
825 (bitop @0 (bit_not @1))))
827 /* (x | y) & ~x -> y & ~x */
828 /* (x & y) | ~x -> y | ~x */
829 (for bitop (bit_and bit_ior)
830 rbitop (bit_ior bit_and)
832 (bitop:c (rbitop:c @0 @1) (bit_not@2 @0))
835 /* (x & y) ^ (x | y) -> x ^ y */
837 (bit_xor:c (bit_and @0 @1) (bit_ior @0 @1))
840 /* (x ^ y) ^ (x | y) -> x & y */
842 (bit_xor:c (bit_xor @0 @1) (bit_ior @0 @1))
845 /* (x & y) + (x ^ y) -> x | y */
846 /* (x & y) | (x ^ y) -> x | y */
847 /* (x & y) ^ (x ^ y) -> x | y */
848 (for op (plus bit_ior bit_xor)
850 (op:c (bit_and @0 @1) (bit_xor @0 @1))
853 /* (x & y) + (x | y) -> x + y */
855 (plus:c (bit_and @0 @1) (bit_ior @0 @1))
858 /* (x + y) - (x | y) -> x & y */
860 (minus (plus @0 @1) (bit_ior @0 @1))
861 (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type)
862 && !TYPE_SATURATING (type))
865 /* (x + y) - (x & y) -> x | y */
867 (minus (plus @0 @1) (bit_and @0 @1))
868 (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type)
869 && !TYPE_SATURATING (type))
872 /* (x | y) - (x ^ y) -> x & y */
874 (minus (bit_ior @0 @1) (bit_xor @0 @1))
877 /* (x | y) - (x & y) -> x ^ y */
879 (minus (bit_ior @0 @1) (bit_and @0 @1))
882 /* (x | y) & ~(x & y) -> x ^ y */
884 (bit_and:c (bit_ior @0 @1) (bit_not (bit_and @0 @1)))
887 /* (x | y) & (~x ^ y) -> x & y */
889 (bit_and:c (bit_ior:c @0 @1) (bit_xor:c @1 (bit_not @0)))
892 /* ~x & ~y -> ~(x | y)
893 ~x | ~y -> ~(x & y) */
894 (for op (bit_and bit_ior)
895 rop (bit_ior bit_and)
897 (op (convert1? (bit_not @0)) (convert2? (bit_not @1)))
898 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
899 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
900 (bit_not (rop (convert @0) (convert @1))))))
902 /* If we are XORing or adding two BIT_AND_EXPR's, both of which are and'ing
903 with a constant, and the two constants have no bits in common,
904 we should treat this as a BIT_IOR_EXPR since this may produce more
905 simplifications.  */
906 (for op (bit_xor plus)
908 (op (convert1? (bit_and@4 @0 INTEGER_CST@1))
909 (convert2? (bit_and@5 @2 INTEGER_CST@3)))
910 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
911 && tree_nop_conversion_p (type, TREE_TYPE (@2))
912 && (wi::to_wide (@1) & wi::to_wide (@3)) == 0)
913 (bit_ior (convert @4) (convert @5)))))
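/* For example, (x & 0x0f) + (y & 0xf0) cannot produce carries between
   the two masked values, so it is rewritten as (x & 0x0f) | (y & 0xf0);
   likewise for xor.  */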
915 /* (X | Y) ^ X -> Y & ~X.  */
917 (bit_xor:c (convert1? (bit_ior:c @@0 @1)) (convert2? @0))
918 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
919 (convert (bit_and @1 (bit_not @0)))))
921 /* Convert ~X ^ ~Y to X ^ Y. */
923 (bit_xor (convert1? (bit_not @0)) (convert2? (bit_not @1)))
924 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
925 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
926 (bit_xor (convert @0) (convert @1))))
928 /* Convert ~X ^ C to X ^ ~C. */
930 (bit_xor (convert? (bit_not @0)) INTEGER_CST@1)
931 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
932 (bit_xor (convert @0) (bit_not @1))))
934 /* Fold (X & Y) ^ Y and (X ^ Y) & Y as ~X & Y. */
935 (for opo (bit_and bit_xor)
936 opi (bit_xor bit_and)
938 (opo:c (opi:c @0 @1) @1)
939 (bit_and (bit_not @0) @1)))
941 /* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both
942 operands are another bit-wise operation with a common input. If so,
943 distribute the bit operations to save an operation and possibly two if
944 constants are involved. For example, convert
945 (A | B) & (A | C) into A | (B & C)
946 Further simplification will occur if B and C are constants. */
947 (for op (bit_and bit_ior bit_xor)
948 rop (bit_ior bit_and bit_and)
950 (op (convert? (rop:c @@0 @1)) (convert? (rop:c @0 @2)))
951 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
952 && tree_nop_conversion_p (type, TREE_TYPE (@2)))
953 (rop (convert @0) (op (convert @1) (convert @2))))))
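/* For example, (x | 0xf0) & (x | 0x3c) becomes x | (0xf0 & 0x3c),
   which further folds to x | 0x30.  */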
955 /* Some simple reassociation for bit operations, also handled in reassoc. */
956 /* (X & Y) & Y -> X & Y
957 (X | Y) | Y -> X | Y */
958 (for op (bit_and bit_ior)
960 (op:c (convert1?@2 (op:c @0 @@1)) (convert2? @1))
962 /* (X ^ Y) ^ Y -> X */
964 (bit_xor:c (convert1? (bit_xor:c @0 @@1)) (convert2? @1))
966 /* (X & Y) & (X & Z) -> (X & Y) & Z
967 (X | Y) | (X | Z) -> (X | Y) | Z */
968 (for op (bit_and bit_ior)
970 (op (convert1?@3 (op:c@4 @0 @1)) (convert2?@5 (op:c@6 @0 @2)))
971 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
972 && tree_nop_conversion_p (type, TREE_TYPE (@2)))
973 (if (single_use (@5) && single_use (@6))
975 (if (single_use (@3) && single_use (@4))
976 (op (convert @1) @5))))))
977 /* (X ^ Y) ^ (X ^ Z) -> Y ^ Z */
979 (bit_xor (convert1? (bit_xor:c @0 @1)) (convert2? (bit_xor:c @0 @2)))
980 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
981 && tree_nop_conversion_p (type, TREE_TYPE (@2)))
982 (bit_xor (convert @1) (convert @2))))
991 (abs tree_expr_nonnegative_p@0)
994 /* A few cases of fold-const.c negate_expr_p predicate. */
997 (if ((INTEGRAL_TYPE_P (type)
998 && TYPE_UNSIGNED (type))
999 || (!TYPE_OVERFLOW_SANITIZED (type)
1000 && may_negate_without_overflow_p (t)))))
1001 (match negate_expr_p
1003 (match negate_expr_p
1005 (if (!TYPE_OVERFLOW_SANITIZED (type))))
1006 (match negate_expr_p
1008 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (t)))))
1009 /* VECTOR_CST handling of non-wrapping types would recurse in unsupported
1010 ways.  */
1011 (match negate_expr_p
1013 (if (FLOAT_TYPE_P (TREE_TYPE (type)) || TYPE_OVERFLOW_WRAPS (type))))
1014 (match negate_expr_p
1016 (if ((ANY_INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_WRAPS (type))
1017 || (FLOAT_TYPE_P (type)
1018 && !HONOR_SIGN_DEPENDENT_ROUNDING (type)
1019 && !HONOR_SIGNED_ZEROS (type)))))
1021 /* (-A) * (-B) -> A * B */
1023 (mult:c (convert1? (negate @0)) (convert2? negate_expr_p@1))
1024 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
1025 && tree_nop_conversion_p (type, TREE_TYPE (@1)))
1026 (mult (convert @0) (convert (negate @1)))))
1028 /* -(A + B) -> (-B) - A. */
1030 (negate (plus:c @0 negate_expr_p@1))
1031 (if (!HONOR_SIGN_DEPENDENT_ROUNDING (element_mode (type))
1032 && !HONOR_SIGNED_ZEROS (element_mode (type)))
1033 (minus (negate @1) @0)))
1035 /* -(A - B) -> B - A. */
1037 (negate (minus @0 @1))
1038 (if ((ANY_INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_SANITIZED (type))
1039 || (FLOAT_TYPE_P (type)
1040 && !HONOR_SIGN_DEPENDENT_ROUNDING (type)
1041 && !HONOR_SIGNED_ZEROS (type)))
1044 /* A - B -> A + (-B) if B is easily negatable. */
1046 (minus @0 negate_expr_p@1)
1047 (if (!FIXED_POINT_TYPE_P (type))
1048 (plus @0 (negate @1))))
1050 /* Try to fold (type) X op CST -> (type) (X op ((type-x) CST))
1051 when profitable.
1052 For bitwise binary operations apply operand conversions to the
1053 binary operation result instead of to the operands. This allows
1054 to combine successive conversions and bitwise binary operations.
1055 We combine the above two cases by using a conditional convert. */
1056 (for bitop (bit_and bit_ior bit_xor)
1058 (bitop (convert @0) (convert? @1))
1059 (if (((TREE_CODE (@1) == INTEGER_CST
1060 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
1061 && int_fits_type_p (@1, TREE_TYPE (@0)))
1062 || types_match (@0, @1))
1063 /* ??? This transform conflicts with fold-const.c doing
1064 Convert (T)(x & c) into (T)x & (T)c, if c is an integer
1065 constant (if x has signed type, the sign bit cannot be set
1066 in c). This folds extension into the BIT_AND_EXPR.
1067 Restrict it to GIMPLE to avoid endless recursions. */
1068 && (bitop != BIT_AND_EXPR || GIMPLE)
1069 && (/* That's a good idea if the conversion widens the operand, thus
1070 after hoisting the conversion the operation will be narrower. */
1071 TYPE_PRECISION (TREE_TYPE (@0)) < TYPE_PRECISION (type)
1072 /* It's also a good idea if the conversion is to a non-integer
1073 mode.  */
1074 || GET_MODE_CLASS (TYPE_MODE (type)) != MODE_INT
1075 /* Or if the precision of TO is not the same as the precision
1076 of its mode.  */
1077 || !type_has_mode_precision_p (type)))
1078 (convert (bitop @0 (convert @1))))))
1080 (for bitop (bit_and bit_ior)
1081 rbitop (bit_ior bit_and)
1082 /* (x | y) & x -> x */
1083 /* (x & y) | x -> x */
1085 (bitop:c (rbitop:c @0 @1) @0)
1087 /* (~x | y) & x -> x & y */
1088 /* (~x & y) | x -> x | y */
1090 (bitop:c (rbitop:c (bit_not @0) @1) @0)
1093 /* (x | CST1) & CST2 -> (x & CST2) | (CST1 & CST2) */
1095 (bit_and (bit_ior @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
1096 (bit_ior (bit_and @0 @2) (bit_and @1 @2)))
1098 /* Combine successive equal operations with constants. */
1099 (for bitop (bit_and bit_ior bit_xor)
1101 (bitop (bitop @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
1102 (bitop @0 (bitop @1 @2))))
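/* For example, (x & 0xff) & 0x0f becomes x & (0xff & 0x0f) = x & 0x0f,
   and (x | 1) | 4 becomes x | 5.  */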
1104 /* Try simple folding for X op !X, and X op X with the help
1105 of the truth_valued_p and logical_inverted_value predicates. */
1106 (match truth_valued_p
1108 (if (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) == 1)))
1109 (for op (tcc_comparison truth_and truth_andif truth_or truth_orif truth_xor)
1110 (match truth_valued_p
1112 (match truth_valued_p
1115 (match (logical_inverted_value @0)
1117 (match (logical_inverted_value @0)
1118 (bit_not truth_valued_p@0))
1119 (match (logical_inverted_value @0)
1120 (eq @0 integer_zerop))
1121 (match (logical_inverted_value @0)
1122 (ne truth_valued_p@0 integer_truep))
1123 (match (logical_inverted_value @0)
1124 (bit_xor truth_valued_p@0 integer_truep))
1128 (bit_and:c @0 (logical_inverted_value @0))
1129 { build_zero_cst (type); })
1130 /* X | !X and X ^ !X -> 1, if X is truth-valued.  */
1131 (for op (bit_ior bit_xor)
1133 (op:c truth_valued_p@0 (logical_inverted_value @0))
1134 { constant_boolean_node (true, type); }))
1135 /* X ==/!= !X is false/true. */
1138 (op:c truth_valued_p@0 (logical_inverted_value @0))
1139 { constant_boolean_node (op == NE_EXPR ? true : false, type); }))
1143 (bit_not (bit_not @0))
1146 /* Convert ~ (-A) to A - 1. */
1148 (bit_not (convert? (negate @0)))
1149 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
1150 || !TYPE_UNSIGNED (TREE_TYPE (@0)))
1151 (convert (minus @0 { build_each_one_cst (TREE_TYPE (@0)); }))))
1153 /* Convert - (~A) to A + 1. */
1155 (negate (nop_convert (bit_not @0)))
1156 (plus (view_convert @0) { build_each_one_cst (type); }))
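/* Both rewrites rely on the two's-complement identity ~A == -A - 1,
   hence ~(-A) == A - 1 and -(~A) == A + 1.  */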
1158 /* Convert ~ (A - 1) or ~ (A + -1) to -A. */
1160 (bit_not (convert? (minus @0 integer_each_onep)))
1161 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
1162 || !TYPE_UNSIGNED (TREE_TYPE (@0)))
1163 (convert (negate @0))))
1165 (bit_not (convert? (plus @0 integer_all_onesp)))
1166 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
1167 || !TYPE_UNSIGNED (TREE_TYPE (@0)))
1168 (convert (negate @0))))
1170 /* Part of convert ~(X ^ Y) to ~X ^ Y or X ^ ~Y if ~X or ~Y simplify. */
1172 (bit_not (convert? (bit_xor @0 INTEGER_CST@1)))
1173 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1174 (convert (bit_xor @0 (bit_not @1)))))
1176 (bit_not (convert? (bit_xor:c (bit_not @0) @1)))
1177 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1178 (convert (bit_xor @0 @1))))
1180 /* Otherwise prefer ~(X ^ Y) to ~X ^ Y as more canonical. */
1182 (bit_xor:c (nop_convert:s (bit_not:s @0)) @1)
1183 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1184 (bit_not (bit_xor (view_convert @0) @1))))
1186 /* (x & ~m) | (y & m) -> ((x ^ y) & m) ^ x */
1188 (bit_ior:c (bit_and:cs @0 (bit_not @2)) (bit_and:cs @1 @2))
1189 (bit_xor (bit_and (bit_xor @0 @1) @2) @0))
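/* This is the branchless bit-select idiom: where a bit of m is 0 the
   result takes the bit of x, and where it is 1 the xor-and-xor chain
   yields the corresponding bit of y.  */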
1191 /* Fold A - (A & B) into ~B & A. */
1193 (minus (convert1? @0) (convert2?:s (bit_and:cs @@0 @1)))
1194 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
1195 && tree_nop_conversion_p (type, TREE_TYPE (@1)))
1196 (convert (bit_and (bit_not @1) @0))))
1198 /* (m1 CMP m2) * d -> (m1 CMP m2) ? d : 0 */
1199 (for cmp (gt lt ge le)
1201 (mult (convert (cmp @0 @1)) @2)
1202 (cond (cmp @0 @1) @2 { build_zero_cst (type); })))
1204 /* For integral types with undefined overflow and C != 0 fold
1205 x * C EQ/NE y * C into x EQ/NE y. */
1208 (cmp (mult:c @0 @1) (mult:c @2 @1))
1209 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
1210 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1211 && tree_expr_nonzero_p (@1))
1214 /* For integral types with wrapping overflow and C odd fold
1215 x * C EQ/NE y * C into x EQ/NE y. */
1218 (cmp (mult @0 INTEGER_CST@1) (mult @2 @1))
1219 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
1220 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))
1221 && (TREE_INT_CST_LOW (@1) & 1) != 0)
1224 /* For integral types with undefined overflow and C != 0 fold
1225 x * C RELOP y * C into:
1227 x RELOP y for nonnegative C
1228 y RELOP x for negative C */
1229 (for cmp (lt gt le ge)
1231 (cmp (mult:c @0 @1) (mult:c @2 @1))
1232 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
1233 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1234 (if (tree_expr_nonnegative_p (@1) && tree_expr_nonzero_p (@1))
1236 (if (TREE_CODE (@1) == INTEGER_CST
1237 && wi::neg_p (wi::to_wide (@1), TYPE_SIGN (TREE_TYPE (@1))))
1240 /* (X - 1U) <= INT_MAX-1U into (int) X > 0. */
1244 (cmp (plus @0 integer_minus_onep@1) INTEGER_CST@2)
1245 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
1246 && TYPE_UNSIGNED (TREE_TYPE (@0))
1247 && TYPE_PRECISION (TREE_TYPE (@0)) > 1
1248 && (wi::to_wide (@2)
1249 == wi::max_value (TYPE_PRECISION (TREE_TYPE (@0)), SIGNED) - 1))
1250 (with { tree stype = signed_type_for (TREE_TYPE (@0)); }
1251 (icmp (convert:stype @0) { build_int_cst (stype, 0); })))))
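/* For example, with 32-bit unsigned X, X - 1 <= 0x7ffffffe holds exactly
   for X in [1, 0x7fffffff] (X == 0 wraps to 0xffffffff), which is the
   same condition as (int) X > 0.  */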
1253 /* X / 4 < Y / 4 iff X < Y when the division is known to be exact. */
1254 (for cmp (simple_comparison)
1256 (cmp (exact_div @0 INTEGER_CST@2) (exact_div @1 @2))
1257 (if (wi::gt_p (wi::to_wide (@2), 0, TYPE_SIGN (TREE_TYPE (@2))))
1260 /* X / C1 op C2 into a simple range test. */
1261 (for cmp (simple_comparison)
1263 (cmp (trunc_div:s @0 INTEGER_CST@1) INTEGER_CST@2)
1264 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
1265 && integer_nonzerop (@1)
1266 && !TREE_OVERFLOW (@1)
1267 && !TREE_OVERFLOW (@2))
1268 (with { tree lo, hi; bool neg_overflow;
1269 enum tree_code code = fold_div_compare (cmp, @1, @2, &lo, &hi,
1272 (if (code == LT_EXPR || code == GE_EXPR)
1273 (if (TREE_OVERFLOW (lo))
1274 { build_int_cst (type, (code == LT_EXPR) ^ neg_overflow); }
1275 (if (code == LT_EXPR)
1278 (if (code == LE_EXPR || code == GT_EXPR)
1279 (if (TREE_OVERFLOW (hi))
1280 { build_int_cst (type, (code == LE_EXPR) ^ neg_overflow); }
1281 (if (code == LE_EXPR)
1285 { build_int_cst (type, code == NE_EXPR); })
1286 (if (code == EQ_EXPR && !hi)
1288 (if (code == EQ_EXPR && !lo)
1290 (if (code == NE_EXPR && !hi)
1292 (if (code == NE_EXPR && !lo)
1295 { build_range_check (UNKNOWN_LOCATION, type, @0, code == EQ_EXPR,
1299 tree etype = range_check_type (TREE_TYPE (@0));
1302 if (! TYPE_UNSIGNED (etype))
1303 etype = unsigned_type_for (etype);
1304 hi = fold_convert (etype, hi);
1305 lo = fold_convert (etype, lo);
1306 hi = const_binop (MINUS_EXPR, etype, hi, lo);
1309 (if (etype && hi && !TREE_OVERFLOW (hi))
1310 (if (code == EQ_EXPR)
1311 (le (minus (convert:etype @0) { lo; }) { hi; })
1312 (gt (minus (convert:etype @0) { lo; }) { hi; })))))))))
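/* For example, unsigned x / 10 == 3 becomes the range test
   x - 30 <= 9, i.e. 30 <= x && x <= 39, using the unsigned
   subtraction computed above.  */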
1314 /* X + Z < Y + Z is the same as X < Y when there is no overflow. */
1315 (for op (lt le ge gt)
1317 (op (plus:c @0 @2) (plus:c @1 @2))
1318 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1319 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1321 /* For equality and subtraction, this is also true with wrapping overflow. */
1322 (for op (eq ne minus)
1324 (op (plus:c @0 @2) (plus:c @1 @2))
1325 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1326 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1327 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
1330 /* X - Z < Y - Z is the same as X < Y when there is no overflow. */
1331 (for op (lt le ge gt)
1333 (op (minus @0 @2) (minus @1 @2))
1334 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1335 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1337 /* For equality and subtraction, this is also true with wrapping overflow. */
1338 (for op (eq ne minus)
1340 (op (minus @0 @2) (minus @1 @2))
1341 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1342 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1343 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
1346 /* Z - X < Z - Y is the same as Y < X when there is no overflow. */
1347 (for op (lt le ge gt)
1349 (op (minus @2 @0) (minus @2 @1))
1350 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1351 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1353 /* For equality and subtraction, this is also true with wrapping overflow. */
1354 (for op (eq ne minus)
1356 (op (minus @2 @0) (minus @2 @1))
1357 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1358 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1359 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
1362 /* X + Y < Y is the same as X < 0 when there is no overflow. */
1363 (for op (lt le gt ge)
1365 (op:c (plus:c@2 @0 @1) @1)
1366 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1367 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1368 && (CONSTANT_CLASS_P (@0) || single_use (@2)))
1369 (op @0 { build_zero_cst (TREE_TYPE (@0)); }))))
1370 /* For equality, this is also true with wrapping overflow. */
1373 (op:c (nop_convert@3 (plus:c@2 @0 (convert1? @1))) (convert2? @1))
1374 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1375 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1376 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
1377 && (CONSTANT_CLASS_P (@0) || (single_use (@2) && single_use (@3)))
1378 && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@2))
1379 && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@1)))
1380 (op @0 { build_zero_cst (TREE_TYPE (@0)); })))
1382 (op:c (nop_convert@3 (pointer_plus@2 (convert1? @0) @1)) (convert2? @0))
1383 (if (tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0))
1384 && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0))
1385 && (CONSTANT_CLASS_P (@1) || (single_use (@2) && single_use (@3))))
1386 (op @1 { build_zero_cst (TREE_TYPE (@1)); }))))
1388 /* X - Y < X is the same as Y > 0 when there is no overflow.
1389 For equality, this is also true with wrapping overflow. */
1390 (for op (simple_comparison)
1392 (op:c @0 (minus@2 @0 @1))
1393 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1394 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1395 || ((op == EQ_EXPR || op == NE_EXPR)
1396 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
1397 && (CONSTANT_CLASS_P (@1) || single_use (@2)))
1398 (op @1 { build_zero_cst (TREE_TYPE (@1)); }))))
1400 /* Transform:
1401 * (X / Y) == 0 -> X < Y if X, Y are unsigned.
1402 * (X / Y) != 0 -> X >= Y, if X, Y are unsigned.  */
1407 (cmp (trunc_div @0 @1) integer_zerop)
1408 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
1409 && (VECTOR_TYPE_P (type) || !VECTOR_TYPE_P (TREE_TYPE (@0))))
1412 /* X == C - X can never be true if C is odd. */
1415 (cmp:c (convert? @0) (convert1? (minus INTEGER_CST@1 (convert2? @0))))
1416 (if (TREE_INT_CST_LOW (@1) & 1)
1417 { constant_boolean_node (cmp == NE_EXPR, type); })))
1419 /* Arguments on which one can call get_nonzero_bits to get the bits
1420 possibly set.  */
1421 (match with_possible_nonzero_bits
1423 (match with_possible_nonzero_bits
1425 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))))
1426 /* Slightly extended version, do not make it recursive to keep it cheap. */
1427 (match (with_possible_nonzero_bits2 @0)
1428 with_possible_nonzero_bits@0)
1429 (match (with_possible_nonzero_bits2 @0)
1430 (bit_and:c with_possible_nonzero_bits@0 @2))
1432 /* Same for bits that are known to be set, but we do not have
1433 an equivalent to get_nonzero_bits yet. */
1434 (match (with_certain_nonzero_bits2 @0)
1436 (match (with_certain_nonzero_bits2 @0)
1437 (bit_ior @1 INTEGER_CST@0))
1439 /* X == C (or X & Z == Y | C) is impossible if ~nonzero(X) & C != 0. */
1442 (cmp:c (with_possible_nonzero_bits2 @0) (with_certain_nonzero_bits2 @1))
1443 (if (wi::bit_and_not (wi::to_wide (@1), get_nonzero_bits (@0)) != 0)
1444 { constant_boolean_node (cmp == NE_EXPR, type); })))
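/* For example, if get_nonzero_bits shows that only the low four bits of
   X can be set, X == 0x10 folds to false and X != 0x10 folds to true.  */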
1446 /* ((X inner_op C0) outer_op C1)
1447 With X being a tree where value_range has reasoned certain bits to always be
1448 zero throughout its computed value range,
1449 inner_op = {|,^}, outer_op = {|,^} and inner_op != outer_op
1450 where zero_mask has 1's for all bits that are sure to be 0 in X
1451 and 0's otherwise.
1452 if (inner_op == '^') C0 &= ~C1;
1453 if ((C0 & ~zero_mask) == 0) then emit (X outer_op (C0 outer_op C1)
1454 if ((C1 & ~zero_mask) == 0) then emit (X inner_op (C0 outer_op C1)
1455 */
1456 (for inner_op (bit_ior bit_xor)
1457 outer_op (bit_xor bit_ior)
1460 (inner_op:s @2 INTEGER_CST@0) INTEGER_CST@1)
1464 wide_int zero_mask_not;
1468 if (TREE_CODE (@2) == SSA_NAME)
1469 zero_mask_not = get_nonzero_bits (@2);
1473 if (inner_op == BIT_XOR_EXPR)
1475 C0 = wi::bit_and_not (wi::to_wide (@0), wi::to_wide (@1));
1476 cst_emit = C0 | wi::to_wide (@1);
1480 C0 = wi::to_wide (@0);
1481 cst_emit = C0 ^ wi::to_wide (@1);
1484 (if (!fail && (C0 & zero_mask_not) == 0)
1485 (outer_op @2 { wide_int_to_tree (type, cst_emit); })
1486 (if (!fail && (wi::to_wide (@1) & zero_mask_not) == 0)
1487 (inner_op @2 { wide_int_to_tree (type, cst_emit); }))))))
1489 /* Associate (p +p off1) +p off2 as (p +p (off1 + off2)). */
1491 (pointer_plus (pointer_plus:s @0 @1) @3)
1492 (pointer_plus @0 (plus @1 @3)))
1498 tem4 = (unsigned long) tem3;
1503 (pointer_plus @0 (convert?@2 (minus@3 (convert @1) (convert @0))))
1504 /* Conditionally look through a sign-changing conversion. */
1505 (if (TYPE_PRECISION (TREE_TYPE (@2)) == TYPE_PRECISION (TREE_TYPE (@3))
1506 && ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@1)))
1507 || (GENERIC && type == TREE_TYPE (@1))))
1511 tem = (sizetype) ptr;
1515 and produce the simpler and easier to analyze with respect to alignment
1516 ... = ptr & ~algn; */
1518 (pointer_plus @0 (negate (bit_and (convert @0) INTEGER_CST@1)))
1519 (with { tree algn = wide_int_to_tree (TREE_TYPE (@0), ~wi::to_wide (@1)); }
1520 (bit_and @0 { algn; })))
1522 /* Try folding difference of addresses. */
1524 (minus (convert ADDR_EXPR@0) (convert @1))
1525 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1526 (with { HOST_WIDE_INT diff; }
1527 (if (ptr_difference_const (@0, @1, &diff))
1528 { build_int_cst_type (type, diff); }))))
1530 (minus (convert @0) (convert ADDR_EXPR@1))
1531 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1532 (with { HOST_WIDE_INT diff; }
1533 (if (ptr_difference_const (@0, @1, &diff))
1534 { build_int_cst_type (type, diff); }))))
1536 /* If arg0 is derived from the address of an object or function, we may
1537 be able to fold this expression using the object or function's
1538 alignment.  */
1540 (bit_and (convert? @0) INTEGER_CST@1)
1541 (if (POINTER_TYPE_P (TREE_TYPE (@0))
1542 && tree_nop_conversion_p (type, TREE_TYPE (@0)))
1546 unsigned HOST_WIDE_INT bitpos;
1547 get_pointer_alignment_1 (@0, &align, &bitpos);
1549 (if (wi::ltu_p (wi::to_wide (@1), align / BITS_PER_UNIT))
1550 { wide_int_to_tree (type, (wi::to_wide (@1)
1551 & (bitpos / BITS_PER_UNIT))); }))))
1554 /* We can't reassociate at all for saturating types. */
1555 (if (!TYPE_SATURATING (type))
1557 /* Contract negates. */
1558 /* A + (-B) -> A - B */
1560 (plus:c @0 (convert? (negate @1)))
1561 /* Apply STRIP_NOPS on the negate. */
1562 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
1563 && !TYPE_OVERFLOW_SANITIZED (type))
1567 if (INTEGRAL_TYPE_P (type)
1568 && TYPE_OVERFLOW_WRAPS (type) != TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1)))
1569 t1 = TYPE_OVERFLOW_WRAPS (type) ? type : TREE_TYPE (@1);
1571 (convert (minus (convert:t1 @0) (convert:t1 @1))))))
1572 /* A - (-B) -> A + B */
1574 (minus @0 (convert? (negate @1)))
1575 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
1576 && !TYPE_OVERFLOW_SANITIZED (type))
1580 if (INTEGRAL_TYPE_P (type)
1581 && TYPE_OVERFLOW_WRAPS (type) != TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1)))
1582 t1 = TYPE_OVERFLOW_WRAPS (type) ? type : TREE_TYPE (@1);
1584 (convert (plus (convert:t1 @0) (convert:t1 @1))))))
1585 /* -(T)(-A) -> (T)A.
1586 Sign-extension is ok except for INT_MIN, which thankfully cannot
1587 happen without overflow. */
1589 (negate (convert (negate @1)))
1590 (if (INTEGRAL_TYPE_P (type)
1591 && (TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@1))
1592 || (!TYPE_UNSIGNED (TREE_TYPE (@1))
1593 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))))
1594 && !TYPE_OVERFLOW_SANITIZED (type)
1595 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@1)))
1598 (negate (convert negate_expr_p@1))
1599 (if (SCALAR_FLOAT_TYPE_P (type)
1600 && ((DECIMAL_FLOAT_TYPE_P (type)
1601 == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1))
1602 && TYPE_PRECISION (type) >= TYPE_PRECISION (TREE_TYPE (@1)))
1603 || !HONOR_SIGN_DEPENDENT_ROUNDING (type)))
1604 (convert (negate @1))))
1606 (negate (nop_convert (negate @1)))
1607 (if (!TYPE_OVERFLOW_SANITIZED (type)
1608 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@1)))
1611 /* We can't reassociate floating-point unless -fassociative-math
1612 or fixed-point plus or minus because of saturation to +-Inf. */
1613 (if ((!FLOAT_TYPE_P (type) || flag_associative_math)
1614 && !FIXED_POINT_TYPE_P (type))
1616 /* Match patterns that allow contracting a plus-minus pair
1617 irrespective of overflow issues. */
1618 /* (A +- B) - A -> +- B */
1619 /* (A +- B) -+ B -> A */
1620 /* A - (A +- B) -> -+ B */
1621 /* A +- (B -+ A) -> +- B */
1623 (minus (plus:c @0 @1) @0)
1626 (minus (minus @0 @1) @0)
1629 (plus:c (minus @0 @1) @1)
1632 (minus @0 (plus:c @0 @1))
1635 (minus @0 (minus @0 @1))
1637 /* (A +- B) + (C - A) -> C +- B */
1638 /* (A + B) - (A - C) -> B + C */
1639 /* More cases are handled with comparisons. */
1641 (plus:c (plus:c @0 @1) (minus @2 @0))
1644 (plus:c (minus @0 @1) (minus @2 @0))
1647 (minus (plus:c @0 @1) (minus @0 @2))
1650 /* (A +- CST1) +- CST2 -> A + CST3
1651 Use view_convert because it is safe for vectors and equivalent for
1652 scalars.  */
1653 (for outer_op (plus minus)
1654 (for inner_op (plus minus)
1655 neg_inner_op (minus plus)
1657 (outer_op (nop_convert (inner_op @0 CONSTANT_CLASS_P@1))
1659 /* If one of the types wraps, use that one. */
1660 (if (!ANY_INTEGRAL_TYPE_P (type) || TYPE_OVERFLOW_WRAPS (type))
1661 (if (outer_op == PLUS_EXPR)
1662 (plus (view_convert @0) (inner_op @2 (view_convert @1)))
1663 (minus (view_convert @0) (neg_inner_op @2 (view_convert @1))))
1664 (if (!ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1665 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
1666 (if (outer_op == PLUS_EXPR)
1667 (view_convert (plus @0 (inner_op (view_convert @2) @1)))
1668 (view_convert (minus @0 (neg_inner_op (view_convert @2) @1))))
1669 /* If the constant operation overflows we cannot do the transform
1670 directly as we would introduce undefined overflow, for example
1671 with (a - 1) + INT_MIN. */
1672 (if (types_match (type, @0))
1673 (with { tree cst = const_binop (outer_op == inner_op
1674 ? PLUS_EXPR : MINUS_EXPR,
1676 (if (cst && !TREE_OVERFLOW (cst))
1677 (inner_op @0 { cst; } )
1678 /* X+INT_MAX+1 is X-INT_MIN. */
1679 (if (INTEGRAL_TYPE_P (type) && cst
1680 && wi::to_wide (cst) == wi::min_value (type))
1681 (neg_inner_op @0 { wide_int_to_tree (type, wi::to_wide (cst)); })
1682 /* Last resort, use some unsigned type. */
1683 (with { tree utype = unsigned_type_for (type); }
1684 (view_convert (inner_op
1685 (view_convert:utype @0)
1687 { drop_tree_overflow (cst); })))))))))))))
1689 /* (CST1 - A) +- CST2 -> CST3 - A */
1690 (for outer_op (plus minus)
1692 (outer_op (minus CONSTANT_CLASS_P@1 @0) CONSTANT_CLASS_P@2)
1693 (with { tree cst = const_binop (outer_op, type, @1, @2); }
1694 (if (cst && !TREE_OVERFLOW (cst))
1695 (minus { cst; } @0)))))
1697 /* CST1 - (CST2 - A) -> CST3 + A */
1699 (minus CONSTANT_CLASS_P@1 (minus CONSTANT_CLASS_P@2 @0))
1700 (with { tree cst = const_binop (MINUS_EXPR, type, @1, @2); }
1701 (if (cst && !TREE_OVERFLOW (cst))
1702 (plus { cst; } @0))))
1706 (plus:c (bit_not @0) @0)
1707 (if (!TYPE_OVERFLOW_TRAPS (type))
1708 { build_all_ones_cst (type); }))
1712 (plus (convert? (bit_not @0)) integer_each_onep)
1713 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1714 (negate (convert @0))))
1718 (minus (convert? (negate @0)) integer_each_onep)
1719 (if (!TYPE_OVERFLOW_TRAPS (type)
1720 && tree_nop_conversion_p (type, TREE_TYPE (@0)))
1721 (bit_not (convert @0))))
1725 (minus integer_all_onesp @0)
1728 /* (T)(P + A) - (T)P -> (T) A */
1729 (for add (plus pointer_plus)
1731 (minus (convert (add @@0 @1))
1733 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
1734 /* For integer types, if A has a smaller type
1735 than T the result depends on the possible
1736 overflow in P + A.
1737 E.g. T=size_t, A=(unsigned)429497295, P>0.
1738 However, if an overflow in P + A would cause
1739 undefined behavior, we can assume that there
1740 is no overflow.  */
1741 || (INTEGRAL_TYPE_P (TREE_TYPE (@0))
1742 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1743 /* For pointer types, if the conversion of A to the
1744 final type requires a sign- or zero-extension,
1745 then we have to punt - it is not defined which
1746 one should be used.  */
1747 || (POINTER_TYPE_P (TREE_TYPE (@0))
1748 && TREE_CODE (@1) == INTEGER_CST
1749 && tree_int_cst_sign_bit (@1) == 0))
1752 /* (T)P - (T)(P + A) -> -(T) A */
1753 (for add (plus pointer_plus)
1756 (convert (add @@0 @1)))
1757 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
1758 /* For integer types, if A has a smaller type
1759 than T the result depends on the possible
1760 overflow in P + A.
1761 E.g. T=size_t, A=(unsigned)429497295, P>0.
1762 However, if an overflow in P + A would cause
1763 undefined behavior, we can assume that there
1764 is no overflow.  */
1765 || (INTEGRAL_TYPE_P (TREE_TYPE (@0))
1766 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1767 /* For pointer types, if the conversion of A to the
1768 final type requires a sign- or zero-extension,
1769 then we have to punt - it is not defined which
1770 one should be used.  */
1771 || (POINTER_TYPE_P (TREE_TYPE (@0))
1772 && TREE_CODE (@1) == INTEGER_CST
1773 && tree_int_cst_sign_bit (@1) == 0))
1774 (negate (convert @1)))))
1776 /* (T)(P + A) - (T)(P + B) -> (T)A - (T)B */
1777 (for add (plus pointer_plus)
1779 (minus (convert (add @@0 @1))
1780 (convert (add @0 @2)))
1781 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
1782 /* For integer types, if A has a smaller type
1783 than T the result depends on the possible
1784 overflow in P + A.
1785 E.g. T=size_t, A=(unsigned)429497295, P>0.
1786 However, if an overflow in P + A would cause
1787 undefined behavior, we can assume that there
1788 is no overflow.  */
1789 || (INTEGRAL_TYPE_P (TREE_TYPE (@0))
1790 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1791 /* For pointer types, if the conversion of A to the
1792 final type requires a sign- or zero-extension,
1793 then we have to punt - it is not defined which
1794 one should be used.  */
1795 || (POINTER_TYPE_P (TREE_TYPE (@0))
1796 && TREE_CODE (@1) == INTEGER_CST
1797 && tree_int_cst_sign_bit (@1) == 0
1798 && TREE_CODE (@2) == INTEGER_CST
1799 && tree_int_cst_sign_bit (@2) == 0))
1800 (minus (convert @1) (convert @2)))))))
1803 /* Simplifications of MIN_EXPR, MAX_EXPR, fmin() and fmax(). */
1805 (for minmax (min max FMIN FMIN_FN FMAX FMAX_FN)
1809 /* min(max(x,y),y) -> y. */
1811 (min:c (max:c @0 @1) @1)
1813 /* max(min(x,y),y) -> y. */
1815 (max:c (min:c @0 @1) @1)
1817 /* max(a,-a) -> abs(a). */
1819 (max:c @0 (negate @0))
1820 (if (TREE_CODE (type) != COMPLEX_TYPE
1821 && (! ANY_INTEGRAL_TYPE_P (type)
1822 || TYPE_OVERFLOW_UNDEFINED (type)))
1824 /* min(a,-a) -> -abs(a). */
1826 (min:c @0 (negate @0))
1827 (if (TREE_CODE (type) != COMPLEX_TYPE
1828 && (! ANY_INTEGRAL_TYPE_P (type)
1829 || TYPE_OVERFLOW_UNDEFINED (type)))
1834 (if (INTEGRAL_TYPE_P (type)
1835 && TYPE_MIN_VALUE (type)
1836 && operand_equal_p (@1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST))
1838 (if (INTEGRAL_TYPE_P (type)
1839 && TYPE_MAX_VALUE (type)
1840 && operand_equal_p (@1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST))
1845 (if (INTEGRAL_TYPE_P (type)
1846 && TYPE_MAX_VALUE (type)
1847 && operand_equal_p (@1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST))
1849 (if (INTEGRAL_TYPE_P (type)
1850 && TYPE_MIN_VALUE (type)
1851 && operand_equal_p (@1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST))
1854 /* max (a, a + CST) -> a + CST where CST is positive. */
1855 /* max (a, a + CST) -> a where CST is negative. */
1857 (max:c @0 (plus@2 @0 INTEGER_CST@1))
1858 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1859 (if (tree_int_cst_sgn (@1) > 0)
1863 /* min (a, a + CST) -> a where CST is positive. */
1864 /* min (a, a + CST) -> a + CST where CST is negative. */
1866 (min:c @0 (plus@2 @0 INTEGER_CST@1))
1867 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1868 (if (tree_int_cst_sgn (@1) > 0)
1872 /* (convert (minmax ((convert (x) c)))) -> minmax (x c) if x is promoted
1873 and the outer convert demotes the expression back to x's type. */
1874 (for minmax (min max)
1876 (convert (minmax@0 (convert @1) INTEGER_CST@2))
1877 (if (INTEGRAL_TYPE_P (type)
1878 && types_match (@1, type) && int_fits_type_p (@2, type)
1879 && TYPE_SIGN (TREE_TYPE (@0)) == TYPE_SIGN (type)
1880 && TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (type))
1881 (minmax @1 (convert @2)))))
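/* For example, (short) MIN ((int) s, 100) with s of type short becomes
   MIN (s, (short) 100), since 100 fits in short and the sign and
   precision conditions above hold.  */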
1883 (for minmax (FMIN FMIN_FN FMAX FMAX_FN)
1884 /* If either argument is NaN, return the other one. Avoid the
1885 transformation if we get (and honor) a signalling NaN. */
1887 (minmax:c @0 REAL_CST@1)
1888 (if (real_isnan (TREE_REAL_CST_PTR (@1))
1889 && (!HONOR_SNANS (@1) || !TREE_REAL_CST (@1).signalling))
1891 /* Convert fmin/fmax to MIN_EXPR/MAX_EXPR. C99 requires these
1892 functions to return the numeric arg if the other one is NaN.
1893 MIN and MAX don't honor that, so only transform if -ffinite-math-only
1894 is set. C99 doesn't require -0.0 to be handled, so we don't have to
1895 worry about it either. */
1896 (if (flag_finite_math_only)
1909 /* min (-A, -B) -> -max (A, B) */
1910 (for minmax (min max FMIN FMIN_FN FMAX FMAX_FN)
1911 maxmin (max min FMAX FMAX_FN FMIN FMIN_FN)
1913 (minmax (negate:s@2 @0) (negate:s@3 @1))
1914 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
1915 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1916 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
1917 (negate (maxmin @0 @1)))))
1918 /* MIN (~X, ~Y) -> ~MAX (X, Y)
1919 MAX (~X, ~Y) -> ~MIN (X, Y) */
1920 (for minmax (min max)
1923 (minmax (bit_not:s@2 @0) (bit_not:s@3 @1))
1924 (bit_not (maxmin @0 @1))))
1926 /* MIN (X, Y) == X -> X <= Y */
1927 (for minmax (min min max max)
1931 (cmp:c (minmax:c @0 @1) @0)
1932 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)))
1934 /* MIN (X, 5) == 0 -> X == 0
1935 MIN (X, 5) == 7 -> false */
1938 (cmp (min @0 INTEGER_CST@1) INTEGER_CST@2)
1939 (if (wi::lt_p (wi::to_wide (@1), wi::to_wide (@2),
1940 TYPE_SIGN (TREE_TYPE (@0))))
1941 { constant_boolean_node (cmp == NE_EXPR, type); }
1942 (if (wi::gt_p (wi::to_wide (@1), wi::to_wide (@2),
1943 TYPE_SIGN (TREE_TYPE (@0))))
1947 (cmp (max @0 INTEGER_CST@1) INTEGER_CST@2)
1948 (if (wi::gt_p (wi::to_wide (@1), wi::to_wide (@2),
1949 TYPE_SIGN (TREE_TYPE (@0))))
1950 { constant_boolean_node (cmp == NE_EXPR, type); }
1951 (if (wi::lt_p (wi::to_wide (@1), wi::to_wide (@2),
1952 TYPE_SIGN (TREE_TYPE (@0))))
1954 /* MIN (X, C1) < C2 -> X < C2 || C1 < C2 */
1955 (for minmax (min min max max min min max max )
1956 cmp (lt le gt ge gt ge lt le )
1957 comb (bit_ior bit_ior bit_ior bit_ior bit_and bit_and bit_and bit_and)
1959 (cmp (minmax @0 INTEGER_CST@1) INTEGER_CST@2)
1960 (comb (cmp @0 @2) (cmp @1 @2))))
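/* Illustrative examples (editorial, not additional patterns):
     min (x, 10) < 5  ->  x < 5 || 10 < 5  ->  x < 5
     max (x, 10) < 5  ->  x < 5 && 10 < 5  ->  false
   where the comparison of the two constants folds away afterwards.  */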
1962 /* Simplifications of shift and rotates. */
1964 (for rotate (lrotate rrotate)
1966 (rotate integer_all_onesp@0 @1)
1969 /* Optimize -1 >> x for arithmetic right shifts. */
1971 (rshift integer_all_onesp@0 @1)
1972 (if (!TYPE_UNSIGNED (type)
1973 && tree_expr_nonnegative_p (@1))
1976 /* Optimize (x >> c) << c into x & (-1<<c). */
1978 (lshift (rshift @0 INTEGER_CST@1) @1)
1979 (if (wi::ltu_p (wi::to_wide (@1), element_precision (type)))
1980 (bit_and @0 (lshift { build_minus_one_cst (type); } @1))))
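/* Illustrative example (editorial, not an additional pattern): on a 32-bit
   type
     (x >> 3) << 3  ->  x & (-1 << 3)  ==  x & 0xfffffff8
   i.e. the low three bits are cleared with a single mask.  */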
1982 /* Optimize (x << c) >> c into x & ((unsigned)-1 >> c) for unsigned
1985 (rshift (lshift @0 INTEGER_CST@1) @1)
1986 (if (TYPE_UNSIGNED (type)
1987 && (wi::ltu_p (wi::to_wide (@1), element_precision (type))))
1988 (bit_and @0 (rshift { build_minus_one_cst (type); } @1))))
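/* Illustrative example (editorial, not an additional pattern): for 32-bit
   unsigned x
     (x << 3) >> 3  ->  x & ((unsigned) -1 >> 3)  ==  x & 0x1fffffff
   keeping exactly the bits that survive the shift round trip.  */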
1990 (for shiftrotate (lrotate rrotate lshift rshift)
1992 (shiftrotate @0 integer_zerop)
1995 (shiftrotate integer_zerop@0 @1)
1997 /* Prefer vector1 << scalar to vector1 << vector2
1998 if vector2 is uniform. */
1999 (for vec (VECTOR_CST CONSTRUCTOR)
2001 (shiftrotate @0 vec@1)
2002 (with { tree tem = uniform_vector_p (@1); }
2004 (shiftrotate @0 { tem; }))))))
2006 /* Simplify X << Y where Y's low `width' bits are 0 to X, since the only
2007    valid Y is then 0.  Similarly for X >> Y.  */
2009 (for shift (lshift rshift)
2011 (shift @0 SSA_NAME@1)
2012 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1)))
2014 int width = ceil_log2 (element_precision (TREE_TYPE (@0)));
2015 int prec = TYPE_PRECISION (TREE_TYPE (@1));
2017 (if ((get_nonzero_bits (@1) & wi::mask (width, false, prec)) == 0)
2021 /* Rewrite an LROTATE_EXPR by a constant into an
2022 RROTATE_EXPR by a new constant. */
2024 (lrotate @0 INTEGER_CST@1)
2025 (rrotate @0 { const_binop (MINUS_EXPR, TREE_TYPE (@1),
2026 build_int_cst (TREE_TYPE (@1),
2027 element_precision (type)), @1); }))
2029 /* Turn (a OP c1) OP c2 into a OP (c1+c2). */
2030 (for op (lrotate rrotate rshift lshift)
2032 (op (op @0 INTEGER_CST@1) INTEGER_CST@2)
2033 (with { unsigned int prec = element_precision (type); }
2034 (if (wi::ge_p (wi::to_wide (@1), 0, TYPE_SIGN (TREE_TYPE (@1)))
2035 && wi::lt_p (wi::to_wide (@1), prec, TYPE_SIGN (TREE_TYPE (@1)))
2036 && wi::ge_p (wi::to_wide (@2), 0, TYPE_SIGN (TREE_TYPE (@2)))
2037 && wi::lt_p (wi::to_wide (@2), prec, TYPE_SIGN (TREE_TYPE (@2))))
2038 (with { unsigned int low = (tree_to_uhwi (@1)
2039 + tree_to_uhwi (@2)); }
2040 /* Deal with a OP (c1 + c2) being undefined but (a OP c1) OP c2
2041    being well defined.  */
2042  (if (low >= prec)
2043   (if (op == LROTATE_EXPR || op == RROTATE_EXPR)
2044 (op @0 { build_int_cst (TREE_TYPE (@1), low % prec); })
2045 (if (TYPE_UNSIGNED (type) || op == LSHIFT_EXPR)
2046 { build_zero_cst (type); }
2047 (op @0 { build_int_cst (TREE_TYPE (@1), prec - 1); })))
2048 (op @0 { build_int_cst (TREE_TYPE (@1), low); })))))))
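/* Illustrative examples (editorial, not additional patterns), on a 32-bit
   type:
     (x << 3) << 5              ->  x << 8
     (x >> 20) >> 20            ->  0         for unsigned x (sum >= precision)
     (x >> 20) >> 20            ->  x >> 31   for signed x (arithmetic shift)
     (x rrotate 20) rrotate 20  ->  x rrotate 8   (rotate counts wrap)  */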
2051 /* ((1 << A) & 1) != 0 -> A == 0
2052 ((1 << A) & 1) == 0 -> A != 0 */
2056 (cmp (bit_and (lshift integer_onep @0) integer_onep) integer_zerop)
2057 (icmp @0 { build_zero_cst (TREE_TYPE (@0)); })))
2059 /* (CST1 << A) == CST2 -> A == ctz (CST2) - ctz (CST1)
2060 (CST1 << A) != CST2 -> A != ctz (CST2) - ctz (CST1)
2064 (cmp (lshift INTEGER_CST@0 @1) INTEGER_CST@2)
2065 (with { int cand = wi::ctz (wi::to_wide (@2)) - wi::ctz (wi::to_wide (@0)); }
2067 || (!integer_zerop (@2)
2068 && wi::lshift (wi::to_wide (@0), cand) != wi::to_wide (@2)))
2069 { constant_boolean_node (cmp == NE_EXPR, type); }
2070 (if (!integer_zerop (@2)
2071 && wi::lshift (wi::to_wide (@0), cand) == wi::to_wide (@2))
2072 (cmp @1 { build_int_cst (TREE_TYPE (@1), cand); }))))))
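/* Illustrative examples (editorial, not additional patterns):
     (1 << a) == 8   ->  a == 3
     (2 << a) == 16  ->  a == 3
     (1 << a) == 9   ->  false, since 9 is not 1 shifted by any amount.  */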
2074 /* Fold (X << C1) & C2 into (X << C1) & (C2 | ((1 << C1) - 1))
2075 (X >> C1) & C2 into (X >> C1) & (C2 | ~((type) -1 >> C1))
2076 if the new mask might be further optimized. */
2077 (for shift (lshift rshift)
2079 (bit_and (convert?:s@4 (shift:s@5 (convert1?@3 @0) INTEGER_CST@1))
2081 (if (tree_nop_conversion_p (TREE_TYPE (@4), TREE_TYPE (@5))
2082 && TYPE_PRECISION (type) <= HOST_BITS_PER_WIDE_INT
2083 && tree_fits_uhwi_p (@1)
2084 && tree_to_uhwi (@1) > 0
2085 && tree_to_uhwi (@1) < TYPE_PRECISION (type))
2088 unsigned int shiftc = tree_to_uhwi (@1);
2089 unsigned HOST_WIDE_INT mask = TREE_INT_CST_LOW (@2);
2090 unsigned HOST_WIDE_INT newmask, zerobits = 0;
2091 tree shift_type = TREE_TYPE (@3);
2094 if (shift == LSHIFT_EXPR)
2095 zerobits = ((HOST_WIDE_INT_1U << shiftc) - 1);
2096 else if (shift == RSHIFT_EXPR
2097 && type_has_mode_precision_p (shift_type))
2099 prec = TYPE_PRECISION (TREE_TYPE (@3));
2101 /* See if more bits can be proven as zero because of
2102    zero extension.  */
2104 && TYPE_UNSIGNED (TREE_TYPE (@0)))
2106 tree inner_type = TREE_TYPE (@0);
2107 if (type_has_mode_precision_p (inner_type)
2108 && TYPE_PRECISION (inner_type) < prec)
2110 prec = TYPE_PRECISION (inner_type);
2111 /* See if we can shorten the right shift. */
2113 shift_type = inner_type;
2114 /* Otherwise X >> C1 is all zeros, so we'll optimize
2115 it into (X, 0) later on by making sure zerobits
2116 is all ones.  */
2119 zerobits = HOST_WIDE_INT_M1U;
2122 zerobits >>= HOST_BITS_PER_WIDE_INT - shiftc;
2123 zerobits <<= prec - shiftc;
2125 /* For arithmetic shift if sign bit could be set, zerobits
2126 may actually contain sign bits, so no transformation is
2127 possible, unless MASK masks them all away. In that
2128 case the shift needs to be converted into logical shift. */
2129 if (!TYPE_UNSIGNED (TREE_TYPE (@3))
2130 && prec == TYPE_PRECISION (TREE_TYPE (@3)))
2132 if ((mask & zerobits) == 0)
2133 shift_type = unsigned_type_for (TREE_TYPE (@3));
2139 /* ((X << 16) & 0xff00) is (X, 0). */
2140 (if ((mask & zerobits) == mask)
2141 { build_int_cst (type, 0); }
2142 (with { newmask = mask | zerobits; }
2143 (if (newmask != mask && (newmask & (newmask + 1)) == 0)
2146 /* Only do the transformation if NEWMASK is some integer
2147    mode's mask.  */
2148 for (prec = BITS_PER_UNIT;
2149 prec < HOST_BITS_PER_WIDE_INT; prec <<= 1)
2150 if (newmask == (HOST_WIDE_INT_1U << prec) - 1)
2153 (if (prec < HOST_BITS_PER_WIDE_INT
2154 || newmask == HOST_WIDE_INT_M1U)
2156 { tree newmaskt = build_int_cst_type (TREE_TYPE (@2), newmask); }
2157 (if (!tree_int_cst_equal (newmaskt, @2))
2158 (if (shift_type != TREE_TYPE (@3))
2159 (bit_and (convert (shift:shift_type (convert @3) @1)) { newmaskt; })
2160 (bit_and @4 { newmaskt; })))))))))))))
2162 /* Fold (X {&,^,|} C2) << C1 into (X << C1) {&,^,|} (C2 << C1)
2163 (X {&,^,|} C2) >> C1 into (X >> C1) & (C2 >> C1). */
2164 (for shift (lshift rshift)
2165 (for bit_op (bit_and bit_xor bit_ior)
2167 (shift (convert?:s (bit_op:s @0 INTEGER_CST@2)) INTEGER_CST@1)
2168 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
2169 (with { tree mask = int_const_binop (shift, fold_convert (type, @2), @1); }
2170 (bit_op (shift (convert @0) @1) { mask; }))))))
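/* Illustrative examples (editorial, not additional patterns):
     (x & 0xff) << 8  ->  (x << 8) & 0xff00
     (x | 0xf0) >> 4  ->  (x >> 4) | 0xf
   so the constant operand is shifted at compile time.  */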
2172 /* ~(~X >> Y) -> X >> Y (for arithmetic shift). */
2174 (bit_not (convert1?:s (rshift:s (convert2?@0 (bit_not @1)) @2)))
2175 (if (!TYPE_UNSIGNED (TREE_TYPE (@0))
2176 && (element_precision (TREE_TYPE (@0))
2177 <= element_precision (TREE_TYPE (@1))
2178 || !TYPE_UNSIGNED (TREE_TYPE (@1))))
2180 { tree shift_type = TREE_TYPE (@0); }
2181 (convert (rshift (convert:shift_type @1) @2)))))
2183 /* ~(~X >>r Y) -> X >>r Y
2184 ~(~X <<r Y) -> X <<r Y */
2185 (for rotate (lrotate rrotate)
2187 (bit_not (convert1?:s (rotate:s (convert2?@0 (bit_not @1)) @2)))
2188 (if ((element_precision (TREE_TYPE (@0))
2189 <= element_precision (TREE_TYPE (@1))
2190 || !TYPE_UNSIGNED (TREE_TYPE (@1)))
2191 && (element_precision (type) <= element_precision (TREE_TYPE (@0))
2192 || !TYPE_UNSIGNED (TREE_TYPE (@0))))
2194 { tree rotate_type = TREE_TYPE (@0); }
2195 (convert (rotate (convert:rotate_type @1) @2))))))
2197 /* Simplifications of conversions. */
2199 /* Basic strip-useless-type-conversions / strip_nops. */
2200 (for cvt (convert view_convert float fix_trunc)
2203 (if ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@0)))
2204 || (GENERIC && type == TREE_TYPE (@0)))
2207 /* Contract view-conversions. */
2209 (view_convert (view_convert @0))
2212 /* For integral conversions with the same precision or pointer
2213 conversions use a NOP_EXPR instead. */
2216 (if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
2217 && (INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
2218 && TYPE_PRECISION (type) == TYPE_PRECISION (TREE_TYPE (@0)))
2221 /* Strip inner integral conversions that do not change precision or size, or
2222 zero-extend while keeping the same size (for bool-to-char). */
2224 (view_convert (convert@0 @1))
2225 (if ((INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
2226 && (INTEGRAL_TYPE_P (TREE_TYPE (@1)) || POINTER_TYPE_P (TREE_TYPE (@1)))
2227 && TYPE_SIZE (TREE_TYPE (@0)) == TYPE_SIZE (TREE_TYPE (@1))
2228 && (TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1))
2229 || (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (TREE_TYPE (@1))
2230 && TYPE_UNSIGNED (TREE_TYPE (@1)))))
2233 /* Re-association barriers around constants and other re-association
2234 barriers can be removed. */
2236 (paren CONSTANT_CLASS_P@0)
2239 (paren (paren@1 @0))
2242 /* Handle cases of two conversions in a row. */
2243 (for ocvt (convert float fix_trunc)
2244 (for icvt (convert float)
2249 tree inside_type = TREE_TYPE (@0);
2250 tree inter_type = TREE_TYPE (@1);
2251 int inside_int = INTEGRAL_TYPE_P (inside_type);
2252 int inside_ptr = POINTER_TYPE_P (inside_type);
2253 int inside_float = FLOAT_TYPE_P (inside_type);
2254 int inside_vec = VECTOR_TYPE_P (inside_type);
2255 unsigned int inside_prec = TYPE_PRECISION (inside_type);
2256 int inside_unsignedp = TYPE_UNSIGNED (inside_type);
2257 int inter_int = INTEGRAL_TYPE_P (inter_type);
2258 int inter_ptr = POINTER_TYPE_P (inter_type);
2259 int inter_float = FLOAT_TYPE_P (inter_type);
2260 int inter_vec = VECTOR_TYPE_P (inter_type);
2261 unsigned int inter_prec = TYPE_PRECISION (inter_type);
2262 int inter_unsignedp = TYPE_UNSIGNED (inter_type);
2263 int final_int = INTEGRAL_TYPE_P (type);
2264 int final_ptr = POINTER_TYPE_P (type);
2265 int final_float = FLOAT_TYPE_P (type);
2266 int final_vec = VECTOR_TYPE_P (type);
2267 unsigned int final_prec = TYPE_PRECISION (type);
2268 int final_unsignedp = TYPE_UNSIGNED (type);
2271 /* In addition to the cases of two conversions in a row
2272 handled below, if we are converting something to its own
2273 type via an object of identical or wider precision, neither
2274 conversion is needed. */
2275 (if (((GIMPLE && useless_type_conversion_p (type, inside_type))
2277 && TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (inside_type)))
2278 && (((inter_int || inter_ptr) && final_int)
2279 || (inter_float && final_float))
2280 && inter_prec >= final_prec)
2283 /* Likewise, if the intermediate and initial types are either both
2284 float or both integer, we don't need the middle conversion if the
2285 former is wider than the latter and doesn't change the signedness
2286 (for integers). Avoid this if the final type is a pointer since
2287 then we sometimes need the middle conversion. */
2288 (if (((inter_int && inside_int) || (inter_float && inside_float))
2289 && (final_int || final_float)
2290 && inter_prec >= inside_prec
2291 && (inter_float || inter_unsignedp == inside_unsignedp))
2294 /* If we have a sign-extension of a zero-extended value, we can
2295 replace that by a single zero-extension. Likewise if the
2296 final conversion does not change precision we can drop the
2297 intermediate conversion. */
2298 (if (inside_int && inter_int && final_int
2299 && ((inside_prec < inter_prec && inter_prec < final_prec
2300 && inside_unsignedp && !inter_unsignedp)
2301 || final_prec == inter_prec))
2304 /* Two conversions in a row are not needed unless:
2305 - some conversion is floating-point (overstrict for now), or
2306 - some conversion is a vector (overstrict for now), or
2307 - the intermediate type is narrower than both initial and
2308   final types, or
2309 - the intermediate type and innermost type differ in signedness,
2310 and the outermost type is wider than the intermediate, or
2311 - the initial type is a pointer type and the precisions of the
2312 intermediate and final types differ, or
2313 - the final type is a pointer type and the precisions of the
2314 initial and intermediate types differ. */
2315 (if (! inside_float && ! inter_float && ! final_float
2316 && ! inside_vec && ! inter_vec && ! final_vec
2317 && (inter_prec >= inside_prec || inter_prec >= final_prec)
2318 && ! (inside_int && inter_int
2319 && inter_unsignedp != inside_unsignedp
2320 && inter_prec < final_prec)
2321 && ((inter_unsignedp && inter_prec > inside_prec)
2322 == (final_unsignedp && final_prec > inter_prec))
2323 && ! (inside_ptr && inter_prec != final_prec)
2324 && ! (final_ptr && inside_prec != inter_prec))
2327 /* A truncation to an unsigned type (a zero-extension) should be
2328 canonicalized as bitwise and of a mask. */
2329 (if (GIMPLE /* PR70366: doing this in GENERIC breaks -Wconversion. */
2330 && final_int && inter_int && inside_int
2331 && final_prec == inside_prec
2332 && final_prec > inter_prec
2334  (convert (bit_and @0 { wide_int_to_tree
2335                           (inside_type,
2336                            wi::mask (inter_prec, false,
2337                                      TYPE_PRECISION (inside_type))); })))
2339 /* If we are converting an integer to a floating-point type that can
2340 represent it exactly and back to an integer, we can skip the
2341 floating-point conversion. */
2342 (if (GIMPLE /* PR66211 */
2343 && inside_int && inter_float && final_int &&
2344 (unsigned) significand_size (TYPE_MODE (inter_type))
2345 >= inside_prec - !inside_unsignedp)
2348 /* If we have a narrowing conversion to an integral type that is fed by a
2349 BIT_AND_EXPR, we might be able to remove the BIT_AND_EXPR if it merely
2350 masks off bits outside the final type (and nothing else). */
2352 (convert (bit_and @0 INTEGER_CST@1))
2353 (if (INTEGRAL_TYPE_P (type)
2354 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
2355 && TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@0))
2356 && operand_equal_p (@1, build_low_bits_mask (TREE_TYPE (@1),
2357 TYPE_PRECISION (type)), 0))
2361 /* (X /[ex] A) * A -> X. */
2363 (mult (convert1? (exact_div @0 @@1)) (convert2? @1))
2366 /* Canonicalization of binary operations. */
2368 /* Convert X + -C into X - C. */
2370 (plus @0 REAL_CST@1)
2371 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
2372 (with { tree tem = const_unop (NEGATE_EXPR, type, @1); }
2373 (if (!TREE_OVERFLOW (tem) || !flag_trapping_math)
2374 (minus @0 { tem; })))))
2376 /* Convert x+x into x*2. */
2379 (if (SCALAR_FLOAT_TYPE_P (type))
2380 (mult @0 { build_real (type, dconst2); })
2381 (if (INTEGRAL_TYPE_P (type))
2382 (mult @0 { build_int_cst (type, 2); }))))
2385 (minus integer_zerop @1)
2388 /* (ARG0 - ARG1) is the same as (-ARG1 + ARG0). So check whether
2389 ARG0 is zero and X + ARG0 reduces to X, since that would mean
2390 (-ARG1 + ARG0) reduces to -ARG1. */
2392 (minus real_zerop@0 @1)
2393 (if (fold_real_zero_addition_p (type, @0, 0))
2396 /* Transform x * -1 into -x. */
2398 (mult @0 integer_minus_onep)
2401 /* Reassociate (X * CST) * Y to (X * Y) * CST. This does not introduce
2402 signed overflow for CST != 0 && CST != -1. */
2404 (mult:c (mult:s @0 INTEGER_CST@1) @2)
2405 (if (TREE_CODE (@2) != INTEGER_CST
2406 && !integer_zerop (@1) && !integer_minus_onep (@1))
2407 (mult (mult @0 @2) @1)))
2409 /* True if we can easily extract the real and imaginary parts of a complex
2410    number.  */
2411 (match compositional_complex
2412 (convert? (complex @0 @1)))
2414 /* COMPLEX_EXPR and REALPART/IMAGPART_EXPR cancellations. */
2416 (complex (realpart @0) (imagpart @0))
2419 (realpart (complex @0 @1))
2422 (imagpart (complex @0 @1))
2425 /* Sometimes we only care about half of a complex expression. */
2427 (realpart (convert?:s (conj:s @0)))
2428 (convert (realpart @0)))
2430 (imagpart (convert?:s (conj:s @0)))
2431 (convert (negate (imagpart @0))))
2432 (for part (realpart imagpart)
2433 (for op (plus minus)
2435 (part (convert?:s@2 (op:s @0 @1)))
2436 (convert (op (part @0) (part @1))))))
2438 (realpart (convert?:s (CEXPI:s @0)))
2441 (imagpart (convert?:s (CEXPI:s @0)))
2444 /* conj(conj(x)) -> x */
2446 (conj (convert? (conj @0)))
2447 (if (tree_nop_conversion_p (TREE_TYPE (@0), type))
2450 /* conj({x,y}) -> {x,-y} */
2452 (conj (convert?:s (complex:s @0 @1)))
2453 (with { tree itype = TREE_TYPE (type); }
2454 (complex (convert:itype @0) (negate (convert:itype @1)))))
2456 /* BSWAP simplifications, transforms checked by gcc.dg/builtin-bswap-8.c. */
2457 (for bswap (BUILT_IN_BSWAP16 BUILT_IN_BSWAP32 BUILT_IN_BSWAP64)
2462 (bswap (bit_not (bswap @0)))
2464 (for bitop (bit_xor bit_ior bit_and)
2466 (bswap (bitop:c (bswap @0) @1))
2467 (bitop @0 (bswap @1)))))
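/* Illustrative example (editorial, not an additional pattern):
     __builtin_bswap32 (__builtin_bswap32 (x) & y)
       ->  x & __builtin_bswap32 (y)
   and likewise bswap (~bswap (x)) -> ~x.  */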
2470 /* Combine COND_EXPRs and VEC_COND_EXPRs. */
2472 /* Simplify constant conditions.
2473 Only optimize constant conditions when the selected branch
2474 has the same type as the COND_EXPR. This avoids optimizing
2475 away "c ? x : throw", where the throw has a void type.
2476 Note that we cannot throw away the fold-const.c variant nor
2477 this one as we depend on doing this transform before possibly
2478 A ? B : B -> B triggers and the fold-const.c one can optimize
2479 0 ? A : B to B even if A has side-effects. Something
2480 genmatch cannot handle. */
2482 (cond INTEGER_CST@0 @1 @2)
2483 (if (integer_zerop (@0))
2484 (if (!VOID_TYPE_P (TREE_TYPE (@2)) || VOID_TYPE_P (type))
2486 (if (!VOID_TYPE_P (TREE_TYPE (@1)) || VOID_TYPE_P (type))
2489 (vec_cond VECTOR_CST@0 @1 @2)
2490 (if (integer_all_onesp (@0))
2492 (if (integer_zerop (@0))
2495 /* Simplification moved from fold_cond_expr_with_comparison.  It may also
2496    be extended.  */
2497 /* This pattern implements two kinds of simplification:
2500 (cond (cmp (convert1? x) c1) (convert2? x) c2) -> (minmax (x c)) if:
2501 1) Conversions are type widening from smaller type.
2502 2) Const c1 equals c2 after canonicalizing the comparison.
2503 3) Comparison has tree code LT, LE, GT or GE.
2504 This specific pattern is needed when (cmp (convert x) c) may not
2505 be simplified by comparison patterns because of multiple uses of
2506 x. It also makes sense here because simplifying across multiple
2507 referred variables is always beneficial for complicated cases.
2510 (cond (eq (convert1? x) c1) (convert2? x) c2) -> (cond (eq x c1) c1 c2). */
2511 (for cmp (lt le gt ge eq)
2513 (cond (cmp (convert1? @1) INTEGER_CST@3) (convert2? @1) INTEGER_CST@2)
2516 tree from_type = TREE_TYPE (@1);
2517 tree c1_type = TREE_TYPE (@3), c2_type = TREE_TYPE (@2);
2518 enum tree_code code = ERROR_MARK;
2520 if (INTEGRAL_TYPE_P (from_type)
2521 && int_fits_type_p (@2, from_type)
2522 && (types_match (c1_type, from_type)
2523 || (TYPE_PRECISION (c1_type) > TYPE_PRECISION (from_type)
2524 && (TYPE_UNSIGNED (from_type)
2525 || TYPE_SIGN (c1_type) == TYPE_SIGN (from_type))))
2526 && (types_match (c2_type, from_type)
2527 || (TYPE_PRECISION (c2_type) > TYPE_PRECISION (from_type)
2528 && (TYPE_UNSIGNED (from_type)
2529 || TYPE_SIGN (c2_type) == TYPE_SIGN (from_type)))))
2533 if (wi::to_widest (@3) == (wi::to_widest (@2) - 1))
2535 /* X <= Y - 1 equals to X < Y. */
2538 /* X > Y - 1 equals to X >= Y. */
2542 if (wi::to_widest (@3) == (wi::to_widest (@2) + 1))
2544 /* X < Y + 1 equals to X <= Y. */
2547 /* X >= Y + 1 equals to X > Y. */
2551 if (code != ERROR_MARK
2552 || wi::to_widest (@2) == wi::to_widest (@3))
2554 if (cmp == LT_EXPR || cmp == LE_EXPR)
2556 if (cmp == GT_EXPR || cmp == GE_EXPR)
2560 /* Can do A == C1 ? A : C2 -> A == C1 ? C1 : C2? */
2561 else if (int_fits_type_p (@3, from_type))
2565 (if (code == MAX_EXPR)
2566 (convert (max @1 (convert @2)))
2567 (if (code == MIN_EXPR)
2568 (convert (min @1 (convert @2)))
2569 (if (code == EQ_EXPR)
2570 (convert (cond (eq @1 (convert @3))
2571 (convert:from_type @3) (convert:from_type @2)))))))))
2573 /* (cond (cmp (convert? x) c1) (op x c2) c3) -> (op (minmax x c1) c2) if:
2575 1) OP is PLUS or MINUS.
2576 2) CMP is LT, LE, GT or GE.
2577 3) C3 == (C1 op C2), and computation doesn't have undefined behavior.
2579 This pattern also handles special cases like:
2581 A) Operand x is an unsigned to signed type conversion and c1 is
2582 integer zero. In this case,
2583 (signed type)x < 0 <=> x > MAX_VAL(signed type)
2584 (signed type)x >= 0 <=> x <= MAX_VAL(signed type)
2585 B) Const c1 may not be equal to (C3 op' C2).  In this case we also
2586    check equality for (c1+1) and (c1-1) by adjusting the comparison
2587    code.
2589 TODO: Though signed type is handled by this pattern, it cannot be
2590 simplified at the moment because the C standard requires additional
2591 type promotion. In order to match&simplify it here, the IR needs
2592 to be cleaned up by other optimizers, i.e., VRP.  */
2593 (for op (plus minus)
2594 (for cmp (lt le gt ge)
2596 (cond (cmp (convert? @X) INTEGER_CST@1) (op @X INTEGER_CST@2) INTEGER_CST@3)
2597 (with { tree from_type = TREE_TYPE (@X), to_type = TREE_TYPE (@1); }
2598 (if (types_match (from_type, to_type)
2599 /* Check if it is special case A). */
2600 || (TYPE_UNSIGNED (from_type)
2601 && !TYPE_UNSIGNED (to_type)
2602 && TYPE_PRECISION (from_type) == TYPE_PRECISION (to_type)
2603 && integer_zerop (@1)
2604 && (cmp == LT_EXPR || cmp == GE_EXPR)))
2607 bool overflow = false;
2608 enum tree_code code, cmp_code = cmp;
2609 wide_int real_c1;
2610 wide_int c1 = wi::to_wide (@1);
2611 wide_int c2 = wi::to_wide (@2);
2612 wide_int c3 = wi::to_wide (@3);
2613 signop sgn = TYPE_SIGN (from_type);
2615 /* Handle special case A), given x of unsigned type:
2616 ((signed type)x < 0) <=> (x > MAX_VAL(signed type))
2617 ((signed type)x >= 0) <=> (x <= MAX_VAL(signed type)) */
2618 if (!types_match (from_type, to_type))
2620 if (cmp_code == LT_EXPR)
2622 if (cmp_code == GE_EXPR)
2624 c1 = wi::max_value (to_type);
2626 /* To simplify this pattern, we require c3 = (c1 op c2). Here we
2627 compute (c3 op' c2) and check if it equals c1 with op' being
2628 the inverted operator of op. Make sure overflow doesn't happen
2629 if it is undefined. */
2630 if (op == PLUS_EXPR)
2631 real_c1 = wi::sub (c3, c2, sgn, &overflow);
2633 real_c1 = wi::add (c3, c2, sgn, &overflow);
2636 if (!overflow || !TYPE_OVERFLOW_UNDEFINED (from_type))
2638 /* Check if c1 equals real_c1.  The boundary condition is handled
2639 by adjusting comparison operation if necessary. */
2640 if (!wi::cmp (wi::sub (real_c1, 1, sgn, &overflow), c1, sgn)
2643 /* X <= Y - 1 equals to X < Y. */
2644 if (cmp_code == LE_EXPR)
2646 /* X > Y - 1 equals to X >= Y. */
2647 if (cmp_code == GT_EXPR)
2650 if (!wi::cmp (wi::add (real_c1, 1, sgn, &overflow), c1, sgn)
2653 /* X < Y + 1 equals to X <= Y. */
2654 if (cmp_code == LT_EXPR)
2656 /* X >= Y + 1 equals to X > Y. */
2657 if (cmp_code == GE_EXPR)
2660 if (code != cmp_code || !wi::cmp (real_c1, c1, sgn))
2662 if (cmp_code == LT_EXPR || cmp_code == LE_EXPR)
2664 if (cmp_code == GT_EXPR || cmp_code == GE_EXPR)
2669 (if (code == MAX_EXPR)
2670 (op (max @X { wide_int_to_tree (from_type, real_c1); })
2671 { wide_int_to_tree (from_type, c2); })
2672 (if (code == MIN_EXPR)
2673 (op (min @X { wide_int_to_tree (from_type, real_c1); })
2674 { wide_int_to_tree (from_type, c2); })))))))))
2676 (for cnd (cond vec_cond)
2677 /* A ? B : (A ? X : C) -> A ? B : C. */
2679 (cnd @0 (cnd @0 @1 @2) @3)
2682 (cnd @0 @1 (cnd @0 @2 @3))
2684 /* A ? B : (!A ? C : X) -> A ? B : C. */
2685 /* ??? This matches embedded conditions open-coded because genmatch
2686 would generate matching code for conditions in separate stmts only.
2687 The following is still important to merge then and else arm cases
2688 from if-conversion. */
2690 (cnd @0 @1 (cnd @2 @3 @4))
2691 (if (COMPARISON_CLASS_P (@0)
2692 && COMPARISON_CLASS_P (@2)
2693 && invert_tree_comparison
2694 (TREE_CODE (@0), HONOR_NANS (TREE_OPERAND (@0, 0))) == TREE_CODE (@2)
2695 && operand_equal_p (TREE_OPERAND (@0, 0), TREE_OPERAND (@2, 0), 0)
2696 && operand_equal_p (TREE_OPERAND (@0, 1), TREE_OPERAND (@2, 1), 0))
2699 (cnd @0 (cnd @1 @2 @3) @4)
2700 (if (COMPARISON_CLASS_P (@0)
2701 && COMPARISON_CLASS_P (@1)
2702 && invert_tree_comparison
2703 (TREE_CODE (@0), HONOR_NANS (TREE_OPERAND (@0, 0))) == TREE_CODE (@1)
2704 && operand_equal_p (TREE_OPERAND (@0, 0), TREE_OPERAND (@1, 0), 0)
2705 && operand_equal_p (TREE_OPERAND (@0, 1), TREE_OPERAND (@1, 1), 0))
2708 /* A ? B : B -> B. */
2713 /* !A ? B : C -> A ? C : B. */
2715 (cnd (logical_inverted_value truth_valued_p@0) @1 @2)
2718 /* A + (B vcmp C ? 1 : 0) -> A - (B vcmp C ? -1 : 0), since vector comparisons
2719 return all -1 or all 0 results. */
2720 /* ??? We could instead convert all instances of the vec_cond to negate,
2721 but that isn't necessarily a win on its own. */
2723 (plus:c @3 (view_convert? (vec_cond:s @0 integer_each_onep@1 integer_zerop@2)))
2724 (if (VECTOR_TYPE_P (type)
2725 && TYPE_VECTOR_SUBPARTS (type) == TYPE_VECTOR_SUBPARTS (TREE_TYPE (@1))
2726 && (TYPE_MODE (TREE_TYPE (type))
2727 == TYPE_MODE (TREE_TYPE (TREE_TYPE (@1)))))
2728 (minus @3 (view_convert (vec_cond @0 (negate @1) @2)))))
2730 /* ... likewise A - (B vcmp C ? 1 : 0) -> A + (B vcmp C ? -1 : 0). */
2732 (minus @3 (view_convert? (vec_cond:s @0 integer_each_onep@1 integer_zerop@2)))
2733 (if (VECTOR_TYPE_P (type)
2734 && TYPE_VECTOR_SUBPARTS (type) == TYPE_VECTOR_SUBPARTS (TREE_TYPE (@1))
2735 && (TYPE_MODE (TREE_TYPE (type))
2736 == TYPE_MODE (TREE_TYPE (TREE_TYPE (@1)))))
2737 (plus @3 (view_convert (vec_cond @0 (negate @1) @2)))))
2740 /* Simplifications of comparisons. */
2742 /* See if we can reduce the magnitude of a constant involved in a
2743 comparison by changing the comparison code. This is a canonicalization
2744 formerly done by maybe_canonicalize_comparison_1. */
2748 (cmp @0 INTEGER_CST@1)
2749 (if (tree_int_cst_sgn (@1) == -1)
2750 (acmp @0 { wide_int_to_tree (TREE_TYPE (@1), wi::to_wide (@1) + 1); }))))
2754 (cmp @0 INTEGER_CST@1)
2755 (if (tree_int_cst_sgn (@1) == 1)
2756 (acmp @0 { wide_int_to_tree (TREE_TYPE (@1), wi::to_wide (@1) - 1); }))))
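/* Illustrative examples (editorial, not additional patterns): for signed int
     x <= -5  ->  x < -4          x >= 5  ->  x > 4
   i.e. the constant moves one step closer to zero.  */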
2759 /* We can simplify a logical negation of a comparison to the
2760 inverted comparison. As we cannot compute an expression
2761 operator using invert_tree_comparison we have to simulate
2762 that with expression code iteration. */
2763 (for cmp (tcc_comparison)
2764 icmp (inverted_tcc_comparison)
2765 ncmp (inverted_tcc_comparison_with_nans)
2766 /* Ideally we'd like to combine the following two patterns
2767 and handle some more cases by using
2768 (logical_inverted_value (cmp @0 @1))
2769 here but for that genmatch would need to "inline" that.
2770 For now implement what forward_propagate_comparison did. */
2772 (bit_not (cmp @0 @1))
2773 (if (VECTOR_TYPE_P (type)
2774 || (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) == 1))
2775 /* Comparison inversion may be impossible for trapping math,
2776 invert_tree_comparison will tell us. But we can't use
2777 a computed operator in the replacement tree thus we have
2778 to play the trick below. */
2779 (with { enum tree_code ic = invert_tree_comparison
2780 (cmp, HONOR_NANS (@0)); }
2786 (bit_xor (cmp @0 @1) integer_truep)
2787 (with { enum tree_code ic = invert_tree_comparison
2788 (cmp, HONOR_NANS (@0)); }
2794 /* Transform comparisons of the form X - Y CMP 0 to X CMP Y.
2795 ??? The transformation is valid for the other operators if overflow
2796 is undefined for the type, but performing it here badly interacts
2797 with the transformation in fold_cond_expr_with_comparison which
2798 attempts to synthesize ABS_EXPR.  */
2801 (cmp (minus@2 @0 @1) integer_zerop)
2802 (if (single_use (@2))
2805 /* Transform comparisons of the form X * C1 CMP 0 to X CMP 0 in the
2806 signed arithmetic case. That form is created by the compiler
2807 often enough for folding it to be of value. One example is in
2808 computing loop trip counts after Operator Strength Reduction. */
2809 (for cmp (simple_comparison)
2810 scmp (swapped_simple_comparison)
2812 (cmp (mult@3 @0 INTEGER_CST@1) integer_zerop@2)
2813 /* Handle unfolded multiplication by zero. */
2814 (if (integer_zerop (@1))
2816 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2817 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
2819 /* If @1 is negative we swap the sense of the comparison. */
2820 (if (tree_int_cst_sgn (@1) < 0)
2824 /* Simplify comparison of something with itself. For IEEE
2825 floating-point, we can only do some of these simplifications. */
2829 (if (! FLOAT_TYPE_P (TREE_TYPE (@0))
2830 || ! HONOR_NANS (@0))
2831 { constant_boolean_node (true, type); }
2832 (if (cmp != EQ_EXPR)
2838 || ! FLOAT_TYPE_P (TREE_TYPE (@0))
2839 || ! HONOR_NANS (@0))
2840 { constant_boolean_node (false, type); })))
2841 (for cmp (unle unge uneq)
2844 { constant_boolean_node (true, type); }))
2845 (for cmp (unlt ungt)
2851 (if (!flag_trapping_math)
2852 { constant_boolean_node (false, type); }))
2854 /* Fold ~X op ~Y as Y op X. */
2855 (for cmp (simple_comparison)
2857 (cmp (bit_not@2 @0) (bit_not@3 @1))
2858 (if (single_use (@2) && single_use (@3))
2861 /* Fold ~X op C as X op' ~C, where op' is the swapped comparison. */
2862 (for cmp (simple_comparison)
2863 scmp (swapped_simple_comparison)
2865 (cmp (bit_not@2 @0) CONSTANT_CLASS_P@1)
2866 (if (single_use (@2)
2867 && (TREE_CODE (@1) == INTEGER_CST || TREE_CODE (@1) == VECTOR_CST))
2868 (scmp @0 (bit_not @1)))))
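/* Illustrative example (editorial, not an additional pattern):
     ~x < 5  ->  x > ~5  ==  x > -6
   the comparison is swapped and the constant complemented.  */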
2870 (for cmp (simple_comparison)
2871 /* Fold (double)float1 CMP (double)float2 into float1 CMP float2. */
2873 (cmp (convert@2 @0) (convert? @1))
2874 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
2875 && (DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@2))
2876 == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@0)))
2877 && (DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@2))
2878 == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1))))
2881 tree type1 = TREE_TYPE (@1);
2882 if (TREE_CODE (@1) == REAL_CST && !DECIMAL_FLOAT_TYPE_P (type1))
2884 REAL_VALUE_TYPE orig = TREE_REAL_CST (@1);
2885 if (TYPE_PRECISION (type1) > TYPE_PRECISION (float_type_node)
2886 && exact_real_truncate (TYPE_MODE (float_type_node), &orig))
2887 type1 = float_type_node;
2888 if (TYPE_PRECISION (type1) > TYPE_PRECISION (double_type_node)
2889 && exact_real_truncate (TYPE_MODE (double_type_node), &orig))
2890 type1 = double_type_node;
2893 = (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (type1)
2894 ? TREE_TYPE (@0) : type1);
2896 (if (TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (newtype))
2897 (cmp (convert:newtype @0) (convert:newtype @1))))))
2901 /* IEEE doesn't distinguish +0 and -0 in comparisons. */
2903 /* a CMP (-0) -> a CMP 0 */
2904 (if (REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (@1)))
2905 (cmp @0 { build_real (TREE_TYPE (@1), dconst0); }))
2906 /* x != NaN is always true, other ops are always false. */
2907 (if (REAL_VALUE_ISNAN (TREE_REAL_CST (@1))
2908 && ! HONOR_SNANS (@1))
2909 { constant_boolean_node (cmp == NE_EXPR, type); })
2910 /* Fold comparisons against infinity. */
2911 (if (REAL_VALUE_ISINF (TREE_REAL_CST (@1))
2912 && MODE_HAS_INFINITIES (TYPE_MODE (TREE_TYPE (@1))))
2915 REAL_VALUE_TYPE max;
2916 enum tree_code code = cmp;
2917 bool neg = REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1));
2919 code = swap_tree_comparison (code);
2922 /* x > +Inf is always false, if we ignore sNaNs.  */
2923 (if (code == GT_EXPR
2924 && ! HONOR_SNANS (@0))
2925 { constant_boolean_node (false, type); })
2926 (if (code == LE_EXPR)
2927 /* x <= +Inf is always true, if we don't care about NaNs.  */
2928 (if (! HONOR_NANS (@0))
2929 { constant_boolean_node (true, type); }
2930 /* x <= +Inf is the same as x == x, i.e. !isnan(x). */
2932 /* x == +Inf and x >= +Inf are always equal to x > DBL_MAX. */
2933 (if (code == EQ_EXPR || code == GE_EXPR)
2934 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
2936 (lt @0 { build_real (TREE_TYPE (@0), max); })
2937 (gt @0 { build_real (TREE_TYPE (@0), max); }))))
2938 /* x < +Inf is always equal to x <= DBL_MAX. */
2939 (if (code == LT_EXPR)
2940 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
2942 (ge @0 { build_real (TREE_TYPE (@0), max); })
2943 (le @0 { build_real (TREE_TYPE (@0), max); }))))
2944 /* x != +Inf is always equal to !(x > DBL_MAX). */
2945 (if (code == NE_EXPR)
2946 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
2947 (if (! HONOR_NANS (@0))
2949 (ge @0 { build_real (TREE_TYPE (@0), max); })
2950 (le @0 { build_real (TREE_TYPE (@0), max); }))
2952 (bit_xor (lt @0 { build_real (TREE_TYPE (@0), max); })
2953 { build_one_cst (type); })
2954 (bit_xor (gt @0 { build_real (TREE_TYPE (@0), max); })
2955 { build_one_cst (type); }))))))))))
2957 /* If this is a comparison of a real constant with a PLUS_EXPR
2958 or a MINUS_EXPR of a real constant, we can convert it into a
2959 comparison with a revised real constant as long as no overflow
2960 occurs when unsafe_math_optimizations are enabled. */
2961 (if (flag_unsafe_math_optimizations)
2962 (for op (plus minus)
2964 (cmp (op @0 REAL_CST@1) REAL_CST@2)
2967 tree tem = const_binop (op == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR,
2968 TREE_TYPE (@1), @2, @1);
2970 (if (tem && !TREE_OVERFLOW (tem))
2971 (cmp @0 { tem; }))))))
2973 /* Likewise, we can simplify a comparison of a real constant with
2974 a MINUS_EXPR whose first operand is also a real constant, i.e.
2975 (c1 - x) < c2 becomes x > c1-c2. Reordering is allowed on
2976 floating-point types only if -fassociative-math is set. */
2977 (if (flag_associative_math)
2979 (cmp (minus REAL_CST@0 @1) REAL_CST@2)
2980 (with { tree tem = const_binop (MINUS_EXPR, TREE_TYPE (@1), @0, @2); }
2981 (if (tem && !TREE_OVERFLOW (tem))
2982 (cmp { tem; } @1)))))
2984 /* Fold comparisons against built-in math functions. */
2985 (if (flag_unsafe_math_optimizations
2986 && ! flag_errno_math)
2989 (cmp (sq @0) REAL_CST@1)
2991 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
2993 /* sqrt(x) < y is always false, if y is negative. */
2994 (if (cmp == EQ_EXPR || cmp == LT_EXPR || cmp == LE_EXPR)
2995 { constant_boolean_node (false, type); })
2996 /* sqrt(x) > y is always true, if y is negative and we
2997 don't care about NaNs, i.e. negative values of x. */
2998 (if (cmp == NE_EXPR || !HONOR_NANS (@0))
2999 { constant_boolean_node (true, type); })
3000 /* sqrt(x) > y is the same as x >= 0, if y is negative. */
3001 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })))
3002 (if (real_equal (TREE_REAL_CST_PTR (@1), &dconst0))
3004 /* sqrt(x) < 0 is always false. */
3005 (if (cmp == LT_EXPR)
3006 { constant_boolean_node (false, type); })
3007 /* sqrt(x) >= 0 is always true if we don't care about NaNs. */
3008 (if (cmp == GE_EXPR && !HONOR_NANS (@0))
3009 { constant_boolean_node (true, type); })
3010 /* sqrt(x) <= 0 -> x == 0. */
3011 (if (cmp == LE_EXPR)
3013 /* Otherwise sqrt(x) cmp 0 -> x cmp 0. Here cmp can be >=, >,
3014 == or !=. In the last case:
3016 (sqrt(x) != 0) == (NaN != 0) == true == (x != 0)
3018 if x is negative or NaN. Due to -funsafe-math-optimizations,
3019 the results for other x follow from natural arithmetic. */
3021 (if (cmp == GT_EXPR || cmp == GE_EXPR)
3025 real_arithmetic (&c2, MULT_EXPR,
3026 &TREE_REAL_CST (@1), &TREE_REAL_CST (@1));
3027 real_convert (&c2, TYPE_MODE (TREE_TYPE (@0)), &c2);
3029 (if (REAL_VALUE_ISINF (c2))
3030 /* sqrt(x) > y is x == +Inf, when y is very large. */
3031 (if (HONOR_INFINITIES (@0))
3032 (eq @0 { build_real (TREE_TYPE (@0), c2); })
3033 { constant_boolean_node (false, type); })
3034 /* sqrt(x) > c is the same as x > c*c. */
3035 (cmp @0 { build_real (TREE_TYPE (@0), c2); }))))
3036 (if (cmp == LT_EXPR || cmp == LE_EXPR)
3040 real_arithmetic (&c2, MULT_EXPR,
3041 &TREE_REAL_CST (@1), &TREE_REAL_CST (@1));
3042 real_convert (&c2, TYPE_MODE (TREE_TYPE (@0)), &c2);
3044 (if (REAL_VALUE_ISINF (c2))
3046 /* sqrt(x) < y is always true, when y is a very large
3047 value and we don't care about NaNs or Infinities. */
3048 (if (! HONOR_NANS (@0) && ! HONOR_INFINITIES (@0))
3049 { constant_boolean_node (true, type); })
3050 /* sqrt(x) < y is x != +Inf when y is very large and we
3051 don't care about NaNs. */
3052 (if (! HONOR_NANS (@0))
3053 (ne @0 { build_real (TREE_TYPE (@0), c2); }))
3054 /* sqrt(x) < y is x >= 0 when y is very large and we
3055 don't care about Infinities. */
3056 (if (! HONOR_INFINITIES (@0))
3057 (ge @0 { build_real (TREE_TYPE (@0), dconst0); }))
3058 /* sqrt(x) < y is x >= 0 && x != +Inf, when y is large. */
3061 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
3062 (ne @0 { build_real (TREE_TYPE (@0), c2); }))))
3063 /* sqrt(x) < c is the same as x < c*c, if we ignore NaNs. */
3064 (if (! HONOR_NANS (@0))
3065 (cmp @0 { build_real (TREE_TYPE (@0), c2); })
3066 /* sqrt(x) < c is the same as x >= 0 && x < c*c. */
3069 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
3070 (cmp @0 { build_real (TREE_TYPE (@0), c2); })))))))))
3071 /* Transform sqrt(x) cmp sqrt(y) -> x cmp y. */
3073 (cmp (sq @0) (sq @1))
3074 (if (! HONOR_NANS (@0))
3077 /* Optimize various special cases of (FTYPE) N CMP CST. */
3078 (for cmp (lt le eq ne ge gt)
3079 icmp (le le eq ne ge ge)
3081 (cmp (float @0) REAL_CST@1)
3082 (if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (@1))
3083 && ! DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1)))
3086 tree itype = TREE_TYPE (@0);
3087 signop isign = TYPE_SIGN (itype);
3088 format_helper fmt (REAL_MODE_FORMAT (TYPE_MODE (TREE_TYPE (@1))));
3089 const REAL_VALUE_TYPE *cst = TREE_REAL_CST_PTR (@1);
3090 /* Be careful to preserve any potential exceptions due to
3091 NaNs. qNaNs are ok in == or != context.
3092 TODO: relax under -fno-trapping-math or
3093 -fno-signaling-nans. */
3094 bool exception_p
3095   = real_isnan (cst) && (cst->signalling
3096 || (cmp != EQ_EXPR && cmp != NE_EXPR));
3097 /* INT?_MIN is power-of-two so it takes
3098 only one mantissa bit. */
3099 bool signed_p = isign == SIGNED;
3100 bool itype_fits_ftype_p
3101 = TYPE_PRECISION (itype) - signed_p <= significand_size (fmt);
3103 /* TODO: allow non-fitting itype and SNaNs when
3104 -fno-trapping-math. */
3105 (if (itype_fits_ftype_p && ! exception_p)
3108 REAL_VALUE_TYPE imin, imax;
3109 real_from_integer (&imin, fmt, wi::min_value (itype), isign);
3110 real_from_integer (&imax, fmt, wi::max_value (itype), isign);
3112 REAL_VALUE_TYPE icst;
3113 if (cmp == GT_EXPR || cmp == GE_EXPR)
3114 real_ceil (&icst, fmt, cst);
3115 else if (cmp == LT_EXPR || cmp == LE_EXPR)
3116 real_floor (&icst, fmt, cst);
3118 real_trunc (&icst, fmt, cst);
3120 bool cst_int_p = !real_isnan (cst) && real_identical (&icst, cst);
3122 bool overflow_p = false;
3124 = real_to_integer (&icst, &overflow_p, TYPE_PRECISION (itype));
3127 /* Optimize cases when CST is outside of ITYPE's range. */
3128 (if (real_compare (LT_EXPR, cst, &imin))
3129 { constant_boolean_node (cmp == GT_EXPR || cmp == GE_EXPR || cmp == NE_EXPR,
3131 (if (real_compare (GT_EXPR, cst, &imax))
3132 { constant_boolean_node (cmp == LT_EXPR || cmp == LE_EXPR || cmp == NE_EXPR,
3134 /* Remove cast if CST is an integer representable by ITYPE. */
3136 (cmp @0 { gcc_assert (!overflow_p);
3137 wide_int_to_tree (itype, icst_val); })
3139 /* When CST is fractional, optimize
3140 (FTYPE) N == CST -> 0
3141 (FTYPE) N != CST -> 1. */
3142 (if (cmp == EQ_EXPR || cmp == NE_EXPR)
3143 { constant_boolean_node (cmp == NE_EXPR, type); })
3144 /* Otherwise replace with sensible integer constant. */
3147 gcc_checking_assert (!overflow_p);
3149 (icmp @0 { wide_int_to_tree (itype, icst_val); })))))))))
3151 /* Fold A /[ex] B CMP C to A CMP B * C. */
3154 (cmp (exact_div @0 @1) INTEGER_CST@2)
3155 (if (!integer_zerop (@1))
3156 (if (wi::to_wide (@2) == 0)
3158 (if (TREE_CODE (@1) == INTEGER_CST)
3162 wide_int prod = wi::mul (wi::to_wide (@2), wi::to_wide (@1),
3163 TYPE_SIGN (TREE_TYPE (@1)), &ovf);
3166 { constant_boolean_node (cmp == NE_EXPR, type); }
3167 (cmp @0 { wide_int_to_tree (TREE_TYPE (@0), prod); }))))))))
3168 (for cmp (lt le gt ge)
3170 (cmp (exact_div @0 INTEGER_CST@1) INTEGER_CST@2)
3171 (if (wi::gt_p (wi::to_wide (@1), 0, TYPE_SIGN (TREE_TYPE (@1))))
3175 wide_int prod = wi::mul (wi::to_wide (@2), wi::to_wide (@1),
3176 TYPE_SIGN (TREE_TYPE (@1)), &ovf);
3179 { constant_boolean_node (wi::lt_p (wi::to_wide (@2), 0,
3180 TYPE_SIGN (TREE_TYPE (@2)))
3181 != (cmp == LT_EXPR || cmp == LE_EXPR), type); }
3182 (cmp @0 { wide_int_to_tree (TREE_TYPE (@0), prod); }))))))
3184 /* Unordered tests if either argument is a NaN. */
3186 (bit_ior (unordered @0 @0) (unordered @1 @1))
3187 (if (types_match (@0, @1))
3190 (bit_and (ordered @0 @0) (ordered @1 @1))
3191 (if (types_match (@0, @1))
3194 (bit_ior:c (unordered @0 @0) (unordered:c@2 @0 @1))
3197 (bit_and:c (ordered @0 @0) (ordered:c@2 @0 @1))
3200 /* Simple range test simplifications. */
3201 /* A < B || A >= B -> true. */
3202 (for test1 (lt le le le ne ge)
3203 test2 (ge gt ge ne eq ne)
3205 (bit_ior:c (test1 @0 @1) (test2 @0 @1))
3206 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
3207 || VECTOR_INTEGER_TYPE_P (TREE_TYPE (@0)))
3208 { constant_boolean_node (true, type); })))
3209 /* A < B && A >= B -> false. */
3210 (for test1 (lt lt lt le ne eq)
3211 test2 (ge gt eq gt eq gt)
3213 (bit_and:c (test1 @0 @1) (test2 @0 @1))
3214 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
3215 || VECTOR_INTEGER_TYPE_P (TREE_TYPE (@0)))
3216 { constant_boolean_node (false, type); })))
3218 /* A & (2**N - 1) <= 2**K - 1 -> A & (2**N - 2**K) == 0
3219 A & (2**N - 1) > 2**K - 1 -> A & (2**N - 2**K) != 0
3221 Note that comparisons
3222 A & (2**N - 1) < 2**K -> A & (2**N - 2**K) == 0
3223 A & (2**N - 1) >= 2**K -> A & (2**N - 2**K) != 0
3224 will be canonicalized to above so there's no need to
3225 consider them here.  */
3231 (cmp (bit_and@0 @1 INTEGER_CST@2) INTEGER_CST@3)
3232 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)))
3235 tree ty = TREE_TYPE (@0);
3236 unsigned prec = TYPE_PRECISION (ty);
3237 wide_int mask = wi::to_wide (@2, prec);
3238 wide_int rhs = wi::to_wide (@3, prec);
3239 signop sgn = TYPE_SIGN (ty);
3241 (if ((mask & (mask + 1)) == 0 && wi::gt_p (rhs, 0, sgn)
3242 && (rhs & (rhs + 1)) == 0 && wi::ge_p (mask, rhs, sgn))
3243 (eqcmp (bit_and @1 { wide_int_to_tree (ty, mask - rhs); })
3244 { build_zero_cst (ty); }))))))
3246 /* -A CMP -B -> B CMP A. */
3247 (for cmp (tcc_comparison)
3248 scmp (swapped_tcc_comparison)
3250 (cmp (negate @0) (negate @1))
3251 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
3252 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
3253 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
3256 (cmp (negate @0) CONSTANT_CLASS_P@1)
3257 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
3258 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
3259 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
3260 (with { tree tem = const_unop (NEGATE_EXPR, TREE_TYPE (@0), @1); }
3261 (if (tem && !TREE_OVERFLOW (tem))
3262 (scmp @0 { tem; }))))))
3264 /* Convert ABS_EXPR<x> == 0 or ABS_EXPR<x> != 0 to x == 0 or x != 0. */
3267 (op (abs @0) zerop@1)
3270 /* From fold_sign_changed_comparison and fold_widened_comparison.
3271 FIXME: the lack of symmetry is disturbing. */
3272 (for cmp (simple_comparison)
3274 (cmp (convert@0 @00) (convert?@1 @10))
3275 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
3276 /* Disable this optimization if we're casting a function pointer
3277 type on targets that require function pointer canonicalization. */
3278 && !(targetm.have_canonicalize_funcptr_for_compare ()
3279 && TREE_CODE (TREE_TYPE (@00)) == POINTER_TYPE
3280 && TREE_CODE (TREE_TYPE (TREE_TYPE (@00))) == FUNCTION_TYPE)
3282 (if (TYPE_PRECISION (TREE_TYPE (@00)) == TYPE_PRECISION (TREE_TYPE (@0))
3283 && (TREE_CODE (@10) == INTEGER_CST
3285 && (TYPE_UNSIGNED (TREE_TYPE (@00)) == TYPE_UNSIGNED (TREE_TYPE (@0))
3288 && !POINTER_TYPE_P (TREE_TYPE (@00)))
3289 /* ??? The special-casing of INTEGER_CST conversion was in the original
3290 code and here to avoid a spurious overflow flag on the resulting
3291 constant which fold_convert produces. */
3292 (if (TREE_CODE (@1) == INTEGER_CST)
3293 (cmp @00 { force_fit_type (TREE_TYPE (@00), wi::to_widest (@1), 0,
3294 TREE_OVERFLOW (@1)); })
3295 (cmp @00 (convert @1)))
3297 (if (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (TREE_TYPE (@00)))
3298 /* If possible, express the comparison in the shorter mode. */
3299 (if ((cmp == EQ_EXPR || cmp == NE_EXPR
3300 || TYPE_UNSIGNED (TREE_TYPE (@0)) == TYPE_UNSIGNED (TREE_TYPE (@00))
3301 || (!TYPE_UNSIGNED (TREE_TYPE (@0))
3302 && TYPE_UNSIGNED (TREE_TYPE (@00))))
3303 && (types_match (TREE_TYPE (@10), TREE_TYPE (@00))
3304 || ((TYPE_PRECISION (TREE_TYPE (@00))
3305 >= TYPE_PRECISION (TREE_TYPE (@10)))
3306 && (TYPE_UNSIGNED (TREE_TYPE (@00))
3307 == TYPE_UNSIGNED (TREE_TYPE (@10))))
3308 || (TREE_CODE (@10) == INTEGER_CST
3309 && INTEGRAL_TYPE_P (TREE_TYPE (@00))
3310 && int_fits_type_p (@10, TREE_TYPE (@00)))))
3311 (cmp @00 (convert @10))
3312 (if (TREE_CODE (@10) == INTEGER_CST
3313 && INTEGRAL_TYPE_P (TREE_TYPE (@00))
3314 && !int_fits_type_p (@10, TREE_TYPE (@00)))
3317 tree min = lower_bound_in_type (TREE_TYPE (@10), TREE_TYPE (@00));
3318 tree max = upper_bound_in_type (TREE_TYPE (@10), TREE_TYPE (@00));
3319 bool above = integer_nonzerop (const_binop (LT_EXPR, type, max, @10));
3320 bool below = integer_nonzerop (const_binop (LT_EXPR, type, @10, min));
3322 (if (above || below)
3323 (if (cmp == EQ_EXPR || cmp == NE_EXPR)
3324 { constant_boolean_node (cmp == EQ_EXPR ? false : true, type); }
3325 (if (cmp == LT_EXPR || cmp == LE_EXPR)
3326 { constant_boolean_node (above ? true : false, type); }
3327 (if (cmp == GT_EXPR || cmp == GE_EXPR)
3328 { constant_boolean_node (above ? false : true, type); }))))))))))))
3331 /* A local variable can never be pointed to by
3332 the default SSA name of an incoming parameter.
3333 SSA names are canonicalized to 2nd place. */
3335 (cmp addr@0 SSA_NAME@1)
3336 (if (SSA_NAME_IS_DEFAULT_DEF (@1)
3337 && TREE_CODE (SSA_NAME_VAR (@1)) == PARM_DECL)
3338 (with { tree base = get_base_address (TREE_OPERAND (@0, 0)); }
3339 (if (TREE_CODE (base) == VAR_DECL
3340 && auto_var_in_fn_p (base, current_function_decl))
3341 (if (cmp == NE_EXPR)
3342 { constant_boolean_node (true, type); }
3343 { constant_boolean_node (false, type); }))))))
3345 /* Equality compare simplifications from fold_binary */
3348 /* If we have (A | C) == D where C & ~D != 0, convert this into 0.
3349 Similarly for NE_EXPR. */
3351 (cmp (convert?@3 (bit_ior @0 INTEGER_CST@1)) INTEGER_CST@2)
3352 (if (tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0))
3353 && wi::bit_and_not (wi::to_wide (@1), wi::to_wide (@2)) != 0)
3354 { constant_boolean_node (cmp == NE_EXPR, type); }))
3356 /* (X ^ Y) == 0 becomes X == Y, and (X ^ Y) != 0 becomes X != Y. */
3358 (cmp (bit_xor @0 @1) integer_zerop)
3361 /* (X ^ Y) == Y becomes X == 0.
3362 Likewise (X ^ Y) == X becomes Y == 0. */
3364 (cmp:c (bit_xor:c @0 @1) @0)
3365 (cmp @1 { build_zero_cst (TREE_TYPE (@1)); }))
3367 /* (X ^ C1) op C2 can be rewritten as X op (C1 ^ C2). */
3369 (cmp (convert?@3 (bit_xor @0 INTEGER_CST@1)) INTEGER_CST@2)
3370 (if (tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0)))
3371 (cmp @0 (bit_xor @1 (convert @2)))))
3374 (cmp (convert? addr@0) integer_zerop)
3375 (if (tree_single_nonzero_warnv_p (@0, NULL))
3376 { constant_boolean_node (cmp == NE_EXPR, type); })))
3378 /* If we have (A & C) == C where C is a power of 2, convert this into
3379 (A & C) != 0. Similarly for NE_EXPR. */
3383 (cmp (bit_and@2 @0 integer_pow2p@1) @1)
3384 (icmp @2 { build_zero_cst (TREE_TYPE (@0)); })))
3386 /* If we have (A & C) != 0 ? D : 0 where C and D are powers of 2,
3387 convert this into a shift followed by ANDing with D. */
3390 (ne (bit_and @0 integer_pow2p@1) integer_zerop)
3391 integer_pow2p@2 integer_zerop)
3393 int shift = (wi::exact_log2 (wi::to_wide (@2))
3394 - wi::exact_log2 (wi::to_wide (@1)));
3398 (lshift (convert @0) { build_int_cst (integer_type_node, shift); }) @2)
3400 (convert (rshift @0 { build_int_cst (integer_type_node, -shift); })) @2))))
3402 /* If we have (A & C) != 0 where C is the sign bit of A, convert
3403 this into A < 0. Similarly for (A & C) == 0 into A >= 0. */
3407 (cmp (bit_and (convert?@2 @0) integer_pow2p@1) integer_zerop)
3408 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
3409 && type_has_mode_precision_p (TREE_TYPE (@0))
3410 && element_precision (@2) >= element_precision (@0)
3411 && wi::only_sign_bit_p (wi::to_wide (@1), element_precision (@0)))
3412 (with { tree stype = signed_type_for (TREE_TYPE (@0)); }
3413 (ncmp (convert:stype @0) { build_zero_cst (stype); })))))
3415 /* If we have A < 0 ? C : 0 where C is a power of 2, convert
3416 this into a right shift or sign extension followed by ANDing with C. */
3419 (lt @0 integer_zerop)
3420 integer_pow2p@1 integer_zerop)
3421 (if (!TYPE_UNSIGNED (TREE_TYPE (@0)))
3423 int shift = element_precision (@0) - wi::exact_log2 (wi::to_wide (@1)) - 1;
3427 (convert (rshift @0 { build_int_cst (integer_type_node, shift); }))
3429 /* Otherwise ctype must be wider than TREE_TYPE (@0) and pure
3430 sign extension followed by AND with C will achieve the effect. */
3431 (bit_and (convert @0) @1)))))
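/* Illustrative example (editorial, not an additional pattern): for 32-bit
   signed x
     x < 0 ? 8 : 0  ->  (x >> 28) & 8
   the arithmetic shift smears the sign bit down to bit 3 and the AND keeps
   only that bit.  */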
3433 /* When the addresses are not directly of decls compare base and offset.
3434 This implements some remaining parts of fold_comparison address
3435 comparisons, but is not yet a complete replacement for it.  Still it is good
3436 enough to make fold_stmt not regress when not dispatching to fold_binary. */
3437 (for cmp (simple_comparison)
3439 (cmp (convert1?@2 addr@0) (convert2? addr@1))
3442 HOST_WIDE_INT off0, off1;
3443 tree base0 = get_addr_base_and_unit_offset (TREE_OPERAND (@0, 0), &off0);
3444 tree base1 = get_addr_base_and_unit_offset (TREE_OPERAND (@1, 0), &off1);
3445 if (base0 && TREE_CODE (base0) == MEM_REF)
3447 off0 += mem_ref_offset (base0).to_short_addr ();
3448 base0 = TREE_OPERAND (base0, 0);
3450 if (base1 && TREE_CODE (base1) == MEM_REF)
3452 off1 += mem_ref_offset (base1).to_short_addr ();
3453 base1 = TREE_OPERAND (base1, 0);
3456 (if (base0 && base1)
3460 /* Punt in GENERIC on variables with value expressions;
3461 the value expressions might point to fields/elements
3462 of other vars etc. */
3464 && ((VAR_P (base0) && DECL_HAS_VALUE_EXPR_P (base0))
3465 || (VAR_P (base1) && DECL_HAS_VALUE_EXPR_P (base1))))
3467 else if (decl_in_symtab_p (base0)
3468 && decl_in_symtab_p (base1))
3469 equal = symtab_node::get_create (base0)
3470 ->equal_address_to (symtab_node::get_create (base1));
3471 else if ((DECL_P (base0)
3472 || TREE_CODE (base0) == SSA_NAME
3473 || TREE_CODE (base0) == STRING_CST)
3475 || TREE_CODE (base1) == SSA_NAME
3476 || TREE_CODE (base1) == STRING_CST))
3477 equal = (base0 == base1);
3481 (if (cmp == EQ_EXPR)
3482 { constant_boolean_node (off0 == off1, type); })
3483 (if (cmp == NE_EXPR)
3484 { constant_boolean_node (off0 != off1, type); })
3485 (if (cmp == LT_EXPR)
3486 { constant_boolean_node (off0 < off1, type); })
3487 (if (cmp == LE_EXPR)
3488 { constant_boolean_node (off0 <= off1, type); })
3489 (if (cmp == GE_EXPR)
3490 { constant_boolean_node (off0 >= off1, type); })
3491 (if (cmp == GT_EXPR)
3492 { constant_boolean_node (off0 > off1, type); }))
3494 && DECL_P (base0) && DECL_P (base1)
3495 /* If we compare this as integers require equal offset. */
3496 && (!INTEGRAL_TYPE_P (TREE_TYPE (@2))
3499 (if (cmp == EQ_EXPR)
3500 { constant_boolean_node (false, type); })
3501 (if (cmp == NE_EXPR)
3502 { constant_boolean_node (true, type); })))))))))
3504 /* Simplify pointer equality compares using PTA. */
3508 (if (POINTER_TYPE_P (TREE_TYPE (@0))
3509 && ptrs_compare_unequal (@0, @1))
3510 { neeq == EQ_EXPR ? boolean_false_node : boolean_true_node; })))
3512 /* PR70920: Transform (intptr_t)x eq/ne CST to x eq/ne (typeof x) CST.
3513 and (typeof ptr_cst) x eq/ne ptr_cst to x eq/ne (typeof x) CST.
3514 Disable the transform if either operand is pointer to function.
3515 This broke pr22051-2.c for arm where function pointer
3516 canonicalization is not wanted.  */
3520 (cmp (convert @0) INTEGER_CST@1)
3521 (if ((POINTER_TYPE_P (TREE_TYPE (@0)) && !FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@0)))
3522 && INTEGRAL_TYPE_P (TREE_TYPE (@1)))
3523 || (INTEGRAL_TYPE_P (TREE_TYPE (@0)) && POINTER_TYPE_P (TREE_TYPE (@1))
3524 && !FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@1)))))
3525 (cmp @0 (convert @1)))))
3527 /* Non-equality compare simplifications from fold_binary */
3528 (for cmp (lt gt le ge)
3529 /* Comparisons with the highest or lowest possible integer of
3530 the specified precision will have known values. */
3532 (cmp (convert?@2 @0) INTEGER_CST@1)
3533 (if ((INTEGRAL_TYPE_P (TREE_TYPE (@1)) || POINTER_TYPE_P (TREE_TYPE (@1)))
3534 && tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0)))
3537 tree arg1_type = TREE_TYPE (@1);
3538 unsigned int prec = TYPE_PRECISION (arg1_type);
3539 wide_int max = wi::max_value (arg1_type);
3540 wide_int signed_max = wi::max_value (prec, SIGNED);
3541 wide_int min = wi::min_value (arg1_type);
3544 (if (wi::to_wide (@1) == max)
3546 (if (cmp == GT_EXPR)
3547 { constant_boolean_node (false, type); })
3548 (if (cmp == GE_EXPR)
3550 (if (cmp == LE_EXPR)
3551 { constant_boolean_node (true, type); })
3552 (if (cmp == LT_EXPR)
3554 (if (wi::to_wide (@1) == min)
3556 (if (cmp == LT_EXPR)
3557 { constant_boolean_node (false, type); })
3558 (if (cmp == LE_EXPR)
3560 (if (cmp == GE_EXPR)
3561 { constant_boolean_node (true, type); })
3562 (if (cmp == GT_EXPR)
3564 (if (wi::to_wide (@1) == max - 1)
3566 (if (cmp == GT_EXPR)
3567 (eq @2 { wide_int_to_tree (TREE_TYPE (@1), wi::to_wide (@1) + 1); }))
3568 (if (cmp == LE_EXPR)
3569 (ne @2 { wide_int_to_tree (TREE_TYPE (@1), wi::to_wide (@1) + 1); }))))
3570 (if (wi::to_wide (@1) == min + 1)
3572 (if (cmp == GE_EXPR)
3573 (ne @2 { wide_int_to_tree (TREE_TYPE (@1), wi::to_wide (@1) - 1); }))
3574 (if (cmp == LT_EXPR)
3575 (eq @2 { wide_int_to_tree (TREE_TYPE (@1), wi::to_wide (@1) - 1); }))))
3576 (if (wi::to_wide (@1) == signed_max
3577 && TYPE_UNSIGNED (arg1_type)
3578 /* We will flip the signedness of the comparison operator
3579 associated with the mode of @1, so the sign bit is
3580 specified by this mode. Check that @1 is the signed
3581 max associated with this sign bit. */
3582 && prec == GET_MODE_PRECISION (SCALAR_INT_TYPE_MODE (arg1_type))
3583 /* signed_type does not work on pointer types. */
3584 && INTEGRAL_TYPE_P (arg1_type))
3585 /* The following case also applies to X < signed_max+1
3586 and X >= signed_max+1 because of previous transformations.  */
3587 (if (cmp == LE_EXPR || cmp == GT_EXPR)
3588 (with { tree st = signed_type_for (arg1_type); }
3589 (if (cmp == LE_EXPR)
3590 (ge (convert:st @0) { build_zero_cst (st); })
3591 (lt (convert:st @0) { build_zero_cst (st); }))))))))))
3593 (for cmp (unordered ordered unlt unle ungt unge uneq ltgt)
3594 /* If the second operand is NaN, the result is constant. */
3597 (if (REAL_VALUE_ISNAN (TREE_REAL_CST (@1))
3598 && (cmp != LTGT_EXPR || ! flag_trapping_math))
3599 { constant_boolean_node (cmp == ORDERED_EXPR || cmp == LTGT_EXPR
3600 ? false : true, type); })))
3602 /* bool_var != 0 becomes bool_var. */
3604 (ne @0 integer_zerop)
3605 (if (TREE_CODE (TREE_TYPE (@0)) == BOOLEAN_TYPE
3606 && types_match (type, TREE_TYPE (@0)))
3608 /* bool_var == 1 becomes bool_var. */
3610 (eq @0 integer_onep)
3611 (if (TREE_CODE (TREE_TYPE (@0)) == BOOLEAN_TYPE
3612 && types_match (type, TREE_TYPE (@0)))
3614 /* Do not handle
3615    bool_var == 0 becomes !bool_var or
3616 bool_var != 1 becomes !bool_var
3617 here because that is only good in assignment context as long
3618 as we require a tcc_comparison in GIMPLE_CONDs where we'd
3619 replace if (x == 0) with tem = ~x; if (tem != 0) which is
3620 clearly less optimal and which we'll transform again in forwprop. */
3622 /* When one argument is a constant, overflow detection can be simplified.
3623 Currently restricted to single use so as not to interfere too much with
3624 ADD_OVERFLOW detection in tree-ssa-math-opts.c.
3625 A + CST CMP A -> A CMP' CST' */
3626 (for cmp (lt le ge gt)
3629 (cmp:c (plus@2 @0 INTEGER_CST@1) @0)
3630 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
3631 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))
3632 && wi::to_wide (@1) != 0
3634 (with { unsigned int prec = TYPE_PRECISION (TREE_TYPE (@0)); }
3635 (out @0 { wide_int_to_tree (TREE_TYPE (@0),
3636 wi::max_value (prec, UNSIGNED)
3637 - wi::to_wide (@1)); })))))
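/* Illustrative example (editorial, not an additional pattern): for unsigned
   int
     x + 10 < x  ->  x > UINT_MAX - 10
   i.e. the wrap-around check becomes a compare against the largest value
   that does not overflow.  */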
3639 /* To detect overflow in unsigned A - B, A < B is simpler than A - B > A.
3640 However, the detection logic for SUB_OVERFLOW in tree-ssa-math-opts.c
3641 expects the long form, so we restrict the transformation for now. */
3644 (cmp:c (minus@2 @0 @1) @0)
3645 (if (single_use (@2)
3646 && ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
3647 && TYPE_UNSIGNED (TREE_TYPE (@0))
3648 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
3651 /* Testing for overflow is unnecessary if we already know the result. */
3656 (cmp:c (realpart (IFN_SUB_OVERFLOW@2 @0 @1)) @0)
3657 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
3658 && types_match (TREE_TYPE (@0), TREE_TYPE (@1)))
3659 (out (imagpart @2) { build_zero_cst (TREE_TYPE (@0)); }))))
3664 (cmp:c (realpart (IFN_ADD_OVERFLOW:c@2 @0 @1)) @0)
3665 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
3666 && types_match (TREE_TYPE (@0), TREE_TYPE (@1)))
3667 (out (imagpart @2) { build_zero_cst (TREE_TYPE (@0)); }))))
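/* Illustrative C-level sketch (not part of match.pd): roughly, for
   unsigned a and b,

     unsigned int r;
     int ovf = __builtin_sub_overflow (a, b, &r);   // IFN_SUB_OVERFLOW
     if (r > a)                                     // redundant wrap-around test
       ...

   the comparison r > a is folded to a test of the overflow flag the call
   already computes, i.e. to the equivalent of  if (ovf)  ...; the same
   holds for __builtin_add_overflow and the test r < a.  */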
3669 /* For unsigned operands, -1 / B < A checks whether A * B would overflow.
3670 Simplify it to __builtin_mul_overflow (A, B, <unused>). */
3674 (cmp:c (trunc_div:s integer_all_onesp @1) @0)
3675 (if (TYPE_UNSIGNED (TREE_TYPE (@0)) && !VECTOR_TYPE_P (TREE_TYPE (@0)))
3676 (with { tree t = TREE_TYPE (@0), cpx = build_complex_type (t); }
3677 (out (imagpart (IFN_MUL_OVERFLOW:cpx @0 @1)) { build_zero_cst (t); })))))
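/* Illustrative C-level sketch (not part of match.pd): for unsigned a and b,
   the common idiom

     if ((unsigned int) -1 / b < a)    // UINT_MAX / b < a, i.e. a * b overflows
       ...

   is rewritten so the division disappears; the condition becomes the
   overflow flag of an internal multiply-with-overflow, roughly what

     unsigned int prod;
     if (__builtin_mul_overflow (a, b, &prod))      // prod itself unused
       ...

   would compute.  */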
3679 /* Simplification of math builtins. These rules must all be optimizations
3680 as well as IL simplifications. If there is a possibility that the new
3681 form could be a pessimization, the rule should go in the canonicalization
3682 section that follows this one.
3684 Rules can generally go in this section if they satisfy one of the following:
3687 - the rule describes an identity
3689 - the rule replaces calls with something as simple as addition or multiplication
3692 - the rule contains unary calls only and simplifies the surrounding
3693 arithmetic. (The idea here is to exclude non-unary calls in which
3694 one operand is constant and in which the call is known to be cheap
3695 when the operand has that value.) */
3697 (if (flag_unsafe_math_optimizations)
3698 /* Simplify sqrt(x) * sqrt(x) -> x. */
3700 (mult (SQRT@1 @0) @1)
3701 (if (!HONOR_SNANS (type))
3704 (for op (plus minus)
3705 /* Simplify (A / C) +- (B / C) -> (A +- B) / C. */
3709 (rdiv (op @0 @2) @1)))
3711 /* Simplify sqrt(x) * sqrt(y) -> sqrt(x*y). */
3712 (for root (SQRT CBRT)
3714 (mult (root:s @0) (root:s @1))
3715 (root (mult @0 @1))))
3717 /* Simplify expN(x) * expN(y) -> expN(x+y). */
3718 (for exps (EXP EXP2 EXP10 POW10)
3720 (mult (exps:s @0) (exps:s @1))
3721 (exps (plus @0 @1))))
3723 /* Simplify a/root(b/c) into a*root(c/b). */
3724 (for root (SQRT CBRT)
3726 (rdiv @0 (root:s (rdiv:s @1 @2)))
3727 (mult @0 (root (rdiv @2 @1)))))
3729 /* Simplify x/expN(y) into x*expN(-y). */
3730 (for exps (EXP EXP2 EXP10 POW10)
3732 (rdiv @0 (exps:s @1))
3733 (mult @0 (exps (negate @1)))))
3735 (for logs (LOG LOG2 LOG10 LOG10)
3736 exps (EXP EXP2 EXP10 POW10)
3737 /* logN(expN(x)) -> x. */
3741 /* expN(logN(x)) -> x. */
3746 /* Optimize logN(func()) for various exponential functions. We
3747 want to determine the value "x" and the power "exponent" in
3748 order to transform logN(x**exponent) into exponent*logN(x). */
3749 (for logs (LOG LOG LOG LOG2 LOG2 LOG2 LOG10 LOG10)
3750 exps (EXP2 EXP10 POW10 EXP EXP10 POW10 EXP EXP2)
3753 (if (SCALAR_FLOAT_TYPE_P (type))
3759 /* Prepare to do logN(exp(exponent)) -> exponent*logN(e). */
3760 x = build_real_truncate (type, dconst_e ());
3763 /* Prepare to do logN(exp2(exponent)) -> exponent*logN(2). */
3764 x = build_real (type, dconst2);
3768 /* Prepare to do logN(exp10(exponent)) -> exponent*logN(10). */
3770 REAL_VALUE_TYPE dconst10;
3771 real_from_integer (&dconst10, VOIDmode, 10, SIGNED);
3772 x = build_real (type, dconst10);
3779 (mult (logs { x; }) @0)))))
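/* Worked example for the transform above (illustrative only, guarded by
   flag_unsafe_math_optimizations): with logN = log and the inner call exp2,

     log (exp2 (t))  ==  log (2**t)  ==  t * log (2)

   so the two calls are replaced by a multiplication with the constant
   log (2).  */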
3787 (if (SCALAR_FLOAT_TYPE_P (type))
3793 /* Prepare to do logN(sqrt(x)) -> 0.5*logN(x). */
3794 x = build_real (type, dconsthalf);
3797 /* Prepare to do logN(cbrt(x)) -> (1/3)*logN(x). */
3798 x = build_real_truncate (type, dconst_third ());
3804 (mult { x; } (logs @0))))))
3806 /* logN(pow(x,exponent)) -> exponent*logN(x). */
3807 (for logs (LOG LOG2 LOG10)
3811 (mult @1 (logs @0))))
3813 /* pow(C,x) -> exp(log(C)*x) if C > 0. */
3818 (pows REAL_CST@0 @1)
3819 (if (real_compare (GT_EXPR, TREE_REAL_CST_PTR (@0), &dconst0)
3820 && real_isfinite (TREE_REAL_CST_PTR (@0)))
3821 (exps (mult (logs @0) @1)))))
3826 exps (EXP EXP2 EXP10 POW10)
3827 /* sqrt(expN(x)) -> expN(x*0.5). */
3830 (exps (mult @0 { build_real (type, dconsthalf); })))
3831 /* cbrt(expN(x)) -> expN(x/3). */
3834 (exps (mult @0 { build_real_truncate (type, dconst_third ()); })))
3835 /* pow(expN(x), y) -> expN(x*y). */
3838 (exps (mult @0 @1))))
3840 /* tan(atan(x)) -> x. */
3847 /* cabs(x+0i) or cabs(0+xi) -> abs(x). */
3849 (CABS (complex:C @0 real_zerop@1))
3852 /* trunc(trunc(x)) -> trunc(x), etc. */
3853 (for fns (TRUNC FLOOR CEIL ROUND NEARBYINT RINT)
3857 /* f(x) -> x if x is integer valued and f does nothing for such values. */
3858 (for fns (TRUNC FLOOR CEIL ROUND NEARBYINT RINT)
3860 (fns integer_valued_real_p@0)
3863 /* hypot(x,0) and hypot(0,x) -> abs(x). */
3865 (HYPOT:c @0 real_zerop@1)
3868 /* pow(1,x) -> 1. */
3870 (POW real_onep@0 @1)
3874 /* copysign(x,x) -> x. */
3879 /* copysign(x,y) -> fabs(x) if y is nonnegative. */
3880 (COPYSIGN @0 tree_expr_nonnegative_p@1)
3883 (for scale (LDEXP SCALBN SCALBLN)
3884 /* ldexp(0, x) -> 0. */
3886 (scale real_zerop@0 @1)
3888 /* ldexp(x, 0) -> x. */
3890 (scale @0 integer_zerop@1)
3892 /* ldexp(x, y) -> x if x is +-Inf or NaN. */
3894 (scale REAL_CST@0 @1)
3895 (if (!real_isfinite (TREE_REAL_CST_PTR (@0)))
3898 /* Canonicalization of sequences of math builtins. These rules represent
3899 IL simplifications but are not necessarily optimizations.
3901 The sincos pass is responsible for picking "optimal" implementations
3902 of math builtins, which may be more complicated and can sometimes go
3903 the other way, e.g. converting pow into a sequence of sqrts.
3904 We only want to do these canonicalizations before the pass has run. */
3906 (if (flag_unsafe_math_optimizations && canonicalize_math_p ())
3907 /* Simplify tan(x) * cos(x) -> sin(x). */
3909 (mult:c (TAN:s @0) (COS:s @0))
3912 /* Simplify x * pow(x,c) -> pow(x,c+1). */
3914 (mult:c @0 (POW:s @0 REAL_CST@1))
3915 (if (!TREE_OVERFLOW (@1))
3916 (POW @0 (plus @1 { build_one_cst (type); }))))
3918 /* Simplify sin(x) / cos(x) -> tan(x). */
3920 (rdiv (SIN:s @0) (COS:s @0))
3923 /* Simplify cos(x) / sin(x) -> 1 / tan(x). */
3925 (rdiv (COS:s @0) (SIN:s @0))
3926 (rdiv { build_one_cst (type); } (TAN @0)))
3928 /* Simplify sin(x) / tan(x) -> cos(x). */
3930 (rdiv (SIN:s @0) (TAN:s @0))
3931 (if (! HONOR_NANS (@0)
3932 && ! HONOR_INFINITIES (@0))
3935 /* Simplify tan(x) / sin(x) -> 1.0 / cos(x). */
3937 (rdiv (TAN:s @0) (SIN:s @0))
3938 (if (! HONOR_NANS (@0)
3939 && ! HONOR_INFINITIES (@0))
3940 (rdiv { build_one_cst (type); } (COS @0))))
3942 /* Simplify pow(x,y) * pow(x,z) -> pow(x,y+z). */
3944 (mult (POW:s @0 @1) (POW:s @0 @2))
3945 (POW @0 (plus @1 @2)))
3947 /* Simplify pow(x,y) * pow(z,y) -> pow(x*z,y). */
3949 (mult (POW:s @0 @1) (POW:s @2 @1))
3950 (POW (mult @0 @2) @1))
3952 /* Simplify powi(x,y) * powi(z,y) -> powi(x*z,y). */
3954 (mult (POWI:s @0 @1) (POWI:s @2 @1))
3955 (POWI (mult @0 @2) @1))
3957 /* Simplify pow(x,c) / x -> pow(x,c-1). */
3959 (rdiv (POW:s @0 REAL_CST@1) @0)
3960 (if (!TREE_OVERFLOW (@1))
3961 (POW @0 (minus @1 { build_one_cst (type); }))))
3963 /* Simplify x / pow (y,z) -> x * pow(y,-z). */
3965 (rdiv @0 (POW:s @1 @2))
3966 (mult @0 (POW @1 (negate @2))))
3971 /* sqrt(sqrt(x)) -> pow(x,1/4). */
3974 (pows @0 { build_real (type, dconst_quarter ()); }))
3975 /* sqrt(cbrt(x)) -> pow(x,1/6). */
3978 (pows @0 { build_real_truncate (type, dconst_sixth ()); }))
3979 /* cbrt(sqrt(x)) -> pow(x,1/6). */
3982 (pows @0 { build_real_truncate (type, dconst_sixth ()); }))
3983 /* cbrt(cbrt(x)) -> pow(x,1/9), iff x is nonnegative. */
3985 (cbrts (cbrts tree_expr_nonnegative_p@0))
3986 (pows @0 { build_real_truncate (type, dconst_ninth ()); }))
3987 /* sqrt(pow(x,y)) -> pow(|x|,y*0.5). */
3989 (sqrts (pows @0 @1))
3990 (pows (abs @0) (mult @1 { build_real (type, dconsthalf); })))
3991 /* cbrt(pow(x,y)) -> pow(x,y/3), iff x is nonnegative. */
3993 (cbrts (pows tree_expr_nonnegative_p@0 @1))
3994 (pows @0 (mult @1 { build_real_truncate (type, dconst_third ()); })))
3995 /* pow(sqrt(x),y) -> pow(x,y*0.5). */
3997 (pows (sqrts @0) @1)
3998 (pows @0 (mult @1 { build_real (type, dconsthalf); })))
3999 /* pow(cbrt(x),y) -> pow(x,y/3) iff x is nonnegative. */
4001 (pows (cbrts tree_expr_nonnegative_p@0) @1)
4002 (pows @0 (mult @1 { build_real_truncate (type, dconst_third ()); })))
4003 /* pow(pow(x,y),z) -> pow(x,y*z) iff x is nonnegative. */
4005 (pows (pows tree_expr_nonnegative_p@0 @1) @2)
4006 (pows @0 (mult @1 @2))))
4008 /* cabs(x+xi) -> fabs(x)*sqrt(2). */
4010 (CABS (complex @0 @0))
4011 (mult (abs @0) { build_real_truncate (type, dconst_sqrt2 ()); }))
4013 /* hypot(x,x) -> fabs(x)*sqrt(2). */
4016 (mult (abs @0) { build_real_truncate (type, dconst_sqrt2 ()); }))
4018 /* cexp(x+yi) -> exp(x)*cexpi(y). */
4023 (cexps compositional_complex@0)
4024 (if (targetm.libc_has_function (function_c99_math_complex))
4026 (mult (exps@1 (realpart @0)) (realpart (cexpis:type@2 (imagpart @0))))
4027 (mult @1 (imagpart @2)))))))
4029 (if (canonicalize_math_p ())
4030 /* floor(x) -> trunc(x) if x is nonnegative. */
4034 (floors tree_expr_nonnegative_p@0)
4037 (match double_value_p
4039 (if (TYPE_MAIN_VARIANT (TREE_TYPE (@0)) == double_type_node)))
4040 (for froms (BUILT_IN_TRUNCL
4052 /* truncl(extend(x)) -> extend(trunc(x)), etc., if x is a double. */
4053 (if (optimize && canonicalize_math_p ())
4055 (froms (convert double_value_p@0))
4056 (convert (tos @0)))))
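/* Illustrative C-level sketch (not part of match.pd): for a double d,

     long double r = truncl ((long double) d);

   is canonicalized to

     long double r = (long double) trunc (d);

   since widening d to long double is exact, the narrower function yields
   the same value.  */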
4058 (match float_value_p
4060 (if (TYPE_MAIN_VARIANT (TREE_TYPE (@0)) == float_type_node)))
4061 (for froms (BUILT_IN_TRUNCL BUILT_IN_TRUNC
4062 BUILT_IN_FLOORL BUILT_IN_FLOOR
4063 BUILT_IN_CEILL BUILT_IN_CEIL
4064 BUILT_IN_ROUNDL BUILT_IN_ROUND
4065 BUILT_IN_NEARBYINTL BUILT_IN_NEARBYINT
4066 BUILT_IN_RINTL BUILT_IN_RINT)
4067 tos (BUILT_IN_TRUNCF BUILT_IN_TRUNCF
4068 BUILT_IN_FLOORF BUILT_IN_FLOORF
4069 BUILT_IN_CEILF BUILT_IN_CEILF
4070 BUILT_IN_ROUNDF BUILT_IN_ROUNDF
4071 BUILT_IN_NEARBYINTF BUILT_IN_NEARBYINTF
4072 BUILT_IN_RINTF BUILT_IN_RINTF)
4073 /* truncl(extend(x)) and trunc(extend(x)) -> extend(truncf(x)), etc., if x is a float. */
4075 (if (optimize && canonicalize_math_p ()
4076 && targetm.libc_has_function (function_c99_misc))
4078 (froms (convert float_value_p@0))
4079 (convert (tos @0)))))
4081 (for froms (XFLOORL XCEILL XROUNDL XRINTL)
4082 tos (XFLOOR XCEIL XROUND XRINT)
4083 /* llfloorl(extend(x)) -> llfloor(x), etc., if x is a double. */
4084 (if (optimize && canonicalize_math_p ())
4086 (froms (convert double_value_p@0))
4089 (for froms (XFLOORL XCEILL XROUNDL XRINTL
4090 XFLOOR XCEIL XROUND XRINT)
4091 tos (XFLOORF XCEILF XROUNDF XRINTF)
4092 /* llfloorl(extend(x)) and llfloor(extend(x)) -> llfloorf(x), etc., if x is a float. */
4094 (if (optimize && canonicalize_math_p ())
4096 (froms (convert float_value_p@0))
4099 (if (canonicalize_math_p ())
4100 /* xfloor(x) -> fix_trunc(x) if x is nonnegative. */
4101 (for floors (IFLOOR LFLOOR LLFLOOR)
4103 (floors tree_expr_nonnegative_p@0)
4106 (if (canonicalize_math_p ())
4107 /* xfloor(x) -> fix_trunc(x), etc., if x is integer valued. */
4108 (for fns (IFLOOR LFLOOR LLFLOOR
4110 IROUND LROUND LLROUND)
4112 (fns integer_valued_real_p@0)
4114 (if (!flag_errno_math)
4115 /* xrint(x) -> fix_trunc(x), etc., if x is integer valued. */
4116 (for rints (IRINT LRINT LLRINT)
4118 (rints integer_valued_real_p@0)
4121 (if (canonicalize_math_p ())
4122 (for ifn (IFLOOR ICEIL IROUND IRINT)
4123 lfn (LFLOOR LCEIL LROUND LRINT)
4124 llfn (LLFLOOR LLCEIL LLROUND LLRINT)
4125 /* Canonicalize iround (x) to lround (x) on ILP32 targets where
4126 sizeof (int) == sizeof (long). */
4127 (if (TYPE_PRECISION (integer_type_node)
4128 == TYPE_PRECISION (long_integer_type_node))
4131 (lfn:long_integer_type_node @0)))
4132 /* Canonicalize llround (x) to lround (x) on LP64 targets where
4133 sizeof (long long) == sizeof (long). */
4134 (if (TYPE_PRECISION (long_long_integer_type_node)
4135 == TYPE_PRECISION (long_integer_type_node))
4138 (lfn:long_integer_type_node @0)))))
4140 /* cproj(x) -> x if we're ignoring infinities. */
4143 (if (!HONOR_INFINITIES (type))
4146 /* If the real part is inf and the imag part is known to be
4147 nonnegative, return (inf + 0i). */
4149 (CPROJ (complex REAL_CST@0 tree_expr_nonnegative_p@1))
4150 (if (real_isinf (TREE_REAL_CST_PTR (@0)))
4151 { build_complex_inf (type, false); }))
4153 /* If the imag part is inf, return (inf+I*copysign(0,imag)). */
4155 (CPROJ (complex @0 REAL_CST@1))
4156 (if (real_isinf (TREE_REAL_CST_PTR (@1)))
4157 { build_complex_inf (type, TREE_REAL_CST_PTR (@1)->sign); }))
4163 (pows @0 REAL_CST@1)
4165 const REAL_VALUE_TYPE *value = TREE_REAL_CST_PTR (@1);
4166 REAL_VALUE_TYPE tmp;
4169 /* pow(x,0) -> 1. */
4170 (if (real_equal (value, &dconst0))
4171 { build_real (type, dconst1); })
4172 /* pow(x,1) -> x. */
4173 (if (real_equal (value, &dconst1))
4175 /* pow(x,-1) -> 1/x. */
4176 (if (real_equal (value, &dconstm1))
4177 (rdiv { build_real (type, dconst1); } @0))
4178 /* pow(x,0.5) -> sqrt(x). */
4179 (if (flag_unsafe_math_optimizations
4180 && canonicalize_math_p ()
4181 && real_equal (value, &dconsthalf))
4183 /* pow(x,1/3) -> cbrt(x). */
4184 (if (flag_unsafe_math_optimizations
4185 && canonicalize_math_p ()
4186 && (tmp = real_value_truncate (TYPE_MODE (type), dconst_third ()),
4187 real_equal (value, &tmp)))
4190 /* powi(1,x) -> 1. */
4192 (POWI real_onep@0 @1)
4196 (POWI @0 INTEGER_CST@1)
4198 /* powi(x,0) -> 1. */
4199 (if (wi::to_wide (@1) == 0)
4200 { build_real (type, dconst1); })
4201 /* powi(x,1) -> x. */
4202 (if (wi::to_wide (@1) == 1)
4204 /* powi(x,-1) -> 1/x. */
4205 (if (wi::to_wide (@1) == -1)
4206 (rdiv { build_real (type, dconst1); } @0))))
4208 /* Narrowing of arithmetic and logical operations.
4210 These are conceptually similar to the transformations performed for
4211 the C/C++ front-ends by shorten_binary_op and shorten_compare. Long
4212 term we want to move all that code out of the front-ends into here. */
4214 /* If we have a narrowing conversion of an arithmetic operation where
4215 both operands are widening conversions from the same type as the outer
4216 narrowing conversion, convert the innermost operands to a suitable
4217 unsigned type (to avoid introducing undefined behavior), perform the
4218 operation there and convert the result to the desired type; an illustrative sketch follows the pattern below. */
4219 (for op (plus minus)
4221 (convert (op:s (convert@2 @0) (convert?@3 @1)))
4222 (if (INTEGRAL_TYPE_P (type)
4223 /* We check for type compatibility between @0 and @1 below,
4224 so there's no need to check that @1/@3 are integral types. */
4225 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
4226 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
4227 /* The precision of the type of each operand must match the
4228 precision of the mode of each operand, similarly for the result. */
4230 && type_has_mode_precision_p (TREE_TYPE (@0))
4231 && type_has_mode_precision_p (TREE_TYPE (@1))
4232 && type_has_mode_precision_p (type)
4233 /* The inner conversion must be a widening conversion. */
4234 && TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (TREE_TYPE (@0))
4235 && types_match (@0, type)
4236 && (types_match (@0, @1)
4237 /* Or the second operand is const integer or converted const
4238 integer from valueize. */
4239 || TREE_CODE (@1) == INTEGER_CST))
4240 (if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
4241 (op @0 (convert @1))
4242 (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
4243 (convert (op (convert:utype @0)
4244 (convert:utype @1))))))))
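/* Illustrative sketch of the narrowing transform above (not part of
   match.pd): for short a and b, the C expression

     (short) ((int) a + (int) b)

   widens, adds in int and narrows again.  After the transform the addition
   happens directly in the narrow type; because signed short does not wrap,
   the operands are first converted to unsigned short, giving roughly (in
   GIMPLE terms, where no integer promotion takes place)

     (short) ((unsigned short) a + (unsigned short) b)
   */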
4246 /* This is another case of narrowing, specifically when there's an outer
4247 BIT_AND_EXPR which masks off bits outside the type of the innermost
4248 operands. As in the previous case, we have to convert the operands
4249 to unsigned types to avoid introducing undefined behavior for the
4250 arithmetic operation. */
4251 (for op (minus plus)
4253 (bit_and (op:s (convert@2 @0) (convert@3 @1)) INTEGER_CST@4)
4254 (if (INTEGRAL_TYPE_P (type)
4255 /* We check for type compatibility between @0 and @1 below,
4256 so there's no need to check that @1/@3 are integral types. */
4257 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
4258 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
4259 /* The precision of the type of each operand must match the
4260 precision of the mode of each operand, similarly for the result. */
4262 && type_has_mode_precision_p (TREE_TYPE (@0))
4263 && type_has_mode_precision_p (TREE_TYPE (@1))
4264 && type_has_mode_precision_p (type)
4265 /* The inner conversion must be a widening conversion. */
4266 && TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (TREE_TYPE (@0))
4267 && types_match (@0, @1)
4268 && (tree_int_cst_min_precision (@4, TYPE_SIGN (TREE_TYPE (@0)))
4269 <= TYPE_PRECISION (TREE_TYPE (@0)))
4270 && (wi::to_wide (@4)
4271 & wi::mask (TYPE_PRECISION (TREE_TYPE (@0)),
4272 true, TYPE_PRECISION (type))) == 0)
4273 (if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
4274 (with { tree ntype = TREE_TYPE (@0); }
4275 (convert (bit_and (op @0 @1) (convert:ntype @4))))
4276 (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
4277 (convert (bit_and (op (convert:utype @0) (convert:utype @1))
4278 (convert:utype @4))))))))
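/* Illustrative sketch of the masked variant (not part of match.pd): for
   unsigned char a and b, the expression

     ((int) a + (int) b) & 0xff

   keeps only bits that fit in unsigned char, so the addition can be done
   in the narrow type and widened afterwards, roughly

     (int) (unsigned char) (a + b)

   again in GIMPLE terms, without C's promotion of the operands to int.  */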
4280 /* Transform (@0 < @1 and @0 < @2) to use min,
4281 (@0 > @1 and @0 > @2) to use max. */
4282 (for op (lt le gt ge)
4283 ext (min min max max)
4285 (bit_and (op:cs @0 @1) (op:cs @0 @2))
4286 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
4287 && TREE_CODE (@0) != INTEGER_CST)
4288 (op @0 (ext @1 @2)))))
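/* Illustrative C-level sketch (not part of match.pd), writing MIN/MAX
   informally for the MIN_EXPR/MAX_EXPR the transform builds:

     x < a && x < b      becomes      x < MIN (a, b)
     x > a && x > b      becomes      x > MAX (a, b)

   so the logical AND of two comparisons collapses into one comparison
   against a min or max.  */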
4291 /* signbit(x) -> 0 if x is nonnegative. */
4292 (SIGNBIT tree_expr_nonnegative_p@0)
4293 { integer_zero_node; })
4296 /* signbit(x) -> x<0 if x doesn't have signed zeros. */
4298 (if (!HONOR_SIGNED_ZEROS (@0))
4299 (convert (lt @0 { build_real (TREE_TYPE (@0), dconst0); }))))
4301 /* Transform comparisons of the form X +- C1 CMP C2 to X CMP C2 -+ C1. */
4303 (for op (plus minus)
4306 (cmp (op@3 @0 INTEGER_CST@1) INTEGER_CST@2)
4307 (if (!TREE_OVERFLOW (@1) && !TREE_OVERFLOW (@2)
4308 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0))
4309 && !TYPE_OVERFLOW_TRAPS (TREE_TYPE (@0))
4310 && !TYPE_SATURATING (TREE_TYPE (@0)))
4311 (with { tree res = int_const_binop (rop, @2, @1); }
4312 (if (TREE_OVERFLOW (res)
4313 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
4314 { constant_boolean_node (cmp == NE_EXPR, type); }
4315 (if (single_use (@3))
4316 (cmp @0 { res; }))))))))
4317 (for cmp (lt le gt ge)
4318 (for op (plus minus)
4321 (cmp (op@3 @0 INTEGER_CST@1) INTEGER_CST@2)
4322 (if (!TREE_OVERFLOW (@1) && !TREE_OVERFLOW (@2)
4323 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
4324 (with { tree res = int_const_binop (rop, @2, @1); }
4325 (if (TREE_OVERFLOW (res))
4327 fold_overflow_warning (("assuming signed overflow does not occur "
4328 "when simplifying conditional to constant"),
4329 WARN_STRICT_OVERFLOW_CONDITIONAL);
4330 bool less = cmp == LE_EXPR || cmp == LT_EXPR;
4331 /* wi::ges_p (@2, 0) should be sufficient for a signed type. */
4332 bool ovf_high = wi::lt_p (wi::to_wide (@1), 0,
4333 TYPE_SIGN (TREE_TYPE (@1)))
4334 != (op == MINUS_EXPR);
4335 constant_boolean_node (less == ovf_high, type);
4337 (if (single_use (@3))
4340 fold_overflow_warning (("assuming signed overflow does not occur "
4341 "when changing X +- C1 cmp C2 to "
4343 WARN_STRICT_OVERFLOW_COMPARISON);
4345 (cmp @0 { res; })))))))))
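/* Illustrative C-level sketch (not part of match.pd):

     x + 5 == 10     becomes     x == 5
     x - 3 >  7      becomes     x > 10   (signed x, overflow undefined)

   If computing C2 -+ C1 itself overflows, the comparison folds to a
   constant instead, with a strict-overflow warning emitted for the
   ordered comparisons.  */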
4347 /* Canonicalizations of BIT_FIELD_REFs. */
4350 (BIT_FIELD_REF @0 @1 @2)
4352 (if (TREE_CODE (TREE_TYPE (@0)) == COMPLEX_TYPE
4353 && tree_int_cst_equal (@1, TYPE_SIZE (TREE_TYPE (TREE_TYPE (@0)))))
4355 (if (integer_zerop (@2))
4356 (view_convert (realpart @0)))
4357 (if (tree_int_cst_equal (@2, TYPE_SIZE (TREE_TYPE (TREE_TYPE (@0)))))
4358 (view_convert (imagpart @0)))))
4359 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
4360 && INTEGRAL_TYPE_P (type)
4361 /* On GIMPLE this should only apply to register arguments. */
4362 && (! GIMPLE || is_gimple_reg (@0))
4363 /* A bit-field-ref that referenced the full argument can be stripped. */
4364 && ((compare_tree_int (@1, TYPE_PRECISION (TREE_TYPE (@0))) == 0
4365 && integer_zerop (@2))
4366 /* Low-parts can be reduced to integral conversions.
4367 ??? The following doesn't work for PDP endian. */
4368 || (BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN
4369 /* Don't even think about BITS_BIG_ENDIAN. */
4370 && TYPE_PRECISION (TREE_TYPE (@0)) % BITS_PER_UNIT == 0
4371 && TYPE_PRECISION (type) % BITS_PER_UNIT == 0
4372 && compare_tree_int (@2, (BYTES_BIG_ENDIAN
4373 ? (TYPE_PRECISION (TREE_TYPE (@0))
4374 - TYPE_PRECISION (type))
4378 /* Simplify vector extracts. */
4381 (BIT_FIELD_REF CONSTRUCTOR@0 @1 @2)
4382 (if (VECTOR_TYPE_P (TREE_TYPE (@0))
4383 && (types_match (type, TREE_TYPE (TREE_TYPE (@0)))
4384 || (VECTOR_TYPE_P (type)
4385 && types_match (TREE_TYPE (type), TREE_TYPE (TREE_TYPE (@0))))))
4388 tree ctor = (TREE_CODE (@0) == SSA_NAME
4389 ? gimple_assign_rhs1 (SSA_NAME_DEF_STMT (@0)) : @0);
4390 tree eltype = TREE_TYPE (TREE_TYPE (ctor));
4391 unsigned HOST_WIDE_INT width = tree_to_uhwi (TYPE_SIZE (eltype));
4392 unsigned HOST_WIDE_INT n = tree_to_uhwi (@1);
4393 unsigned HOST_WIDE_INT idx = tree_to_uhwi (@2);
4396 && (idx % width) == 0
4398 && ((idx + n) / width) <= TYPE_VECTOR_SUBPARTS (TREE_TYPE (ctor)))
4403 /* Constructor elements can be subvectors. */
4404 unsigned HOST_WIDE_INT k = 1;
4405 if (CONSTRUCTOR_NELTS (ctor) != 0)
4407 tree cons_elem = TREE_TYPE (CONSTRUCTOR_ELT (ctor, 0)->value);
4408 if (TREE_CODE (cons_elem) == VECTOR_TYPE)
4409 k = TYPE_VECTOR_SUBPARTS (cons_elem);
4413 /* We keep an exact subset of the constructor elements. */
4414 (if ((idx % k) == 0 && (n % k) == 0)
4415 (if (CONSTRUCTOR_NELTS (ctor) == 0)
4416 { build_constructor (type, NULL); }
4423 (if (idx < CONSTRUCTOR_NELTS (ctor))
4424 { CONSTRUCTOR_ELT (ctor, idx)->value; }
4425 { build_zero_cst (type); })
4427 vec<constructor_elt, va_gc> *vals;
4428 vec_alloc (vals, n);
4429 for (unsigned i = 0;
4430 i < n && idx + i < CONSTRUCTOR_NELTS (ctor); ++i)
4431 CONSTRUCTOR_APPEND_ELT (vals, NULL_TREE,
4432 CONSTRUCTOR_ELT (ctor, idx + i)->value);
4433 build_constructor (type, vals);
4435 /* The bitfield references a single constructor element. */
4436 (if (idx + n <= (idx / k + 1) * k)
4438 (if (CONSTRUCTOR_NELTS (ctor) <= idx / k)
4439 { build_zero_cst (type); })
4441 { CONSTRUCTOR_ELT (ctor, idx / k)->value; })
4442 (BIT_FIELD_REF { CONSTRUCTOR_ELT (ctor, idx / k)->value; }
4443 @1 { bitsize_int ((idx % k) * width); })))))))))
4445 /* Simplify a bit extraction from a bit insertion in the cases where
4446 the inserted element fully covers the extraction or where the insertion
4447 does not touch the extraction at all; a worked example follows the pattern. */
4449 (BIT_FIELD_REF (bit_insert @0 @1 @ipos) @rsize @rpos)
4452 unsigned HOST_WIDE_INT isize;
4453 if (INTEGRAL_TYPE_P (TREE_TYPE (@1)))
4454 isize = TYPE_PRECISION (TREE_TYPE (@1));
4456 isize = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (@1)));
4459 (if (wi::leu_p (wi::to_wide (@ipos), wi::to_wide (@rpos))
4460 && wi::leu_p (wi::to_wide (@rpos) + wi::to_wide (@rsize),
4461 wi::to_wide (@ipos) + isize))
4462 (BIT_FIELD_REF @1 @rsize { wide_int_to_tree (bitsizetype,
4464 - wi::to_wide (@ipos)); }))
4465 (if (wi::geu_p (wi::to_wide (@ipos),
4466 wi::to_wide (@rpos) + wi::to_wide (@rsize))
4467 || wi::geu_p (wi::to_wide (@rpos),
4468 wi::to_wide (@ipos) + isize))
4469 (BIT_FIELD_REF @0 @rsize @rpos)))))
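/* Worked example (illustrative): let @0 be a 128-bit vector and @1 a
   32-bit element inserted at bit position 32, so it occupies bits 32..63.

   - Extracting 32 bits at position 32 lies entirely within the inserted
     element (32 <= 32 and 32 + 32 <= 32 + 32), so the extraction becomes
     a BIT_FIELD_REF of @1 at offset 0.

   - Extracting 32 bits at position 64 does not overlap the insertion
     (64 >= 32 + 32), so the insertion is skipped and the extraction is
     performed on the original @0.  */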