1 /* Match-and-simplify patterns for shared GENERIC and GIMPLE folding.
2 This file is consumed by genmatch which produces gimple-match.c
3 and generic-match.c from it.
5 Copyright (C) 2014-2017 Free Software Foundation, Inc.
6 Contributed by Richard Biener <rguenther@suse.de>
7 and Prathamesh Kulkarni <bilbotheelffriend@gmail.com>
9 This file is part of GCC.
11 GCC is free software; you can redistribute it and/or modify it under
12 the terms of the GNU General Public License as published by the Free
13 Software Foundation; either version 3, or (at your option) any later
16 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
17 WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
21 You should have received a copy of the GNU General Public License
22 along with GCC; see the file COPYING3. If not see
23 <http://www.gnu.org/licenses/>. */
26 /* Generic tree predicates we inherit. */
28 integer_onep integer_zerop integer_all_onesp integer_minus_onep
29 integer_each_onep integer_truep integer_nonzerop
30 real_zerop real_onep real_minus_onep
33 tree_expr_nonnegative_p
40 (define_operator_list tcc_comparison
41 lt le eq ne ge gt unordered ordered unlt unle ungt unge uneq ltgt)
42 (define_operator_list inverted_tcc_comparison
43 ge gt ne eq lt le ordered unordered ge gt le lt ltgt uneq)
44 (define_operator_list inverted_tcc_comparison_with_nans
45 unge ungt ne eq unlt unle ordered unordered ge gt le lt ltgt uneq)
46 (define_operator_list swapped_tcc_comparison
47 gt ge eq ne le lt unordered ordered ungt unge unlt unle uneq ltgt)
48 (define_operator_list simple_comparison lt le eq ne ge gt)
49 (define_operator_list swapped_simple_comparison gt ge eq ne le lt)
51 #include "cfn-operators.pd"
53 /* Define operand lists for math rounding functions {,i,l,ll}FN,
54 where the versions prefixed with "i" return an int, those prefixed with
55 "l" return a long and those prefixed with "ll" return a long long.
57 Also define operand lists:
59 X<FN>F for all float functions, in the order i, l, ll
60 X<FN> for all double functions, in the same order
61 X<FN>L for all long double functions, in the same order. */
62 #define DEFINE_INT_AND_FLOAT_ROUND_FN(FN) \
63 (define_operator_list X##FN##F BUILT_IN_I##FN##F \
66 (define_operator_list X##FN BUILT_IN_I##FN \
69 (define_operator_list X##FN##L BUILT_IN_I##FN##L \
73 DEFINE_INT_AND_FLOAT_ROUND_FN (FLOOR)
74 DEFINE_INT_AND_FLOAT_ROUND_FN (CEIL)
75 DEFINE_INT_AND_FLOAT_ROUND_FN (ROUND)
76 DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
78 /* Simplifications of operations with one constant operand and
79 simplifications to constants or single values. */
81 (for op (plus pointer_plus minus bit_ior bit_xor)
86 /* 0 +p index -> (type)index */
88 (pointer_plus integer_zerop @1)
89 (non_lvalue (convert @1)))
91 /* See if ARG1 is zero and X + ARG1 reduces to X.
92 Likewise if the operands are reversed. */
94 (plus:c @0 real_zerop@1)
95 (if (fold_real_zero_addition_p (type, @1, 0))
98 /* See if ARG1 is zero and X - ARG1 reduces to X. */
100 (minus @0 real_zerop@1)
101 (if (fold_real_zero_addition_p (type, @1, 1))
105 This is unsafe for certain floats even in non-IEEE formats.
106 In IEEE, it is unsafe because it gives the wrong result for NaNs.
107 Also note that operand_equal_p is always false if an operand
111 (if (!FLOAT_TYPE_P (type) || !HONOR_NANS (type))
112 { build_zero_cst (type); }))
115 (mult @0 integer_zerop@1)
118 /* Maybe fold x * 0 to 0. The expressions aren't the same
119 when x is NaN, since x * 0 is also NaN. Nor are they the
120 same in modes with signed zeros, since multiplying a
121 negative value by 0 gives -0, not +0. */
123 (mult @0 real_zerop@1)
124 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
127 /* In IEEE floating point, x*1 is not equivalent to x for snans.
128 Likewise for complex arithmetic with signed zeros. */
131 (if (!HONOR_SNANS (type)
132 && (!HONOR_SIGNED_ZEROS (type)
133 || !COMPLEX_FLOAT_TYPE_P (type)))
136 /* Transform x * -1.0 into -x. */
138 (mult @0 real_minus_onep)
139 (if (!HONOR_SNANS (type)
140 && (!HONOR_SIGNED_ZEROS (type)
141 || !COMPLEX_FLOAT_TYPE_P (type)))
144 /* X * 1, X / 1 -> X. */
145 (for op (mult trunc_div ceil_div floor_div round_div exact_div)
150 /* Preserve explicit divisions by 0: the C++ front-end wants to detect
151 undefined behavior in constexpr evaluation, and assuming that the division
152 traps enables better optimizations than these anyway. */
153 (for div (trunc_div ceil_div floor_div round_div exact_div)
154 /* 0 / X is always zero. */
156 (div integer_zerop@0 @1)
157 /* But not for 0 / 0 so that we can get the proper warnings and errors. */
158 (if (!integer_zerop (@1))
162 (div @0 integer_minus_onep@1)
163 (if (!TYPE_UNSIGNED (type))
168 /* But not for 0 / 0 so that we can get the proper warnings and errors.
169 And not for _Fract types where we can't build 1. */
170 (if (!integer_zerop (@0) && !ALL_FRACT_MODE_P (TYPE_MODE (type)))
171 { build_one_cst (type); }))
172 /* X / abs (X) is X < 0 ? -1 : 1. */
175 (if (INTEGRAL_TYPE_P (type)
176 && TYPE_OVERFLOW_UNDEFINED (type))
177 (cond (lt @0 { build_zero_cst (type); })
178 { build_minus_one_cst (type); } { build_one_cst (type); })))
181 (div:C @0 (negate @0))
182 (if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
183 && TYPE_OVERFLOW_UNDEFINED (type))
184 { build_minus_one_cst (type); })))
186 /* For unsigned integral types, FLOOR_DIV_EXPR is the same as
187 TRUNC_DIV_EXPR. Rewrite into the latter in this case. */
190 (if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
191 && TYPE_UNSIGNED (type))
194 /* Combine two successive divisions. Note that combining ceil_div
195 and floor_div is trickier and combining round_div even more so. */
196 (for div (trunc_div exact_div)
198 (div (div @0 INTEGER_CST@1) INTEGER_CST@2)
201 wide_int mul = wi::mul (@1, @2, TYPE_SIGN (type), &overflow_p);
204 (div @0 { wide_int_to_tree (type, mul); })
205 (if (TYPE_UNSIGNED (type)
206 || mul != wi::min_value (TYPE_PRECISION (type), SIGNED))
207 { build_zero_cst (type); })))))
209 /* Optimize A / A to 1.0 if we don't care about
210 NaNs or Infinities. */
213 (if (FLOAT_TYPE_P (type)
214 && ! HONOR_NANS (type)
215 && ! HONOR_INFINITIES (type))
216 { build_one_cst (type); }))
218 /* Optimize -A / A to -1.0 if we don't care about
219 NaNs or Infinities. */
221 (rdiv:C @0 (negate @0))
222 (if (FLOAT_TYPE_P (type)
223 && ! HONOR_NANS (type)
224 && ! HONOR_INFINITIES (type))
225 { build_minus_one_cst (type); }))
227 /* PR71078: x / abs(x) -> copysign (1.0, x) */
229 (rdiv:C (convert? @0) (convert? (abs @0)))
230 (if (SCALAR_FLOAT_TYPE_P (type)
231 && ! HONOR_NANS (type)
232 && ! HONOR_INFINITIES (type))
234 (if (types_match (type, float_type_node))
235 (BUILT_IN_COPYSIGNF { build_one_cst (type); } (convert @0)))
236 (if (types_match (type, double_type_node))
237 (BUILT_IN_COPYSIGN { build_one_cst (type); } (convert @0)))
238 (if (types_match (type, long_double_type_node))
239 (BUILT_IN_COPYSIGNL { build_one_cst (type); } (convert @0))))))
241 /* In IEEE floating point, x/1 is not equivalent to x for snans. */
244 (if (!HONOR_SNANS (type))
247 /* In IEEE floating point, x/-1 is not equivalent to -x for snans. */
249 (rdiv @0 real_minus_onep)
250 (if (!HONOR_SNANS (type))
253 (if (flag_reciprocal_math)
254 /* Convert (A/B)/C to A/(B*C) */
256 (rdiv (rdiv:s @0 @1) @2)
257 (rdiv @0 (mult @1 @2)))
259 /* Convert A/(B/C) to (A/B)*C */
261 (rdiv @0 (rdiv:s @1 @2))
262 (mult (rdiv @0 @1) @2)))
264 /* Optimize (X & (-A)) / A where A is a power of 2, to X >> log2(A) */
265 (for div (trunc_div ceil_div floor_div round_div exact_div)
267 (div (convert? (bit_and @0 INTEGER_CST@1)) INTEGER_CST@2)
268 (if (integer_pow2p (@2)
269 && tree_int_cst_sgn (@2) > 0
270 && wi::add (@2, @1) == 0
271 && tree_nop_conversion_p (type, TREE_TYPE (@0)))
272 (rshift (convert @0) { build_int_cst (integer_type_node,
273 wi::exact_log2 (@2)); }))))
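/* Illustrative sketch, not a pattern: at the C source level, with 32-bit
   unsigned arithmetic and a power-of-two divisor, the transform above
   corresponds to

     unsigned int masked_div (unsigned int x)
     {
       return (x & -16U) / 16;
     }

   being folded to x >> 4: the bit_and clears the low log2(16) bits, so the
   division by 16 degenerates to a plain right shift.  */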
275 /* If ARG1 is a constant, we can convert this to a multiply by the
276 reciprocal. This does not have the same rounding properties,
277 so only do this if -freciprocal-math. We can actually
278 always safely do it if ARG1 is a power of two, but it's hard to
279 tell if it is or not in a portable manner. */
280 (for cst (REAL_CST COMPLEX_CST VECTOR_CST)
284 (if (flag_reciprocal_math
287 { tree tem = const_binop (RDIV_EXPR, type, build_one_cst (type), @1); }
289 (mult @0 { tem; } )))
290 (if (cst != COMPLEX_CST)
291 (with { tree inverse = exact_inverse (type, @1); }
293 (mult @0 { inverse; } ))))))))
295 (for mod (ceil_mod floor_mod round_mod trunc_mod)
296 /* 0 % X is always zero. */
298 (mod integer_zerop@0 @1)
299 /* But not for 0 % 0 so that we can get the proper warnings and errors. */
300 (if (!integer_zerop (@1))
302 /* X % 1 is always zero. */
304 (mod @0 integer_onep)
305 { build_zero_cst (type); })
306 /* X % -1 is zero. */
308 (mod @0 integer_minus_onep@1)
309 (if (!TYPE_UNSIGNED (type))
310 { build_zero_cst (type); }))
314 /* But not for 0 % 0 so that we can get the proper warnings and errors. */
315 (if (!integer_zerop (@0))
316 { build_zero_cst (type); }))
317 /* (X % Y) % Y is just X % Y. */
319 (mod (mod@2 @0 @1) @1)
321 /* From extract_muldiv_1: (X * C1) % C2 is zero if C1 is a multiple of C2. */
323 (mod (mult @0 INTEGER_CST@1) INTEGER_CST@2)
324 (if (ANY_INTEGRAL_TYPE_P (type)
325 && TYPE_OVERFLOW_UNDEFINED (type)
326 && wi::multiple_of_p (@1, @2, TYPE_SIGN (type)))
327 { build_zero_cst (type); })))
329 /* X % -C is the same as X % C. */
331 (trunc_mod @0 INTEGER_CST@1)
332 (if (TYPE_SIGN (type) == SIGNED
333 && !TREE_OVERFLOW (@1)
335 && !TYPE_OVERFLOW_TRAPS (type)
336 /* Avoid this transformation if C is INT_MIN, i.e. C == -C. */
337 && !sign_bit_p (@1, @1))
338 (trunc_mod @0 (negate @1))))
340 /* X % -Y is the same as X % Y. */
342 (trunc_mod @0 (convert? (negate @1)))
343 (if (INTEGRAL_TYPE_P (type)
344 && !TYPE_UNSIGNED (type)
345 && !TYPE_OVERFLOW_TRAPS (type)
346 && tree_nop_conversion_p (type, TREE_TYPE (@1))
347 /* Avoid this transformation if X might be INT_MIN or
348 Y might be -1, because we would then change valid
349 INT_MIN % -(-1) into invalid INT_MIN % -1. */
350 && (expr_not_equal_to (@0, TYPE_MIN_VALUE (type))
351 || expr_not_equal_to (@1, wi::minus_one (TYPE_PRECISION
353 (trunc_mod @0 (convert @1))))
355 /* X - (X / Y) * Y is the same as X % Y. */
357 (minus (convert1? @0) (convert2? (mult:c (trunc_div @@0 @@1) @1)))
358 (if (INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
359 (convert (trunc_mod @0 @1))))
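/* Illustrative sketch, not a pattern: this is the defining identity of
   truncating division and remainder, e.g.

     int rem (int x, int y)
     {
       return x - (x / y) * y;
     }

   computes the same value as x % y wherever both are defined (y != 0 and
   not x == INT_MIN with y == -1).  */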
361 /* Optimize TRUNC_MOD_EXPR by a power of two into a BIT_AND_EXPR,
362 i.e. "X % C" into "X & (C - 1)", if X and C are positive.
363 Also optimize A % (C << N) where C is a power of 2,
364 to A & ((C << N) - 1). */
365 (match (power_of_two_cand @1)
367 (match (power_of_two_cand @1)
368 (lshift INTEGER_CST@1 @2))
369 (for mod (trunc_mod floor_mod)
371 (mod @0 (convert?@3 (power_of_two_cand@1 @2)))
372 (if ((TYPE_UNSIGNED (type)
373 || tree_expr_nonnegative_p (@0))
374 && tree_nop_conversion_p (type, TREE_TYPE (@3))
375 && integer_pow2p (@2) && tree_int_cst_sgn (@2) > 0)
376 (bit_and @0 (convert (minus @1 { build_int_cst (TREE_TYPE (@1), 1); }))))))
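/* Illustrative sketch, not a pattern: for unsigned (or provably nonnegative)
   operands a power-of-two modulus is just a mask of the low bits, e.g.

     unsigned int mod8 (unsigned int x)
     {
       return x % 8;
     }

   folds to x & 7, and likewise x % (4U << n) folds to x & ((4U << n) - 1).  */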
378 /* Simplify (unsigned t * 2)/2 -> unsigned t & 0x7FFFFFFF. */
380 (trunc_div (mult @0 integer_pow2p@1) @1)
381 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
382 (bit_and @0 { wide_int_to_tree
383 (type, wi::mask (TYPE_PRECISION (type) - wi::exact_log2 (@1),
384 false, TYPE_PRECISION (type))); })))
386 /* Simplify (unsigned t / 2) * 2 -> unsigned t & ~1. */
388 (mult (trunc_div @0 integer_pow2p@1) @1)
389 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
390 (bit_and @0 (negate @1))))
392 /* Simplify (t * 2) / 2 -> t. */
393 (for div (trunc_div ceil_div floor_div round_div exact_div)
395 (div (mult @0 @1) @1)
396 (if (ANY_INTEGRAL_TYPE_P (type)
397 && TYPE_OVERFLOW_UNDEFINED (type))
401 /* Simplify cos(-x) and cos(|x|) -> cos(x). Similarly for cosh. */
406 /* Simplify pow(-x, y) and pow(|x|,y) -> pow(x,y) if y is an even integer. */
409 (pows (op @0) REAL_CST@1)
410 (with { HOST_WIDE_INT n; }
411 (if (real_isinteger (&TREE_REAL_CST (@1), &n) && (n & 1) == 0)
413 /* Likewise for powi. */
416 (pows (op @0) INTEGER_CST@1)
417 (if (wi::bit_and (@1, 1) == 0)
419 /* Strip negate and abs from both operands of hypot. */
427 /* copysign(-x, y) and copysign(abs(x), y) -> copysign(x, y). */
428 (for copysigns (COPYSIGN)
430 (copysigns (op @0) @1)
433 /* abs(x)*abs(x) -> x*x. Should be valid for all types. */
438 /* cos(copysign(x, y)) -> cos(x). Similarly for cosh. */
442 (coss (copysigns @0 @1))
445 /* pow(copysign(x, y), z) -> pow(x, z) if z is an even integer. */
449 (pows (copysigns @0 @2) REAL_CST@1)
450 (with { HOST_WIDE_INT n; }
451 (if (real_isinteger (&TREE_REAL_CST (@1), &n) && (n & 1) == 0)
453 /* Likewise for powi. */
457 (pows (copysigns @0 @2) INTEGER_CST@1)
458 (if (wi::bit_and (@1, 1) == 0)
463 /* hypot(copysign(x, y), z) -> hypot(x, z). */
465 (hypots (copysigns @0 @1) @2)
467 /* hypot(x, copysign(y, z)) -> hypot(x, y). */
469 (hypots @0 (copysigns @1 @2))
472 /* copysign(x, CST) -> [-]abs (x). */
473 (for copysigns (COPYSIGN)
475 (copysigns @0 REAL_CST@1)
476 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
480 /* copysign(copysign(x, y), z) -> copysign(x, z). */
481 (for copysigns (COPYSIGN)
483 (copysigns (copysigns @0 @1) @2)
486 /* copysign(x,y)*copysign(x,y) -> x*x. */
487 (for copysigns (COPYSIGN)
489 (mult (copysigns@2 @0 @1) @2)
492 /* ccos(-x) -> ccos(x). Similarly for ccosh. */
493 (for ccoss (CCOS CCOSH)
498 /* cabs(-x) and cabs(conj(x)) -> cabs(x). */
499 (for ops (conj negate)
505 /* Fold (a * (1 << b)) into (a << b) */
507 (mult:c @0 (convert? (lshift integer_onep@1 @2)))
508 (if (! FLOAT_TYPE_P (type)
509 && tree_nop_conversion_p (type, TREE_TYPE (@1)))
512 /* Fold (C1/X)*C2 into (C1*C2)/X. */
514 (mult (rdiv@3 REAL_CST@0 @1) REAL_CST@2)
515 (if (flag_associative_math
518 { tree tem = const_binop (MULT_EXPR, type, @0, @2); }
520 (rdiv { tem; } @1)))))
522 /* Convert C1/(X*C2) into (C1/C2)/X */
524 (rdiv REAL_CST@0 (mult @1 REAL_CST@2))
525 (if (flag_reciprocal_math)
527 { tree tem = const_binop (RDIV_EXPR, type, @0, @2); }
529 (rdiv { tem; } @1)))))
531 /* Simplify ~X & X as zero. */
533 (bit_and:c (convert? @0) (convert? (bit_not @0)))
534 { build_zero_cst (type); })
536 /* PR71636: Transform x & ((1U << b) - 1) -> x & ~(~0U << b); */
538 (bit_and:c @0 (plus:s (lshift:s integer_onep @1) integer_minus_onep))
539 (if (TYPE_UNSIGNED (type))
540 (bit_and @0 (bit_not (lshift { build_all_ones_cst (type); } @1)))))
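/* Illustrative sketch, not a pattern: for unsigned types and a shift count b
   below the width, (1U << b) - 1 and ~(~0U << b) are the same mask (both
   have exactly the low b bits set), so e.g.

     unsigned int low_bits (unsigned int x, unsigned int b)
     {
       return x & ((1U << b) - 1);
     }

   is rewritten as x & ~(~0U << b).  */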
542 /* PR35691: Transform
543 (x == 0 & y == 0) -> (x | typeof(x)(y)) == 0.
544 (x != 0 | y != 0) -> (x | typeof(x)(y)) != 0. */
545 (for bitop (bit_and bit_ior)
548 (bitop (cmp @0 integer_zerop@2) (cmp @1 integer_zerop))
549 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
550 && INTEGRAL_TYPE_P (TREE_TYPE (@1))
551 && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
552 (cmp (bit_ior @0 (convert @1)) @2))))
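/* Illustrative sketch, not a pattern: at the source level this turns two
   comparisons plus a logical AND/OR into a single OR and one comparison,
   e.g.

     int both_zero (int x, int y)
     {
       return x == 0 && y == 0;
     }

   becomes (x | y) == 0, and likewise x != 0 || y != 0 becomes (x | y) != 0.  */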
554 /* Fold (A & ~B) - (A & B) into (A ^ B) - B. */
556 (minus (bit_and:cs @0 (bit_not @1)) (bit_and:cs @0 @1))
557 (minus (bit_xor @0 @1) @1))
559 (minus (bit_and:s @0 INTEGER_CST@2) (bit_and:s @0 INTEGER_CST@1))
560 (if (wi::bit_not (@2) == @1)
561 (minus (bit_xor @0 @1) @1)))
563 /* Fold (A & B) - (A & ~B) into B - (A ^ B). */
565 (minus (bit_and:cs @0 @1) (bit_and:cs @0 (bit_not @1)))
566 (minus @1 (bit_xor @0 @1)))
568 /* Simplify (X & ~Y) | (~X & Y) -> X ^ Y. */
570 (bit_ior (bit_and:c @0 (bit_not @1)) (bit_and:c (bit_not @0) @1))
573 (bit_ior:c (bit_and @0 INTEGER_CST@2) (bit_and (bit_not @0) INTEGER_CST@1))
574 (if (wi::bit_not (@2) == @1)
577 /* PR53979: Transform ((a ^ b) | a) -> (a | b) */
579 (bit_ior:c (bit_xor:c @0 @1) @0)
582 /* Simplify (~X & Y) to X ^ Y if we know that (X & ~Y) is 0. */
585 (bit_and (bit_not SSA_NAME@0) INTEGER_CST@1)
586 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
587 && (get_nonzero_bits (@0) & wi::bit_not (@1)) == 0)
591 /* X % Y is smaller than Y. */
594 (cmp (trunc_mod @0 @1) @1)
595 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
596 { constant_boolean_node (cmp == LT_EXPR, type); })))
599 (cmp @1 (trunc_mod @0 @1))
600 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
601 { constant_boolean_node (cmp == GT_EXPR, type); })))
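/* Illustrative sketch, not a pattern: for unsigned operands the remainder is
   always strictly smaller than the divisor whenever the operation is defined
   at all (y != 0), so e.g.

     int always_true (unsigned int x, unsigned int y)
     {
       return (x % y) < y;
     }

   folds to 1, and the reversed comparison y <= x % y folds to 0.  */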
605 (bit_ior @0 integer_all_onesp@1)
610 (bit_ior @0 integer_zerop)
615 (bit_and @0 integer_zerop@1)
621 (for op (bit_ior bit_xor plus)
623 (op:c (convert? @0) (convert? (bit_not @0)))
624 (convert { build_all_ones_cst (TREE_TYPE (@0)); })))
629 { build_zero_cst (type); })
631 /* Canonicalize X ^ ~0 to ~X. */
633 (bit_xor @0 integer_all_onesp@1)
638 (bit_and @0 integer_all_onesp)
641 /* x & x -> x, x | x -> x */
642 (for bitop (bit_and bit_ior)
647 /* x & C -> x if we know that x & ~C == 0. */
650 (bit_and SSA_NAME@0 INTEGER_CST@1)
651 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
652 && (get_nonzero_bits (@0) & wi::bit_not (@1)) == 0)
656 /* x + (x & 1) -> (x + 1) & ~1 */
658 (plus:c @0 (bit_and:s @0 integer_onep@1))
659 (bit_and (plus @0 @1) (bit_not @1)))
661 /* x & ~(x & y) -> x & ~y */
662 /* x | ~(x | y) -> x | ~y */
663 (for bitop (bit_and bit_ior)
665 (bitop:c @0 (bit_not (bitop:cs @0 @1)))
666 (bitop @0 (bit_not @1))))
668 /* (x | y) & ~x -> y & ~x */
669 /* (x & y) | ~x -> y | ~x */
670 (for bitop (bit_and bit_ior)
671 rbitop (bit_ior bit_and)
673 (bitop:c (rbitop:c @0 @1) (bit_not@2 @0))
676 /* (x & y) ^ (x | y) -> x ^ y */
678 (bit_xor:c (bit_and @0 @1) (bit_ior @0 @1))
681 /* (x ^ y) ^ (x | y) -> x & y */
683 (bit_xor:c (bit_xor @0 @1) (bit_ior @0 @1))
686 /* (x & y) + (x ^ y) -> x | y */
687 /* (x & y) | (x ^ y) -> x | y */
688 /* (x & y) ^ (x ^ y) -> x | y */
689 (for op (plus bit_ior bit_xor)
691 (op:c (bit_and @0 @1) (bit_xor @0 @1))
694 /* (x & y) + (x | y) -> x + y */
696 (plus:c (bit_and @0 @1) (bit_ior @0 @1))
699 /* (x + y) - (x | y) -> x & y */
701 (minus (plus @0 @1) (bit_ior @0 @1))
702 (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type)
703 && !TYPE_SATURATING (type))
706 /* (x + y) - (x & y) -> x | y */
708 (minus (plus @0 @1) (bit_and @0 @1))
709 (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type)
710 && !TYPE_SATURATING (type))
713 /* (x | y) - (x ^ y) -> x & y */
715 (minus (bit_ior @0 @1) (bit_xor @0 @1))
718 /* (x | y) - (x & y) -> x ^ y */
720 (minus (bit_ior @0 @1) (bit_and @0 @1))
723 /* (x | y) & ~(x & y) -> x ^ y */
725 (bit_and:c (bit_ior @0 @1) (bit_not (bit_and @0 @1)))
728 /* (x | y) & (~x ^ y) -> x & y */
730 (bit_and:c (bit_ior:c @0 @1) (bit_xor:c @1 (bit_not @0)))
733 /* ~x & ~y -> ~(x | y)
734 ~x | ~y -> ~(x & y) */
735 (for op (bit_and bit_ior)
736 rop (bit_ior bit_and)
738 (op (convert1? (bit_not @0)) (convert2? (bit_not @1)))
739 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
740 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
741 (bit_not (rop (convert @0) (convert @1))))))
743 /* If we are XORing or adding two BIT_AND_EXPR's, both of which are and'ing
744 with a constant, and the two constants have no bits in common,
745 we should treat this as a BIT_IOR_EXPR since this may produce more simplifications. */
747 (for op (bit_xor plus)
749 (op (convert1? (bit_and@4 @0 INTEGER_CST@1))
750 (convert2? (bit_and@5 @2 INTEGER_CST@3)))
751 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
752 && tree_nop_conversion_p (type, TREE_TYPE (@2))
753 && wi::bit_and (@1, @3) == 0)
754 (bit_ior (convert @4) (convert @5)))))
756 /* (X | Y) ^ X -> Y & ~X. */
758 (bit_xor:c (convert1? (bit_ior:c @@0 @1)) (convert2? @0))
759 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
760 (convert (bit_and @1 (bit_not @0)))))
762 /* Convert ~X ^ ~Y to X ^ Y. */
764 (bit_xor (convert1? (bit_not @0)) (convert2? (bit_not @1)))
765 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
766 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
767 (bit_xor (convert @0) (convert @1))))
769 /* Convert ~X ^ C to X ^ ~C. */
771 (bit_xor (convert? (bit_not @0)) INTEGER_CST@1)
772 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
773 (bit_xor (convert @0) (bit_not @1))))
775 /* Fold (X & Y) ^ Y and (X ^ Y) & Y as ~X & Y. */
776 (for opo (bit_and bit_xor)
777 opi (bit_xor bit_and)
779 (opo:c (opi:c @0 @1) @1)
780 (bit_and (bit_not @0) @1)))
782 /* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both
783 operands are another bit-wise operation with a common input. If so,
784 distribute the bit operations to save an operation and possibly two if
785 constants are involved. For example, convert
786 (A | B) & (A | C) into A | (B & C)
787 Further simplification will occur if B and C are constants. */
788 (for op (bit_and bit_ior bit_xor)
789 rop (bit_ior bit_and bit_and)
791 (op (convert? (rop:c @@0 @1)) (convert? (rop:c @0 @2)))
792 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
793 && tree_nop_conversion_p (type, TREE_TYPE (@2)))
794 (rop (convert @0) (op (convert @1) (convert @2))))))
796 /* Some simple reassociation for bit operations, also handled in reassoc. */
797 /* (X & Y) & Y -> X & Y
798 (X | Y) | Y -> X | Y */
799 (for op (bit_and bit_ior)
801 (op:c (convert1?@2 (op:c @0 @@1)) (convert2? @1))
803 /* (X ^ Y) ^ Y -> X */
805 (bit_xor:c (convert1? (bit_xor:c @0 @@1)) (convert2? @1))
807 /* (X & Y) & (X & Z) -> (X & Y) & Z
808 (X | Y) | (X | Z) -> (X | Y) | Z */
809 (for op (bit_and bit_ior)
811 (op:c (convert1?@3 (op:c@4 @0 @1)) (convert2?@5 (op:c@6 @0 @2)))
812 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
813 && tree_nop_conversion_p (type, TREE_TYPE (@2)))
814 (if (single_use (@5) && single_use (@6))
816 (if (single_use (@3) && single_use (@4))
817 (op (convert @1) @5))))))
818 /* (X ^ Y) ^ (X ^ Z) -> Y ^ Z */
820 (bit_xor (convert1? (bit_xor:c @0 @1)) (convert2? (bit_xor:c @0 @2)))
821 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
822 && tree_nop_conversion_p (type, TREE_TYPE (@2)))
823 (bit_xor (convert @1) (convert @2))))
832 (abs tree_expr_nonnegative_p@0)
835 /* A few cases of fold-const.c negate_expr_p predicate. */
838 (if ((INTEGRAL_TYPE_P (type)
839 && TYPE_OVERFLOW_WRAPS (type))
840 || (!TYPE_OVERFLOW_SANITIZED (type)
841 && may_negate_without_overflow_p (t)))))
846 (if (!TYPE_OVERFLOW_SANITIZED (type))))
849 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (t)))))
850 /* VECTOR_CST handling of non-wrapping types would recurse in unsupported
854 (if (FLOAT_TYPE_P (TREE_TYPE (type)) || TYPE_OVERFLOW_WRAPS (type))))
856 /* (-A) * (-B) -> A * B */
858 (mult:c (convert1? (negate @0)) (convert2? negate_expr_p@1))
859 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
860 && tree_nop_conversion_p (type, TREE_TYPE (@1)))
861 (mult (convert @0) (convert (negate @1)))))
863 /* -(A + B) -> (-B) - A. */
865 (negate (plus:c @0 negate_expr_p@1))
866 (if (!HONOR_SIGN_DEPENDENT_ROUNDING (element_mode (type))
867 && !HONOR_SIGNED_ZEROS (element_mode (type)))
868 (minus (negate @1) @0)))
870 /* A - B -> A + (-B) if B is easily negatable. */
872 (minus @0 negate_expr_p@1)
873 (if (!FIXED_POINT_TYPE_P (type))
874 (plus @0 (negate @1))))
876 /* Try to fold (type) X op CST -> (type) (X op ((type-x) CST))
878 For bitwise binary operations apply operand conversions to the
879 binary operation result instead of to the operands. This allows
880 combining successive conversions and bitwise binary operations.
881 We combine the above two cases by using a conditional convert. */
882 (for bitop (bit_and bit_ior bit_xor)
884 (bitop (convert @0) (convert? @1))
885 (if (((TREE_CODE (@1) == INTEGER_CST
886 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
887 && int_fits_type_p (@1, TREE_TYPE (@0)))
888 || types_match (@0, @1))
889 /* ??? This transform conflicts with fold-const.c doing
890 Convert (T)(x & c) into (T)x & (T)c, if c is an integer
891 constant (if x has signed type, the sign bit cannot be set
892 in c). This folds extension into the BIT_AND_EXPR.
893 Restrict it to GIMPLE to avoid endless recursions. */
894 && (bitop != BIT_AND_EXPR || GIMPLE)
895 && (/* That's a good idea if the conversion widens the operand, thus
896 after hoisting the conversion the operation will be narrower. */
897 TYPE_PRECISION (TREE_TYPE (@0)) < TYPE_PRECISION (type)
898 /* It's also a good idea if the conversion is to a non-integer
900 || GET_MODE_CLASS (TYPE_MODE (type)) != MODE_INT
901 /* Or if the precision of TO is not the same as the precision
903 || TYPE_PRECISION (type) != GET_MODE_PRECISION (TYPE_MODE (type))))
904 (convert (bitop @0 (convert @1))))))
906 (for bitop (bit_and bit_ior)
907 rbitop (bit_ior bit_and)
908 /* (x | y) & x -> x */
909 /* (x & y) | x -> x */
911 (bitop:c (rbitop:c @0 @1) @0)
913 /* (~x | y) & x -> x & y */
914 /* (~x & y) | x -> x | y */
916 (bitop:c (rbitop:c (bit_not @0) @1) @0)
919 /* (x | CST1) & CST2 -> (x & CST2) | (CST1 & CST2) */
921 (bit_and (bit_ior @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
922 (bit_ior (bit_and @0 @2) (bit_and @1 @2)))
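/* Illustrative sketch, not a pattern: this is plain distribution of AND over
   OR with both constants folded, e.g.

     unsigned int f (unsigned int x)
     {
       return (x | 0x0fU) & 0x3cU;
     }

   becomes (x & 0x3cU) | 0x0cU, since 0x0f & 0x3c == 0x0c.  */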
924 /* Combine successive equal operations with constants. */
925 (for bitop (bit_and bit_ior bit_xor)
927 (bitop (bitop @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
928 (bitop @0 (bitop @1 @2))))
930 /* Try simple folding for X op !X, and X op X with the help
931 of the truth_valued_p and logical_inverted_value predicates. */
932 (match truth_valued_p
934 (if (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) == 1)))
935 (for op (tcc_comparison truth_and truth_andif truth_or truth_orif truth_xor)
936 (match truth_valued_p
938 (match truth_valued_p
941 (match (logical_inverted_value @0)
943 (match (logical_inverted_value @0)
944 (bit_not truth_valued_p@0))
945 (match (logical_inverted_value @0)
946 (eq @0 integer_zerop))
947 (match (logical_inverted_value @0)
948 (ne truth_valued_p@0 integer_truep))
949 (match (logical_inverted_value @0)
950 (bit_xor truth_valued_p@0 integer_truep))
954 (bit_and:c @0 (logical_inverted_value @0))
955 { build_zero_cst (type); })
956 /* X | !X and X ^ !X -> 1, if X is truth-valued. */
957 (for op (bit_ior bit_xor)
959 (op:c truth_valued_p@0 (logical_inverted_value @0))
960 { constant_boolean_node (true, type); }))
961 /* X ==/!= !X is false/true. */
964 (op:c truth_valued_p@0 (logical_inverted_value @0))
965 { constant_boolean_node (op == NE_EXPR ? true : false, type); }))
969 (bit_not (bit_not @0))
972 /* Convert ~ (-A) to A - 1. */
974 (bit_not (convert? (negate @0)))
975 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
976 || !TYPE_UNSIGNED (TREE_TYPE (@0)))
977 (convert (minus @0 { build_each_one_cst (TREE_TYPE (@0)); }))))
979 /* Convert ~ (A - 1) or ~ (A + -1) to -A. */
981 (bit_not (convert? (minus @0 integer_each_onep)))
982 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
983 || !TYPE_UNSIGNED (TREE_TYPE (@0)))
984 (convert (negate @0))))
986 (bit_not (convert? (plus @0 integer_all_onesp)))
987 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
988 || !TYPE_UNSIGNED (TREE_TYPE (@0)))
989 (convert (negate @0))))
991 /* Part of convert ~(X ^ Y) to ~X ^ Y or X ^ ~Y if ~X or ~Y simplify. */
993 (bit_not (convert? (bit_xor @0 INTEGER_CST@1)))
994 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
995 (convert (bit_xor @0 (bit_not @1)))))
997 (bit_not (convert? (bit_xor:c (bit_not @0) @1)))
998 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
999 (convert (bit_xor @0 @1))))
1001 /* (x & ~m) | (y & m) -> ((x ^ y) & m) ^ x */
1003 (bit_ior:c (bit_and:cs @0 (bit_not @2)) (bit_and:cs @1 @2))
1004 (bit_xor (bit_and (bit_xor @0 @1) @2) @0))
1006 /* Fold A - (A & B) into ~B & A. */
1008 (minus (convert1? @0) (convert2?:s (bit_and:cs @@0 @1)))
1009 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
1010 && tree_nop_conversion_p (type, TREE_TYPE (@1)))
1011 (convert (bit_and (bit_not @1) @0))))
1013 /* For integral types with undefined overflow and C != 0 fold
1014 x * C EQ/NE y * C into x EQ/NE y. */
1017 (cmp (mult:c @0 @1) (mult:c @2 @1))
1018 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
1019 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1020 && tree_expr_nonzero_p (@1))
1023 /* For integral types with undefined overflow and C != 0 fold
1024 x * C RELOP y * C into:
1026 x RELOP y for nonnegative C
1027 y RELOP x for negative C */
1028 (for cmp (lt gt le ge)
1030 (cmp (mult:c @0 @1) (mult:c @2 @1))
1031 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
1032 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1033 (if (tree_expr_nonnegative_p (@1) && tree_expr_nonzero_p (@1))
1035 (if (TREE_CODE (@1) == INTEGER_CST
1036 && wi::neg_p (@1, TYPE_SIGN (TREE_TYPE (@1))))
1039 /* ((X inner_op C0) outer_op C1)
1040 With X being a tree where value_range has reasoned certain bits to always be
1041 zero throughout its computed value range,
1042 inner_op = {|,^}, outer_op = {|,^} and inner_op != outer_op
1043 where zero_mask has 1's for all bits that are sure to be 0 in
1045 if (inner_op == '^') C0 &= ~C1;
1046 if ((C0 & ~zero_mask) == 0) then emit (X outer_op (C0 outer_op C1))
1047 if ((C1 & ~zero_mask) == 0) then emit (X inner_op (C0 outer_op C1))
1049 (for inner_op (bit_ior bit_xor)
1050 outer_op (bit_xor bit_ior)
1053 (inner_op:s @2 INTEGER_CST@0) INTEGER_CST@1)
1057 wide_int zero_mask_not;
1061 if (TREE_CODE (@2) == SSA_NAME)
1062 zero_mask_not = get_nonzero_bits (@2);
1066 if (inner_op == BIT_XOR_EXPR)
1068 C0 = wi::bit_and_not (@0, @1);
1069 cst_emit = wi::bit_or (C0, @1);
1074 cst_emit = wi::bit_xor (@0, @1);
1077 (if (!fail && wi::bit_and (C0, zero_mask_not) == 0)
1078 (outer_op @2 { wide_int_to_tree (type, cst_emit); })
1079 (if (!fail && wi::bit_and (@1, zero_mask_not) == 0)
1080 (inner_op @2 { wide_int_to_tree (type, cst_emit); }))))))
1082 /* Associate (p +p off1) +p off2 as (p +p (off1 + off2)). */
1084 (pointer_plus (pointer_plus:s @0 @1) @3)
1085 (pointer_plus @0 (plus @1 @3)))
1091 tem4 = (unsigned long) tem3;
1096 (pointer_plus @0 (convert?@2 (minus@3 (convert @1) (convert @0))))
1097 /* Conditionally look through a sign-changing conversion. */
1098 (if (TYPE_PRECISION (TREE_TYPE (@2)) == TYPE_PRECISION (TREE_TYPE (@3))
1099 && ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@1)))
1100 || (GENERIC && type == TREE_TYPE (@1))))
1104 tem = (sizetype) ptr;
1108 and produce the simpler and easier to analyze with respect to alignment
1109 ... = ptr & ~algn; */
1111 (pointer_plus @0 (negate (bit_and (convert @0) INTEGER_CST@1)))
1112 (with { tree algn = wide_int_to_tree (TREE_TYPE (@0), wi::bit_not (@1)); }
1113 (bit_and @0 { algn; })))
1115 /* Try folding difference of addresses. */
1117 (minus (convert ADDR_EXPR@0) (convert @1))
1118 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1119 (with { HOST_WIDE_INT diff; }
1120 (if (ptr_difference_const (@0, @1, &diff))
1121 { build_int_cst_type (type, diff); }))))
1123 (minus (convert @0) (convert ADDR_EXPR@1))
1124 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1125 (with { HOST_WIDE_INT diff; }
1126 (if (ptr_difference_const (@0, @1, &diff))
1127 { build_int_cst_type (type, diff); }))))
1129 /* If arg0 is derived from the address of an object or function, we may
1130 be able to fold this expression using the object or function's alignment. */
1133 (bit_and (convert? @0) INTEGER_CST@1)
1134 (if (POINTER_TYPE_P (TREE_TYPE (@0))
1135 && tree_nop_conversion_p (type, TREE_TYPE (@0)))
1139 unsigned HOST_WIDE_INT bitpos;
1140 get_pointer_alignment_1 (@0, &align, &bitpos);
1142 (if (wi::ltu_p (@1, align / BITS_PER_UNIT))
1143 { wide_int_to_tree (type, wi::bit_and (@1, bitpos / BITS_PER_UNIT)); }))))
1146 /* We can't reassociate at all for saturating types. */
1147 (if (!TYPE_SATURATING (type))
1149 /* Contract negates. */
1150 /* A + (-B) -> A - B */
1152 (plus:c @0 (convert? (negate @1)))
1153 /* Apply STRIP_NOPS on the negate. */
1154 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
1155 && !TYPE_OVERFLOW_SANITIZED (type))
1159 if (INTEGRAL_TYPE_P (type)
1160 && TYPE_OVERFLOW_WRAPS (type) != TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1)))
1161 t1 = TYPE_OVERFLOW_WRAPS (type) ? type : TREE_TYPE (@1);
1163 (convert (minus (convert:t1 @0) (convert:t1 @1))))))
1164 /* A - (-B) -> A + B */
1166 (minus @0 (convert? (negate @1)))
1167 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
1168 && !TYPE_OVERFLOW_SANITIZED (type))
1172 if (INTEGRAL_TYPE_P (type)
1173 && TYPE_OVERFLOW_WRAPS (type) != TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1)))
1174 t1 = TYPE_OVERFLOW_WRAPS (type) ? type : TREE_TYPE (@1);
1176 (convert (plus (convert:t1 @0) (convert:t1 @1))))))
1179 (negate (convert? (negate @1)))
1180 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
1181 && !TYPE_OVERFLOW_SANITIZED (type))
1184 /* We can't reassociate floating-point unless -fassociative-math
1185 or fixed-point plus or minus because of saturation to +-Inf. */
1186 (if ((!FLOAT_TYPE_P (type) || flag_associative_math)
1187 && !FIXED_POINT_TYPE_P (type))
1189 /* Match patterns that allow contracting a plus-minus pair
1190 irrespective of overflow issues. */
1191 /* (A +- B) - A -> +- B */
1192 /* (A +- B) -+ B -> A */
1193 /* A - (A +- B) -> -+ B */
1194 /* A +- (B -+ A) -> +- B */
1196 (minus (plus:c @0 @1) @0)
1199 (minus (minus @0 @1) @0)
1202 (plus:c (minus @0 @1) @1)
1205 (minus @0 (plus:c @0 @1))
1208 (minus @0 (minus @0 @1))
1211 /* (A +- CST1) +- CST2 -> A + CST3 */
1212 (for outer_op (plus minus)
1213 (for inner_op (plus minus)
1215 (outer_op (inner_op @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
1216 /* If the constant operation overflows we cannot do the transform
1217 as we would introduce undefined overflow, for example
1218 with (a - 1) + INT_MIN. */
1219 (with { tree cst = const_binop (outer_op == inner_op
1220 ? PLUS_EXPR : MINUS_EXPR, type, @1, @2); }
1221 (if (cst && !TREE_OVERFLOW (cst))
1222 (inner_op @0 { cst; } ))))))
1224 /* (CST1 - A) +- CST2 -> CST3 - A */
1225 (for outer_op (plus minus)
1227 (outer_op (minus CONSTANT_CLASS_P@1 @0) CONSTANT_CLASS_P@2)
1228 (with { tree cst = const_binop (outer_op, type, @1, @2); }
1229 (if (cst && !TREE_OVERFLOW (cst))
1230 (minus { cst; } @0)))))
1232 /* CST1 - (CST2 - A) -> CST3 + A */
1234 (minus CONSTANT_CLASS_P@1 (minus CONSTANT_CLASS_P@2 @0))
1235 (with { tree cst = const_binop (MINUS_EXPR, type, @1, @2); }
1236 (if (cst && !TREE_OVERFLOW (cst))
1237 (plus { cst; } @0))))
1241 (plus:c (bit_not @0) @0)
1242 (if (!TYPE_OVERFLOW_TRAPS (type))
1243 { build_all_ones_cst (type); }))
1247 (plus (convert? (bit_not @0)) integer_each_onep)
1248 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1249 (negate (convert @0))))
1253 (minus (convert? (negate @0)) integer_each_onep)
1254 (if (!TYPE_OVERFLOW_TRAPS (type)
1255 && tree_nop_conversion_p (type, TREE_TYPE (@0)))
1256 (bit_not (convert @0))))
1260 (minus integer_all_onesp @0)
1263 /* (T)(P + A) - (T)P -> (T) A */
1264 (for add (plus pointer_plus)
1266 (minus (convert (add @@0 @1))
1268 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
1269 /* For integer types, if A has a smaller type
1270 than T the result depends on the possible
1272 E.g. T=size_t, A=(unsigned)4294967295, P>0.
1273 However, if an overflow in P + A would cause
1274 undefined behavior, we can assume that there
1276 || (INTEGRAL_TYPE_P (TREE_TYPE (@0))
1277 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1278 /* For pointer types, if the conversion of A to the
1279 final type requires a sign- or zero-extension,
1280 then we have to punt - it is not defined which
1282 || (POINTER_TYPE_P (TREE_TYPE (@0))
1283 && TREE_CODE (@1) == INTEGER_CST
1284 && tree_int_cst_sign_bit (@1) == 0))
1287 /* (T)P - (T)(P + A) -> -(T) A */
1288 (for add (plus pointer_plus)
1291 (convert (add @@0 @1)))
1292 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
1293 /* For integer types, if A has a smaller type
1294 than T the result depends on the possible
1296 E.g. T=size_t, A=(unsigned)4294967295, P>0.
1297 However, if an overflow in P + A would cause
1298 undefined behavior, we can assume that there
1300 || (INTEGRAL_TYPE_P (TREE_TYPE (@0))
1301 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1302 /* For pointer types, if the conversion of A to the
1303 final type requires a sign- or zero-extension,
1304 then we have to punt - it is not defined which
1306 || (POINTER_TYPE_P (TREE_TYPE (@0))
1307 && TREE_CODE (@1) == INTEGER_CST
1308 && tree_int_cst_sign_bit (@1) == 0))
1309 (negate (convert @1)))))
1311 /* (T)(P + A) - (T)(P + B) -> (T)A - (T)B */
1312 (for add (plus pointer_plus)
1314 (minus (convert (add @@0 @1))
1315 (convert (add @0 @2)))
1316 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
1317 /* For integer types, if A has a smaller type
1318 than T the result depends on the possible
1320 E.g. T=size_t, A=(unsigned)4294967295, P>0.
1321 However, if an overflow in P + A would cause
1322 undefined behavior, we can assume that there
1324 || (INTEGRAL_TYPE_P (TREE_TYPE (@0))
1325 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1326 /* For pointer types, if the conversion of A to the
1327 final type requires a sign- or zero-extension,
1328 then we have to punt - it is not defined which
1330 || (POINTER_TYPE_P (TREE_TYPE (@0))
1331 && TREE_CODE (@1) == INTEGER_CST
1332 && tree_int_cst_sign_bit (@1) == 0
1333 && TREE_CODE (@2) == INTEGER_CST
1334 && tree_int_cst_sign_bit (@2) == 0))
1335 (minus (convert @1) (convert @2)))))))
1338 /* Simplifications of MIN_EXPR, MAX_EXPR, fmin() and fmax(). */
1340 (for minmax (min max FMIN FMAX)
1344 /* min(max(x,y),y) -> y. */
1346 (min:c (max:c @0 @1) @1)
1348 /* max(min(x,y),y) -> y. */
1350 (max:c (min:c @0 @1) @1)
1352 /* max(a,-a) -> abs(a). */
1354 (max:c @0 (negate @0))
1355 (if (TREE_CODE (type) != COMPLEX_TYPE
1356 && (! ANY_INTEGRAL_TYPE_P (type)
1357 || TYPE_OVERFLOW_UNDEFINED (type)))
1359 /* min(a,-a) -> -abs(a). */
1361 (min:c @0 (negate @0))
1362 (if (TREE_CODE (type) != COMPLEX_TYPE
1363 && (! ANY_INTEGRAL_TYPE_P (type)
1364 || TYPE_OVERFLOW_UNDEFINED (type)))
1369 (if (INTEGRAL_TYPE_P (type)
1370 && TYPE_MIN_VALUE (type)
1371 && operand_equal_p (@1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST))
1373 (if (INTEGRAL_TYPE_P (type)
1374 && TYPE_MAX_VALUE (type)
1375 && operand_equal_p (@1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST))
1380 (if (INTEGRAL_TYPE_P (type)
1381 && TYPE_MAX_VALUE (type)
1382 && operand_equal_p (@1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST))
1384 (if (INTEGRAL_TYPE_P (type)
1385 && TYPE_MIN_VALUE (type)
1386 && operand_equal_p (@1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST))
1389 /* max (a, a + CST) -> a + CST where CST is positive. */
1390 /* max (a, a + CST) -> a where CST is negative. */
1392 (max:c @0 (plus@2 @0 INTEGER_CST@1))
1393 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1394 (if (tree_int_cst_sgn (@1) > 0)
1398 /* min (a, a + CST) -> a where CST is positive. */
1399 /* min (a, a + CST) -> a + CST where CST is negative. */
1401 (min:c @0 (plus@2 @0 INTEGER_CST@1))
1402 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1403 (if (tree_int_cst_sgn (@1) > 0)
1407 /* (convert (minmax (convert x) c)) -> minmax (x c) if x is promoted
1408 and the outer convert demotes the expression back to x's type. */
1409 (for minmax (min max)
1411 (convert (minmax@0 (convert @1) INTEGER_CST@2))
1412 (if (INTEGRAL_TYPE_P (type)
1413 && types_match (@1, type) && int_fits_type_p (@2, type)
1414 && TYPE_SIGN (TREE_TYPE (@0)) == TYPE_SIGN (type)
1415 && TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (type))
1416 (minmax @1 (convert @2)))))
1418 (for minmax (FMIN FMAX)
1419 /* If either argument is NaN, return the other one. Avoid the
1420 transformation if we get (and honor) a signalling NaN. */
1422 (minmax:c @0 REAL_CST@1)
1423 (if (real_isnan (TREE_REAL_CST_PTR (@1))
1424 && (!HONOR_SNANS (@1) || !TREE_REAL_CST (@1).signalling))
1426 /* Convert fmin/fmax to MIN_EXPR/MAX_EXPR. C99 requires these
1427 functions to return the numeric arg if the other one is NaN.
1428 MIN and MAX don't honor that, so only transform if -ffinite-math-only
1429 is set. C99 doesn't require -0.0 to be handled, so we don't have to
1430 worry about it either. */
1431 (if (flag_finite_math_only)
1438 /* min (-A, -B) -> -max (A, B) */
1439 (for minmax (min max FMIN FMAX)
1440 maxmin (max min FMAX FMIN)
1442 (minmax (negate:s@2 @0) (negate:s@3 @1))
1443 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
1444 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1445 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
1446 (negate (maxmin @0 @1)))))
1447 /* MIN (~X, ~Y) -> ~MAX (X, Y)
1448 MAX (~X, ~Y) -> ~MIN (X, Y) */
1449 (for minmax (min max)
1452 (minmax (bit_not:s@2 @0) (bit_not:s@3 @1))
1453 (bit_not (maxmin @0 @1))))
1455 /* MIN (X, Y) == X -> X <= Y */
1456 (for minmax (min min max max)
1460 (cmp:c (minmax:c @0 @1) @0)
1461 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)))
1463 /* MIN (X, 5) == 0 -> X == 0
1464 MIN (X, 5) == 7 -> false */
1467 (cmp (min @0 INTEGER_CST@1) INTEGER_CST@2)
1468 (if (wi::lt_p (@1, @2, TYPE_SIGN (TREE_TYPE (@0))))
1469 { constant_boolean_node (cmp == NE_EXPR, type); }
1470 (if (wi::gt_p (@1, @2, TYPE_SIGN (TREE_TYPE (@0))))
1474 (cmp (max @0 INTEGER_CST@1) INTEGER_CST@2)
1475 (if (wi::gt_p (@1, @2, TYPE_SIGN (TREE_TYPE (@0))))
1476 { constant_boolean_node (cmp == NE_EXPR, type); }
1477 (if (wi::lt_p (@1, @2, TYPE_SIGN (TREE_TYPE (@0))))
1479 /* MIN (X, C1) < C2 -> X < C2 || C1 < C2 */
1480 (for minmax (min min max max min min max max )
1481 cmp (lt le gt ge gt ge lt le )
1482 comb (bit_ior bit_ior bit_ior bit_ior bit_and bit_and bit_and bit_and)
1484 (cmp (minmax @0 INTEGER_CST@1) INTEGER_CST@2)
1485 (comb (cmp @0 @2) (cmp @1 @2))))
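/* Illustrative sketch, not a pattern: with both constants visible the second
   comparison folds to a constant right away, e.g.

     int f (int x)
     {
       int m = x < 5 ? x : 5;
       return m < 3;
     }

   the MIN_EXPR comparison becomes x < 3 || 5 < 3, i.e. just x < 3; with
   m < 10 instead it would become x < 10 || 5 < 10, i.e. constant true.  */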
1487 /* Simplifications of shifts and rotates. */
1489 (for rotate (lrotate rrotate)
1491 (rotate integer_all_onesp@0 @1)
1494 /* Optimize -1 >> x for arithmetic right shifts. */
1496 (rshift integer_all_onesp@0 @1)
1497 (if (!TYPE_UNSIGNED (type)
1498 && tree_expr_nonnegative_p (@1))
1501 /* Optimize (x >> c) << c into x & (-1<<c). */
1503 (lshift (rshift @0 INTEGER_CST@1) @1)
1504 (if (wi::ltu_p (@1, element_precision (type)))
1505 (bit_and @0 (lshift { build_minus_one_cst (type); } @1))))
1507 /* Optimize (x << c) >> c into x & ((unsigned)-1 >> c) for unsigned types. */
1510 (rshift (lshift @0 INTEGER_CST@1) @1)
1511 (if (TYPE_UNSIGNED (type)
1512 && (wi::ltu_p (@1, element_precision (type))))
1513 (bit_and @0 (rshift { build_minus_one_cst (type); } @1))))
1515 (for shiftrotate (lrotate rrotate lshift rshift)
1517 (shiftrotate @0 integer_zerop)
1520 (shiftrotate integer_zerop@0 @1)
1522 /* Prefer vector1 << scalar to vector1 << vector2
1523 if vector2 is uniform. */
1524 (for vec (VECTOR_CST CONSTRUCTOR)
1526 (shiftrotate @0 vec@1)
1527 (with { tree tem = uniform_vector_p (@1); }
1529 (shiftrotate @0 { tem; }))))))
1531 /* Simplify X << Y to X if Y's low `width' bits are known to be zero,
1532 as the only valid shift count is then Y == 0. Similarly for X >> Y. */
1534 (for shift (lshift rshift)
1536 (shift @0 SSA_NAME@1)
1537 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1)))
1539 int width = ceil_log2 (element_precision (TREE_TYPE (@0)));
1540 int prec = TYPE_PRECISION (TREE_TYPE (@1));
1542 (if ((get_nonzero_bits (@1) & wi::mask (width, false, prec)) == 0)
1546 /* Rewrite an LROTATE_EXPR by a constant into an
1547 RROTATE_EXPR by a new constant. */
1549 (lrotate @0 INTEGER_CST@1)
1550 (rrotate @0 { const_binop (MINUS_EXPR, TREE_TYPE (@1),
1551 build_int_cst (TREE_TYPE (@1),
1552 element_precision (type)), @1); }))
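/* Illustrative sketch, not a pattern: rotating an N-bit value left by C is
   the same as rotating it right by N - C, e.g. for 32-bit unsigned int

     unsigned int rotl8 (unsigned int x)
     {
       return (x << 8) | (x >> 24);
     }

   once represented as an LROTATE_EXPR by 8 is canonicalized here to an
   RROTATE_EXPR by 24.  */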
1554 /* Turn (a OP c1) OP c2 into a OP (c1+c2). */
1555 (for op (lrotate rrotate rshift lshift)
1557 (op (op @0 INTEGER_CST@1) INTEGER_CST@2)
1558 (with { unsigned int prec = element_precision (type); }
1559 (if (wi::ge_p (@1, 0, TYPE_SIGN (TREE_TYPE (@1)))
1560 && wi::lt_p (@1, prec, TYPE_SIGN (TREE_TYPE (@1)))
1561 && wi::ge_p (@2, 0, TYPE_SIGN (TREE_TYPE (@2)))
1562 && wi::lt_p (@2, prec, TYPE_SIGN (TREE_TYPE (@2))))
1563 (with { unsigned int low = wi::add (@1, @2).to_uhwi (); }
1564 /* Deal with a OP (c1 + c2) being undefined but (a OP c1) OP c2
1565 being well defined. */
1567 (if (op == LROTATE_EXPR || op == RROTATE_EXPR)
1568 (op @0 { build_int_cst (TREE_TYPE (@1), low % prec); })
1569 (if (TYPE_UNSIGNED (type) || op == LSHIFT_EXPR)
1570 { build_zero_cst (type); }
1571 (op @0 { build_int_cst (TREE_TYPE (@1), prec - 1); })))
1572 (op @0 { build_int_cst (TREE_TYPE (@1), low); })))))))
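/* Illustrative sketch, not a pattern: two constant shifts in the same
   direction simply add up while the sum stays below the precision, e.g.

     unsigned int f (unsigned int x)
     {
       return (x >> 3) >> 5;
     }

   becomes x >> 8.  If the summed amount reaches the precision, the result is
   0 for unsigned or left shifts and x >> (prec - 1) otherwise, while rotate
   counts are reduced modulo the precision.  */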
1575 /* ((1 << A) & 1) != 0 -> A == 0
1576 ((1 << A) & 1) == 0 -> A != 0 */
1580 (cmp (bit_and (lshift integer_onep @0) integer_onep) integer_zerop)
1581 (icmp @0 { build_zero_cst (TREE_TYPE (@0)); })))
1583 /* (CST1 << A) == CST2 -> A == ctz (CST2) - ctz (CST1)
1584 (CST1 << A) != CST2 -> A != ctz (CST2) - ctz (CST1)
1588 (cmp (lshift INTEGER_CST@0 @1) INTEGER_CST@2)
1589 (with { int cand = wi::ctz (@2) - wi::ctz (@0); }
1591 || (!integer_zerop (@2)
1592 && wi::ne_p (wi::lshift (@0, cand), @2)))
1593 { constant_boolean_node (cmp == NE_EXPR, type); }
1594 (if (!integer_zerop (@2)
1595 && wi::eq_p (wi::lshift (@0, cand), @2))
1596 (cmp @1 { build_int_cst (TREE_TYPE (@1), cand); }))))))
1598 /* Fold (X << C1) & C2 into (X << C1) & (C2 | ((1 << C1) - 1))
1599 (X >> C1) & C2 into (X >> C1) & (C2 | ~((type) -1 >> C1))
1600 if the new mask might be further optimized. */
1601 (for shift (lshift rshift)
1603 (bit_and (convert?:s@4 (shift:s@5 (convert1?@3 @0) INTEGER_CST@1))
1605 (if (tree_nop_conversion_p (TREE_TYPE (@4), TREE_TYPE (@5))
1606 && TYPE_PRECISION (type) <= HOST_BITS_PER_WIDE_INT
1607 && tree_fits_uhwi_p (@1)
1608 && tree_to_uhwi (@1) > 0
1609 && tree_to_uhwi (@1) < TYPE_PRECISION (type))
1612 unsigned int shiftc = tree_to_uhwi (@1);
1613 unsigned HOST_WIDE_INT mask = TREE_INT_CST_LOW (@2);
1614 unsigned HOST_WIDE_INT newmask, zerobits = 0;
1615 tree shift_type = TREE_TYPE (@3);
1618 if (shift == LSHIFT_EXPR)
1619 zerobits = ((HOST_WIDE_INT_1U << shiftc) - 1);
1620 else if (shift == RSHIFT_EXPR
1621 && (TYPE_PRECISION (shift_type)
1622 == GET_MODE_PRECISION (TYPE_MODE (shift_type))))
1624 prec = TYPE_PRECISION (TREE_TYPE (@3));
1626 /* See if more bits can be proven as zero because of
1629 && TYPE_UNSIGNED (TREE_TYPE (@0)))
1631 tree inner_type = TREE_TYPE (@0);
1632 if ((TYPE_PRECISION (inner_type)
1633 == GET_MODE_PRECISION (TYPE_MODE (inner_type)))
1634 && TYPE_PRECISION (inner_type) < prec)
1636 prec = TYPE_PRECISION (inner_type);
1637 /* See if we can shorten the right shift. */
1639 shift_type = inner_type;
1640 /* Otherwise X >> C1 is all zeros, so we'll optimize
1641 it into (X, 0) later on by making sure zerobits
1645 zerobits = HOST_WIDE_INT_M1U;
1648 zerobits >>= HOST_BITS_PER_WIDE_INT - shiftc;
1649 zerobits <<= prec - shiftc;
1651 /* For arithmetic shift if sign bit could be set, zerobits
1652 can actually contain sign bits, so no transformation is
1653 possible, unless MASK masks them all away. In that
1654 case the shift needs to be converted into logical shift. */
1655 if (!TYPE_UNSIGNED (TREE_TYPE (@3))
1656 && prec == TYPE_PRECISION (TREE_TYPE (@3)))
1658 if ((mask & zerobits) == 0)
1659 shift_type = unsigned_type_for (TREE_TYPE (@3));
1665 /* ((X << 16) & 0xff00) is (X, 0). */
1666 (if ((mask & zerobits) == mask)
1667 { build_int_cst (type, 0); }
1668 (with { newmask = mask | zerobits; }
1669 (if (newmask != mask && (newmask & (newmask + 1)) == 0)
1672 /* Only do the transformation if NEWMASK is some integer
1674 for (prec = BITS_PER_UNIT;
1675 prec < HOST_BITS_PER_WIDE_INT; prec <<= 1)
1676 if (newmask == (HOST_WIDE_INT_1U << prec) - 1)
1679 (if (prec < HOST_BITS_PER_WIDE_INT
1680 || newmask == HOST_WIDE_INT_M1U)
1682 { tree newmaskt = build_int_cst_type (TREE_TYPE (@2), newmask); }
1683 (if (!tree_int_cst_equal (newmaskt, @2))
1684 (if (shift_type != TREE_TYPE (@3))
1685 (bit_and (convert (shift:shift_type (convert @3) @1)) { newmaskt; })
1686 (bit_and @4 { newmaskt; })))))))))))))
1688 /* Fold (X {&,^,|} C2) << C1 into (X << C1) {&,^,|} (C2 << C1)
1689 (X {&,^,|} C2) >> C1 into (X >> C1) & (C2 >> C1). */
1690 (for shift (lshift rshift)
1691 (for bit_op (bit_and bit_xor bit_ior)
1693 (shift (convert?:s (bit_op:s @0 INTEGER_CST@2)) INTEGER_CST@1)
1694 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1695 (with { tree mask = int_const_binop (shift, fold_convert (type, @2), @1); }
1696 (bit_op (shift (convert @0) @1) { mask; }))))))
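/* Illustrative sketch, not a pattern: the shift distributes over the masked
   constant, e.g.

     unsigned int f (unsigned int x)
     {
       return (x & 0xf0U) << 4;
     }

   becomes (x << 4) & 0xf00U, which exposes the shifted mask to further
   simplification with neighbouring bit operations.  */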
1698 /* ~(~X >> Y) -> X >> Y (for arithmetic shift). */
1700 (bit_not (convert1?:s (rshift:s (convert2?@0 (bit_not @1)) @2)))
1701 (if (!TYPE_UNSIGNED (TREE_TYPE (@0))
1702 && (element_precision (TREE_TYPE (@0))
1703 <= element_precision (TREE_TYPE (@1))
1704 || !TYPE_UNSIGNED (TREE_TYPE (@1))))
1706 { tree shift_type = TREE_TYPE (@0); }
1707 (convert (rshift (convert:shift_type @1) @2)))))
1709 /* ~(~X >>r Y) -> X >>r Y
1710 ~(~X <<r Y) -> X <<r Y */
1711 (for rotate (lrotate rrotate)
1713 (bit_not (convert1?:s (rotate:s (convert2?@0 (bit_not @1)) @2)))
1714 (if ((element_precision (TREE_TYPE (@0))
1715 <= element_precision (TREE_TYPE (@1))
1716 || !TYPE_UNSIGNED (TREE_TYPE (@1)))
1717 && (element_precision (type) <= element_precision (TREE_TYPE (@0))
1718 || !TYPE_UNSIGNED (TREE_TYPE (@0))))
1720 { tree rotate_type = TREE_TYPE (@0); }
1721 (convert (rotate (convert:rotate_type @1) @2))))))
1723 /* Simplifications of conversions. */
1725 /* Basic strip-useless-type-conversions / strip_nops. */
1726 (for cvt (convert view_convert float fix_trunc)
1729 (if ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@0)))
1730 || (GENERIC && type == TREE_TYPE (@0)))
1733 /* Contract view-conversions. */
1735 (view_convert (view_convert @0))
1738 /* For integral conversions with the same precision or pointer
1739 conversions use a NOP_EXPR instead. */
1742 (if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
1743 && (INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
1744 && TYPE_PRECISION (type) == TYPE_PRECISION (TREE_TYPE (@0)))
1747 /* Strip inner integral conversions that do not change precision or size. */
1749 (view_convert (convert@0 @1))
1750 (if ((INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
1751 && (INTEGRAL_TYPE_P (TREE_TYPE (@1)) || POINTER_TYPE_P (TREE_TYPE (@1)))
1752 && (TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
1753 && (TYPE_SIZE (TREE_TYPE (@0)) == TYPE_SIZE (TREE_TYPE (@1))))
1756 /* Re-association barriers around constants and other re-association
1757 barriers can be removed. */
1759 (paren CONSTANT_CLASS_P@0)
1762 (paren (paren@1 @0))
1765 /* Handle cases of two conversions in a row. */
1766 (for ocvt (convert float fix_trunc)
1767 (for icvt (convert float)
1772 tree inside_type = TREE_TYPE (@0);
1773 tree inter_type = TREE_TYPE (@1);
1774 int inside_int = INTEGRAL_TYPE_P (inside_type);
1775 int inside_ptr = POINTER_TYPE_P (inside_type);
1776 int inside_float = FLOAT_TYPE_P (inside_type);
1777 int inside_vec = VECTOR_TYPE_P (inside_type);
1778 unsigned int inside_prec = TYPE_PRECISION (inside_type);
1779 int inside_unsignedp = TYPE_UNSIGNED (inside_type);
1780 int inter_int = INTEGRAL_TYPE_P (inter_type);
1781 int inter_ptr = POINTER_TYPE_P (inter_type);
1782 int inter_float = FLOAT_TYPE_P (inter_type);
1783 int inter_vec = VECTOR_TYPE_P (inter_type);
1784 unsigned int inter_prec = TYPE_PRECISION (inter_type);
1785 int inter_unsignedp = TYPE_UNSIGNED (inter_type);
1786 int final_int = INTEGRAL_TYPE_P (type);
1787 int final_ptr = POINTER_TYPE_P (type);
1788 int final_float = FLOAT_TYPE_P (type);
1789 int final_vec = VECTOR_TYPE_P (type);
1790 unsigned int final_prec = TYPE_PRECISION (type);
1791 int final_unsignedp = TYPE_UNSIGNED (type);
1794 /* In addition to the cases of two conversions in a row
1795 handled below, if we are converting something to its own
1796 type via an object of identical or wider precision, neither
1797 conversion is needed. */
1798 (if (((GIMPLE && useless_type_conversion_p (type, inside_type))
1800 && TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (inside_type)))
1801 && (((inter_int || inter_ptr) && final_int)
1802 || (inter_float && final_float))
1803 && inter_prec >= final_prec)
1806 /* Likewise, if the intermediate and initial types are either both
1807 float or both integer, we don't need the middle conversion if the
1808 former is wider than the latter and doesn't change the signedness
1809 (for integers). Avoid this if the final type is a pointer since
1810 then we sometimes need the middle conversion. */
1811 (if (((inter_int && inside_int) || (inter_float && inside_float))
1812 && (final_int || final_float)
1813 && inter_prec >= inside_prec
1814 && (inter_float || inter_unsignedp == inside_unsignedp))
1817 /* If we have a sign-extension of a zero-extended value, we can
1818 replace that by a single zero-extension. Likewise if the
1819 final conversion does not change precision we can drop the
1820 intermediate conversion. */
1821 (if (inside_int && inter_int && final_int
1822 && ((inside_prec < inter_prec && inter_prec < final_prec
1823 && inside_unsignedp && !inter_unsignedp)
1824 || final_prec == inter_prec))
1827 /* Two conversions in a row are not needed unless:
1828 - some conversion is floating-point (overstrict for now), or
1829 - some conversion is a vector (overstrict for now), or
1830 - the intermediate type is narrower than both initial and
1832 - the intermediate type and innermost type differ in signedness,
1833 and the outermost type is wider than the intermediate, or
1834 - the initial type is a pointer type and the precisions of the
1835 intermediate and final types differ, or
1836 - the final type is a pointer type and the precisions of the
1837 initial and intermediate types differ. */
1838 (if (! inside_float && ! inter_float && ! final_float
1839 && ! inside_vec && ! inter_vec && ! final_vec
1840 && (inter_prec >= inside_prec || inter_prec >= final_prec)
1841 && ! (inside_int && inter_int
1842 && inter_unsignedp != inside_unsignedp
1843 && inter_prec < final_prec)
1844 && ((inter_unsignedp && inter_prec > inside_prec)
1845 == (final_unsignedp && final_prec > inter_prec))
1846 && ! (inside_ptr && inter_prec != final_prec)
1847 && ! (final_ptr && inside_prec != inter_prec))
1850 /* A truncation to an unsigned type (a zero-extension) should be
1851 canonicalized as bitwise and of a mask. */
1852 (if (GIMPLE /* PR70366: doing this in GENERIC breaks -Wconversion. */
1853 && final_int && inter_int && inside_int
1854 && final_prec == inside_prec
1855 && final_prec > inter_prec
1857 (convert (bit_and @0 { wide_int_to_tree
1859 wi::mask (inter_prec, false,
1860 TYPE_PRECISION (inside_type))); })))
1862 /* If we are converting an integer to a floating-point that can
1863 represent it exactly and back to an integer, we can skip the
1864 floating-point conversion. */
1865 (if (GIMPLE /* PR66211 */
1866 && inside_int && inter_float && final_int &&
1867 (unsigned) significand_size (TYPE_MODE (inter_type))
1868 >= inside_prec - !inside_unsignedp)
1871 /* If we have a narrowing conversion to an integral type that is fed by a
1872 BIT_AND_EXPR, we might be able to remove the BIT_AND_EXPR if it merely
1873 masks off bits outside the final type (and nothing else). */
1875 (convert (bit_and @0 INTEGER_CST@1))
1876 (if (INTEGRAL_TYPE_P (type)
1877 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
1878 && TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@0))
1879 && operand_equal_p (@1, build_low_bits_mask (TREE_TYPE (@1),
1880 TYPE_PRECISION (type)), 0))
1884 /* (X /[ex] A) * A -> X. */
1886 (mult (convert1? (exact_div @0 @@1)) (convert2? @1))
1889 /* Canonicalization of binary operations. */
1891 /* Convert X + -C into X - C. */
1893 (plus @0 REAL_CST@1)
1894 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
1895 (with { tree tem = const_unop (NEGATE_EXPR, type, @1); }
1896 (if (!TREE_OVERFLOW (tem) || !flag_trapping_math)
1897 (minus @0 { tem; })))))
1899 /* Convert x+x into x*2. */
1902 (if (SCALAR_FLOAT_TYPE_P (type))
1903 (mult @0 { build_real (type, dconst2); })
1904 (if (INTEGRAL_TYPE_P (type))
1905 (mult @0 { build_int_cst (type, 2); }))))
1908 (minus integer_zerop @1)
1911 /* (ARG0 - ARG1) is the same as (-ARG1 + ARG0). So check whether
1912 ARG0 is zero and X + ARG0 reduces to X, since that would mean
1913 (-ARG1 + ARG0) reduces to -ARG1. */
1915 (minus real_zerop@0 @1)
1916 (if (fold_real_zero_addition_p (type, @0, 0))
1919 /* Transform x * -1 into -x. */
1921 (mult @0 integer_minus_onep)
1924 /* True if we can easily extract the real and imaginary parts of a complex number. */
1926 (match compositional_complex
1927 (convert? (complex @0 @1)))
1929 /* COMPLEX_EXPR and REALPART/IMAGPART_EXPR cancellations. */
1931 (complex (realpart @0) (imagpart @0))
1934 (realpart (complex @0 @1))
1937 (imagpart (complex @0 @1))
1940 /* Sometimes we only care about half of a complex expression. */
1942 (realpart (convert?:s (conj:s @0)))
1943 (convert (realpart @0)))
1945 (imagpart (convert?:s (conj:s @0)))
1946 (convert (negate (imagpart @0))))
1947 (for part (realpart imagpart)
1948 (for op (plus minus)
1950 (part (convert?:s@2 (op:s @0 @1)))
1951 (convert (op (part @0) (part @1))))))
1953 (realpart (convert?:s (CEXPI:s @0)))
1956 (imagpart (convert?:s (CEXPI:s @0)))
1959 /* conj(conj(x)) -> x */
1961 (conj (convert? (conj @0)))
1962 (if (tree_nop_conversion_p (TREE_TYPE (@0), type))
1965 /* conj({x,y}) -> {x,-y} */
1967 (conj (convert?:s (complex:s @0 @1)))
1968 (with { tree itype = TREE_TYPE (type); }
1969 (complex (convert:itype @0) (negate (convert:itype @1)))))
1971 /* BSWAP simplifications, transforms checked by gcc.dg/builtin-bswap-8.c. */
1972 (for bswap (BUILT_IN_BSWAP16 BUILT_IN_BSWAP32 BUILT_IN_BSWAP64)
1977 (bswap (bit_not (bswap @0)))
1979 (for bitop (bit_xor bit_ior bit_and)
1981 (bswap (bitop:c (bswap @0) @1))
1982 (bitop @0 (bswap @1)))))
1985 /* Combine COND_EXPRs and VEC_COND_EXPRs. */
1987 /* Simplify constant conditions.
1988 Only optimize constant conditions when the selected branch
1989 has the same type as the COND_EXPR. This avoids optimizing
1990 away "c ? x : throw", where the throw has a void type.
1991 Note that we cannot throw away the fold-const.c variant nor
1992 this one as we depend on doing this transform before possibly
1993 A ? B : B -> B triggers and the fold-const.c one can optimize
1994 0 ? A : B to B even if A has side-effects. Something
1995 genmatch cannot handle. */
1997 (cond INTEGER_CST@0 @1 @2)
1998 (if (integer_zerop (@0))
1999 (if (!VOID_TYPE_P (TREE_TYPE (@2)) || VOID_TYPE_P (type))
2001 (if (!VOID_TYPE_P (TREE_TYPE (@1)) || VOID_TYPE_P (type))
2004 (vec_cond VECTOR_CST@0 @1 @2)
2005 (if (integer_all_onesp (@0))
2007 (if (integer_zerop (@0))
2010 /* Simplification moved from fold_cond_expr_with_comparison. It may also
2012 /* This pattern implements two kinds of simplification:
2015 (cond (cmp (convert1? x) c1) (convert2? x) c2) -> (minmax (x c)) if:
2016 1) Conversions are type widening from smaller type.
2017 2) Const c1 equals c2 after canonicalizing the comparison.
2018 3) Comparison has tree code LT, LE, GT or GE.
2019 This specific pattern is needed when (cmp (convert x) c) may not
2020 be simplified by comparison patterns because of multiple uses of
2021 x. It also makes sense here because simplifying across multiple
2022 referred variables is always beneficial for complicated cases.
2025 (cond (eq (convert1? x) c1) (convert2? x) c2) -> (cond (eq x c1) c1 c2). */
2026 (for cmp (lt le gt ge eq)
2028 (cond (cmp (convert1? @1) INTEGER_CST@3) (convert2? @1) INTEGER_CST@2)
2031 tree from_type = TREE_TYPE (@1);
2032 tree c1_type = TREE_TYPE (@3), c2_type = TREE_TYPE (@2);
2033 enum tree_code code = ERROR_MARK;
2035 if (INTEGRAL_TYPE_P (from_type)
2036 && int_fits_type_p (@2, from_type)
2037 && (types_match (c1_type, from_type)
2038 || (TYPE_PRECISION (c1_type) > TYPE_PRECISION (from_type)
2039 && (TYPE_UNSIGNED (from_type)
2040 || TYPE_SIGN (c1_type) == TYPE_SIGN (from_type))))
2041 && (types_match (c2_type, from_type)
2042 || (TYPE_PRECISION (c2_type) > TYPE_PRECISION (from_type)
2043 && (TYPE_UNSIGNED (from_type)
2044 || TYPE_SIGN (c2_type) == TYPE_SIGN (from_type)))))
2048 if (wi::to_widest (@3) == (wi::to_widest (@2) - 1))
2050 /* X <= Y - 1 is equivalent to X < Y. */
2053 /* X > Y - 1 is equivalent to X >= Y. */
2057 if (wi::to_widest (@3) == (wi::to_widest (@2) + 1))
2059 /* X < Y + 1 is equivalent to X <= Y. */
2062 /* X >= Y + 1 is equivalent to X > Y. */
2066 if (code != ERROR_MARK
2067 || wi::to_widest (@2) == wi::to_widest (@3))
2069 if (cmp == LT_EXPR || cmp == LE_EXPR)
2071 if (cmp == GT_EXPR || cmp == GE_EXPR)
2075 /* Can do A == C1 ? A : C2 -> A == C1 ? C1 : C2? */
2076 else if (int_fits_type_p (@3, from_type))
2080 (if (code == MAX_EXPR)
2081 (convert (max @1 (convert @2)))
2082 (if (code == MIN_EXPR)
2083 (convert (min @1 (convert @2)))
2084 (if (code == EQ_EXPR)
2085 (convert (cond (eq @1 (convert @3))
2086 (convert:from_type @3) (convert:from_type @2)))))))))
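/* Illustrative C-level sketch of the cond-to-minmax transform above;
   hypothetical example, not a pattern in this file:

     int
     clamp_high (unsigned char c)
     {
       int i = c;
       return i < 100 ? i : 100;
     }

   Here the conversion is a widening one and c1 == c2 == 100 after
   canonicalization, so despite the multiple uses of i the whole expression
   is expected to become "(int) MIN_EXPR <c, 100>".  */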
2088 /* (cond (cmp (convert? x) c1) (op x c2) c3) -> (op (minmax x c1) c2) if:
2090 1) OP is PLUS or MINUS.
2091 2) CMP is LT, LE, GT or GE.
2092 3) C3 == (C1 op C2), and computation doesn't have undefined behavior.
2094 This pattern also handles special cases like:
2096 A) Operand x is a unsigned to signed type conversion and c1 is
2097 integer zero. In this case,
2098 (signed type)x < 0 <=> x > MAX_VAL(signed type)
2099 (signed type)x >= 0 <=> x <= MAX_VAL(signed type)
2100 B) Const c1 may not be equal to (C3 op' C2). In this case we also
2101 check equality for (c1+1) and (c1-1) by adjusting comparison
2104 TODO: Though signed types are handled by this pattern, they cannot be
2105 simplified at the moment because the C standard requires additional
2106 type promotion. In order to match & simplify them here, the IR needs
2107 to be cleaned up by other optimizers, i.e., VRP. */
2108 (for op (plus minus)
2109 (for cmp (lt le gt ge)
2111 (cond (cmp (convert? @X) INTEGER_CST@1) (op @X INTEGER_CST@2) INTEGER_CST@3)
2112 (with { tree from_type = TREE_TYPE (@X), to_type = TREE_TYPE (@1); }
2113 (if (types_match (from_type, to_type)
2114 /* Check if it is special case A). */
2115 || (TYPE_UNSIGNED (from_type)
2116 && !TYPE_UNSIGNED (to_type)
2117 && TYPE_PRECISION (from_type) == TYPE_PRECISION (to_type)
2118 && integer_zerop (@1)
2119 && (cmp == LT_EXPR || cmp == GE_EXPR)))
2122 bool overflow = false;
2123 enum tree_code code, cmp_code = cmp;
2124 wide_int real_c1, c1 = @1, c2 = @2, c3 = @3;
2125 signop sgn = TYPE_SIGN (from_type);
2127 /* Handle special case A), given x of unsigned type:
2128 ((signed type)x < 0) <=> (x > MAX_VAL(signed type))
2129 ((signed type)x >= 0) <=> (x <= MAX_VAL(signed type)) */
2130 if (!types_match (from_type, to_type))
2132 if (cmp_code == LT_EXPR)
2134 if (cmp_code == GE_EXPR)
2136 c1 = wi::max_value (to_type);
2138 /* To simplify this pattern, we require c3 = (c1 op c2). Here we
2139 compute (c3 op' c2) and check if it equals c1, with op' being
2140 the inverted operator of op. Make sure overflow doesn't happen
2141 if it is undefined. */
2142 if (op == PLUS_EXPR)
2143 real_c1 = wi::sub (c3, c2, sgn, &overflow);
2145 real_c1 = wi::add (c3, c2, sgn, &overflow);
2148 if (!overflow || !TYPE_OVERFLOW_UNDEFINED (from_type))
2150 /* Check if c1 equals real_c1. The boundary condition is handled
2151 by adjusting the comparison operation if necessary. */
2152 if (!wi::cmp (wi::sub (real_c1, 1, sgn, &overflow), c1, sgn)
2155 /* X <= Y - 1 is equivalent to X < Y. */
2156 if (cmp_code == LE_EXPR)
2158 /* X > Y - 1 is equivalent to X >= Y. */
2159 if (cmp_code == GT_EXPR)
2162 if (!wi::cmp (wi::add (real_c1, 1, sgn, &overflow), c1, sgn)
2165 /* X < Y + 1 is equivalent to X <= Y. */
2166 if (cmp_code == LT_EXPR)
2168 /* X >= Y + 1 is equivalent to X > Y. */
2169 if (cmp_code == GE_EXPR)
2172 if (code != cmp_code || !wi::cmp (real_c1, c1, sgn))
2174 if (cmp_code == LT_EXPR || cmp_code == LE_EXPR)
2176 if (cmp_code == GT_EXPR || cmp_code == GE_EXPR)
2181 (if (code == MAX_EXPR)
2182 (op (max @X { wide_int_to_tree (from_type, real_c1); })
2183 { wide_int_to_tree (from_type, c2); })
2184 (if (code == MIN_EXPR)
2185 (op (min @X { wide_int_to_tree (from_type, real_c1); })
2186 { wide_int_to_tree (from_type, c2); })))))))))
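/* Illustrative C-level sketch of the transform above; hypothetical example,
   not a pattern in this file:

     unsigned int
     add_clamped (unsigned int x)
     {
       return x < 100 ? x + 5 : 105;
     }

   Since 105 == 100 + 5 and unsigned arithmetic cannot trigger undefined
   overflow, the expected result is "MIN_EXPR <x, 100> + 5".  */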
2188 (for cnd (cond vec_cond)
2189 /* A ? B : (A ? X : C) -> A ? B : C. */
2191 (cnd @0 (cnd @0 @1 @2) @3)
2194 (cnd @0 @1 (cnd @0 @2 @3))
2196 /* A ? B : (!A ? C : X) -> A ? B : C. */
2197 /* ??? This matches embedded conditions open-coded because genmatch
2198 would generate matching code for conditions in separate stmts only.
2199 The following is still important to merge then and else arm cases
2200 from if-conversion. */
2202 (cnd @0 @1 (cnd @2 @3 @4))
2203 (if (COMPARISON_CLASS_P (@0)
2204 && COMPARISON_CLASS_P (@2)
2205 && invert_tree_comparison
2206 (TREE_CODE (@0), HONOR_NANS (TREE_OPERAND (@0, 0))) == TREE_CODE (@2)
2207 && operand_equal_p (TREE_OPERAND (@0, 0), TREE_OPERAND (@2, 0), 0)
2208 && operand_equal_p (TREE_OPERAND (@0, 1), TREE_OPERAND (@2, 1), 0))
2211 (cnd @0 (cnd @1 @2 @3) @4)
2212 (if (COMPARISON_CLASS_P (@0)
2213 && COMPARISON_CLASS_P (@1)
2214 && invert_tree_comparison
2215 (TREE_CODE (@0), HONOR_NANS (TREE_OPERAND (@0, 0))) == TREE_CODE (@1)
2216 && operand_equal_p (TREE_OPERAND (@0, 0), TREE_OPERAND (@1, 0), 0)
2217 && operand_equal_p (TREE_OPERAND (@0, 1), TREE_OPERAND (@1, 1), 0))
2220 /* A ? B : B -> B. */
2225 /* !A ? B : C -> A ? C : B. */
2227 (cnd (logical_inverted_value truth_valued_p@0) @1 @2)
2230 /* A + (B vcmp C ? 1 : 0) -> A - (B vcmp C ? -1 : 0), since vector comparisons
2231 return all -1 or all 0 results. */
2232 /* ??? We could instead convert all instances of the vec_cond to negate,
2233 but that isn't necessarily a win on its own. */
2235 (plus:c @3 (view_convert? (vec_cond:s @0 integer_each_onep@1 integer_zerop@2)))
2236 (if (VECTOR_TYPE_P (type)
2237 && TYPE_VECTOR_SUBPARTS (type) == TYPE_VECTOR_SUBPARTS (TREE_TYPE (@1))
2238 && (TYPE_MODE (TREE_TYPE (type))
2239 == TYPE_MODE (TREE_TYPE (TREE_TYPE (@1)))))
2240 (minus @3 (view_convert (vec_cond @0 (negate @1) @2)))))
2242 /* ... likewise A - (B vcmp C ? 1 : 0) -> A + (B vcmp C ? -1 : 0). */
2244 (minus @3 (view_convert? (vec_cond:s @0 integer_each_onep@1 integer_zerop@2)))
2245 (if (VECTOR_TYPE_P (type)
2246 && TYPE_VECTOR_SUBPARTS (type) == TYPE_VECTOR_SUBPARTS (TREE_TYPE (@1))
2247 && (TYPE_MODE (TREE_TYPE (type))
2248 == TYPE_MODE (TREE_TYPE (TREE_TYPE (@1)))))
2249 (plus @3 (view_convert (vec_cond @0 (negate @1) @2)))))
2252 /* Simplifications of comparisons. */
2254 /* See if we can reduce the magnitude of a constant involved in a
2255 comparison by changing the comparison code. This is a canonicalization
2256 formerly done by maybe_canonicalize_comparison_1. */
2260 (cmp @0 INTEGER_CST@1)
2261 (if (tree_int_cst_sgn (@1) == -1)
2262 (acmp @0 { wide_int_to_tree (TREE_TYPE (@1), wi::add (@1, 1)); }))))
2266 (cmp @0 INTEGER_CST@1)
2267 (if (tree_int_cst_sgn (@1) == 1)
2268 (acmp @0 { wide_int_to_tree (TREE_TYPE (@1), wi::sub (@1, 1)); }))))
2271 /* We can simplify a logical negation of a comparison to the
2272 inverted comparison. As we cannot compute an expression
2273 operator using invert_tree_comparison we have to simulate
2274 that with expression code iteration. */
2275 (for cmp (tcc_comparison)
2276 icmp (inverted_tcc_comparison)
2277 ncmp (inverted_tcc_comparison_with_nans)
2278 /* Ideally we'd like to combine the following two patterns
2279 and handle some more cases by using
2280 (logical_inverted_value (cmp @0 @1))
2281 here but for that genmatch would need to "inline" that.
2282 For now implement what forward_propagate_comparison did. */
2284 (bit_not (cmp @0 @1))
2285 (if (VECTOR_TYPE_P (type)
2286 || (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) == 1))
2287 /* Comparison inversion may be impossible for trapping math;
2288 invert_tree_comparison will tell us. But we can't use
2289 a computed operator in the replacement tree, thus we have
2290 to play the trick below. */
2291 (with { enum tree_code ic = invert_tree_comparison
2292 (cmp, HONOR_NANS (@0)); }
2298 (bit_xor (cmp @0 @1) integer_truep)
2299 (with { enum tree_code ic = invert_tree_comparison
2300 (cmp, HONOR_NANS (@0)); }
2306 /* Transform comparisons of the form X - Y CMP 0 to X CMP Y.
2307 ??? The transformation is valid for the other operators if overflow
2308 is undefined for the type, but performing it here badly interacts
2309 with the transformation in fold_cond_expr_with_comparison which
2310 attempts to synthesize ABS_EXPR. */
2313 (cmp (minus@2 @0 @1) integer_zerop)
2314 (if (single_use (@2))
2317 /* Transform comparisons of the form X * C1 CMP 0 to X CMP 0 in the
2318 signed arithmetic case. That form is created by the compiler
2319 often enough for folding it to be of value. One example is in
2320 computing loop trip counts after Operator Strength Reduction. */
2321 (for cmp (simple_comparison)
2322 scmp (swapped_simple_comparison)
2324 (cmp (mult@3 @0 INTEGER_CST@1) integer_zerop@2)
2325 /* Handle unfolded multiplication by zero. */
2326 (if (integer_zerop (@1))
2328 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2329 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
2331 /* If @1 is negative we swap the sense of the comparison. */
2332 (if (tree_int_cst_sgn (@1) < 0)
2336 /* Simplify comparison of something with itself. For IEEE
2337 floating-point, we can only do some of these simplifications. */
2341 (if (! FLOAT_TYPE_P (TREE_TYPE (@0))
2342 || ! HONOR_NANS (@0))
2343 { constant_boolean_node (true, type); }
2344 (if (cmp != EQ_EXPR)
2350 || ! FLOAT_TYPE_P (TREE_TYPE (@0))
2351 || ! HONOR_NANS (@0))
2352 { constant_boolean_node (false, type); })))
2353 (for cmp (unle unge uneq)
2356 { constant_boolean_node (true, type); }))
2357 (for cmp (unlt ungt)
2363 (if (!flag_trapping_math)
2364 { constant_boolean_node (false, type); }))
2366 /* Fold ~X op ~Y as Y op X. */
2367 (for cmp (simple_comparison)
2369 (cmp (bit_not@2 @0) (bit_not@3 @1))
2370 (if (single_use (@2) && single_use (@3))
2373 /* Fold ~X op C as X op' ~C, where op' is the swapped comparison. */
2374 (for cmp (simple_comparison)
2375 scmp (swapped_simple_comparison)
2377 (cmp (bit_not@2 @0) CONSTANT_CLASS_P@1)
2378 (if (single_use (@2)
2379 && (TREE_CODE (@1) == INTEGER_CST || TREE_CODE (@1) == VECTOR_CST))
2380 (scmp @0 (bit_not @1)))))
2382 (for cmp (simple_comparison)
2383 /* Fold (double)float1 CMP (double)float2 into float1 CMP float2. */
2385 (cmp (convert@2 @0) (convert? @1))
2386 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
2387 && (DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@2))
2388 == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@0)))
2389 && (DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@2))
2390 == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1))))
2393 tree type1 = TREE_TYPE (@1);
2394 if (TREE_CODE (@1) == REAL_CST && !DECIMAL_FLOAT_TYPE_P (type1))
2396 REAL_VALUE_TYPE orig = TREE_REAL_CST (@1);
2397 if (TYPE_PRECISION (type1) > TYPE_PRECISION (float_type_node)
2398 && exact_real_truncate (TYPE_MODE (float_type_node), &orig))
2399 type1 = float_type_node;
2400 if (TYPE_PRECISION (type1) > TYPE_PRECISION (double_type_node)
2401 && exact_real_truncate (TYPE_MODE (double_type_node), &orig))
2402 type1 = double_type_node;
2405 = (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (type1)
2406 ? TREE_TYPE (@0) : type1);
2408 (if (TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (newtype))
2409 (cmp (convert:newtype @0) (convert:newtype @1))))))
2413 /* IEEE doesn't distinguish +0 and -0 in comparisons. */
2415 /* a CMP (-0) -> a CMP 0 */
2416 (if (REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (@1)))
2417 (cmp @0 { build_real (TREE_TYPE (@1), dconst0); }))
2418 /* x != NaN is always true, other ops are always false. */
2419 (if (REAL_VALUE_ISNAN (TREE_REAL_CST (@1))
2420 && ! HONOR_SNANS (@1))
2421 { constant_boolean_node (cmp == NE_EXPR, type); })
2422 /* Fold comparisons against infinity. */
2423 (if (REAL_VALUE_ISINF (TREE_REAL_CST (@1))
2424 && MODE_HAS_INFINITIES (TYPE_MODE (TREE_TYPE (@1))))
2427 REAL_VALUE_TYPE max;
2428 enum tree_code code = cmp;
2429 bool neg = REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1));
2431 code = swap_tree_comparison (code);
2434 /* x > +Inf is always false, if we ignore sNaNs. */
2435 (if (code == GT_EXPR
2436 && ! HONOR_SNANS (@0))
2437 { constant_boolean_node (false, type); })
2438 (if (code == LE_EXPR)
2439 /* x <= +Inf is always true, if we don't care about NaNs. */
2440 (if (! HONOR_NANS (@0))
2441 { constant_boolean_node (true, type); }
2442 /* x <= +Inf is the same as x == x, i.e. !isnan(x). */
2444 /* x == +Inf and x >= +Inf are always equal to x > DBL_MAX. */
2445 (if (code == EQ_EXPR || code == GE_EXPR)
2446 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
2448 (lt @0 { build_real (TREE_TYPE (@0), max); })
2449 (gt @0 { build_real (TREE_TYPE (@0), max); }))))
2450 /* x < +Inf is always equal to x <= DBL_MAX. */
2451 (if (code == LT_EXPR)
2452 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
2454 (ge @0 { build_real (TREE_TYPE (@0), max); })
2455 (le @0 { build_real (TREE_TYPE (@0), max); }))))
2456 /* x != +Inf is always equal to !(x > DBL_MAX). */
2457 (if (code == NE_EXPR)
2458 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
2459 (if (! HONOR_NANS (@0))
2461 (ge @0 { build_real (TREE_TYPE (@0), max); })
2462 (le @0 { build_real (TREE_TYPE (@0), max); }))
2464 (bit_xor (lt @0 { build_real (TREE_TYPE (@0), max); })
2465 { build_one_cst (type); })
2466 (bit_xor (gt @0 { build_real (TREE_TYPE (@0), max); })
2467 { build_one_cst (type); }))))))))))
2469 /* If this is a comparison of a real constant with a PLUS_EXPR
2470 or a MINUS_EXPR of a real constant, we can convert it into a
2471 comparison with a revised real constant as long as no overflow
2472 occurs when unsafe_math_optimizations are enabled. */
2473 (if (flag_unsafe_math_optimizations)
2474 (for op (plus minus)
2476 (cmp (op @0 REAL_CST@1) REAL_CST@2)
2479 tree tem = const_binop (op == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR,
2480 TREE_TYPE (@1), @2, @1);
2482 (if (tem && !TREE_OVERFLOW (tem))
2483 (cmp @0 { tem; }))))))
2485 /* Likewise, we can simplify a comparison of a real constant with
2486 a MINUS_EXPR whose first operand is also a real constant, i.e.
2487 (c1 - x) < c2 becomes x > c1-c2. Reordering is allowed on
2488 floating-point types only if -fassociative-math is set. */
2489 (if (flag_associative_math)
2491 (cmp (minus REAL_CST@0 @1) REAL_CST@2)
2492 (with { tree tem = const_binop (MINUS_EXPR, TREE_TYPE (@1), @0, @2); }
2493 (if (tem && !TREE_OVERFLOW (tem))
2494 (cmp { tem; } @1)))))
2496 /* Fold comparisons against built-in math functions. */
2497 (if (flag_unsafe_math_optimizations
2498 && ! flag_errno_math)
2501 (cmp (sq @0) REAL_CST@1)
2503 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
2505 /* sqrt(x) < y is always false, if y is negative. */
2506 (if (cmp == EQ_EXPR || cmp == LT_EXPR || cmp == LE_EXPR)
2507 { constant_boolean_node (false, type); })
2508 /* sqrt(x) > y is always true, if y is negative and we
2509 don't care about NaNs, i.e. negative values of x. */
2510 (if (cmp == NE_EXPR || !HONOR_NANS (@0))
2511 { constant_boolean_node (true, type); })
2512 /* sqrt(x) > y is the same as x >= 0, if y is negative. */
2513 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })))
2514 (if (real_equal (TREE_REAL_CST_PTR (@1), &dconst0))
2516 /* sqrt(x) < 0 is always false. */
2517 (if (cmp == LT_EXPR)
2518 { constant_boolean_node (false, type); })
2519 /* sqrt(x) >= 0 is always true if we don't care about NaNs. */
2520 (if (cmp == GE_EXPR && !HONOR_NANS (@0))
2521 { constant_boolean_node (true, type); })
2522 /* sqrt(x) <= 0 -> x == 0. */
2523 (if (cmp == LE_EXPR)
2525 /* Otherwise sqrt(x) cmp 0 -> x cmp 0. Here cmp can be >=, >,
2526 == or !=. In the last case:
2528 (sqrt(x) != 0) == (NaN != 0) == true == (x != 0)
2530 if x is negative or NaN. Due to -funsafe-math-optimizations,
2531 the results for other x follow from natural arithmetic. */
2533 (if (cmp == GT_EXPR || cmp == GE_EXPR)
2537 real_arithmetic (&c2, MULT_EXPR,
2538 &TREE_REAL_CST (@1), &TREE_REAL_CST (@1));
2539 real_convert (&c2, TYPE_MODE (TREE_TYPE (@0)), &c2);
2541 (if (REAL_VALUE_ISINF (c2))
2542 /* sqrt(x) > y is x == +Inf, when y is very large. */
2543 (if (HONOR_INFINITIES (@0))
2544 (eq @0 { build_real (TREE_TYPE (@0), c2); })
2545 { constant_boolean_node (false, type); })
2546 /* sqrt(x) > c is the same as x > c*c. */
2547 (cmp @0 { build_real (TREE_TYPE (@0), c2); }))))
2548 (if (cmp == LT_EXPR || cmp == LE_EXPR)
2552 real_arithmetic (&c2, MULT_EXPR,
2553 &TREE_REAL_CST (@1), &TREE_REAL_CST (@1));
2554 real_convert (&c2, TYPE_MODE (TREE_TYPE (@0)), &c2);
2556 (if (REAL_VALUE_ISINF (c2))
2558 /* sqrt(x) < y is always true, when y is a very large
2559 value and we don't care about NaNs or Infinities. */
2560 (if (! HONOR_NANS (@0) && ! HONOR_INFINITIES (@0))
2561 { constant_boolean_node (true, type); })
2562 /* sqrt(x) < y is x != +Inf when y is very large and we
2563 don't care about NaNs. */
2564 (if (! HONOR_NANS (@0))
2565 (ne @0 { build_real (TREE_TYPE (@0), c2); }))
2566 /* sqrt(x) < y is x >= 0 when y is very large and we
2567 don't care about Infinities. */
2568 (if (! HONOR_INFINITIES (@0))
2569 (ge @0 { build_real (TREE_TYPE (@0), dconst0); }))
2570 /* sqrt(x) < y is x >= 0 && x != +Inf, when y is large. */
2573 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
2574 (ne @0 { build_real (TREE_TYPE (@0), c2); }))))
2575 /* sqrt(x) < c is the same as x < c*c, if we ignore NaNs. */
2576 (if (! HONOR_NANS (@0))
2577 (cmp @0 { build_real (TREE_TYPE (@0), c2); })
2578 /* sqrt(x) < c is the same as x >= 0 && x < c*c. */
2581 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
2582 (cmp @0 { build_real (TREE_TYPE (@0), c2); }))))))))))))
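/* Illustrative C-level sketch for the sqrt comparisons above; hypothetical
   example, not a pattern in this file, assuming -funsafe-math-optimizations
   and -fno-math-errno:

     #include <math.h>

     int
     sqrt_above_two (double x)
     {
       return sqrt (x) > 2.0;
     }

   Under those flags the constant is expected to be squared instead, so the
   comparison becomes "x > 4.0".  */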
2584 /* Fold A /[ex] B CMP C to A CMP B * C. */
2587 (cmp (exact_div @0 @1) INTEGER_CST@2)
2588 (if (!integer_zerop (@1))
2589 (if (wi::eq_p (@2, 0))
2591 (if (TREE_CODE (@1) == INTEGER_CST)
2595 wide_int prod = wi::mul (@2, @1, TYPE_SIGN (TREE_TYPE (@1)), &ovf);
2598 { constant_boolean_node (cmp == NE_EXPR, type); }
2599 (cmp @0 { wide_int_to_tree (TREE_TYPE (@0), prod); }))))))))
2600 (for cmp (lt le gt ge)
2602 (cmp (exact_div @0 INTEGER_CST@1) INTEGER_CST@2)
2603 (if (wi::gt_p (@1, 0, TYPE_SIGN (TREE_TYPE (@1))))
2607 wide_int prod = wi::mul (@2, @1, TYPE_SIGN (TREE_TYPE (@1)), &ovf);
2610 { constant_boolean_node (wi::lt_p (@2, 0, TYPE_SIGN (TREE_TYPE (@2)))
2611 != (cmp == LT_EXPR || cmp == LE_EXPR), type); }
2612 (cmp @0 { wide_int_to_tree (TREE_TYPE (@0), prod); }))))))
2614 /* Unordered tests if either argument is a NaN. */
2616 (bit_ior (unordered @0 @0) (unordered @1 @1))
2617 (if (types_match (@0, @1))
2620 (bit_and (ordered @0 @0) (ordered @1 @1))
2621 (if (types_match (@0, @1))
2624 (bit_ior:c (unordered @0 @0) (unordered:c@2 @0 @1))
2627 (bit_and:c (ordered @0 @0) (ordered:c@2 @0 @1))
2630 /* Simple range test simplifications. */
2631 /* A < B || A >= B -> true. */
2632 (for test1 (lt le le le ne ge)
2633 test2 (ge gt ge ne eq ne)
2635 (bit_ior:c (test1 @0 @1) (test2 @0 @1))
2636 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
2637 || VECTOR_INTEGER_TYPE_P (TREE_TYPE (@0)))
2638 { constant_boolean_node (true, type); })))
2639 /* A < B && A >= B -> false. */
2640 (for test1 (lt lt lt le ne eq)
2641 test2 (ge gt eq gt eq gt)
2643 (bit_and:c (test1 @0 @1) (test2 @0 @1))
2644 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
2645 || VECTOR_INTEGER_TYPE_P (TREE_TYPE (@0)))
2646 { constant_boolean_node (false, type); })))
2648 /* -A CMP -B -> B CMP A. */
2649 (for cmp (tcc_comparison)
2650 scmp (swapped_tcc_comparison)
2652 (cmp (negate @0) (negate @1))
2653 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
2654 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2655 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
2658 (cmp (negate @0) CONSTANT_CLASS_P@1)
2659 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
2660 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2661 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
2662 (with { tree tem = const_unop (NEGATE_EXPR, TREE_TYPE (@0), @1); }
2663 (if (tem && !TREE_OVERFLOW (tem))
2664 (scmp @0 { tem; }))))))
2666 /* Convert ABS_EXPR<x> == 0 or ABS_EXPR<x> != 0 to x == 0 or x != 0. */
2669 (op (abs @0) zerop@1)
2672 /* From fold_sign_changed_comparison and fold_widened_comparison. */
2673 (for cmp (simple_comparison)
2675 (cmp (convert@0 @00) (convert?@1 @10))
2676 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
2677 /* Disable this optimization if we're casting a function pointer
2678 type on targets that require function pointer canonicalization. */
2679 && !(targetm.have_canonicalize_funcptr_for_compare ()
2680 && TREE_CODE (TREE_TYPE (@00)) == POINTER_TYPE
2681 && TREE_CODE (TREE_TYPE (TREE_TYPE (@00))) == FUNCTION_TYPE)
2683 (if (TYPE_PRECISION (TREE_TYPE (@00)) == TYPE_PRECISION (TREE_TYPE (@0))
2684 && (TREE_CODE (@10) == INTEGER_CST
2685 || (@1 != @10 && types_match (TREE_TYPE (@10), TREE_TYPE (@00))))
2686 && (TYPE_UNSIGNED (TREE_TYPE (@00)) == TYPE_UNSIGNED (TREE_TYPE (@0))
2689 && (POINTER_TYPE_P (TREE_TYPE (@00)) == POINTER_TYPE_P (TREE_TYPE (@0))))
2690 /* ??? The special-casing of INTEGER_CST conversion was in the original
2691 code and is kept here to avoid a spurious overflow flag on the resulting
2692 constant which fold_convert produces. */
2693 (if (TREE_CODE (@1) == INTEGER_CST)
2694 (cmp @00 { force_fit_type (TREE_TYPE (@00), wi::to_widest (@1), 0,
2695 TREE_OVERFLOW (@1)); })
2696 (cmp @00 (convert @1)))
2698 (if (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (TREE_TYPE (@00)))
2699 /* If possible, express the comparison in the shorter mode. */
2700 (if ((cmp == EQ_EXPR || cmp == NE_EXPR
2701 || TYPE_UNSIGNED (TREE_TYPE (@0)) == TYPE_UNSIGNED (TREE_TYPE (@00))
2702 || (!TYPE_UNSIGNED (TREE_TYPE (@0))
2703 && TYPE_UNSIGNED (TREE_TYPE (@00))))
2704 && (types_match (TREE_TYPE (@10), TREE_TYPE (@00))
2705 || ((TYPE_PRECISION (TREE_TYPE (@00))
2706 >= TYPE_PRECISION (TREE_TYPE (@10)))
2707 && (TYPE_UNSIGNED (TREE_TYPE (@00))
2708 == TYPE_UNSIGNED (TREE_TYPE (@10))))
2709 || (TREE_CODE (@10) == INTEGER_CST
2710 && INTEGRAL_TYPE_P (TREE_TYPE (@00))
2711 && int_fits_type_p (@10, TREE_TYPE (@00)))))
2712 (cmp @00 (convert @10))
2713 (if (TREE_CODE (@10) == INTEGER_CST
2714 && INTEGRAL_TYPE_P (TREE_TYPE (@00))
2715 && !int_fits_type_p (@10, TREE_TYPE (@00)))
2718 tree min = lower_bound_in_type (TREE_TYPE (@10), TREE_TYPE (@00));
2719 tree max = upper_bound_in_type (TREE_TYPE (@10), TREE_TYPE (@00));
2720 bool above = integer_nonzerop (const_binop (LT_EXPR, type, max, @10));
2721 bool below = integer_nonzerop (const_binop (LT_EXPR, type, @10, min));
2723 (if (above || below)
2724 (if (cmp == EQ_EXPR || cmp == NE_EXPR)
2725 { constant_boolean_node (cmp == EQ_EXPR ? false : true, type); }
2726 (if (cmp == LT_EXPR || cmp == LE_EXPR)
2727 { constant_boolean_node (above ? true : false, type); }
2728 (if (cmp == GT_EXPR || cmp == GE_EXPR)
2729 { constant_boolean_node (above ? false : true, type); }))))))))))))
2732 /* A local variable can never be pointed to by
2733 the default SSA name of an incoming parameter.
2734 SSA names are canonicalized to 2nd place. */
2736 (cmp addr@0 SSA_NAME@1)
2737 (if (SSA_NAME_IS_DEFAULT_DEF (@1)
2738 && TREE_CODE (SSA_NAME_VAR (@1)) == PARM_DECL)
2739 (with { tree base = get_base_address (TREE_OPERAND (@0, 0)); }
2740 (if (TREE_CODE (base) == VAR_DECL
2741 && auto_var_in_fn_p (base, current_function_decl))
2742 (if (cmp == NE_EXPR)
2743 { constant_boolean_node (true, type); }
2744 { constant_boolean_node (false, type); }))))))
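/* Illustrative C-level sketch for the address comparison above; hypothetical
   example, not a pattern in this file:

     int
     never_equal (int *p)
     {
       int local;
       return p == &local;
     }

   The default definition of the incoming parameter cannot point to the local
   variable, so the equality is expected to fold to 0 (and "!=" to 1).  */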
2746 /* Equality compare simplifications from fold_binary */
2749 /* If we have (A | C) == D where C & ~D != 0, convert this into 0.
2750 Similarly for NE_EXPR. */
2752 (cmp (convert?@3 (bit_ior @0 INTEGER_CST@1)) INTEGER_CST@2)
2753 (if (tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0))
2754 && wi::bit_and_not (@1, @2) != 0)
2755 { constant_boolean_node (cmp == NE_EXPR, type); }))
2757 /* (X ^ Y) == 0 becomes X == Y, and (X ^ Y) != 0 becomes X != Y. */
2759 (cmp (bit_xor @0 @1) integer_zerop)
2762 /* (X ^ Y) == Y becomes X == 0.
2763 Likewise (X ^ Y) == X becomes Y == 0. */
2765 (cmp:c (bit_xor:c @0 @1) @0)
2766 (cmp @1 { build_zero_cst (TREE_TYPE (@1)); }))
2768 /* (X ^ C1) op C2 can be rewritten as X op (C1 ^ C2). */
2770 (cmp (convert?@3 (bit_xor @0 INTEGER_CST@1)) INTEGER_CST@2)
2771 (if (tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0)))
2772 (cmp @0 (bit_xor @1 (convert @2)))))
2775 (cmp (convert? addr@0) integer_zerop)
2776 (if (tree_single_nonzero_warnv_p (@0, NULL))
2777 { constant_boolean_node (cmp == NE_EXPR, type); })))
2779 /* If we have (A & C) == C where C is a power of 2, convert this into
2780 (A & C) != 0. Similarly for NE_EXPR. */
2784 (cmp (bit_and@2 @0 integer_pow2p@1) @1)
2785 (icmp @2 { build_zero_cst (TREE_TYPE (@0)); })))
2787 /* If we have (A & C) != 0 ? D : 0 where C and D are powers of 2,
2788 convert this into a shift followed by ANDing with D. */
2791 (ne (bit_and @0 integer_pow2p@1) integer_zerop)
2792 integer_pow2p@2 integer_zerop)
2794 int shift = wi::exact_log2 (@2) - wi::exact_log2 (@1);
2798 (lshift (convert @0) { build_int_cst (integer_type_node, shift); }) @2)
2800 (convert (rshift @0 { build_int_cst (integer_type_node, -shift); })) @2))))
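/* Illustrative C-level sketch of the shift-and-mask rewrite above;
   hypothetical example, not a pattern in this file:

     int
     bit_select (int x)
     {
       return (x & 4) != 0 ? 16 : 0;
     }

   Both constants are powers of two, so with shift == log2(16) - log2(4) == 2
   the expected result is "(x << 2) & 16".  */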
2802 /* If we have (A & C) != 0 where C is the sign bit of A, convert
2803 this into A < 0. Similarly for (A & C) == 0 into A >= 0. */
2807 (cmp (bit_and (convert?@2 @0) integer_pow2p@1) integer_zerop)
2808 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
2809 && (TYPE_PRECISION (TREE_TYPE (@0))
2810 == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (@0))))
2811 && element_precision (@2) >= element_precision (@0)
2812 && wi::only_sign_bit_p (@1, element_precision (@0)))
2813 (with { tree stype = signed_type_for (TREE_TYPE (@0)); }
2814 (ncmp (convert:stype @0) { build_zero_cst (stype); })))))
2816 /* If we have A < 0 ? C : 0 where C is a power of 2, convert
2817 this into a right shift or sign extension followed by ANDing with C. */
2820 (lt @0 integer_zerop)
2821 integer_pow2p@1 integer_zerop)
2822 (if (!TYPE_UNSIGNED (TREE_TYPE (@0)))
2824 int shift = element_precision (@0) - wi::exact_log2 (@1) - 1;
2828 (convert (rshift @0 { build_int_cst (integer_type_node, shift); }))
2830 /* Otherwise ctype must be wider than TREE_TYPE (@0) and pure
2831 sign extension followed by AND with C will achieve the effect. */
2832 (bit_and (convert @0) @1)))))
2834 /* When the addresses are not directly of decls, compare base and offset.
2835 This implements some remaining parts of fold_comparison address
2836 comparisons but is still not a complete replacement for it. Still it is good
2837 enough to keep fold_stmt from regressing when not dispatching to fold_binary. */
2838 (for cmp (simple_comparison)
2840 (cmp (convert1?@2 addr@0) (convert2? addr@1))
2843 HOST_WIDE_INT off0, off1;
2844 tree base0 = get_addr_base_and_unit_offset (TREE_OPERAND (@0, 0), &off0);
2845 tree base1 = get_addr_base_and_unit_offset (TREE_OPERAND (@1, 0), &off1);
2846 if (base0 && TREE_CODE (base0) == MEM_REF)
2848 off0 += mem_ref_offset (base0).to_short_addr ();
2849 base0 = TREE_OPERAND (base0, 0);
2851 if (base1 && TREE_CODE (base1) == MEM_REF)
2853 off1 += mem_ref_offset (base1).to_short_addr ();
2854 base1 = TREE_OPERAND (base1, 0);
2857 (if (base0 && base1)
2861 /* Punt in GENERIC on variables with value expressions;
2862 the value expressions might point to fields/elements
2863 of other vars etc. */
2865 && ((VAR_P (base0) && DECL_HAS_VALUE_EXPR_P (base0))
2866 || (VAR_P (base1) && DECL_HAS_VALUE_EXPR_P (base1))))
2868 else if (decl_in_symtab_p (base0)
2869 && decl_in_symtab_p (base1))
2870 equal = symtab_node::get_create (base0)
2871 ->equal_address_to (symtab_node::get_create (base1));
2872 else if ((DECL_P (base0)
2873 || TREE_CODE (base0) == SSA_NAME
2874 || TREE_CODE (base0) == STRING_CST)
2876 || TREE_CODE (base1) == SSA_NAME
2877 || TREE_CODE (base1) == STRING_CST))
2878 equal = (base0 == base1);
2881 && (cmp == EQ_EXPR || cmp == NE_EXPR
2882 /* If the offsets are equal we can ignore overflow. */
2884 || POINTER_TYPE_OVERFLOW_UNDEFINED
2885 /* Or if we compare using pointers to decls or strings. */
2886 || (POINTER_TYPE_P (TREE_TYPE (@2))
2887 && (DECL_P (base0) || TREE_CODE (base0) == STRING_CST))))
2889 (if (cmp == EQ_EXPR)
2890 { constant_boolean_node (off0 == off1, type); })
2891 (if (cmp == NE_EXPR)
2892 { constant_boolean_node (off0 != off1, type); })
2893 (if (cmp == LT_EXPR)
2894 { constant_boolean_node (off0 < off1, type); })
2895 (if (cmp == LE_EXPR)
2896 { constant_boolean_node (off0 <= off1, type); })
2897 (if (cmp == GE_EXPR)
2898 { constant_boolean_node (off0 >= off1, type); })
2899 (if (cmp == GT_EXPR)
2900 { constant_boolean_node (off0 > off1, type); }))
2902 && DECL_P (base0) && DECL_P (base1)
2903 /* If we compare this as integers require equal offset. */
2904 && (!INTEGRAL_TYPE_P (TREE_TYPE (@2))
2907 (if (cmp == EQ_EXPR)
2908 { constant_boolean_node (false, type); })
2909 (if (cmp == NE_EXPR)
2910 { constant_boolean_node (true, type); })))))))))
2912 /* Simplify pointer equality compares using PTA. */
2916 (if (POINTER_TYPE_P (TREE_TYPE (@0))
2917 && ptrs_compare_unequal (@0, @1))
2918 { neeq == EQ_EXPR ? boolean_false_node : boolean_true_node; })))
2920 /* PR70920: Transform (intptr_t)x eq/ne CST to x eq/ne (typeof x) CST,
2921 and (typeof ptr_cst) x eq/ne ptr_cst to x eq/ne (typeof x) CST.
2922 Disable the transform if either operand is a pointer to a function.
2923 This broke pr22051-2.c for arm, where function pointer
2924 canonicalization is not wanted. */
2928 (cmp (convert @0) INTEGER_CST@1)
2929 (if ((POINTER_TYPE_P (TREE_TYPE (@0)) && !FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@0)))
2930 && INTEGRAL_TYPE_P (TREE_TYPE (@1)))
2931 || (INTEGRAL_TYPE_P (TREE_TYPE (@0)) && POINTER_TYPE_P (TREE_TYPE (@1))
2932 && !FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@1)))))
2933 (cmp @0 (convert @1)))))
2935 /* Non-equality compare simplifications from fold_binary */
2936 (for cmp (lt gt le ge)
2937 /* Comparisons with the highest or lowest possible integer of
2938 the specified precision will have known values. */
2940 (cmp (convert?@2 @0) INTEGER_CST@1)
2941 (if ((INTEGRAL_TYPE_P (TREE_TYPE (@1)) || POINTER_TYPE_P (TREE_TYPE (@1)))
2942 && tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0)))
2945 tree arg1_type = TREE_TYPE (@1);
2946 unsigned int prec = TYPE_PRECISION (arg1_type);
2947 wide_int max = wi::max_value (arg1_type);
2948 wide_int signed_max = wi::max_value (prec, SIGNED);
2949 wide_int min = wi::min_value (arg1_type);
2952 (if (wi::eq_p (@1, max))
2954 (if (cmp == GT_EXPR)
2955 { constant_boolean_node (false, type); })
2956 (if (cmp == GE_EXPR)
2958 (if (cmp == LE_EXPR)
2959 { constant_boolean_node (true, type); })
2960 (if (cmp == LT_EXPR)
2962 (if (wi::eq_p (@1, min))
2964 (if (cmp == LT_EXPR)
2965 { constant_boolean_node (false, type); })
2966 (if (cmp == LE_EXPR)
2968 (if (cmp == GE_EXPR)
2969 { constant_boolean_node (true, type); })
2970 (if (cmp == GT_EXPR)
2972 (if (wi::eq_p (@1, max - 1))
2974 (if (cmp == GT_EXPR)
2975 (eq @2 { wide_int_to_tree (TREE_TYPE (@1), wi::add (@1, 1)); }))
2976 (if (cmp == LE_EXPR)
2977 (ne @2 { wide_int_to_tree (TREE_TYPE (@1), wi::add (@1, 1)); }))))
2978 (if (wi::eq_p (@1, min + 1))
2980 (if (cmp == GE_EXPR)
2981 (ne @2 { wide_int_to_tree (TREE_TYPE (@1), wi::sub (@1, 1)); }))
2982 (if (cmp == LT_EXPR)
2983 (eq @2 { wide_int_to_tree (TREE_TYPE (@1), wi::sub (@1, 1)); }))))
2984 (if (wi::eq_p (@1, signed_max)
2985 && TYPE_UNSIGNED (arg1_type)
2986 /* We will flip the signedness of the comparison operator
2987 associated with the mode of @1, so the sign bit is
2988 specified by this mode. Check that @1 is the signed
2989 max associated with this sign bit. */
2990 && prec == GET_MODE_PRECISION (TYPE_MODE (arg1_type))
2991 /* signed_type does not work on pointer types. */
2992 && INTEGRAL_TYPE_P (arg1_type))
2993 /* The following case also applies to X < signed_max+1
2994 and X >= signed_max+1 because of previous transformations. */
2995 (if (cmp == LE_EXPR || cmp == GT_EXPR)
2996 (with { tree st = signed_type_for (arg1_type); }
2997 (if (cmp == LE_EXPR)
2998 (ge (convert:st @0) { build_zero_cst (st); })
2999 (lt (convert:st @0) { build_zero_cst (st); }))))))))))
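/* Illustrative C-level sketches for the extreme-value comparisons above;
   hypothetical examples, not patterns in this file:

     #include <limits.h>

     int
     above_almost_max (unsigned int x)
     {
       return x > UINT_MAX - 1;
     }

     int
     at_most_signed_max (unsigned int x)
     {
       return x <= INT_MAX;
     }

   The first comparison is expected to become "x == UINT_MAX", the second a
   sign-bit test "(int) x >= 0"; comparisons that can never be true or false,
   such as "x > UINT_MAX", are expected to fold to constants outright.  */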
3001 (for cmp (unordered ordered unlt unle ungt unge uneq ltgt)
3002 /* If the second operand is NaN, the result is constant. */
3005 (if (REAL_VALUE_ISNAN (TREE_REAL_CST (@1))
3006 && (cmp != LTGT_EXPR || ! flag_trapping_math))
3007 { constant_boolean_node (cmp == ORDERED_EXPR || cmp == LTGT_EXPR
3008 ? false : true, type); })))
3010 /* bool_var != 0 becomes bool_var. */
3012 (ne @0 integer_zerop)
3013 (if (TREE_CODE (TREE_TYPE (@0)) == BOOLEAN_TYPE
3014 && types_match (type, TREE_TYPE (@0)))
3016 /* bool_var == 1 becomes bool_var. */
3018 (eq @0 integer_onep)
3019 (if (TREE_CODE (TREE_TYPE (@0)) == BOOLEAN_TYPE
3020 && types_match (type, TREE_TYPE (@0)))
3023 /* We do not simplify bool_var == 0 to !bool_var or
3024 bool_var != 1 to !bool_var
3025 here because that is only good in assignment context as long
3026 as we require a tcc_comparison in GIMPLE_CONDs where we'd
3027 replace if (x == 0) with tem = ~x; if (tem != 0) which is
3028 clearly less optimal and which we'll transform again in forwprop. */
3030 /* When one argument is a constant, overflow detection can be simplified.
3031 Currently restricted to single use so as not to interfere too much with
3032 ADD_OVERFLOW detection in tree-ssa-math-opts.c.
3033 A + CST CMP A -> A CMP' CST' */
3034 (for cmp (lt le ge gt)
3037 (cmp:c (plus@2 @0 INTEGER_CST@1) @0)
3038 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
3039 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))
3042 (out @0 { wide_int_to_tree (TREE_TYPE (@0), wi::max_value
3043 (TYPE_PRECISION (TREE_TYPE (@0)), UNSIGNED) - @1); }))))
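/* Illustrative C-level sketch of the overflow-test rewrite above;
   hypothetical example, not a pattern in this file:

     #include <limits.h>

     int
     add_would_wrap (unsigned int a)
     {
       return a + 10 < a;
     }

   With wrapping unsigned arithmetic this classic overflow check is expected
   to become "a > UINT_MAX - 10".  */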
3045 /* To detect overflow in unsigned A - B, A < B is simpler than A - B > A.
3046 However, the detection logic for SUB_OVERFLOW in tree-ssa-math-opts.c
3047 expects the long form, so we restrict the transformation for now. */
3050 (cmp:c (minus@2 @0 @1) @0)
3051 (if (single_use (@2)
3052 && ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
3053 && TYPE_UNSIGNED (TREE_TYPE (@0))
3054 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
3057 /* Testing for overflow is unnecessary if we already know the result. */
3062 (cmp:c (realpart (IFN_SUB_OVERFLOW@2 @0 @1)) @0)
3063 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
3064 && types_match (TREE_TYPE (@0), TREE_TYPE (@1)))
3065 (out (imagpart @2) { build_zero_cst (TREE_TYPE (@0)); }))))
3070 (cmp:c (realpart (IFN_ADD_OVERFLOW:c@2 @0 @1)) @0)
3071 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
3072 && types_match (TREE_TYPE (@0), TREE_TYPE (@1)))
3073 (out (imagpart @2) { build_zero_cst (TREE_TYPE (@0)); }))))
3075 /* For unsigned operands, -1 / B < A checks whether A * B would overflow.
3076 Simplify it to __builtin_mul_overflow (A, B, <unused>). */
3080 (cmp:c (trunc_div:s integer_all_onesp @1) @0)
3081 (if (TYPE_UNSIGNED (TREE_TYPE (@0)) && !VECTOR_TYPE_P (TREE_TYPE (@0)))
3082 (with { tree t = TREE_TYPE (@0), cpx = build_complex_type (t); }
3083 (out (imagpart (IFN_MUL_OVERFLOW:cpx @0 @1)) { build_zero_cst (t); })))))
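/* Illustrative C-level sketch of the multiplication-overflow idiom above;
   hypothetical example, not a pattern in this file:

     int
     mul_would_wrap (unsigned int a, unsigned int b)
     {
       return (unsigned int) -1 / b < a;
     }

   On GIMPLE this is expected to be rewritten to test the overflow flag
   (the IMAGPART) of an internal .MUL_OVERFLOW (a, b) call against zero.  */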
3085 /* Simplification of math builtins. These rules must all be optimizations
3086 as well as IL simplifications. If there is a possibility that the new
3087 form could be a pessimization, the rule should go in the canonicalization
3088 section that follows this one.
3090 Rules can generally go in this section if they satisfy one of the following:
3093 - the rule describes an identity
3095 - the rule replaces calls with something as simple as addition or multiplication
3098 - the rule contains unary calls only and simplifies the surrounding
3099 arithmetic. (The idea here is to exclude non-unary calls in which
3100 one operand is constant and in which the call is known to be cheap
3101 when the operand has that value.) */
3103 (if (flag_unsafe_math_optimizations)
3104 /* Simplify sqrt(x) * sqrt(x) -> x. */
3106 (mult (SQRT@1 @0) @1)
3107 (if (!HONOR_SNANS (type))
3110 /* Simplify sqrt(x) * sqrt(y) -> sqrt(x*y). */
3111 (for root (SQRT CBRT)
3113 (mult (root:s @0) (root:s @1))
3114 (root (mult @0 @1))))
3116 /* Simplify expN(x) * expN(y) -> expN(x+y). */
3117 (for exps (EXP EXP2 EXP10 POW10)
3119 (mult (exps:s @0) (exps:s @1))
3120 (exps (plus @0 @1))))
3122 /* Simplify a/root(b/c) into a*root(c/b). */
3123 (for root (SQRT CBRT)
3125 (rdiv @0 (root:s (rdiv:s @1 @2)))
3126 (mult @0 (root (rdiv @2 @1)))))
3128 /* Simplify x/expN(y) into x*expN(-y). */
3129 (for exps (EXP EXP2 EXP10 POW10)
3131 (rdiv @0 (exps:s @1))
3132 (mult @0 (exps (negate @1)))))
3134 (for logs (LOG LOG2 LOG10 LOG10)
3135 exps (EXP EXP2 EXP10 POW10)
3136 /* logN(expN(x)) -> x. */
3140 /* expN(logN(x)) -> x. */
3145 /* Optimize logN(func()) for various exponential functions. We
3146 want to determine the value "x" and the power "exponent" in
3147 order to transform logN(x**exponent) into exponent*logN(x). */
3148 (for logs (LOG LOG LOG LOG2 LOG2 LOG2 LOG10 LOG10)
3149 exps (EXP2 EXP10 POW10 EXP EXP10 POW10 EXP EXP2)
3152 (if (SCALAR_FLOAT_TYPE_P (type))
3158 /* Prepare to do logN(exp(exponent)) -> exponent*logN(e). */
3159 x = build_real_truncate (type, dconst_e ());
3162 /* Prepare to do logN(exp2(exponent)) -> exponent*logN(2). */
3163 x = build_real (type, dconst2);
3167 /* Prepare to do logN(exp10(exponent)) -> exponent*logN(10). */
3169 REAL_VALUE_TYPE dconst10;
3170 real_from_integer (&dconst10, VOIDmode, 10, SIGNED);
3171 x = build_real (type, dconst10);
3178 (mult (logs { x; }) @0)))))
3186 (if (SCALAR_FLOAT_TYPE_P (type))
3192 /* Prepare to do logN(sqrt(x)) -> 0.5*logN(x). */
3193 x = build_real (type, dconsthalf);
3196 /* Prepare to do logN(cbrt(x)) -> (1/3)*logN(x). */
3197 x = build_real_truncate (type, dconst_third ());
3203 (mult { x; } (logs @0))))))
3205 /* logN(pow(x,exponent)) -> exponent*logN(x). */
3206 (for logs (LOG LOG2 LOG10)
3210 (mult @1 (logs @0))))
3215 exps (EXP EXP2 EXP10 POW10)
3216 /* sqrt(expN(x)) -> expN(x*0.5). */
3219 (exps (mult @0 { build_real (type, dconsthalf); })))
3220 /* cbrt(expN(x)) -> expN(x/3). */
3223 (exps (mult @0 { build_real_truncate (type, dconst_third ()); })))
3224 /* pow(expN(x), y) -> expN(x*y). */
3227 (exps (mult @0 @1))))
3229 /* tan(atan(x)) -> x. */
3236 /* cabs(x+0i) or cabs(0+xi) -> abs(x). */
3238 (CABS (complex:C @0 real_zerop@1))
3241 /* trunc(trunc(x)) -> trunc(x), etc. */
3242 (for fns (TRUNC FLOOR CEIL ROUND NEARBYINT RINT)
3246 /* f(x) -> x if x is integer valued and f does nothing for such values. */
3247 (for fns (TRUNC FLOOR CEIL ROUND NEARBYINT RINT)
3249 (fns integer_valued_real_p@0)
3252 /* hypot(x,0) and hypot(0,x) -> abs(x). */
3254 (HYPOT:c @0 real_zerop@1)
3257 /* pow(1,x) -> 1. */
3259 (POW real_onep@0 @1)
3263 /* copysign(x,x) -> x. */
3268 /* copysign(x,y) -> fabs(x) if y is nonnegative. */
3269 (COPYSIGN @0 tree_expr_nonnegative_p@1)
3272 (for scale (LDEXP SCALBN SCALBLN)
3273 /* ldexp(0, x) -> 0. */
3275 (scale real_zerop@0 @1)
3277 /* ldexp(x, 0) -> x. */
3279 (scale @0 integer_zerop@1)
3281 /* ldexp(x, y) -> x if x is +-Inf or NaN. */
3283 (scale REAL_CST@0 @1)
3284 (if (!real_isfinite (TREE_REAL_CST_PTR (@0)))
3287 /* Canonicalization of sequences of math builtins. These rules represent
3288 IL simplifications but are not necessarily optimizations.
3290 The sincos pass is responsible for picking "optimal" implementations
3291 of math builtins, which may be more complicated and can sometimes go
3292 the other way, e.g. converting pow into a sequence of sqrts.
3293 We only want to do these canonicalizations before the pass has run. */
3295 (if (flag_unsafe_math_optimizations && canonicalize_math_p ())
3296 /* Simplify tan(x) * cos(x) -> sin(x). */
3298 (mult:c (TAN:s @0) (COS:s @0))
3301 /* Simplify x * pow(x,c) -> pow(x,c+1). */
3303 (mult:c @0 (POW:s @0 REAL_CST@1))
3304 (if (!TREE_OVERFLOW (@1))
3305 (POW @0 (plus @1 { build_one_cst (type); }))))
3307 /* Simplify sin(x) / cos(x) -> tan(x). */
3309 (rdiv (SIN:s @0) (COS:s @0))
3312 /* Simplify cos(x) / sin(x) -> 1 / tan(x). */
3314 (rdiv (COS:s @0) (SIN:s @0))
3315 (rdiv { build_one_cst (type); } (TAN @0)))
3317 /* Simplify sin(x) / tan(x) -> cos(x). */
3319 (rdiv (SIN:s @0) (TAN:s @0))
3320 (if (! HONOR_NANS (@0)
3321 && ! HONOR_INFINITIES (@0))
3324 /* Simplify tan(x) / sin(x) -> 1.0 / cos(x). */
3326 (rdiv (TAN:s @0) (SIN:s @0))
3327 (if (! HONOR_NANS (@0)
3328 && ! HONOR_INFINITIES (@0))
3329 (rdiv { build_one_cst (type); } (COS @0))))
3331 /* Simplify pow(x,y) * pow(x,z) -> pow(x,y+z). */
3333 (mult (POW:s @0 @1) (POW:s @0 @2))
3334 (POW @0 (plus @1 @2)))
3336 /* Simplify pow(x,y) * pow(z,y) -> pow(x*z,y). */
3338 (mult (POW:s @0 @1) (POW:s @2 @1))
3339 (POW (mult @0 @2) @1))
3341 /* Simplify powi(x,y) * powi(z,y) -> powi(x*z,y). */
3343 (mult (POWI:s @0 @1) (POWI:s @2 @1))
3344 (POWI (mult @0 @2) @1))
3346 /* Simplify pow(x,c) / x -> pow(x,c-1). */
3348 (rdiv (POW:s @0 REAL_CST@1) @0)
3349 (if (!TREE_OVERFLOW (@1))
3350 (POW @0 (minus @1 { build_one_cst (type); }))))
3352 /* Simplify x / pow (y,z) -> x * pow(y,-z). */
3354 (rdiv @0 (POW:s @1 @2))
3355 (mult @0 (POW @1 (negate @2))))
3360 /* sqrt(sqrt(x)) -> pow(x,1/4). */
3363 (pows @0 { build_real (type, dconst_quarter ()); }))
3364 /* sqrt(cbrt(x)) -> pow(x,1/6). */
3367 (pows @0 { build_real_truncate (type, dconst_sixth ()); }))
3368 /* cbrt(sqrt(x)) -> pow(x,1/6). */
3371 (pows @0 { build_real_truncate (type, dconst_sixth ()); }))
3372 /* cbrt(cbrt(x)) -> pow(x,1/9), iff x is nonnegative. */
3374 (cbrts (cbrts tree_expr_nonnegative_p@0))
3375 (pows @0 { build_real_truncate (type, dconst_ninth ()); }))
3376 /* sqrt(pow(x,y)) -> pow(|x|,y*0.5). */
3378 (sqrts (pows @0 @1))
3379 (pows (abs @0) (mult @1 { build_real (type, dconsthalf); })))
3380 /* cbrt(pow(x,y)) -> pow(x,y/3), iff x is nonnegative. */
3382 (cbrts (pows tree_expr_nonnegative_p@0 @1))
3383 (pows @0 (mult @1 { build_real_truncate (type, dconst_third ()); })))
3384 /* pow(sqrt(x),y) -> pow(x,y*0.5). */
3386 (pows (sqrts @0) @1)
3387 (pows @0 (mult @1 { build_real (type, dconsthalf); })))
3388 /* pow(cbrt(x),y) -> pow(x,y/3) iff x is nonnegative. */
3390 (pows (cbrts tree_expr_nonnegative_p@0) @1)
3391 (pows @0 (mult @1 { build_real_truncate (type, dconst_third ()); })))
3392 /* pow(pow(x,y),z) -> pow(x,y*z) iff x is nonnegative. */
3394 (pows (pows tree_expr_nonnegative_p@0 @1) @2)
3395 (pows @0 (mult @1 @2))))
3397 /* cabs(x+xi) -> fabs(x)*sqrt(2). */
3399 (CABS (complex @0 @0))
3400 (mult (abs @0) { build_real_truncate (type, dconst_sqrt2 ()); }))
3402 /* hypot(x,x) -> fabs(x)*sqrt(2). */
3405 (mult (abs @0) { build_real_truncate (type, dconst_sqrt2 ()); }))
3407 /* cexp(x+yi) -> exp(x)*cexpi(y). */
3412 (cexps compositional_complex@0)
3413 (if (targetm.libc_has_function (function_c99_math_complex))
3415 (mult (exps@1 (realpart @0)) (realpart (cexpis:type@2 (imagpart @0))))
3416 (mult @1 (imagpart @2)))))))
3418 (if (canonicalize_math_p ())
3419 /* floor(x) -> trunc(x) if x is nonnegative. */
3423 (floors tree_expr_nonnegative_p@0)
3426 (match double_value_p
3428 (if (TYPE_MAIN_VARIANT (TREE_TYPE (@0)) == double_type_node)))
3429 (for froms (BUILT_IN_TRUNCL
3441 /* truncl(extend(x)) -> extend(trunc(x)), etc., if x is a double. */
3442 (if (optimize && canonicalize_math_p ())
3444 (froms (convert double_value_p@0))
3445 (convert (tos @0)))))
3447 (match float_value_p
3449 (if (TYPE_MAIN_VARIANT (TREE_TYPE (@0)) == float_type_node)))
3450 (for froms (BUILT_IN_TRUNCL BUILT_IN_TRUNC
3451 BUILT_IN_FLOORL BUILT_IN_FLOOR
3452 BUILT_IN_CEILL BUILT_IN_CEIL
3453 BUILT_IN_ROUNDL BUILT_IN_ROUND
3454 BUILT_IN_NEARBYINTL BUILT_IN_NEARBYINT
3455 BUILT_IN_RINTL BUILT_IN_RINT)
3456 tos (BUILT_IN_TRUNCF BUILT_IN_TRUNCF
3457 BUILT_IN_FLOORF BUILT_IN_FLOORF
3458 BUILT_IN_CEILF BUILT_IN_CEILF
3459 BUILT_IN_ROUNDF BUILT_IN_ROUNDF
3460 BUILT_IN_NEARBYINTF BUILT_IN_NEARBYINTF
3461 BUILT_IN_RINTF BUILT_IN_RINTF)
3462 /* truncl(extend(x)) and trunc(extend(x)) -> extend(truncf(x)), etc., if x is a float. */
3464 (if (optimize && canonicalize_math_p ()
3465 && targetm.libc_has_function (function_c99_misc))
3467 (froms (convert float_value_p@0))
3468 (convert (tos @0)))))
3470 (for froms (XFLOORL XCEILL XROUNDL XRINTL)
3471 tos (XFLOOR XCEIL XROUND XRINT)
3472 /* llfloorl(extend(x)) -> llfloor(x), etc., if x is a double. */
3473 (if (optimize && canonicalize_math_p ())
3475 (froms (convert double_value_p@0))
3478 (for froms (XFLOORL XCEILL XROUNDL XRINTL
3479 XFLOOR XCEIL XROUND XRINT)
3480 tos (XFLOORF XCEILF XROUNDF XRINTF)
3481 /* llfloorl(extend(x)) and llfloor(extend(x)) -> llfloorf(x), etc., if x is a float. */
3483 (if (optimize && canonicalize_math_p ())
3485 (froms (convert float_value_p@0))
3488 (if (canonicalize_math_p ())
3489 /* xfloor(x) -> fix_trunc(x) if x is nonnegative. */
3490 (for floors (IFLOOR LFLOOR LLFLOOR)
3492 (floors tree_expr_nonnegative_p@0)
3495 (if (canonicalize_math_p ())
3496 /* xfloor(x) -> fix_trunc(x), etc., if x is integer valued. */
3497 (for fns (IFLOOR LFLOOR LLFLOOR
3499 IROUND LROUND LLROUND)
3501 (fns integer_valued_real_p@0)
3503 (if (!flag_errno_math)
3504 /* xrint(x) -> fix_trunc(x), etc., if x is integer valued. */
3505 (for rints (IRINT LRINT LLRINT)
3507 (rints integer_valued_real_p@0)
3510 (if (canonicalize_math_p ())
3511 (for ifn (IFLOOR ICEIL IROUND IRINT)
3512 lfn (LFLOOR LCEIL LROUND LRINT)
3513 llfn (LLFLOOR LLCEIL LLROUND LLRINT)
3514 /* Canonicalize iround (x) to lround (x) on ILP32 targets where
3515 sizeof (int) == sizeof (long). */
3516 (if (TYPE_PRECISION (integer_type_node)
3517 == TYPE_PRECISION (long_integer_type_node))
3520 (lfn:long_integer_type_node @0)))
3521 /* Canonicalize llround (x) to lround (x) on LP64 targets where
3522 sizeof (long long) == sizeof (long). */
3523 (if (TYPE_PRECISION (long_long_integer_type_node)
3524 == TYPE_PRECISION (long_integer_type_node))
3527 (lfn:long_integer_type_node @0)))))
3529 /* cproj(x) -> x if we're ignoring infinities. */
3532 (if (!HONOR_INFINITIES (type))
3535 /* If the real part is inf and the imag part is known to be
3536 nonnegative, return (inf + 0i). */
3538 (CPROJ (complex REAL_CST@0 tree_expr_nonnegative_p@1))
3539 (if (real_isinf (TREE_REAL_CST_PTR (@0)))
3540 { build_complex_inf (type, false); }))
3542 /* If the imag part is inf, return (inf+I*copysign(0,imag)). */
3544 (CPROJ (complex @0 REAL_CST@1))
3545 (if (real_isinf (TREE_REAL_CST_PTR (@1)))
3546 { build_complex_inf (type, TREE_REAL_CST_PTR (@1)->sign); }))
3552 (pows @0 REAL_CST@1)
3554 const REAL_VALUE_TYPE *value = TREE_REAL_CST_PTR (@1);
3555 REAL_VALUE_TYPE tmp;
3558 /* pow(x,0) -> 1. */
3559 (if (real_equal (value, &dconst0))
3560 { build_real (type, dconst1); })
3561 /* pow(x,1) -> x. */
3562 (if (real_equal (value, &dconst1))
3564 /* pow(x,-1) -> 1/x. */
3565 (if (real_equal (value, &dconstm1))
3566 (rdiv { build_real (type, dconst1); } @0))
3567 /* pow(x,0.5) -> sqrt(x). */
3568 (if (flag_unsafe_math_optimizations
3569 && canonicalize_math_p ()
3570 && real_equal (value, &dconsthalf))
3572 /* pow(x,1/3) -> cbrt(x). */
3573 (if (flag_unsafe_math_optimizations
3574 && canonicalize_math_p ()
3575 && (tmp = real_value_truncate (TYPE_MODE (type), dconst_third ()),
3576 real_equal (value, &tmp)))
3579 /* powi(1,x) -> 1. */
3581 (POWI real_onep@0 @1)
3585 (POWI @0 INTEGER_CST@1)
3587 /* powi(x,0) -> 1. */
3588 (if (wi::eq_p (@1, 0))
3589 { build_real (type, dconst1); })
3590 /* powi(x,1) -> x. */
3591 (if (wi::eq_p (@1, 1))
3593 /* powi(x,-1) -> 1/x. */
3594 (if (wi::eq_p (@1, -1))
3595 (rdiv { build_real (type, dconst1); } @0))))
3597 /* Narrowing of arithmetic and logical operations.
3599 These are conceptually similar to the transformations performed for
3600 the C/C++ front-ends by shorten_binary_op and shorten_compare. Long
3601 term we want to move all that code out of the front-ends into here. */
3603 /* If we have a narrowing conversion of an arithmetic operation where
3604 both operands are widening conversions from the same type as the outer
3605 narrowing conversion, then convert the innermost operands to a suitable
3606 unsigned type (to avoid introducing undefined behavior), perform the
3607 operation and convert the result to the desired type. */
3608 (for op (plus minus)
3610 (convert (op:s (convert@2 @0) (convert?@3 @1)))
3611 (if (INTEGRAL_TYPE_P (type)
3612 /* We check for type compatibility between @0 and @1 below,
3613 so there's no need to check that @1/@3 are integral types. */
3614 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
3615 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
3616 /* The precision of the type of each operand must match the
3617 precision of the mode of each operand, similarly for the result. */
3619 && (TYPE_PRECISION (TREE_TYPE (@0))
3620 == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (@0))))
3621 && (TYPE_PRECISION (TREE_TYPE (@1))
3622 == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (@1))))
3623 && TYPE_PRECISION (type) == GET_MODE_PRECISION (TYPE_MODE (type))
3624 /* The inner conversion must be a widening conversion. */
3625 && TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (TREE_TYPE (@0))
3626 && types_match (@0, type)
3627 && (types_match (@0, @1)
3628 /* Or the second operand is const integer or converted const
3629 integer from valueize. */
3630 || TREE_CODE (@1) == INTEGER_CST))
3631 (if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
3632 (op @0 (convert @1))
3633 (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
3634 (convert (op (convert:utype @0)
3635 (convert:utype @1))))))))
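/* Illustrative C-level sketch of the narrowing above; hypothetical example,
   not a pattern in this file:

     short
     narrow_add (short a, short b)
     {
       return a + b;
     }

   On GIMPLE this is (short) ((int) a + (int) b); since signed short does not
   wrap, the addition is expected to be done in unsigned short and converted
   back, avoiding the widening to int without introducing undefined
   overflow.  */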
3637 /* This is another case of narrowing, specifically when there's an outer
3638 BIT_AND_EXPR which masks off bits outside the type of the innermost
3639 operands. Like the previous case we have to convert the operands
3640 to unsigned types to avoid introducing undefined behavior for the
3641 arithmetic operation. */
3642 (for op (minus plus)
3644 (bit_and (op:s (convert@2 @0) (convert@3 @1)) INTEGER_CST@4)
3645 (if (INTEGRAL_TYPE_P (type)
3646 /* We check for type compatibility between @0 and @1 below,
3647 so there's no need to check that @1/@3 are integral types. */
3648 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
3649 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
3650 /* The precision of the type of each operand must match the
3651 precision of the mode of each operand, similarly for the result. */
3653 && (TYPE_PRECISION (TREE_TYPE (@0))
3654 == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (@0))))
3655 && (TYPE_PRECISION (TREE_TYPE (@1))
3656 == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (@1))))
3657 && TYPE_PRECISION (type) == GET_MODE_PRECISION (TYPE_MODE (type))
3658 /* The inner conversion must be a widening conversion. */
3659 && TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (TREE_TYPE (@0))
3660 && types_match (@0, @1)
3661 && (tree_int_cst_min_precision (@4, TYPE_SIGN (TREE_TYPE (@0)))
3662 <= TYPE_PRECISION (TREE_TYPE (@0)))
3663 && (wi::bit_and (@4, wi::mask (TYPE_PRECISION (TREE_TYPE (@0)),
3664 true, TYPE_PRECISION (type))) == 0))
3665 (if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
3666 (with { tree ntype = TREE_TYPE (@0); }
3667 (convert (bit_and (op @0 @1) (convert:ntype @4))))
3668 (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
3669 (convert (bit_and (op (convert:utype @0) (convert:utype @1))
3670 (convert:utype @4))))))))
3672 /* Transform (@0 < @1 and @0 < @2) to use min,
3673 (@0 > @1 and @0 > @2) to use max */
3674 (for op (lt le gt ge)
3675 ext (min min max max)
3677 (bit_and (op:cs @0 @1) (op:cs @0 @2))
3678 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
3679 && TREE_CODE (@0) != INTEGER_CST)
3680 (op @0 (ext @1 @2)))))
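/* Illustrative C-level sketch of the comparison combining above;
   hypothetical example, not a pattern in this file:

     int
     below_both (int x, int a, int b)
     {
       return x < a && x < b;
     }

   Once the two comparisons are combined into a single bit_and, this is
   expected to become "x < MIN_EXPR <a, b>" (and the ">" forms use max).  */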
3683 /* signbit(x) -> 0 if x is nonnegative. */
3684 (SIGNBIT tree_expr_nonnegative_p@0)
3685 { integer_zero_node; })
3688 /* signbit(x) -> x<0 if x doesn't have signed zeros. */
3690 (if (!HONOR_SIGNED_ZEROS (@0))
3691 (convert (lt @0 { build_real (TREE_TYPE (@0), dconst0); }))))
3693 /* Transform comparisons of the form X +- C1 CMP C2 to X CMP C2 -+ C1. */
3695 (for op (plus minus)
3698 (cmp (op@3 @0 INTEGER_CST@1) INTEGER_CST@2)
3699 (if (!TREE_OVERFLOW (@1) && !TREE_OVERFLOW (@2)
3700 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0))
3701 && !TYPE_OVERFLOW_TRAPS (TREE_TYPE (@0))
3702 && !TYPE_SATURATING (TREE_TYPE (@0)))
3703 (with { tree res = int_const_binop (rop, @2, @1); }
3704 (if (TREE_OVERFLOW (res)
3705 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
3706 { constant_boolean_node (cmp == NE_EXPR, type); }
3707 (if (single_use (@3))
3708 (cmp @0 { res; }))))))))
3709 (for cmp (lt le gt ge)
3710 (for op (plus minus)
3713 (cmp (op@3 @0 INTEGER_CST@1) INTEGER_CST@2)
3714 (if (!TREE_OVERFLOW (@1) && !TREE_OVERFLOW (@2)
3715 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
3716 (with { tree res = int_const_binop (rop, @2, @1); }
3717 (if (TREE_OVERFLOW (res))
3719 fold_overflow_warning (("assuming signed overflow does not occur "
3720 "when simplifying conditional to constant"),
3721 WARN_STRICT_OVERFLOW_CONDITIONAL);
3722 bool less = cmp == LE_EXPR || cmp == LT_EXPR;
3723 /* wi::ges_p (@2, 0) should be sufficient for a signed type. */
3724 bool ovf_high = wi::lt_p (@1, 0, TYPE_SIGN (TREE_TYPE (@1)))
3725 != (op == MINUS_EXPR);
3726 constant_boolean_node (less == ovf_high, type);
3728 (if (single_use (@3))
3731 fold_overflow_warning (("assuming signed overflow does not occur "
3732 "when changing X +- C1 cmp C2 to "
3734 WARN_STRICT_OVERFLOW_COMPARISON);
3736 (cmp @0 { res; })))))))))
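/* Illustrative C-level sketches of the constant re-association above;
   hypothetical examples, not patterns in this file:

     int
     eq_form (int x)
     {
       return x + 10 == 30;
     }

     int
     rel_form (int x)
     {
       return x + 10 < 5;
     }

   The equality form is expected to become "x == 20"; the relational form is
   expected to become "x < -5", relying on signed overflow being undefined
   and possibly emitting the strict-overflow warning mentioned above.  */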
3738 /* Canonicalizations of BIT_FIELD_REFs. */
3741 (BIT_FIELD_REF @0 @1 @2)
3743 (if (TREE_CODE (TREE_TYPE (@0)) == COMPLEX_TYPE
3744 && tree_int_cst_equal (@1, TYPE_SIZE (TREE_TYPE (TREE_TYPE (@0)))))
3746 (if (integer_zerop (@2))
3747 (view_convert (realpart @0)))
3748 (if (tree_int_cst_equal (@2, TYPE_SIZE (TREE_TYPE (TREE_TYPE (@0)))))
3749 (view_convert (imagpart @0)))))
3750 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
3751 && INTEGRAL_TYPE_P (type)
3752 /* On GIMPLE this should only apply to register arguments. */
3753 && (! GIMPLE || is_gimple_reg (@0))
3754 /* A bit-field-ref that referenced the full argument can be stripped. */
3755 && ((compare_tree_int (@1, TYPE_PRECISION (TREE_TYPE (@0))) == 0
3756 && integer_zerop (@2))
3757 /* Low-parts can be reduced to integral conversions.
3758 ??? The following doesn't work for PDP endian. */
3759 || (BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN
3760 /* Don't even think about BITS_BIG_ENDIAN. */
3761 && TYPE_PRECISION (TREE_TYPE (@0)) % BITS_PER_UNIT == 0
3762 && TYPE_PRECISION (type) % BITS_PER_UNIT == 0
3763 && compare_tree_int (@2, (BYTES_BIG_ENDIAN
3764 ? (TYPE_PRECISION (TREE_TYPE (@0))
3765 - TYPE_PRECISION (type))
3769 /* Simplify vector extracts. */
3772 (BIT_FIELD_REF CONSTRUCTOR@0 @1 @2)
3773 (if (VECTOR_TYPE_P (TREE_TYPE (@0))
3774 && (types_match (type, TREE_TYPE (TREE_TYPE (@0)))
3775 || (VECTOR_TYPE_P (type)
3776 && types_match (TREE_TYPE (type), TREE_TYPE (TREE_TYPE (@0))))))
3779 tree ctor = (TREE_CODE (@0) == SSA_NAME
3780 ? gimple_assign_rhs1 (SSA_NAME_DEF_STMT (@0)) : @0);
3781 tree eltype = TREE_TYPE (TREE_TYPE (ctor));
3782 unsigned HOST_WIDE_INT width = tree_to_uhwi (TYPE_SIZE (eltype));
3783 unsigned HOST_WIDE_INT n = tree_to_uhwi (@1);
3784 unsigned HOST_WIDE_INT idx = tree_to_uhwi (@2);
3787 && (idx % width) == 0
3789 && ((idx + n) / width) <= TYPE_VECTOR_SUBPARTS (TREE_TYPE (ctor)))
3794 /* Constructor elements can be subvectors. */
3795 unsigned HOST_WIDE_INT k = 1;
3796 if (CONSTRUCTOR_NELTS (ctor) != 0)
3798 tree cons_elem = TREE_TYPE (CONSTRUCTOR_ELT (ctor, 0)->value);
3799 if (TREE_CODE (cons_elem) == VECTOR_TYPE)
3800 k = TYPE_VECTOR_SUBPARTS (cons_elem);
3804 /* We keep an exact subset of the constructor elements. */
3805 (if ((idx % k) == 0 && (n % k) == 0)
3806 (if (CONSTRUCTOR_NELTS (ctor) == 0)
3807 { build_constructor (type, NULL); }
3814 (if (idx < CONSTRUCTOR_NELTS (ctor))
3815 { CONSTRUCTOR_ELT (ctor, idx)->value; }
3816 { build_zero_cst (type); })
3818 vec<constructor_elt, va_gc> *vals;
3819 vec_alloc (vals, n);
3820 for (unsigned i = 0;
3821 i < n && idx + i < CONSTRUCTOR_NELTS (ctor); ++i)
3822 CONSTRUCTOR_APPEND_ELT (vals, NULL_TREE,
3823 CONSTRUCTOR_ELT (ctor, idx + i)->value);
3824 build_constructor (type, vals);
3826 /* The bitfield references a single constructor element. */
3827 (if (idx + n <= (idx / k + 1) * k)
3829 (if (CONSTRUCTOR_NELTS (ctor) <= idx / k)
3830 { build_zero_cst (type); })
3832 { CONSTRUCTOR_ELT (ctor, idx / k)->value; })
3833 (BIT_FIELD_REF { CONSTRUCTOR_ELT (ctor, idx / k)->value; }
3834 @1 { bitsize_int ((idx % k) * width); })))))))))