1 /* Match-and-simplify patterns for shared GENERIC and GIMPLE folding.
2 This file is consumed by genmatch which produces gimple-match.c
3 and generic-match.c from it.
5 Copyright (C) 2014-2018 Free Software Foundation, Inc.
6 Contributed by Richard Biener <rguenther@suse.de>
7 and Prathamesh Kulkarni <bilbotheelffriend@gmail.com>
9 This file is part of GCC.
11 GCC is free software; you can redistribute it and/or modify it under
12 the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
16 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
17 WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.
21 You should have received a copy of the GNU General Public License
22 along with GCC; see the file COPYING3. If not see
23 <http://www.gnu.org/licenses/>. */
26 /* Generic tree predicates we inherit. */
28 integer_onep integer_zerop integer_all_onesp integer_minus_onep
29 integer_each_onep integer_truep integer_nonzerop
30 real_zerop real_onep real_minus_onep
33 tree_expr_nonnegative_p
40 (define_operator_list tcc_comparison
41 lt le eq ne ge gt unordered ordered unlt unle ungt unge uneq ltgt)
42 (define_operator_list inverted_tcc_comparison
43 ge gt ne eq lt le ordered unordered ge gt le lt ltgt uneq)
44 (define_operator_list inverted_tcc_comparison_with_nans
45 unge ungt ne eq unlt unle ordered unordered ge gt le lt ltgt uneq)
46 (define_operator_list swapped_tcc_comparison
47 gt ge eq ne le lt unordered ordered ungt unge unlt unle uneq ltgt)
48 (define_operator_list simple_comparison lt le eq ne ge gt)
49 (define_operator_list swapped_simple_comparison gt ge eq ne le lt)
51 #include "cfn-operators.pd"
53 /* Define operand lists for math rounding functions {,i,l,ll}FN,
54 where the versions prefixed with "i" return an int, those prefixed with
55 "l" return a long and those prefixed with "ll" return a long long.
57 Also define operand lists:
59 X<FN>F for all float functions, in the order i, l, ll
60 X<FN> for all double functions, in the same order
61 X<FN>L for all long double functions, in the same order. */
#define DEFINE_INT_AND_FLOAT_ROUND_FN(FN) \
  (define_operator_list X##FN##F BUILT_IN_I##FN##F \
				 BUILT_IN_L##FN##F BUILT_IN_LL##FN##F) \
  (define_operator_list X##FN BUILT_IN_I##FN \
			      BUILT_IN_L##FN BUILT_IN_LL##FN) \
  (define_operator_list X##FN##L BUILT_IN_I##FN##L \
				 BUILT_IN_L##FN##L BUILT_IN_LL##FN##L)
73 DEFINE_INT_AND_FLOAT_ROUND_FN (FLOOR)
74 DEFINE_INT_AND_FLOAT_ROUND_FN (CEIL)
75 DEFINE_INT_AND_FLOAT_ROUND_FN (ROUND)
76 DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
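/* For instance, DEFINE_INT_AND_FLOAT_ROUND_FN (FLOOR) is meant to expand
   to roughly

     (define_operator_list XFLOORF BUILT_IN_IFLOORF BUILT_IN_LFLOORF BUILT_IN_LLFLOORF)
     (define_operator_list XFLOOR  BUILT_IN_IFLOOR  BUILT_IN_LFLOOR  BUILT_IN_LLFLOOR)
     (define_operator_list XFLOORL BUILT_IN_IFLOORL BUILT_IN_LFLOORL BUILT_IN_LLFLOORL)

   i.e. one operator list per floating-point argument type, each ordered
   i, l, ll.  */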
78 /* Binary operations and their associated IFN_COND_* function. */
(define_operator_list UNCOND_BINARY
  plus minus
  mult trunc_div trunc_mod rdiv
  min max
  bit_and bit_ior bit_xor)
84 (define_operator_list COND_BINARY
85 IFN_COND_ADD IFN_COND_SUB
86 IFN_COND_MUL IFN_COND_DIV IFN_COND_MOD IFN_COND_RDIV
87 IFN_COND_MIN IFN_COND_MAX
88 IFN_COND_AND IFN_COND_IOR IFN_COND_XOR)
90 /* Same for ternary operations. */
91 (define_operator_list UNCOND_TERNARY
92 IFN_FMA IFN_FMS IFN_FNMA IFN_FNMS)
93 (define_operator_list COND_TERNARY
94 IFN_COND_FMA IFN_COND_FMS IFN_COND_FNMA IFN_COND_FNMS)
96 /* As opposed to convert?, this still creates a single pattern, so
97 it is not a suitable replacement for convert? in all cases. */
(match (nop_convert @0)
 (convert @0)
100 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))))
(match (nop_convert @0)
 (view_convert @0)
103 (if (VECTOR_TYPE_P (type) && VECTOR_TYPE_P (TREE_TYPE (@0))
104 && known_eq (TYPE_VECTOR_SUBPARTS (type),
105 TYPE_VECTOR_SUBPARTS (TREE_TYPE (@0)))
106 && tree_nop_conversion_p (TREE_TYPE (type), TREE_TYPE (TREE_TYPE (@0))))))
107 /* This one has to be last, or it shadows the others. */
(match (nop_convert @0)
 @0)
/* Transform the likes of (char) ABS_EXPR <(int) x> into (char) ABSU_EXPR <x>.
   ABSU_EXPR returns the unsigned absolute value of its operand, and the
   operand of the ABSU_EXPR has the corresponding signed type.  */
114 (simplify (abs (convert @0))
115 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
116 && !TYPE_UNSIGNED (TREE_TYPE (@0))
117 && element_precision (type) > element_precision (TREE_TYPE (@0)))
118 (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
119 (convert (absu:utype @0)))))
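/* For example: with a signed char operand x first widened to int, the fold
   above rewrites ABS_EXPR <(int) x> as (int) ABSU_EXPR <x>; e.g. x == -128
   gives 128 either way, with the absolute value now computed in
   unsigned char.  */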
122 /* Simplifications of operations with one constant operand and
123 simplifications to constants or single values. */
125 (for op (plus pointer_plus minus bit_ior bit_xor)
127 (op @0 integer_zerop)
130 /* 0 +p index -> (type)index */
132 (pointer_plus integer_zerop @1)
133 (non_lvalue (convert @1)))
135 /* ptr - 0 -> (type)ptr */
137 (pointer_diff @0 integer_zerop)
140 /* See if ARG1 is zero and X + ARG1 reduces to X.
141 Likewise if the operands are reversed. */
143 (plus:c @0 real_zerop@1)
144 (if (fold_real_zero_addition_p (type, @1, 0))
147 /* See if ARG1 is zero and X - ARG1 reduces to X. */
149 (minus @0 real_zerop@1)
150 (if (fold_real_zero_addition_p (type, @1, 1))
/* Simplify x - x.
   This is unsafe for certain floats even in non-IEEE formats.
155 In IEEE, it is unsafe because it does wrong for NaNs.
   Also note that operand_equal_p is always false if an operand
   is volatile.  */
(simplify
 (minus @0 @0)
160 (if (!FLOAT_TYPE_P (type) || !HONOR_NANS (type))
161 { build_zero_cst (type); }))
163 (pointer_diff @@0 @0)
164 { build_zero_cst (type); })
167 (mult @0 integer_zerop@1)
170 /* Maybe fold x * 0 to 0. The expressions aren't the same
171 when x is NaN, since x * 0 is also NaN. Nor are they the
172 same in modes with signed zeros, since multiplying a
173 negative value by 0 gives -0, not +0. */
175 (mult @0 real_zerop@1)
176 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
179 /* In IEEE floating point, x*1 is not equivalent to x for snans.
180 Likewise for complex arithmetic with signed zeros. */
183 (if (!HONOR_SNANS (type)
184 && (!HONOR_SIGNED_ZEROS (type)
185 || !COMPLEX_FLOAT_TYPE_P (type)))
188 /* Transform x * -1.0 into -x. */
190 (mult @0 real_minus_onep)
191 (if (!HONOR_SNANS (type)
192 && (!HONOR_SIGNED_ZEROS (type)
193 || !COMPLEX_FLOAT_TYPE_P (type)))
196 (for cmp (gt ge lt le)
197 outp (convert convert negate negate)
198 outn (negate negate convert convert)
199 /* Transform (X > 0.0 ? 1.0 : -1.0) into copysign(1, X). */
200 /* Transform (X >= 0.0 ? 1.0 : -1.0) into copysign(1, X). */
201 /* Transform (X < 0.0 ? 1.0 : -1.0) into copysign(1,-X). */
202 /* Transform (X <= 0.0 ? 1.0 : -1.0) into copysign(1,-X). */
204 (cond (cmp @0 real_zerop) real_onep@1 real_minus_onep)
205 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type)
206 && types_match (type, TREE_TYPE (@0)))
208 (if (types_match (type, float_type_node))
209 (BUILT_IN_COPYSIGNF @1 (outp @0)))
210 (if (types_match (type, double_type_node))
211 (BUILT_IN_COPYSIGN @1 (outp @0)))
212 (if (types_match (type, long_double_type_node))
213 (BUILT_IN_COPYSIGNL @1 (outp @0))))))
214 /* Transform (X > 0.0 ? -1.0 : 1.0) into copysign(1,-X). */
215 /* Transform (X >= 0.0 ? -1.0 : 1.0) into copysign(1,-X). */
216 /* Transform (X < 0.0 ? -1.0 : 1.0) into copysign(1,X). */
217 /* Transform (X <= 0.0 ? -1.0 : 1.0) into copysign(1,X). */
219 (cond (cmp @0 real_zerop) real_minus_onep real_onep@1)
220 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type)
221 && types_match (type, TREE_TYPE (@0)))
223 (if (types_match (type, float_type_node))
224 (BUILT_IN_COPYSIGNF @1 (outn @0)))
225 (if (types_match (type, double_type_node))
226 (BUILT_IN_COPYSIGN @1 (outn @0)))
227 (if (types_match (type, long_double_type_node))
228 (BUILT_IN_COPYSIGNL @1 (outn @0)))))))
230 /* Transform X * copysign (1.0, X) into abs(X). */
232 (mult:c @0 (COPYSIGN_ALL real_onep @0))
233 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
236 /* Transform X * copysign (1.0, -X) into -abs(X). */
238 (mult:c @0 (COPYSIGN_ALL real_onep (negate @0)))
239 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
242 /* Transform copysign (CST, X) into copysign (ABS(CST), X). */
244 (COPYSIGN_ALL REAL_CST@0 @1)
245 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@0)))
246 (COPYSIGN_ALL (negate @0) @1)))
248 /* X * 1, X / 1 -> X. */
249 (for op (mult trunc_div ceil_div floor_div round_div exact_div)
254 /* (A / (1 << B)) -> (A >> B).
   Only for unsigned A.  For signed A, this would not preserve rounding
   toward zero.
257 For example: (-1 / ( 1 << B)) != -1 >> B. */
259 (trunc_div @0 (lshift integer_onep@1 @2))
260 (if ((TYPE_UNSIGNED (type) || tree_expr_nonnegative_p (@0))
261 && (!VECTOR_TYPE_P (type)
262 || target_supports_op_p (type, RSHIFT_EXPR, optab_vector)
263 || target_supports_op_p (type, RSHIFT_EXPR, optab_scalar)))
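/* Concretely, for unsigned a and B == 2:
     a / (1 << 2) == a / 4 == a >> 2      (e.g. 23 / 4 == 5 == 23 >> 2)
   whereas the signed counterexample above fails:
     -1 / 4 == 0   but   -1 >> 2 == -1.  */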
266 /* Preserve explicit divisions by 0: the C++ front-end wants to detect
267 undefined behavior in constexpr evaluation, and assuming that the division
268 traps enables better optimizations than these anyway. */
269 (for div (trunc_div ceil_div floor_div round_div exact_div)
270 /* 0 / X is always zero. */
272 (div integer_zerop@0 @1)
273 /* But not for 0 / 0 so that we can get the proper warnings and errors. */
274 (if (!integer_zerop (@1))
278 (div @0 integer_minus_onep@1)
279 (if (!TYPE_UNSIGNED (type))
284 /* But not for 0 / 0 so that we can get the proper warnings and errors.
285 And not for _Fract types where we can't build 1. */
286 (if (!integer_zerop (@0) && !ALL_FRACT_MODE_P (TYPE_MODE (type)))
287 { build_one_cst (type); }))
288 /* X / abs (X) is X < 0 ? -1 : 1. */
291 (if (INTEGRAL_TYPE_P (type)
292 && TYPE_OVERFLOW_UNDEFINED (type))
293 (cond (lt @0 { build_zero_cst (type); })
294 { build_minus_one_cst (type); } { build_one_cst (type); })))
297 (div:C @0 (negate @0))
298 (if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
299 && TYPE_OVERFLOW_UNDEFINED (type))
300 { build_minus_one_cst (type); })))
302 /* For unsigned integral types, FLOOR_DIV_EXPR is the same as
303 TRUNC_DIV_EXPR. Rewrite into the latter in this case. */
306 (if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
307 && TYPE_UNSIGNED (type))
310 /* Combine two successive divisions. Note that combining ceil_div
311 and floor_div is trickier and combining round_div even more so. */
312 (for div (trunc_div exact_div)
314 (div (div @0 INTEGER_CST@1) INTEGER_CST@2)
316 wi::overflow_type overflow;
317 wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
318 TYPE_SIGN (type), &overflow);
321 (div @0 { wide_int_to_tree (type, mul); })
322 (if (TYPE_UNSIGNED (type)
323 || mul != wi::min_value (TYPE_PRECISION (type), SIGNED))
324 { build_zero_cst (type); })))))
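/* E.g. (x / 3) / 5 becomes x / 15.  If the product overflows the type,
   the quotient is known to be zero: for 16-bit short x, (x / 200) / 300
   is always 0 because x / 200 lies in [-163, 163], and that is what the
   overflow branch above produces.  */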
326 /* Combine successive multiplications. Similar to above, but handling
327 overflow is different. */
329 (mult (mult @0 INTEGER_CST@1) INTEGER_CST@2)
331 wi::overflow_type overflow;
332 wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
333 TYPE_SIGN (type), &overflow);
335 /* Skip folding on overflow: the only special case is @1 * @2 == -INT_MIN,
336 otherwise undefined overflow implies that @0 must be zero. */
337 (if (!overflow || TYPE_OVERFLOW_WRAPS (type))
338 (mult @0 { wide_int_to_tree (type, mul); }))))
340 /* Optimize A / A to 1.0 if we don't care about
341 NaNs or Infinities. */
344 (if (FLOAT_TYPE_P (type)
345 && ! HONOR_NANS (type)
346 && ! HONOR_INFINITIES (type))
347 { build_one_cst (type); }))
349 /* Optimize -A / A to -1.0 if we don't care about
350 NaNs or Infinities. */
352 (rdiv:C @0 (negate @0))
353 (if (FLOAT_TYPE_P (type)
354 && ! HONOR_NANS (type)
355 && ! HONOR_INFINITIES (type))
356 { build_minus_one_cst (type); }))
358 /* PR71078: x / abs(x) -> copysign (1.0, x) */
360 (rdiv:C (convert? @0) (convert? (abs @0)))
361 (if (SCALAR_FLOAT_TYPE_P (type)
362 && ! HONOR_NANS (type)
363 && ! HONOR_INFINITIES (type))
365 (if (types_match (type, float_type_node))
366 (BUILT_IN_COPYSIGNF { build_one_cst (type); } (convert @0)))
367 (if (types_match (type, double_type_node))
368 (BUILT_IN_COPYSIGN { build_one_cst (type); } (convert @0)))
369 (if (types_match (type, long_double_type_node))
370 (BUILT_IN_COPYSIGNL { build_one_cst (type); } (convert @0))))))
372 /* In IEEE floating point, x/1 is not equivalent to x for snans. */
375 (if (!HONOR_SNANS (type))
378 /* In IEEE floating point, x/-1 is not equivalent to -x for snans. */
380 (rdiv @0 real_minus_onep)
381 (if (!HONOR_SNANS (type))
384 (if (flag_reciprocal_math)
385 /* Convert (A/B)/C to A/(B*C). */
387 (rdiv (rdiv:s @0 @1) @2)
388 (rdiv @0 (mult @1 @2)))
390 /* Canonicalize x / (C1 * y) to (x * C2) / y. */
392 (rdiv @0 (mult:s @1 REAL_CST@2))
394 { tree tem = const_binop (RDIV_EXPR, type, build_one_cst (type), @2); }
396 (rdiv (mult @0 { tem; } ) @1))))
398 /* Convert A/(B/C) to (A/B)*C */
400 (rdiv @0 (rdiv:s @1 @2))
401 (mult (rdiv @0 @1) @2)))
403 /* Simplify x / (- y) to -x / y. */
405 (rdiv @0 (negate @1))
406 (rdiv (negate @0) @1))
408 (if (flag_unsafe_math_optimizations)
409 /* Simplify (C / x op 0.0) to x op 0.0 for C != 0, C != Inf/Nan.
410 Since C / x may underflow to zero, do this only for unsafe math. */
411 (for op (lt le gt ge)
414 (op (rdiv REAL_CST@0 @1) real_zerop@2)
415 (if (!HONOR_SIGNED_ZEROS (@1) && !HONOR_INFINITIES (@1))
417 (if (real_less (&dconst0, TREE_REAL_CST_PTR (@0)))
419 /* For C < 0, use the inverted operator. */
420 (if (real_less (TREE_REAL_CST_PTR (@0), &dconst0))
423 /* Optimize (X & (-A)) / A where A is a power of 2, to X >> log2(A) */
424 (for div (trunc_div ceil_div floor_div round_div exact_div)
426 (div (convert? (bit_and @0 INTEGER_CST@1)) INTEGER_CST@2)
427 (if (integer_pow2p (@2)
428 && tree_int_cst_sgn (@2) > 0
429 && tree_nop_conversion_p (type, TREE_TYPE (@0))
430 && wi::to_wide (@2) + wi::to_wide (@1) == 0)
   (rshift (convert @0)
	   { build_int_cst (integer_type_node,
433 wi::exact_log2 (wi::to_wide (@2))); }))))
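/* For instance, with A == 8: (x & -8) / 8 becomes x >> 3, because
   x & -8 equals 8 * (x >> 3), so the division is exact.  */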
435 /* If ARG1 is a constant, we can convert this to a multiply by the
436 reciprocal. This does not have the same rounding properties,
437 so only do this if -freciprocal-math. We can actually
438 always safely do it if ARG1 is a power of two, but it's hard to
439 tell if it is or not in a portable manner. */
440 (for cst (REAL_CST COMPLEX_CST VECTOR_CST)
444 (if (flag_reciprocal_math
447 { tree tem = const_binop (RDIV_EXPR, type, build_one_cst (type), @1); }
449 (mult @0 { tem; } )))
450 (if (cst != COMPLEX_CST)
451 (with { tree inverse = exact_inverse (type, @1); }
453 (mult @0 { inverse; } ))))))))
455 (for mod (ceil_mod floor_mod round_mod trunc_mod)
456 /* 0 % X is always zero. */
458 (mod integer_zerop@0 @1)
459 /* But not for 0 % 0 so that we can get the proper warnings and errors. */
460 (if (!integer_zerop (@1))
462 /* X % 1 is always zero. */
464 (mod @0 integer_onep)
465 { build_zero_cst (type); })
466 /* X % -1 is zero. */
468 (mod @0 integer_minus_onep@1)
469 (if (!TYPE_UNSIGNED (type))
470 { build_zero_cst (type); }))
474 /* But not for 0 % 0 so that we can get the proper warnings and errors. */
475 (if (!integer_zerop (@0))
476 { build_zero_cst (type); }))
477 /* (X % Y) % Y is just X % Y. */
479 (mod (mod@2 @0 @1) @1)
481 /* From extract_muldiv_1: (X * C1) % C2 is zero if C1 is a multiple of C2. */
483 (mod (mult @0 INTEGER_CST@1) INTEGER_CST@2)
484 (if (ANY_INTEGRAL_TYPE_P (type)
485 && TYPE_OVERFLOW_UNDEFINED (type)
      && wi::multiple_of_p (wi::to_wide (@1), wi::to_wide (@2),
			    TYPE_SIGN (type)))
488 { build_zero_cst (type); }))
489 /* For (X % C) == 0, if X is signed and C is power of 2, use unsigned
490 modulo and comparison, since it is simpler and equivalent. */
493 (cmp (mod @0 integer_pow2p@2) integer_zerop@1)
494 (if (!TYPE_UNSIGNED (TREE_TYPE (@0)))
495 (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
496 (cmp (mod (convert:utype @0) (convert:utype @2)) (convert:utype @1)))))))
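/* E.g. for signed int x, (x % 4) == 0 becomes ((unsigned) x % 4u) == 0u.
   This is valid because a remainder by a power of two is zero exactly when
   the low bits are zero, regardless of sign: x == -6 gives -6 % 4 == -2
   and 4294967290u % 4u == 2u, both nonzero.  */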
498 /* X % -C is the same as X % C. */
500 (trunc_mod @0 INTEGER_CST@1)
501 (if (TYPE_SIGN (type) == SIGNED
502 && !TREE_OVERFLOW (@1)
503 && wi::neg_p (wi::to_wide (@1))
504 && !TYPE_OVERFLOW_TRAPS (type)
505 /* Avoid this transformation if C is INT_MIN, i.e. C == -C. */
506 && !sign_bit_p (@1, @1))
507 (trunc_mod @0 (negate @1))))
509 /* X % -Y is the same as X % Y. */
511 (trunc_mod @0 (convert? (negate @1)))
512 (if (INTEGRAL_TYPE_P (type)
513 && !TYPE_UNSIGNED (type)
514 && !TYPE_OVERFLOW_TRAPS (type)
515 && tree_nop_conversion_p (type, TREE_TYPE (@1))
516 /* Avoid this transformation if X might be INT_MIN or
517 Y might be -1, because we would then change valid
518 INT_MIN % -(-1) into invalid INT_MIN % -1. */
519 && (expr_not_equal_to (@0, wi::to_wide (TYPE_MIN_VALUE (type)))
       || expr_not_equal_to (@1, wi::minus_one (TYPE_PRECISION
						 (TREE_TYPE (@1))))))
522 (trunc_mod @0 (convert @1))))
524 /* X - (X / Y) * Y is the same as X % Y. */
526 (minus (convert1? @0) (convert2? (mult:c (trunc_div @@0 @@1) @1)))
527 (if (INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
528 (convert (trunc_mod @0 @1))))
530 /* Optimize TRUNC_MOD_EXPR by a power of two into a BIT_AND_EXPR,
531 i.e. "X % C" into "X & (C - 1)", if X and C are positive.
532 Also optimize A % (C << N) where C is a power of 2,
533 to A & ((C << N) - 1). */
(match (power_of_two_cand @1)
 INTEGER_CST@1)
536 (match (power_of_two_cand @1)
537 (lshift INTEGER_CST@1 @2))
538 (for mod (trunc_mod floor_mod)
540 (mod @0 (convert?@3 (power_of_two_cand@1 @2)))
541 (if ((TYPE_UNSIGNED (type)
542 || tree_expr_nonnegative_p (@0))
543 && tree_nop_conversion_p (type, TREE_TYPE (@3))
544 && integer_pow2p (@2) && tree_int_cst_sgn (@2) > 0)
545 (bit_and @0 (convert (minus @1 { build_int_cst (TREE_TYPE (@1), 1); }))))))
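/* As concrete instances of the two power_of_two_cand forms, for unsigned x:
     x % 16         becomes  x & 15
     x % (4u << n)  becomes  x & ((4u << n) - 1).  */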
547 /* Simplify (unsigned t * 2)/2 -> unsigned t & 0x7FFFFFFF. */
549 (trunc_div (mult @0 integer_pow2p@1) @1)
550 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
551 (bit_and @0 { wide_int_to_tree
552 (type, wi::mask (TYPE_PRECISION (type)
553 - wi::exact_log2 (wi::to_wide (@1)),
554 false, TYPE_PRECISION (type))); })))
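/* E.g. for 32-bit unsigned t, (t * 8) / 8 becomes t & 0x1fffffff: the
   multiplication may have discarded the top three bits, so only the low
   29 bits of t are recovered.  For instance t == 0x30000001 gives
   (t * 8) / 8 == 0x10000001 == t & 0x1fffffff.  */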
556 /* Simplify (unsigned t / 2) * 2 -> unsigned t & ~1. */
558 (mult (trunc_div @0 integer_pow2p@1) @1)
559 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
560 (bit_and @0 (negate @1))))
/* Simplify (t * 2) / 2 -> t.  */
563 (for div (trunc_div ceil_div floor_div round_div exact_div)
565 (div (mult:c @0 @1) @1)
566 (if (ANY_INTEGRAL_TYPE_P (type)
567 && TYPE_OVERFLOW_UNDEFINED (type))
571 /* Simplify cos(-x) and cos(|x|) -> cos(x). Similarly for cosh. */
576 /* Simplify pow(-x, y) and pow(|x|,y) -> pow(x,y) if y is an even integer. */
579 (pows (op @0) REAL_CST@1)
580 (with { HOST_WIDE_INT n; }
581 (if (real_isinteger (&TREE_REAL_CST (@1), &n) && (n & 1) == 0)
583 /* Likewise for powi. */
586 (pows (op @0) INTEGER_CST@1)
587 (if ((wi::to_wide (@1) & 1) == 0)
589 /* Strip negate and abs from both operands of hypot. */
597 /* copysign(-x, y) and copysign(abs(x), y) -> copysign(x, y). */
598 (for copysigns (COPYSIGN_ALL)
600 (copysigns (op @0) @1)
603 /* abs(x)*abs(x) -> x*x. Should be valid for all types. */
608 /* Convert absu(x)*absu(x) -> x*x. */
610 (mult (absu@1 @0) @1)
611 (mult (convert@2 @0) @2))
613 /* cos(copysign(x, y)) -> cos(x). Similarly for cosh. */
617 (coss (copysigns @0 @1))
620 /* pow(copysign(x, y), z) -> pow(x, z) if z is an even integer. */
624 (pows (copysigns @0 @2) REAL_CST@1)
625 (with { HOST_WIDE_INT n; }
626 (if (real_isinteger (&TREE_REAL_CST (@1), &n) && (n & 1) == 0)
628 /* Likewise for powi. */
632 (pows (copysigns @0 @2) INTEGER_CST@1)
633 (if ((wi::to_wide (@1) & 1) == 0)
638 /* hypot(copysign(x, y), z) -> hypot(x, z). */
640 (hypots (copysigns @0 @1) @2)
642 /* hypot(x, copysign(y, z)) -> hypot(x, y). */
644 (hypots @0 (copysigns @1 @2))
647 /* copysign(x, CST) -> [-]abs (x). */
648 (for copysigns (COPYSIGN_ALL)
650 (copysigns @0 REAL_CST@1)
651 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
655 /* copysign(copysign(x, y), z) -> copysign(x, z). */
656 (for copysigns (COPYSIGN_ALL)
658 (copysigns (copysigns @0 @1) @2)
661 /* copysign(x,y)*copysign(x,y) -> x*x. */
662 (for copysigns (COPYSIGN_ALL)
664 (mult (copysigns@2 @0 @1) @2)
667 /* ccos(-x) -> ccos(x). Similarly for ccosh. */
668 (for ccoss (CCOS CCOSH)
/* cabs(-x) and cabs(conj(x)) -> cabs(x).  */
674 (for ops (conj negate)
680 /* Fold (a * (1 << b)) into (a << b) */
682 (mult:c @0 (convert? (lshift integer_onep@1 @2)))
683 (if (! FLOAT_TYPE_P (type)
684 && tree_nop_conversion_p (type, TREE_TYPE (@1)))
687 /* Fold (1 << (C - x)) where C = precision(type) - 1
688 into ((1 << C) >> x). */
690 (lshift integer_onep@0 (minus@1 INTEGER_CST@2 @3))
691 (if (INTEGRAL_TYPE_P (type)
692 && wi::eq_p (wi::to_wide (@2), TYPE_PRECISION (type) - 1)
694 (if (TYPE_UNSIGNED (type))
695 (rshift (lshift @0 @2) @3)
697 { tree utype = unsigned_type_for (type); }
698 (convert (rshift (lshift (convert:utype @0) @2) @3))))))
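/* For instance, with 32-bit int: 1 << (31 - x) becomes
   (int) ((1u << 31) >> x); e.g. x == 3 gives 1 << 28 == 0x10000000
   == 0x80000000u >> 3.  The unsigned temporary keeps both shifts
   well-defined and logical.  */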
700 /* Fold (C1/X)*C2 into (C1*C2)/X. */
702 (mult (rdiv@3 REAL_CST@0 @1) REAL_CST@2)
703 (if (flag_associative_math
706 { tree tem = const_binop (MULT_EXPR, type, @0, @2); }
708 (rdiv { tem; } @1)))))
710 /* Simplify ~X & X as zero. */
712 (bit_and:c (convert? @0) (convert? (bit_not @0)))
713 { build_zero_cst (type); })
715 /* PR71636: Transform x & ((1U << b) - 1) -> x & ~(~0U << b); */
717 (bit_and:c @0 (plus:s (lshift:s integer_onep @1) integer_minus_onep))
718 (if (TYPE_UNSIGNED (type))
719 (bit_and @0 (bit_not (lshift { build_all_ones_cst (type); } @1)))))
(for bitop (bit_and bit_ior)
     cmp (eq ne)
723 /* PR35691: Transform
724 (x == 0 & y == 0) -> (x | typeof(x)(y)) == 0.
725 (x != 0 | y != 0) -> (x | typeof(x)(y)) != 0. */
727 (bitop (cmp @0 integer_zerop@2) (cmp @1 integer_zerop))
728 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
729 && INTEGRAL_TYPE_P (TREE_TYPE (@1))
730 && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
731 (cmp (bit_ior @0 (convert @1)) @2)))
 /* Transform:
    (x == -1 & y == -1) -> (x & typeof(x)(y)) == -1.
734 (x != -1 | y != -1) -> (x & typeof(x)(y)) != -1. */
736 (bitop (cmp @0 integer_all_onesp@2) (cmp @1 integer_all_onesp))
737 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
738 && INTEGRAL_TYPE_P (TREE_TYPE (@1))
739 && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
740 (cmp (bit_and @0 (convert @1)) @2))))
742 /* Fold (A & ~B) - (A & B) into (A ^ B) - B. */
744 (minus (bit_and:cs @0 (bit_not @1)) (bit_and:cs @0 @1))
745 (minus (bit_xor @0 @1) @1))
747 (minus (bit_and:s @0 INTEGER_CST@2) (bit_and:s @0 INTEGER_CST@1))
748 (if (~wi::to_wide (@2) == wi::to_wide (@1))
749 (minus (bit_xor @0 @1) @1)))
751 /* Fold (A & B) - (A & ~B) into B - (A ^ B). */
753 (minus (bit_and:cs @0 @1) (bit_and:cs @0 (bit_not @1)))
754 (minus @1 (bit_xor @0 @1)))
756 /* Simplify (X & ~Y) |^+ (~X & Y) -> X ^ Y. */
757 (for op (bit_ior bit_xor plus)
759 (op (bit_and:c @0 (bit_not @1)) (bit_and:c (bit_not @0) @1))
762 (op:c (bit_and @0 INTEGER_CST@2) (bit_and (bit_not @0) INTEGER_CST@1))
763 (if (~wi::to_wide (@2) == wi::to_wide (@1))
766 /* PR53979: Transform ((a ^ b) | a) -> (a | b) */
768 (bit_ior:c (bit_xor:c @0 @1) @0)
771 /* (a & ~b) | (a ^ b) --> a ^ b */
773 (bit_ior:c (bit_and:c @0 (bit_not @1)) (bit_xor:c@2 @0 @1))
776 /* (a & ~b) ^ ~a --> ~(a & b) */
778 (bit_xor:c (bit_and:cs @0 (bit_not @1)) (bit_not @0))
779 (bit_not (bit_and @0 @1)))
781 /* (a | b) & ~(a ^ b) --> a & b */
783 (bit_and:c (bit_ior @0 @1) (bit_not (bit_xor:c @0 @1)))
786 /* a | ~(a ^ b) --> a | ~b */
788 (bit_ior:c @0 (bit_not:s (bit_xor:c @0 @1)))
789 (bit_ior @0 (bit_not @1)))
791 /* (a | b) | (a &^ b) --> a | b */
792 (for op (bit_and bit_xor)
794 (bit_ior:c (bit_ior@2 @0 @1) (op:c @0 @1))
797 /* (a & b) | ~(a ^ b) --> ~(a ^ b) */
799 (bit_ior:c (bit_and:c @0 @1) (bit_not@2 (bit_xor @0 @1)))
802 /* ~(~a & b) --> a | ~b */
804 (bit_not (bit_and:cs (bit_not @0) @1))
805 (bit_ior @0 (bit_not @1)))
807 /* ~(~a | b) --> a & ~b */
809 (bit_not (bit_ior:cs (bit_not @0) @1))
810 (bit_and @0 (bit_not @1)))
812 /* Simplify (~X & Y) to X ^ Y if we know that (X & ~Y) is 0. */
815 (bit_and (bit_not SSA_NAME@0) INTEGER_CST@1)
816 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
817 && wi::bit_and_not (get_nonzero_bits (@0), wi::to_wide (@1)) == 0)
821 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
822 ((A & N) + B) & M -> (A + B) & M
823 Similarly if (N & M) == 0,
824 ((A | N) + B) & M -> (A + B) & M
825 and for - instead of + (or unary - instead of +)
826 and/or ^ instead of |.
827 If B is constant and (B & M) == 0, fold into A & M. */
829 (for bitop (bit_and bit_ior bit_xor)
831 (bit_and (op:s (bitop:s@0 @3 INTEGER_CST@4) @1) INTEGER_CST@2)
834 tree utype = fold_bit_and_mask (TREE_TYPE (@0), @2, op, @0, bitop,
835 @3, @4, @1, ERROR_MARK, NULL_TREE,
838 (convert (bit_and (op (convert:utype { pmop[0]; })
839 (convert:utype { pmop[1]; }))
840 (convert:utype @2))))))
842 (bit_and (op:s @0 (bitop:s@1 @3 INTEGER_CST@4)) INTEGER_CST@2)
845 tree utype = fold_bit_and_mask (TREE_TYPE (@0), @2, op, @0, ERROR_MARK,
846 NULL_TREE, NULL_TREE, @1, bitop, @3,
849 (convert (bit_and (op (convert:utype { pmop[0]; })
850 (convert:utype { pmop[1]; }))
851 (convert:utype @2)))))))
853 (bit_and (op:s @0 @1) INTEGER_CST@2)
856 tree utype = fold_bit_and_mask (TREE_TYPE (@0), @2, op, @0, ERROR_MARK,
857 NULL_TREE, NULL_TREE, @1, ERROR_MARK,
858 NULL_TREE, NULL_TREE, pmop); }
860 (convert (bit_and (op (convert:utype { pmop[0]; })
861 (convert:utype { pmop[1]; }))
862 (convert:utype @2)))))))
863 (for bitop (bit_and bit_ior bit_xor)
865 (bit_and (negate:s (bitop:s@0 @2 INTEGER_CST@3)) INTEGER_CST@1)
868 tree utype = fold_bit_and_mask (TREE_TYPE (@0), @1, NEGATE_EXPR, @0,
869 bitop, @2, @3, NULL_TREE, ERROR_MARK,
870 NULL_TREE, NULL_TREE, pmop); }
872 (convert (bit_and (negate (convert:utype { pmop[0]; }))
873 (convert:utype @1)))))))
875 /* X % Y is smaller than Y. */
878 (cmp (trunc_mod @0 @1) @1)
879 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
880 { constant_boolean_node (cmp == LT_EXPR, type); })))
883 (cmp @1 (trunc_mod @0 @1))
884 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
885 { constant_boolean_node (cmp == GT_EXPR, type); })))
889 (bit_ior @0 integer_all_onesp@1)
894 (bit_ior @0 integer_zerop)
899 (bit_and @0 integer_zerop@1)
905 (for op (bit_ior bit_xor plus)
907 (op:c (convert? @0) (convert? (bit_not @0)))
908 (convert { build_all_ones_cst (TREE_TYPE (@0)); })))
913 { build_zero_cst (type); })
915 /* Canonicalize X ^ ~0 to ~X. */
917 (bit_xor @0 integer_all_onesp@1)
922 (bit_and @0 integer_all_onesp)
925 /* x & x -> x, x | x -> x */
926 (for bitop (bit_and bit_ior)
931 /* x & C -> x if we know that x & ~C == 0. */
934 (bit_and SSA_NAME@0 INTEGER_CST@1)
935 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
936 && wi::bit_and_not (get_nonzero_bits (@0), wi::to_wide (@1)) == 0)
940 /* x + (x & 1) -> (x + 1) & ~1 */
942 (plus:c @0 (bit_and:s @0 integer_onep@1))
943 (bit_and (plus @0 @1) (bit_not @1)))
945 /* x & ~(x & y) -> x & ~y */
946 /* x | ~(x | y) -> x | ~y */
947 (for bitop (bit_and bit_ior)
949 (bitop:c @0 (bit_not (bitop:cs @0 @1)))
950 (bitop @0 (bit_not @1))))
952 /* (~x & y) | ~(x | y) -> ~x */
954 (bit_ior:c (bit_and:c (bit_not@2 @0) @1) (bit_not (bit_ior:c @0 @1)))
957 /* (x | y) ^ (x | ~y) -> ~x */
959 (bit_xor:c (bit_ior:c @0 @1) (bit_ior:c @0 (bit_not @1)))
962 /* (x & y) | ~(x | y) -> ~(x ^ y) */
964 (bit_ior:c (bit_and:s @0 @1) (bit_not:s (bit_ior:s @0 @1)))
965 (bit_not (bit_xor @0 @1)))
967 /* (~x | y) ^ (x ^ y) -> x | ~y */
969 (bit_xor:c (bit_ior:cs (bit_not @0) @1) (bit_xor:s @0 @1))
970 (bit_ior @0 (bit_not @1)))
972 /* (x ^ y) | ~(x | y) -> ~(x & y) */
974 (bit_ior:c (bit_xor:s @0 @1) (bit_not:s (bit_ior:s @0 @1)))
975 (bit_not (bit_and @0 @1)))
977 /* (x | y) & ~x -> y & ~x */
978 /* (x & y) | ~x -> y | ~x */
979 (for bitop (bit_and bit_ior)
980 rbitop (bit_ior bit_and)
982 (bitop:c (rbitop:c @0 @1) (bit_not@2 @0))
985 /* (x & y) ^ (x | y) -> x ^ y */
987 (bit_xor:c (bit_and @0 @1) (bit_ior @0 @1))
990 /* (x ^ y) ^ (x | y) -> x & y */
992 (bit_xor:c (bit_xor @0 @1) (bit_ior @0 @1))
995 /* (x & y) + (x ^ y) -> x | y */
996 /* (x & y) | (x ^ y) -> x | y */
997 /* (x & y) ^ (x ^ y) -> x | y */
998 (for op (plus bit_ior bit_xor)
1000 (op:c (bit_and @0 @1) (bit_xor @0 @1))
1003 /* (x & y) + (x | y) -> x + y */
1005 (plus:c (bit_and @0 @1) (bit_ior @0 @1))
1008 /* (x + y) - (x | y) -> x & y */
1010 (minus (plus @0 @1) (bit_ior @0 @1))
1011 (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type)
1012 && !TYPE_SATURATING (type))
1015 /* (x + y) - (x & y) -> x | y */
1017 (minus (plus @0 @1) (bit_and @0 @1))
1018 (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type)
1019 && !TYPE_SATURATING (type))
1022 /* (x | y) - (x ^ y) -> x & y */
1024 (minus (bit_ior @0 @1) (bit_xor @0 @1))
1027 /* (x | y) - (x & y) -> x ^ y */
1029 (minus (bit_ior @0 @1) (bit_and @0 @1))
1032 /* (x | y) & ~(x & y) -> x ^ y */
1034 (bit_and:c (bit_ior @0 @1) (bit_not (bit_and @0 @1)))
1037 /* (x | y) & (~x ^ y) -> x & y */
1039 (bit_and:c (bit_ior:c @0 @1) (bit_xor:c @1 (bit_not @0)))
1042 /* (~x | y) & (x | ~y) -> ~(x ^ y) */
1044 (bit_and (bit_ior:cs (bit_not @0) @1) (bit_ior:cs @0 (bit_not @1)))
1045 (bit_not (bit_xor @0 @1)))
1047 /* (~x | y) ^ (x | ~y) -> x ^ y */
1049 (bit_xor (bit_ior:c (bit_not @0) @1) (bit_ior:c @0 (bit_not @1)))
1052 /* ~x & ~y -> ~(x | y)
1053 ~x | ~y -> ~(x & y) */
1054 (for op (bit_and bit_ior)
1055 rop (bit_ior bit_and)
1057 (op (convert1? (bit_not @0)) (convert2? (bit_not @1)))
1058 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
1059 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
1060 (bit_not (rop (convert @0) (convert @1))))))
1062 /* If we are XORing or adding two BIT_AND_EXPR's, both of which are and'ing
1063 with a constant, and the two constants have no bits in common,
   we should treat this as a BIT_IOR_EXPR since this may produce more
   simplifications.  */
1066 (for op (bit_xor plus)
1068 (op (convert1? (bit_and@4 @0 INTEGER_CST@1))
1069 (convert2? (bit_and@5 @2 INTEGER_CST@3)))
1070 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
1071 && tree_nop_conversion_p (type, TREE_TYPE (@2))
1072 && (wi::to_wide (@1) & wi::to_wide (@3)) == 0)
1073 (bit_ior (convert @4) (convert @5)))))
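/* Concretely: (x & 0x0f) + (y & 0xf0) is rewritten as
   (x & 0x0f) | (y & 0xf0), since the masked values have no bits in common
   and the addition therefore never carries.  */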
/* (X | Y) ^ X -> Y & ~X.  */
1077 (bit_xor:c (convert1? (bit_ior:c @@0 @1)) (convert2? @0))
1078 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1079 (convert (bit_and @1 (bit_not @0)))))
1081 /* Convert ~X ^ ~Y to X ^ Y. */
1083 (bit_xor (convert1? (bit_not @0)) (convert2? (bit_not @1)))
1084 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
1085 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
1086 (bit_xor (convert @0) (convert @1))))
1088 /* Convert ~X ^ C to X ^ ~C. */
1090 (bit_xor (convert? (bit_not @0)) INTEGER_CST@1)
1091 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1092 (bit_xor (convert @0) (bit_not @1))))
1094 /* Fold (X & Y) ^ Y and (X ^ Y) & Y as ~X & Y. */
1095 (for opo (bit_and bit_xor)
1096 opi (bit_xor bit_and)
1098 (opo:c (opi:cs @0 @1) @1)
1099 (bit_and (bit_not @0) @1)))
1101 /* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both
1102 operands are another bit-wise operation with a common input. If so,
1103 distribute the bit operations to save an operation and possibly two if
1104 constants are involved. For example, convert
1105 (A | B) & (A | C) into A | (B & C)
1106 Further simplification will occur if B and C are constants. */
1107 (for op (bit_and bit_ior bit_xor)
1108 rop (bit_ior bit_and bit_and)
1110 (op (convert? (rop:c @@0 @1)) (convert? (rop:c @0 @2)))
1111 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
1112 && tree_nop_conversion_p (type, TREE_TYPE (@2)))
1113 (rop (convert @0) (op (convert @1) (convert @2))))))
1115 /* Some simple reassociation for bit operations, also handled in reassoc. */
1116 /* (X & Y) & Y -> X & Y
1117 (X | Y) | Y -> X | Y */
1118 (for op (bit_and bit_ior)
1120 (op:c (convert1?@2 (op:c @0 @@1)) (convert2? @1))
1122 /* (X ^ Y) ^ Y -> X */
1124 (bit_xor:c (convert1? (bit_xor:c @0 @@1)) (convert2? @1))
1126 /* (X & Y) & (X & Z) -> (X & Y) & Z
1127 (X | Y) | (X | Z) -> (X | Y) | Z */
1128 (for op (bit_and bit_ior)
1130 (op (convert1?@3 (op:c@4 @0 @1)) (convert2?@5 (op:c@6 @0 @2)))
1131 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
1132 && tree_nop_conversion_p (type, TREE_TYPE (@2)))
1133 (if (single_use (@5) && single_use (@6))
1134 (op @3 (convert @2))
1135 (if (single_use (@3) && single_use (@4))
1136 (op (convert @1) @5))))))
1137 /* (X ^ Y) ^ (X ^ Z) -> Y ^ Z */
1139 (bit_xor (convert1? (bit_xor:c @0 @1)) (convert2? (bit_xor:c @0 @2)))
1140 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
1141 && tree_nop_conversion_p (type, TREE_TYPE (@2)))
1142 (bit_xor (convert @1) (convert @2))))
1144 /* Convert abs (abs (X)) into abs (X).
   Also absu (absu (X)) into absu (X).  */
1151 (absu (convert@2 (absu@1 @0)))
1152 (if (tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@1)))
1155 /* Convert abs[u] (-X) -> abs[u] (X). */
1164 /* Convert abs[u] (X) where X is nonnegative -> (X). */
1166 (abs tree_expr_nonnegative_p@0)
1170 (absu tree_expr_nonnegative_p@0)
1173 /* A few cases of fold-const.c negate_expr_p predicate. */
1174 (match negate_expr_p
1176 (if ((INTEGRAL_TYPE_P (type)
1177 && TYPE_UNSIGNED (type))
1178 || (!TYPE_OVERFLOW_SANITIZED (type)
1179 && may_negate_without_overflow_p (t)))))
1180 (match negate_expr_p
1182 (match negate_expr_p
1184 (if (!TYPE_OVERFLOW_SANITIZED (type))))
1185 (match negate_expr_p
1187 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (t)))))
/* VECTOR_CST handling of non-wrapping types would recurse in unsupported
   ways.  */
1190 (match negate_expr_p
1192 (if (FLOAT_TYPE_P (TREE_TYPE (type)) || TYPE_OVERFLOW_WRAPS (type))))
1193 (match negate_expr_p
1195 (if ((ANY_INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_WRAPS (type))
1196 || (FLOAT_TYPE_P (type)
1197 && !HONOR_SIGN_DEPENDENT_ROUNDING (type)
1198 && !HONOR_SIGNED_ZEROS (type)))))
1200 /* (-A) * (-B) -> A * B */
1202 (mult:c (convert1? (negate @0)) (convert2? negate_expr_p@1))
1203 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
1204 && tree_nop_conversion_p (type, TREE_TYPE (@1)))
1205 (mult (convert @0) (convert (negate @1)))))
1207 /* -(A + B) -> (-B) - A. */
1209 (negate (plus:c @0 negate_expr_p@1))
1210 (if (!HONOR_SIGN_DEPENDENT_ROUNDING (element_mode (type))
1211 && !HONOR_SIGNED_ZEROS (element_mode (type)))
1212 (minus (negate @1) @0)))
1214 /* -(A - B) -> B - A. */
1216 (negate (minus @0 @1))
1217 (if ((ANY_INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_SANITIZED (type))
1218 || (FLOAT_TYPE_P (type)
1219 && !HONOR_SIGN_DEPENDENT_ROUNDING (type)
1220 && !HONOR_SIGNED_ZEROS (type)))
1223 (negate (pointer_diff @0 @1))
1224 (if (TYPE_OVERFLOW_UNDEFINED (type))
1225 (pointer_diff @1 @0)))
1227 /* A - B -> A + (-B) if B is easily negatable. */
1229 (minus @0 negate_expr_p@1)
1230 (if (!FIXED_POINT_TYPE_P (type))
1231 (plus @0 (negate @1))))
1233 /* Try to fold (type) X op CST -> (type) (X op ((type-x) CST))
1235 For bitwise binary operations apply operand conversions to the
1236 binary operation result instead of to the operands. This allows
1237 to combine successive conversions and bitwise binary operations.
1238 We combine the above two cases by using a conditional convert. */
1239 (for bitop (bit_and bit_ior bit_xor)
1241 (bitop (convert @0) (convert? @1))
1242 (if (((TREE_CODE (@1) == INTEGER_CST
1243 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
1244 && int_fits_type_p (@1, TREE_TYPE (@0)))
1245 || types_match (@0, @1))
1246 /* ??? This transform conflicts with fold-const.c doing
	 Convert (T)(x & c) into (T)x & (T)c, if c is an integer
	 constant (if x has signed type, the sign bit cannot be set
1249 in c). This folds extension into the BIT_AND_EXPR.
1250 Restrict it to GIMPLE to avoid endless recursions. */
1251 && (bitop != BIT_AND_EXPR || GIMPLE)
1252 && (/* That's a good idea if the conversion widens the operand, thus
1253 after hoisting the conversion the operation will be narrower. */
1254 TYPE_PRECISION (TREE_TYPE (@0)) < TYPE_PRECISION (type)
	   /* It's also a good idea if the conversion is to a non-integer
	      mode.  */
1257 || GET_MODE_CLASS (TYPE_MODE (type)) != MODE_INT
	   /* Or if the precision of TO is not the same as the precision
	      of its mode.  */
1260 || !type_has_mode_precision_p (type)))
1261 (convert (bitop @0 (convert @1))))))
1263 (for bitop (bit_and bit_ior)
1264 rbitop (bit_ior bit_and)
1265 /* (x | y) & x -> x */
1266 /* (x & y) | x -> x */
1268 (bitop:c (rbitop:c @0 @1) @0)
1270 /* (~x | y) & x -> x & y */
1271 /* (~x & y) | x -> x | y */
1273 (bitop:c (rbitop:c (bit_not @0) @1) @0)
1276 /* (x | CST1) & CST2 -> (x & CST2) | (CST1 & CST2) */
1278 (bit_and (bit_ior @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
1279 (bit_ior (bit_and @0 @2) (bit_and @1 @2)))
1281 /* Combine successive equal operations with constants. */
1282 (for bitop (bit_and bit_ior bit_xor)
1284 (bitop (bitop @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
1285 (if (!CONSTANT_CLASS_P (@0))
1286 /* This is the canonical form regardless of whether (bitop @1 @2) can be
1287 folded to a constant. */
1288 (bitop @0 (bitop @1 @2))
1289 /* In this case we have three constants and (bitop @0 @1) doesn't fold
1290 to a constant. This can happen if @0 or @1 is a POLY_INT_CST and if
1291 the values involved are such that the operation can't be decided at
1292 compile time. Try folding one of @0 or @1 with @2 to see whether
1293 that combination can be decided at compile time.
      Keep the existing form if both folds fail, to avoid endless
      oscillation.  */
1297 (with { tree cst1 = const_binop (bitop, type, @0, @2); }
1299 (bitop @1 { cst1; })
1300 (with { tree cst2 = const_binop (bitop, type, @1, @2); }
1302 (bitop @0 { cst2; }))))))))
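/* In the common case all three operands are simple constants, e.g.
     (x | 1) | 4        becomes  x | 5
     (x & 0xff) & 0x0f  becomes  x & 0x0f.  */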
1304 /* Try simple folding for X op !X, and X op X with the help
1305 of the truth_valued_p and logical_inverted_value predicates. */
1306 (match truth_valued_p
1308 (if (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) == 1)))
1309 (for op (tcc_comparison truth_and truth_andif truth_or truth_orif truth_xor)
1310 (match truth_valued_p
1312 (match truth_valued_p
1315 (match (logical_inverted_value @0)
1317 (match (logical_inverted_value @0)
1318 (bit_not truth_valued_p@0))
1319 (match (logical_inverted_value @0)
1320 (eq @0 integer_zerop))
1321 (match (logical_inverted_value @0)
1322 (ne truth_valued_p@0 integer_truep))
1323 (match (logical_inverted_value @0)
1324 (bit_xor truth_valued_p@0 integer_truep))
1328 (bit_and:c @0 (logical_inverted_value @0))
1329 { build_zero_cst (type); })
/* X | !X and X ^ !X -> 1, if X is truth-valued.  */
1331 (for op (bit_ior bit_xor)
1333 (op:c truth_valued_p@0 (logical_inverted_value @0))
1334 { constant_boolean_node (true, type); }))
1335 /* X ==/!= !X is false/true. */
1338 (op:c truth_valued_p@0 (logical_inverted_value @0))
1339 { constant_boolean_node (op == NE_EXPR ? true : false, type); }))
1343 (bit_not (bit_not @0))
1346 /* Convert ~ (-A) to A - 1. */
1348 (bit_not (convert? (negate @0)))
1349 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
1350 || !TYPE_UNSIGNED (TREE_TYPE (@0)))
1351 (convert (minus @0 { build_each_one_cst (TREE_TYPE (@0)); }))))
1353 /* Convert - (~A) to A + 1. */
1355 (negate (nop_convert (bit_not @0)))
1356 (plus (view_convert @0) { build_each_one_cst (type); }))
1358 /* Convert ~ (A - 1) or ~ (A + -1) to -A. */
1360 (bit_not (convert? (minus @0 integer_each_onep)))
1361 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
1362 || !TYPE_UNSIGNED (TREE_TYPE (@0)))
1363 (convert (negate @0))))
1365 (bit_not (convert? (plus @0 integer_all_onesp)))
1366 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
1367 || !TYPE_UNSIGNED (TREE_TYPE (@0)))
1368 (convert (negate @0))))
1370 /* Part of convert ~(X ^ Y) to ~X ^ Y or X ^ ~Y if ~X or ~Y simplify. */
1372 (bit_not (convert? (bit_xor @0 INTEGER_CST@1)))
1373 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1374 (convert (bit_xor @0 (bit_not @1)))))
1376 (bit_not (convert? (bit_xor:c (bit_not @0) @1)))
1377 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1378 (convert (bit_xor @0 @1))))
1380 /* Otherwise prefer ~(X ^ Y) to ~X ^ Y as more canonical. */
1382 (bit_xor:c (nop_convert:s (bit_not:s @0)) @1)
1383 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1384 (bit_not (bit_xor (view_convert @0) @1))))
1386 /* (x & ~m) | (y & m) -> ((x ^ y) & m) ^ x */
1388 (bit_ior:c (bit_and:cs @0 (bit_not @2)) (bit_and:cs @1 @2))
1389 (bit_xor (bit_and (bit_xor @0 @1) @2) @0))
1391 /* Fold A - (A & B) into ~B & A. */
1393 (minus (convert1? @0) (convert2?:s (bit_and:cs @@0 @1)))
1394 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
1395 && tree_nop_conversion_p (type, TREE_TYPE (@1)))
1396 (convert (bit_and (bit_not @1) @0))))
1398 /* (m1 CMP m2) * d -> (m1 CMP m2) ? d : 0 */
1399 (for cmp (gt lt ge le)
1401 (mult (convert (cmp @0 @1)) @2)
1402 (cond (cmp @0 @1) @2 { build_zero_cst (type); })))
1404 /* For integral types with undefined overflow and C != 0 fold
1405 x * C EQ/NE y * C into x EQ/NE y. */
1408 (cmp (mult:c @0 @1) (mult:c @2 @1))
1409 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
1410 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1411 && tree_expr_nonzero_p (@1))
1414 /* For integral types with wrapping overflow and C odd fold
1415 x * C EQ/NE y * C into x EQ/NE y. */
1418 (cmp (mult @0 INTEGER_CST@1) (mult @2 @1))
1419 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
1420 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))
1421 && (TREE_INT_CST_LOW (@1) & 1) != 0)
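/* The wrapping case works because an odd C has a multiplicative inverse
   modulo 2^precision, so multiplication by C is a bijection; e.g. in
   32-bit unsigned arithmetic 3 * 0xaaaaaaab == 1 (mod 2^32), hence
   x * 3 == y * 3 exactly when x == y.  */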
1424 /* For integral types with undefined overflow and C != 0 fold
1425 x * C RELOP y * C into:
1427 x RELOP y for nonnegative C
1428 y RELOP x for negative C */
1429 (for cmp (lt gt le ge)
1431 (cmp (mult:c @0 @1) (mult:c @2 @1))
1432 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
1433 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1434 (if (tree_expr_nonnegative_p (@1) && tree_expr_nonzero_p (@1))
1436 (if (TREE_CODE (@1) == INTEGER_CST
1437 && wi::neg_p (wi::to_wide (@1), TYPE_SIGN (TREE_TYPE (@1))))
1440 /* (X - 1U) <= INT_MAX-1U into (int) X > 0. */
1444 (cmp (plus @0 integer_minus_onep@1) INTEGER_CST@2)
1445 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
1446 && TYPE_UNSIGNED (TREE_TYPE (@0))
1447 && TYPE_PRECISION (TREE_TYPE (@0)) > 1
1448 && (wi::to_wide (@2)
1449 == wi::max_value (TYPE_PRECISION (TREE_TYPE (@0)), SIGNED) - 1))
1450 (with { tree stype = signed_type_for (TREE_TYPE (@0)); }
1451 (icmp (convert:stype @0) { build_int_cst (stype, 0); })))))
1453 /* X / 4 < Y / 4 iff X < Y when the division is known to be exact. */
1454 (for cmp (simple_comparison)
1456 (cmp (exact_div @0 INTEGER_CST@2) (exact_div @1 @2))
1457 (if (wi::gt_p (wi::to_wide (@2), 0, TYPE_SIGN (TREE_TYPE (@2))))
1460 /* X / C1 op C2 into a simple range test. */
1461 (for cmp (simple_comparison)
1463 (cmp (trunc_div:s @0 INTEGER_CST@1) INTEGER_CST@2)
1464 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
1465 && integer_nonzerop (@1)
1466 && !TREE_OVERFLOW (@1)
1467 && !TREE_OVERFLOW (@2))
1468 (with { tree lo, hi; bool neg_overflow;
1469 enum tree_code code = fold_div_compare (cmp, @1, @2, &lo, &hi,
1472 (if (code == LT_EXPR || code == GE_EXPR)
1473 (if (TREE_OVERFLOW (lo))
1474 { build_int_cst (type, (code == LT_EXPR) ^ neg_overflow); }
1475 (if (code == LT_EXPR)
1478 (if (code == LE_EXPR || code == GT_EXPR)
1479 (if (TREE_OVERFLOW (hi))
1480 { build_int_cst (type, (code == LE_EXPR) ^ neg_overflow); }
1481 (if (code == LE_EXPR)
1485 { build_int_cst (type, code == NE_EXPR); })
1486 (if (code == EQ_EXPR && !hi)
1488 (if (code == EQ_EXPR && !lo)
1490 (if (code == NE_EXPR && !hi)
1492 (if (code == NE_EXPR && !lo)
1495 { build_range_check (UNKNOWN_LOCATION, type, @0, code == EQ_EXPR,
1499 tree etype = range_check_type (TREE_TYPE (@0));
1502 if (! TYPE_UNSIGNED (etype))
1503 etype = unsigned_type_for (etype);
1504 hi = fold_convert (etype, hi);
1505 lo = fold_convert (etype, lo);
1506 hi = const_binop (MINUS_EXPR, etype, hi, lo);
1509 (if (etype && hi && !TREE_OVERFLOW (hi))
1510 (if (code == EQ_EXPR)
1511 (le (minus (convert:etype @0) { lo; }) { hi; })
1512 (gt (minus (convert:etype @0) { lo; }) { hi; })))))))))
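/* A worked instance of the range test: for unsigned x,
     x / 10 == 3   becomes   x - 30 <= 9
   computed in the unsigned range-check type, i.e. x in [30, 39];
   x / 10 != 3 likewise becomes x - 30 > 9.  */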
1514 /* X + Z < Y + Z is the same as X < Y when there is no overflow. */
1515 (for op (lt le ge gt)
1517 (op (plus:c @0 @2) (plus:c @1 @2))
1518 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1519 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1521 /* For equality and subtraction, this is also true with wrapping overflow. */
1522 (for op (eq ne minus)
1524 (op (plus:c @0 @2) (plus:c @1 @2))
1525 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1526 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1527 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
1530 /* X - Z < Y - Z is the same as X < Y when there is no overflow. */
1531 (for op (lt le ge gt)
1533 (op (minus @0 @2) (minus @1 @2))
1534 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1535 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1537 /* For equality and subtraction, this is also true with wrapping overflow. */
1538 (for op (eq ne minus)
1540 (op (minus @0 @2) (minus @1 @2))
1541 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1542 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1543 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
1545 /* And for pointers... */
1546 (for op (simple_comparison)
1548 (op (pointer_diff@3 @0 @2) (pointer_diff @1 @2))
1549 (if (!TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
1552 (minus (pointer_diff@3 @0 @2) (pointer_diff @1 @2))
1553 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@3))
1554 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
1555 (pointer_diff @0 @1)))
1557 /* Z - X < Z - Y is the same as Y < X when there is no overflow. */
1558 (for op (lt le ge gt)
1560 (op (minus @2 @0) (minus @2 @1))
1561 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1562 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1564 /* For equality and subtraction, this is also true with wrapping overflow. */
1565 (for op (eq ne minus)
1567 (op (minus @2 @0) (minus @2 @1))
1568 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1569 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1570 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
1572 /* And for pointers... */
1573 (for op (simple_comparison)
1575 (op (pointer_diff@3 @2 @0) (pointer_diff @2 @1))
1576 (if (!TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
1579 (minus (pointer_diff@3 @2 @0) (pointer_diff @2 @1))
1580 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@3))
1581 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
1582 (pointer_diff @1 @0)))
1584 /* X + Y < Y is the same as X < 0 when there is no overflow. */
1585 (for op (lt le gt ge)
1587 (op:c (plus:c@2 @0 @1) @1)
1588 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1589 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1590 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0))
1591 && (CONSTANT_CLASS_P (@0) || single_use (@2)))
1592 (op @0 { build_zero_cst (TREE_TYPE (@0)); }))))
1593 /* For equality, this is also true with wrapping overflow. */
1596 (op:c (nop_convert@3 (plus:c@2 @0 (convert1? @1))) (convert2? @1))
1597 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1598 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1599 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
1600 && (CONSTANT_CLASS_P (@0) || (single_use (@2) && single_use (@3)))
1601 && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@2))
1602 && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@1)))
1603 (op @0 { build_zero_cst (TREE_TYPE (@0)); })))
1605 (op:c (nop_convert@3 (pointer_plus@2 (convert1? @0) @1)) (convert2? @0))
1606 (if (tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0))
1607 && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0))
1608 && (CONSTANT_CLASS_P (@1) || (single_use (@2) && single_use (@3))))
1609 (op @1 { build_zero_cst (TREE_TYPE (@1)); }))))
1611 /* X - Y < X is the same as Y > 0 when there is no overflow.
1612 For equality, this is also true with wrapping overflow. */
1613 (for op (simple_comparison)
1615 (op:c @0 (minus@2 @0 @1))
1616 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1617 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1618 || ((op == EQ_EXPR || op == NE_EXPR)
1619 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
1620 && (CONSTANT_CLASS_P (@1) || single_use (@2)))
1621 (op @1 { build_zero_cst (TREE_TYPE (@1)); }))))
/* Transform:
   (X / Y) == 0 -> X < Y if X, Y are unsigned.
1625 (X / Y) != 0 -> X >= Y, if X, Y are unsigned. */
1629 (cmp (trunc_div @0 @1) integer_zerop)
1630 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
1631 /* Complex ==/!= is allowed, but not </>=. */
1632 && TREE_CODE (TREE_TYPE (@0)) != COMPLEX_TYPE
1633 && (VECTOR_TYPE_P (type) || !VECTOR_TYPE_P (TREE_TYPE (@0))))
1636 /* X == C - X can never be true if C is odd. */
1639 (cmp:c (convert? @0) (convert1? (minus INTEGER_CST@1 (convert2? @0))))
1640 (if (TREE_INT_CST_LOW (@1) & 1)
1641 { constant_boolean_node (cmp == NE_EXPR, type); })))
/* Arguments on which one can call get_nonzero_bits to get the bits
   possibly set.  */
1645 (match with_possible_nonzero_bits
1647 (match with_possible_nonzero_bits
1649 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))))
1650 /* Slightly extended version, do not make it recursive to keep it cheap. */
1651 (match (with_possible_nonzero_bits2 @0)
1652 with_possible_nonzero_bits@0)
1653 (match (with_possible_nonzero_bits2 @0)
1654 (bit_and:c with_possible_nonzero_bits@0 @2))
1656 /* Same for bits that are known to be set, but we do not have
1657 an equivalent to get_nonzero_bits yet. */
1658 (match (with_certain_nonzero_bits2 @0)
1660 (match (with_certain_nonzero_bits2 @0)
1661 (bit_ior @1 INTEGER_CST@0))
1663 /* X == C (or X & Z == Y | C) is impossible if ~nonzero(X) & C != 0. */
1666 (cmp:c (with_possible_nonzero_bits2 @0) (with_certain_nonzero_bits2 @1))
1667 (if (wi::bit_and_not (wi::to_wide (@1), get_nonzero_bits (@0)) != 0)
1668 { constant_boolean_node (cmp == NE_EXPR, type); })))
1670 /* ((X inner_op C0) outer_op C1)
1671 With X being a tree where value_range has reasoned certain bits to always be
1672 zero throughout its computed value range,
1673 inner_op = {|,^}, outer_op = {|,^} and inner_op != outer_op
   where zero_mask has 1's for all bits that are sure to be 0 in X,
   and 0's otherwise.
   if (inner_op == '^') C0 &= ~C1;
   if ((C0 & ~zero_mask) == 0) then emit (X outer_op (C0 outer_op C1))
   if ((C1 & ~zero_mask) == 0) then emit (X inner_op (C0 outer_op C1))
*/
1680 (for inner_op (bit_ior bit_xor)
1681 outer_op (bit_xor bit_ior)
1684 (inner_op:s @2 INTEGER_CST@0) INTEGER_CST@1)
1688 wide_int zero_mask_not;
1692 if (TREE_CODE (@2) == SSA_NAME)
1693 zero_mask_not = get_nonzero_bits (@2);
1697 if (inner_op == BIT_XOR_EXPR)
1699 C0 = wi::bit_and_not (wi::to_wide (@0), wi::to_wide (@1));
1700 cst_emit = C0 | wi::to_wide (@1);
1704 C0 = wi::to_wide (@0);
1705 cst_emit = C0 ^ wi::to_wide (@1);
1708 (if (!fail && (C0 & zero_mask_not) == 0)
1709 (outer_op @2 { wide_int_to_tree (type, cst_emit); })
1710 (if (!fail && (wi::to_wide (@1) & zero_mask_not) == 0)
1711 (inner_op @2 { wide_int_to_tree (type, cst_emit); }))))))
1713 /* Associate (p +p off1) +p off2 as (p +p (off1 + off2)). */
1715 (pointer_plus (pointer_plus:s @0 @1) @3)
1716 (pointer_plus @0 (plus @1 @3)))
/* Pattern match
     tem1 = (long) ptr1;
     tem2 = (long) ptr2;
     tem3 = tem2 - tem1;
     tem4 = (unsigned long) tem3;
     tem5 = ptr1 + tem4;
   and produce
     tem5 = ptr2;  */
(simplify
1727 (pointer_plus @0 (convert?@2 (minus@3 (convert @1) (convert @0))))
1728 /* Conditionally look through a sign-changing conversion. */
1729 (if (TYPE_PRECISION (TREE_TYPE (@2)) == TYPE_PRECISION (TREE_TYPE (@3))
1730 && ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@1)))
1731 || (GENERIC && type == TREE_TYPE (@1))))
1734 (pointer_plus @0 (convert?@2 (pointer_diff@3 @1 @@0)))
1735 (if (TYPE_PRECISION (TREE_TYPE (@2)) >= TYPE_PRECISION (TREE_TYPE (@3)))
/* Pattern match
     tem = (sizetype) ptr;
     tem = tem & algn;
     tem = -tem;
     ... = ptr p+ tem;
   and produce the simpler and easier to analyze with respect to alignment
     ... = ptr & ~algn;  */
(simplify
1746 (pointer_plus @0 (negate (bit_and (convert @0) INTEGER_CST@1)))
1747 (with { tree algn = wide_int_to_tree (TREE_TYPE (@0), ~wi::to_wide (@1)); }
1748 (bit_and @0 { algn; })))
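/* E.g. with algn == 15 this turns  ptr p+ -((sizetype) ptr & 15)
   into  ptr & ~15, i.e. ptr rounded down to a 16-byte boundary.  */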
1750 /* Try folding difference of addresses. */
1752 (minus (convert ADDR_EXPR@0) (convert @1))
1753 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1754 (with { poly_int64 diff; }
1755 (if (ptr_difference_const (@0, @1, &diff))
1756 { build_int_cst_type (type, diff); }))))
1758 (minus (convert @0) (convert ADDR_EXPR@1))
1759 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1760 (with { poly_int64 diff; }
1761 (if (ptr_difference_const (@0, @1, &diff))
1762 { build_int_cst_type (type, diff); }))))
1764 (pointer_diff (convert?@2 ADDR_EXPR@0) (convert1?@3 @1))
1765 (if (tree_nop_conversion_p (TREE_TYPE(@2), TREE_TYPE (@0))
1766 && tree_nop_conversion_p (TREE_TYPE(@3), TREE_TYPE (@1)))
1767 (with { poly_int64 diff; }
1768 (if (ptr_difference_const (@0, @1, &diff))
1769 { build_int_cst_type (type, diff); }))))
1771 (pointer_diff (convert?@2 @0) (convert1?@3 ADDR_EXPR@1))
1772 (if (tree_nop_conversion_p (TREE_TYPE(@2), TREE_TYPE (@0))
1773 && tree_nop_conversion_p (TREE_TYPE(@3), TREE_TYPE (@1)))
1774 (with { poly_int64 diff; }
1775 (if (ptr_difference_const (@0, @1, &diff))
1776 { build_int_cst_type (type, diff); }))))
1778 /* If arg0 is derived from the address of an object or function, we may
   be able to fold this expression using the object or function's
   alignment.  */
(simplify
1782 (bit_and (convert? @0) INTEGER_CST@1)
1783 (if (POINTER_TYPE_P (TREE_TYPE (@0))
1784 && tree_nop_conversion_p (type, TREE_TYPE (@0)))
1788 unsigned HOST_WIDE_INT bitpos;
1789 get_pointer_alignment_1 (@0, &align, &bitpos);
1791 (if (wi::ltu_p (wi::to_wide (@1), align / BITS_PER_UNIT))
1792 { wide_int_to_tree (type, (wi::to_wide (@1)
1793 & (bitpos / BITS_PER_UNIT))); }))))
1796 /* We can't reassociate at all for saturating types. */
1797 (if (!TYPE_SATURATING (type))
1799 /* Contract negates. */
1800 /* A + (-B) -> A - B */
1802 (plus:c @0 (convert? (negate @1)))
1803 /* Apply STRIP_NOPS on the negate. */
1804 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
1805 && !TYPE_OVERFLOW_SANITIZED (type))
1809 if (INTEGRAL_TYPE_P (type)
1810 && TYPE_OVERFLOW_WRAPS (type) != TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1)))
1811 t1 = TYPE_OVERFLOW_WRAPS (type) ? type : TREE_TYPE (@1);
1813 (convert (minus (convert:t1 @0) (convert:t1 @1))))))
1814 /* A - (-B) -> A + B */
1816 (minus @0 (convert? (negate @1)))
1817 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
1818 && !TYPE_OVERFLOW_SANITIZED (type))
1822 if (INTEGRAL_TYPE_P (type)
1823 && TYPE_OVERFLOW_WRAPS (type) != TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1)))
1824 t1 = TYPE_OVERFLOW_WRAPS (type) ? type : TREE_TYPE (@1);
1826 (convert (plus (convert:t1 @0) (convert:t1 @1))))))
 /* -(T)(-A) -> (T)A
    Sign-extension is ok except for INT_MIN, which thankfully cannot
1829 happen without overflow. */
1831 (negate (convert (negate @1)))
1832 (if (INTEGRAL_TYPE_P (type)
1833 && (TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@1))
1834 || (!TYPE_UNSIGNED (TREE_TYPE (@1))
1835 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))))
1836 && !TYPE_OVERFLOW_SANITIZED (type)
1837 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@1)))
1840 (negate (convert negate_expr_p@1))
1841 (if (SCALAR_FLOAT_TYPE_P (type)
1842 && ((DECIMAL_FLOAT_TYPE_P (type)
1843 == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1))
1844 && TYPE_PRECISION (type) >= TYPE_PRECISION (TREE_TYPE (@1)))
1845 || !HONOR_SIGN_DEPENDENT_ROUNDING (type)))
1846 (convert (negate @1))))
1848 (negate (nop_convert (negate @1)))
1849 (if (!TYPE_OVERFLOW_SANITIZED (type)
1850 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@1)))
1853 /* We can't reassociate floating-point unless -fassociative-math
1854 or fixed-point plus or minus because of saturation to +-Inf. */
1855 (if ((!FLOAT_TYPE_P (type) || flag_associative_math)
1856 && !FIXED_POINT_TYPE_P (type))
1858 /* Match patterns that allow contracting a plus-minus pair
1859 irrespective of overflow issues. */
1860 /* (A +- B) - A -> +- B */
1861 /* (A +- B) -+ B -> A */
1862 /* A - (A +- B) -> -+ B */
1863 /* A +- (B -+ A) -> +- B */
1865 (minus (plus:c @0 @1) @0)
1868 (minus (minus @0 @1) @0)
1871 (plus:c (minus @0 @1) @1)
1874 (minus @0 (plus:c @0 @1))
1877 (minus @0 (minus @0 @1))
1879 /* (A +- B) + (C - A) -> C +- B */
1880 /* (A + B) - (A - C) -> B + C */
1881 /* More cases are handled with comparisons. */
1883 (plus:c (plus:c @0 @1) (minus @2 @0))
1886 (plus:c (minus @0 @1) (minus @2 @0))
1889 (plus:c (pointer_diff @0 @1) (pointer_diff @2 @0))
1890 (if (TYPE_OVERFLOW_UNDEFINED (type)
1891 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0)))
1892 (pointer_diff @2 @1)))
1894 (minus (plus:c @0 @1) (minus @0 @2))
1897 /* (A +- CST1) +- CST2 -> A + CST3
   Use view_convert because it is safe for vectors and equivalent for
   scalars.  */
1900 (for outer_op (plus minus)
1901 (for inner_op (plus minus)
1902 neg_inner_op (minus plus)
1904 (outer_op (nop_convert (inner_op @0 CONSTANT_CLASS_P@1))
1906 /* If one of the types wraps, use that one. */
1907 (if (!ANY_INTEGRAL_TYPE_P (type) || TYPE_OVERFLOW_WRAPS (type))
1908 /* If all 3 captures are CONSTANT_CLASS_P, punt, as we might recurse
1909 forever if something doesn't simplify into a constant. */
1910 (if (!CONSTANT_CLASS_P (@0))
1911 (if (outer_op == PLUS_EXPR)
1912 (plus (view_convert @0) (inner_op @2 (view_convert @1)))
1913 (minus (view_convert @0) (neg_inner_op @2 (view_convert @1)))))
1914 (if (!ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1915 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
1916 (if (outer_op == PLUS_EXPR)
1917 (view_convert (plus @0 (inner_op (view_convert @2) @1)))
1918 (view_convert (minus @0 (neg_inner_op (view_convert @2) @1))))
1919 /* If the constant operation overflows we cannot do the transform
1920 directly as we would introduce undefined overflow, for example
1921 with (a - 1) + INT_MIN. */
1922 (if (types_match (type, @0))
1923 (with { tree cst = const_binop (outer_op == inner_op
1924 ? PLUS_EXPR : MINUS_EXPR,
1926 (if (cst && !TREE_OVERFLOW (cst))
1927 (inner_op @0 { cst; } )
1928 /* X+INT_MAX+1 is X-INT_MIN. */
1929 (if (INTEGRAL_TYPE_P (type) && cst
1930 && wi::to_wide (cst) == wi::min_value (type))
1931 (neg_inner_op @0 { wide_int_to_tree (type, wi::to_wide (cst)); })
1932 /* Last resort, use some unsigned type. */
1933 (with { tree utype = unsigned_type_for (type); }
1935 (view_convert (inner_op
1936 (view_convert:utype @0)
1938 { drop_tree_overflow (cst); }))))))))))))))
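/* Illustrative examples, assuming 32-bit "int a":
     (a + 3) + 4  ->  a + 7
     (a - 1) - 2  ->  a - 3
   but (a - 1) + INT_MIN is not rewritten to a + (INT_MIN - 1), since that
   constant overflows; as noted above, the operation is instead carried
   out in the corresponding unsigned type and converted back.  */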
1940 /* (CST1 - A) +- CST2 -> CST3 - A */
1941 (for outer_op (plus minus)
1943 (outer_op (minus CONSTANT_CLASS_P@1 @0) CONSTANT_CLASS_P@2)
1944 (with { tree cst = const_binop (outer_op, type, @1, @2); }
1945 (if (cst && !TREE_OVERFLOW (cst))
1946 (minus { cst; } @0)))))
1948 /* CST1 - (CST2 - A) -> CST3 + A */
1950 (minus CONSTANT_CLASS_P@1 (minus CONSTANT_CLASS_P@2 @0))
1951 (with { tree cst = const_binop (MINUS_EXPR, type, @1, @2); }
1952 (if (cst && !TREE_OVERFLOW (cst))
1953 (plus { cst; } @0))))
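/* Illustrative example: 10 - (3 - a)  ->  7 + a, the new constant being
   10 - 3 as computed by const_binop above.  */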
/* ~A + A -> -1 */
1957 (plus:c (bit_not @0) @0)
1958 (if (!TYPE_OVERFLOW_TRAPS (type))
1959 { build_all_ones_cst (type); }))
/* ~A + 1 -> -A */
1963 (plus (convert? (bit_not @0)) integer_each_onep)
1964 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1965 (negate (convert @0))))
/* -A - 1 -> ~A */
1969 (minus (convert? (negate @0)) integer_each_onep)
1970 (if (!TYPE_OVERFLOW_TRAPS (type)
1971 && tree_nop_conversion_p (type, TREE_TYPE (@0)))
1972 (bit_not (convert @0))))
/* -1 - A -> ~A */
1976 (minus integer_all_onesp @0)
1979 /* (T)(P + A) - (T)P -> (T) A */
1981 (minus (convert (plus:c @@0 @1))
1983 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
1984 /* For integer types, if A has a smaller type
1985 than T the result depends on the possible overflow in P + A.
1987 E.g. T=size_t, A=(unsigned)4294967295, P>0.
1988 However, if an overflow in P + A would cause
1989 undefined behavior, we can assume that there is no overflow.  */
1991 || (INTEGRAL_TYPE_P (TREE_TYPE (@1))
1992 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))))
1995 (minus (convert (pointer_plus @@0 @1))
1997 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
1998 /* For pointer types, if the conversion of A to the
1999 final type requires a sign- or zero-extension,
2000 then we have to punt - it is not defined which one is correct.  */
2002 || (POINTER_TYPE_P (TREE_TYPE (@0))
2003 && TREE_CODE (@1) == INTEGER_CST
2004 && tree_int_cst_sign_bit (@1) == 0))
2007 (pointer_diff (pointer_plus @@0 @1) @0)
2008 /* The second argument of pointer_plus must be interpreted as signed, and
2009 thus sign-extended if necessary. */
2010 (with { tree stype = signed_type_for (TREE_TYPE (@1)); }
2011 /* Use view_convert instead of convert here, as POINTER_PLUS_EXPR
2012 second arg is unsigned even when we need to consider it as signed;
2013 we don't want to diagnose overflow here. */
2014 (convert (view_convert:stype @1))))
2016 /* (T)P - (T)(P + A) -> -(T) A */
2018 (minus (convert? @0)
2019 (convert (plus:c @@0 @1)))
2020 (if (INTEGRAL_TYPE_P (type)
2021 && TYPE_OVERFLOW_UNDEFINED (type)
2022 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
2023 (with { tree utype = unsigned_type_for (type); }
2024 (convert (negate (convert:utype @1))))
2025 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
2026 /* For integer types, if A has a smaller type
2027 than T the result depends on the possible overflow in P + A.
2029 E.g. T=size_t, A=(unsigned)4294967295, P>0.
2030 However, if an overflow in P + A would cause
2031 undefined behavior, we can assume that there is no overflow.  */
2033 || (INTEGRAL_TYPE_P (TREE_TYPE (@1))
2034 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))))
2035 (negate (convert @1)))))
2038 (convert (pointer_plus @@0 @1)))
2039 (if (INTEGRAL_TYPE_P (type)
2040 && TYPE_OVERFLOW_UNDEFINED (type)
2041 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
2042 (with { tree utype = unsigned_type_for (type); }
2043 (convert (negate (convert:utype @1))))
2044 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
2045 /* For pointer types, if the conversion of A to the
2046 final type requires a sign- or zero-extension,
2047 then we have to punt - it is not defined which one is correct.  */
2049 || (POINTER_TYPE_P (TREE_TYPE (@0))
2050 && TREE_CODE (@1) == INTEGER_CST
2051 && tree_int_cst_sign_bit (@1) == 0))
2052 (negate (convert @1)))))
2054 (pointer_diff @0 (pointer_plus @@0 @1))
2055 /* The second argument of pointer_plus must be interpreted as signed, and
2056 thus sign-extended if necessary. */
2057 (with { tree stype = signed_type_for (TREE_TYPE (@1)); }
2058 /* Use view_convert instead of convert here, as POINTER_PLUS_EXPR
2059 second arg is unsigned even when we need to consider it as signed;
2060 we don't want to diagnose overflow here. */
2061 (negate (convert (view_convert:stype @1)))))
2063 /* (T)(P + A) - (T)(P + B) -> (T)A - (T)B */
2065 (minus (convert (plus:c @@0 @1))
2066 (convert (plus:c @0 @2)))
2067 (if (INTEGRAL_TYPE_P (type)
2068 && TYPE_OVERFLOW_UNDEFINED (type)
2069 && element_precision (type) <= element_precision (TREE_TYPE (@1))
2070 && element_precision (type) <= element_precision (TREE_TYPE (@2)))
2071 (with { tree utype = unsigned_type_for (type); }
2072 (convert (minus (convert:utype @1) (convert:utype @2))))
2073 (if (((element_precision (type) <= element_precision (TREE_TYPE (@1)))
2074 == (element_precision (type) <= element_precision (TREE_TYPE (@2))))
2075 && (element_precision (type) <= element_precision (TREE_TYPE (@1))
2076 /* For integer types, if A has a smaller type
2077 than T the result depends on the possible overflow in P + A.
2079 E.g. T=size_t, A=(unsigned)4294967295, P>0.
2080 However, if an overflow in P + A would cause
2081 undefined behavior, we can assume that there is no overflow.  */
2083 || (INTEGRAL_TYPE_P (TREE_TYPE (@1))
2084 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
2085 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))
2086 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@2)))))
2087 (minus (convert @1) (convert @2)))))
2089 (minus (convert (pointer_plus @@0 @1))
2090 (convert (pointer_plus @0 @2)))
2091 (if (INTEGRAL_TYPE_P (type)
2092 && TYPE_OVERFLOW_UNDEFINED (type)
2093 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
2094 (with { tree utype = unsigned_type_for (type); }
2095 (convert (minus (convert:utype @1) (convert:utype @2))))
2096 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
2097 /* For pointer types, if the conversion of A to the
2098 final type requires a sign- or zero-extension,
2099 then we have to punt - it is not defined which one is correct.  */
2101 || (POINTER_TYPE_P (TREE_TYPE (@0))
2102 && TREE_CODE (@1) == INTEGER_CST
2103 && tree_int_cst_sign_bit (@1) == 0
2104 && TREE_CODE (@2) == INTEGER_CST
2105 && tree_int_cst_sign_bit (@2) == 0))
2106 (minus (convert @1) (convert @2)))))
2108 (pointer_diff (pointer_plus @@0 @1) (pointer_plus @0 @2))
2109 /* The second argument of pointer_plus must be interpreted as signed, and
2110 thus sign-extended if necessary. */
2111 (with { tree stype = signed_type_for (TREE_TYPE (@1)); }
2112 /* Use view_convert instead of convert here, as POINTER_PLUS_EXPR
2113 second arg is unsigned even when we need to consider it as signed;
2114 we don't want to diagnose overflow here. */
2115 (minus (convert (view_convert:stype @1))
2116 (convert (view_convert:stype @2)))))))
2118 /* (A * C) +- (B * C) -> (A+-B) * C and (A * C) +- A -> A * (C+-1).
2119 Modeled after fold_plusminus_mult_expr. */
2120 (if (!TYPE_SATURATING (type)
2121 && (!FLOAT_TYPE_P (type) || flag_associative_math))
2122 (for plusminus (plus minus)
2124 (plusminus (mult:cs@3 @0 @1) (mult:cs@4 @0 @2))
2125 (if ((!ANY_INTEGRAL_TYPE_P (type)
2126 || TYPE_OVERFLOW_WRAPS (type)
2127 || (INTEGRAL_TYPE_P (type)
2128 && tree_expr_nonzero_p (@0)
2129 && expr_not_equal_to (@0, wi::minus_one (TYPE_PRECISION (type)))))
2130 /* If @1 +- @2 is constant require a hard single-use on either
2131 original operand (but not on both). */
2132 && (single_use (@3) || single_use (@4)))
2133 (mult (plusminus @1 @2) @0)))
2134 /* We cannot generate constant 1 for fract. */
2135 (if (!ALL_FRACT_MODE_P (TYPE_MODE (type)))
2137 (plusminus @0 (mult:c@3 @0 @2))
2138 (if ((!ANY_INTEGRAL_TYPE_P (type)
2139 || TYPE_OVERFLOW_WRAPS (type)
2140 || (INTEGRAL_TYPE_P (type)
2141 && tree_expr_nonzero_p (@0)
2142 && expr_not_equal_to (@0, wi::minus_one (TYPE_PRECISION (type)))))
2144 (mult (plusminus { build_one_cst (type); } @2) @0)))
2146 (plusminus (mult:c@3 @0 @2) @0)
2147 (if ((!ANY_INTEGRAL_TYPE_P (type)
2148 || TYPE_OVERFLOW_WRAPS (type)
2149 || (INTEGRAL_TYPE_P (type)
2150 && tree_expr_nonzero_p (@0)
2151 && expr_not_equal_to (@0, wi::minus_one (TYPE_PRECISION (type)))))
2153 (mult (plusminus @2 { build_one_cst (type); }) @0))))))
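/* Illustrative examples, assuming "unsigned int a" (overflow wraps) and a
   single use of each multiplication:
     a * 3 + a * 5  ->  a * 8
     a * 7 - a      ->  a * 6  */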
2155 /* Simplifications of MIN_EXPR, MAX_EXPR, fmin() and fmax(). */
2157 (for minmax (min max FMIN_ALL FMAX_ALL)
2161 /* min(max(x,y),y) -> y. */
2163 (min:c (max:c @0 @1) @1)
2165 /* max(min(x,y),y) -> y. */
2167 (max:c (min:c @0 @1) @1)
2169 /* max(a,-a) -> abs(a). */
2171 (max:c @0 (negate @0))
2172 (if (TREE_CODE (type) != COMPLEX_TYPE
2173 && (! ANY_INTEGRAL_TYPE_P (type)
2174 || TYPE_OVERFLOW_UNDEFINED (type)))
2176 /* min(a,-a) -> -abs(a). */
2178 (min:c @0 (negate @0))
2179 (if (TREE_CODE (type) != COMPLEX_TYPE
2180 && (! ANY_INTEGRAL_TYPE_P (type)
2181 || TYPE_OVERFLOW_UNDEFINED (type)))
2186 (if (INTEGRAL_TYPE_P (type)
2187 && TYPE_MIN_VALUE (type)
2188 && operand_equal_p (@1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST))
2190 (if (INTEGRAL_TYPE_P (type)
2191 && TYPE_MAX_VALUE (type)
2192 && operand_equal_p (@1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST))
2197 (if (INTEGRAL_TYPE_P (type)
2198 && TYPE_MAX_VALUE (type)
2199 && operand_equal_p (@1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST))
2201 (if (INTEGRAL_TYPE_P (type)
2202 && TYPE_MIN_VALUE (type)
2203 && operand_equal_p (@1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST))
2206 /* max (a, a + CST) -> a + CST where CST is positive. */
2207 /* max (a, a + CST) -> a where CST is negative. */
2209 (max:c @0 (plus@2 @0 INTEGER_CST@1))
2210 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
2211 (if (tree_int_cst_sgn (@1) > 0)
2215 /* min (a, a + CST) -> a where CST is positive. */
2216 /* min (a, a + CST) -> a + CST where CST is negative. */
2218 (min:c @0 (plus@2 @0 INTEGER_CST@1))
2219 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
2220 (if (tree_int_cst_sgn (@1) > 0)
2224 /* (convert (minmax ((convert (x) c)))) -> minmax (x c) if x is promoted
2225 and the outer convert demotes the expression back to x's type. */
2226 (for minmax (min max)
2228 (convert (minmax@0 (convert @1) INTEGER_CST@2))
2229 (if (INTEGRAL_TYPE_P (type)
2230 && types_match (@1, type) && int_fits_type_p (@2, type)
2231 && TYPE_SIGN (TREE_TYPE (@0)) == TYPE_SIGN (type)
2232 && TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (type))
2233 (minmax @1 (convert @2)))))
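/* Illustrative example, assuming "signed char c" promoted to int:
   (signed char) MIN ((int) c, 100)  ->  MIN (c, (signed char) 100),
   i.e. the min/max is performed directly in the narrower type.  */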
2235 (for minmax (FMIN_ALL FMAX_ALL)
2236 /* If either argument is NaN, return the other one. Avoid the
2237 transformation if we get (and honor) a signalling NaN. */
2239 (minmax:c @0 REAL_CST@1)
2240 (if (real_isnan (TREE_REAL_CST_PTR (@1))
2241 && (!HONOR_SNANS (@1) || !TREE_REAL_CST (@1).signalling))
2243 /* Convert fmin/fmax to MIN_EXPR/MAX_EXPR. C99 requires these
2244 functions to return the numeric arg if the other one is NaN.
2245 MIN and MAX don't honor that, so only transform if -ffinite-math-only
2246 is set. C99 doesn't require -0.0 to be handled, so we don't have to
2247 worry about it either. */
2248 (if (flag_finite_math_only)
2255 /* min (-A, -B) -> -max (A, B) */
2256 (for minmax (min max FMIN_ALL FMAX_ALL)
2257 maxmin (max min FMAX_ALL FMIN_ALL)
2259 (minmax (negate:s@2 @0) (negate:s@3 @1))
2260 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
2261 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2262 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
2263 (negate (maxmin @0 @1)))))
2264 /* MIN (~X, ~Y) -> ~MAX (X, Y)
2265 MAX (~X, ~Y) -> ~MIN (X, Y) */
2266 (for minmax (min max)
2269 (minmax (bit_not:s@2 @0) (bit_not:s@3 @1))
2270 (bit_not (maxmin @0 @1))))
2272 /* MIN (X, Y) == X -> X <= Y */
2273 (for minmax (min min max max)
2277 (cmp:c (minmax:c @0 @1) @0)
2278 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)))
2280 /* MIN (X, 5) == 0 -> X == 0
2281 MIN (X, 5) == 7 -> false */
2284 (cmp (min @0 INTEGER_CST@1) INTEGER_CST@2)
2285 (if (wi::lt_p (wi::to_wide (@1), wi::to_wide (@2),
2286 TYPE_SIGN (TREE_TYPE (@0))))
2287 { constant_boolean_node (cmp == NE_EXPR, type); }
2288 (if (wi::gt_p (wi::to_wide (@1), wi::to_wide (@2),
2289 TYPE_SIGN (TREE_TYPE (@0))))
2293 (cmp (max @0 INTEGER_CST@1) INTEGER_CST@2)
2294 (if (wi::gt_p (wi::to_wide (@1), wi::to_wide (@2),
2295 TYPE_SIGN (TREE_TYPE (@0))))
2296 { constant_boolean_node (cmp == NE_EXPR, type); }
2297 (if (wi::lt_p (wi::to_wide (@1), wi::to_wide (@2),
2298 TYPE_SIGN (TREE_TYPE (@0))))
2300 /* MIN (X, C1) < C2 -> X < C2 || C1 < C2 */
2301 (for minmax (min min max max min min max max )
2302 cmp (lt le gt ge gt ge lt le )
2303 comb (bit_ior bit_ior bit_ior bit_ior bit_and bit_and bit_and bit_and)
2305 (cmp (minmax @0 INTEGER_CST@1) INTEGER_CST@2)
2306 (comb (cmp @0 @2) (cmp @1 @2))))
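/* Illustrative example: MIN (x, 8) < 5  ->  (x < 5) | (8 < 5), which then
   folds to x < 5.  */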
2308 /* Simplifications of shift and rotates. */
2310 (for rotate (lrotate rrotate)
2312 (rotate integer_all_onesp@0 @1)
2315 /* Optimize -1 >> x for arithmetic right shifts. */
2317 (rshift integer_all_onesp@0 @1)
2318 (if (!TYPE_UNSIGNED (type)
2319 && tree_expr_nonnegative_p (@1))
2322 /* Optimize (x >> c) << c into x & (-1<<c). */
2324 (lshift (rshift @0 INTEGER_CST@1) @1)
2325 (if (wi::ltu_p (wi::to_wide (@1), element_precision (type)))
2326 (bit_and @0 (lshift { build_minus_one_cst (type); } @1))))
2328 /* Optimize (x << c) >> c into x & ((unsigned)-1 >> c) for unsigned types.  */
2331 (rshift (lshift @0 INTEGER_CST@1) @1)
2332 (if (TYPE_UNSIGNED (type)
2333 && (wi::ltu_p (wi::to_wide (@1), element_precision (type))))
2334 (bit_and @0 (rshift { build_minus_one_cst (type); } @1))))
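/* Illustrative examples, assuming 32-bit operands:
     (x >> 3) << 3                  ->  x & -8 (0xfffffff8)
     (x << 3) >> 3 with unsigned x  ->  x & (~0u >> 3) (0x1fffffff)  */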
2336 (for shiftrotate (lrotate rrotate lshift rshift)
2338 (shiftrotate @0 integer_zerop)
2341 (shiftrotate integer_zerop@0 @1)
2343 /* Prefer vector1 << scalar to vector1 << vector2
2344 if vector2 is uniform. */
2345 (for vec (VECTOR_CST CONSTRUCTOR)
2347 (shiftrotate @0 vec@1)
2348 (with { tree tem = uniform_vector_p (@1); }
2350 (shiftrotate @0 { tem; }))))))
2352 /* Simplify X << Y to X when the low `width' bits of Y are zero, as the
2353    only valid Y is then 0.  Similarly for X >> Y.  */
2355 (for shift (lshift rshift)
2357 (shift @0 SSA_NAME@1)
2358 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1)))
2360 int width = ceil_log2 (element_precision (TREE_TYPE (@0)));
2361 int prec = TYPE_PRECISION (TREE_TYPE (@1));
2363 (if ((get_nonzero_bits (@1) & wi::mask (width, false, prec)) == 0)
2367 /* Rewrite an LROTATE_EXPR by a constant into an
2368 RROTATE_EXPR by a new constant. */
2370 (lrotate @0 INTEGER_CST@1)
2371 (rrotate @0 { const_binop (MINUS_EXPR, TREE_TYPE (@1),
2372 build_int_cst (TREE_TYPE (@1),
2373 element_precision (type)), @1); }))
2375 /* Turn (a OP c1) OP c2 into a OP (c1+c2). */
2376 (for op (lrotate rrotate rshift lshift)
2378 (op (op @0 INTEGER_CST@1) INTEGER_CST@2)
2379 (with { unsigned int prec = element_precision (type); }
2380 (if (wi::ge_p (wi::to_wide (@1), 0, TYPE_SIGN (TREE_TYPE (@1)))
2381 && wi::lt_p (wi::to_wide (@1), prec, TYPE_SIGN (TREE_TYPE (@1)))
2382 && wi::ge_p (wi::to_wide (@2), 0, TYPE_SIGN (TREE_TYPE (@2)))
2383 && wi::lt_p (wi::to_wide (@2), prec, TYPE_SIGN (TREE_TYPE (@2))))
2384 (with { unsigned int low = (tree_to_uhwi (@1)
2385 + tree_to_uhwi (@2)); }
2386 /* Deal with a OP (c1 + c2) being undefined but (a OP c1) OP c2
2387 being well defined. */
2389 (if (op == LROTATE_EXPR || op == RROTATE_EXPR)
2390 (op @0 { build_int_cst (TREE_TYPE (@1), low % prec); })
2391 (if (TYPE_UNSIGNED (type) || op == LSHIFT_EXPR)
2392 { build_zero_cst (type); }
2393 (op @0 { build_int_cst (TREE_TYPE (@1), prec - 1); })))
2394 (op @0 { build_int_cst (TREE_TYPE (@1), low); })))))))
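/* Illustrative examples for 32-bit unsigned x:
     (x >> 10) >> 5           ->  x >> 15
     (x << 20) << 20          ->  0               (combined count >= precision)
     rotl (rotl (x, 20), 20)  ->  rotl (x, 8)     (rotate counts add modulo 32)  */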
2397 /* ((1 << A) & 1) != 0 -> A == 0
2398 ((1 << A) & 1) == 0 -> A != 0 */
2402 (cmp (bit_and (lshift integer_onep @0) integer_onep) integer_zerop)
2403 (icmp @0 { build_zero_cst (TREE_TYPE (@0)); })))
2405 /* (CST1 << A) == CST2 -> A == ctz (CST2) - ctz (CST1)
2406 (CST1 << A) != CST2 -> A != ctz (CST2) - ctz (CST1), if CST2 != 0.  */
2410 (cmp (lshift INTEGER_CST@0 @1) INTEGER_CST@2)
2411 (with { int cand = wi::ctz (wi::to_wide (@2)) - wi::ctz (wi::to_wide (@0)); }
2413 || (!integer_zerop (@2)
2414 && wi::lshift (wi::to_wide (@0), cand) != wi::to_wide (@2)))
2415 { constant_boolean_node (cmp == NE_EXPR, type); }
2416 (if (!integer_zerop (@2)
2417 && wi::lshift (wi::to_wide (@0), cand) == wi::to_wide (@2))
2418 (cmp @1 { build_int_cst (TREE_TYPE (@1), cand); }))))))
2420 /* Fold (X << C1) & C2 into (X << C1) & (C2 | ((1 << C1) - 1))
2421 (X >> C1) & C2 into (X >> C1) & (C2 | ~((type) -1 >> C1))
2422 if the new mask might be further optimized. */
2423 (for shift (lshift rshift)
2425 (bit_and (convert?:s@4 (shift:s@5 (convert1?@3 @0) INTEGER_CST@1))
2427 (if (tree_nop_conversion_p (TREE_TYPE (@4), TREE_TYPE (@5))
2428 && TYPE_PRECISION (type) <= HOST_BITS_PER_WIDE_INT
2429 && tree_fits_uhwi_p (@1)
2430 && tree_to_uhwi (@1) > 0
2431 && tree_to_uhwi (@1) < TYPE_PRECISION (type))
2434 unsigned int shiftc = tree_to_uhwi (@1);
2435 unsigned HOST_WIDE_INT mask = TREE_INT_CST_LOW (@2);
2436 unsigned HOST_WIDE_INT newmask, zerobits = 0;
2437 tree shift_type = TREE_TYPE (@3);
2440 if (shift == LSHIFT_EXPR)
2441 zerobits = ((HOST_WIDE_INT_1U << shiftc) - 1);
2442 else if (shift == RSHIFT_EXPR
2443 && type_has_mode_precision_p (shift_type))
2445 prec = TYPE_PRECISION (TREE_TYPE (@3));
2447 /* See if more bits can be proven as zero because of zero extension.  */
2450 && TYPE_UNSIGNED (TREE_TYPE (@0)))
2452 tree inner_type = TREE_TYPE (@0);
2453 if (type_has_mode_precision_p (inner_type)
2454 && TYPE_PRECISION (inner_type) < prec)
2456 prec = TYPE_PRECISION (inner_type);
2457 /* See if we can shorten the right shift. */
2459 shift_type = inner_type;
2460 /* Otherwise X >> C1 is all zeros, so we'll optimize
2461 it into (X, 0) later on by making sure zerobits is all ones.  */
2465 zerobits = HOST_WIDE_INT_M1U;
2468 zerobits >>= HOST_BITS_PER_WIDE_INT - shiftc;
2469 zerobits <<= prec - shiftc;
2471 /* For an arithmetic shift, if the sign bit could be set, zerobits
2472 can actually contain sign bits, so no transformation is
2473 possible, unless MASK masks them all away. In that
2474 case the shift needs to be converted into logical shift. */
2475 if (!TYPE_UNSIGNED (TREE_TYPE (@3))
2476 && prec == TYPE_PRECISION (TREE_TYPE (@3)))
2478 if ((mask & zerobits) == 0)
2479 shift_type = unsigned_type_for (TREE_TYPE (@3));
2485 /* ((X << 16) & 0xff00) is (X, 0). */
2486 (if ((mask & zerobits) == mask)
2487 { build_int_cst (type, 0); }
2488 (with { newmask = mask | zerobits; }
2489 (if (newmask != mask && (newmask & (newmask + 1)) == 0)
2492 /* Only do the transformation if NEWMASK is some integer mode's mask.  */
2494 for (prec = BITS_PER_UNIT;
2495 prec < HOST_BITS_PER_WIDE_INT; prec <<= 1)
2496 if (newmask == (HOST_WIDE_INT_1U << prec) - 1)
2499 (if (prec < HOST_BITS_PER_WIDE_INT
2500 || newmask == HOST_WIDE_INT_M1U)
2502 { tree newmaskt = build_int_cst_type (TREE_TYPE (@2), newmask); }
2503 (if (!tree_int_cst_equal (newmaskt, @2))
2504 (if (shift_type != TREE_TYPE (@3))
2505 (bit_and (convert (shift:shift_type (convert @3) @1)) { newmaskt; })
2506 (bit_and @4 { newmaskt; })))))))))))))
2508 /* Fold (X {&,^,|} C2) << C1 into (X << C1) {&,^,|} (C2 << C1)
2509 (X {&,^,|} C2) >> C1 into (X >> C1) & (C2 >> C1). */
2510 (for shift (lshift rshift)
2511 (for bit_op (bit_and bit_xor bit_ior)
2513 (shift (convert?:s (bit_op:s @0 INTEGER_CST@2)) INTEGER_CST@1)
2514 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
2515 (with { tree mask = int_const_binop (shift, fold_convert (type, @2), @1); }
2516 (bit_op (shift (convert @0) @1) { mask; }))))))
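/* Illustrative example: (x & 0xff) << 8  ->  (x << 8) & 0xff00.  */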
2518 /* ~(~X >> Y) -> X >> Y (for arithmetic shift). */
2520 (bit_not (convert1?:s (rshift:s (convert2?@0 (bit_not @1)) @2)))
2521 (if (!TYPE_UNSIGNED (TREE_TYPE (@0))
2522 && (element_precision (TREE_TYPE (@0))
2523 <= element_precision (TREE_TYPE (@1))
2524 || !TYPE_UNSIGNED (TREE_TYPE (@1))))
2526 { tree shift_type = TREE_TYPE (@0); }
2527 (convert (rshift (convert:shift_type @1) @2)))))
2529 /* ~(~X >>r Y) -> X >>r Y
2530 ~(~X <<r Y) -> X <<r Y */
2531 (for rotate (lrotate rrotate)
2533 (bit_not (convert1?:s (rotate:s (convert2?@0 (bit_not @1)) @2)))
2534 (if ((element_precision (TREE_TYPE (@0))
2535 <= element_precision (TREE_TYPE (@1))
2536 || !TYPE_UNSIGNED (TREE_TYPE (@1)))
2537 && (element_precision (type) <= element_precision (TREE_TYPE (@0))
2538 || !TYPE_UNSIGNED (TREE_TYPE (@0))))
2540 { tree rotate_type = TREE_TYPE (@0); }
2541 (convert (rotate (convert:rotate_type @1) @2))))))
2543 /* Simplifications of conversions. */
2545 /* Basic strip-useless-type-conversions / strip_nops. */
2546 (for cvt (convert view_convert float fix_trunc)
2549 (if ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@0)))
2550 || (GENERIC && type == TREE_TYPE (@0)))
2553 /* Contract view-conversions. */
2555 (view_convert (view_convert @0))
2558 /* For integral conversions with the same precision or pointer
2559 conversions use a NOP_EXPR instead. */
2562 (if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
2563 && (INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
2564 && TYPE_PRECISION (type) == TYPE_PRECISION (TREE_TYPE (@0)))
2567 /* Strip inner integral conversions that do not change precision or size, or
2568 zero-extend while keeping the same size (for bool-to-char). */
2570 (view_convert (convert@0 @1))
2571 (if ((INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
2572 && (INTEGRAL_TYPE_P (TREE_TYPE (@1)) || POINTER_TYPE_P (TREE_TYPE (@1)))
2573 && TYPE_SIZE (TREE_TYPE (@0)) == TYPE_SIZE (TREE_TYPE (@1))
2574 && (TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1))
2575 || (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (TREE_TYPE (@1))
2576 && TYPE_UNSIGNED (TREE_TYPE (@1)))))
2579 /* Re-association barriers around constants and other re-association
2580 barriers can be removed. */
2582 (paren CONSTANT_CLASS_P@0)
2585 (paren (paren@1 @0))
2588 /* Handle cases of two conversions in a row. */
2589 (for ocvt (convert float fix_trunc)
2590 (for icvt (convert float)
2595 tree inside_type = TREE_TYPE (@0);
2596 tree inter_type = TREE_TYPE (@1);
2597 int inside_int = INTEGRAL_TYPE_P (inside_type);
2598 int inside_ptr = POINTER_TYPE_P (inside_type);
2599 int inside_float = FLOAT_TYPE_P (inside_type);
2600 int inside_vec = VECTOR_TYPE_P (inside_type);
2601 unsigned int inside_prec = TYPE_PRECISION (inside_type);
2602 int inside_unsignedp = TYPE_UNSIGNED (inside_type);
2603 int inter_int = INTEGRAL_TYPE_P (inter_type);
2604 int inter_ptr = POINTER_TYPE_P (inter_type);
2605 int inter_float = FLOAT_TYPE_P (inter_type);
2606 int inter_vec = VECTOR_TYPE_P (inter_type);
2607 unsigned int inter_prec = TYPE_PRECISION (inter_type);
2608 int inter_unsignedp = TYPE_UNSIGNED (inter_type);
2609 int final_int = INTEGRAL_TYPE_P (type);
2610 int final_ptr = POINTER_TYPE_P (type);
2611 int final_float = FLOAT_TYPE_P (type);
2612 int final_vec = VECTOR_TYPE_P (type);
2613 unsigned int final_prec = TYPE_PRECISION (type);
2614 int final_unsignedp = TYPE_UNSIGNED (type);
2617 /* In addition to the cases of two conversions in a row
2618 handled below, if we are converting something to its own
2619 type via an object of identical or wider precision, neither
2620 conversion is needed. */
2621 (if (((GIMPLE && useless_type_conversion_p (type, inside_type))
2623 && TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (inside_type)))
2624 && (((inter_int || inter_ptr) && final_int)
2625 || (inter_float && final_float))
2626 && inter_prec >= final_prec)
2629 /* Likewise, if the intermediate and initial types are either both
2630 float or both integer, we don't need the middle conversion if the
2631 former is wider than the latter and doesn't change the signedness
2632 (for integers). Avoid this if the final type is a pointer since
2633 then we sometimes need the middle conversion. */
2634 (if (((inter_int && inside_int) || (inter_float && inside_float))
2635 && (final_int || final_float)
2636 && inter_prec >= inside_prec
2637 && (inter_float || inter_unsignedp == inside_unsignedp))
2640 /* If we have a sign-extension of a zero-extended value, we can
2641 replace that by a single zero-extension. Likewise if the
2642 final conversion does not change precision we can drop the
2643 intermediate conversion. */
2644 (if (inside_int && inter_int && final_int
2645 && ((inside_prec < inter_prec && inter_prec < final_prec
2646 && inside_unsignedp && !inter_unsignedp)
2647 || final_prec == inter_prec))
2650 /* Two conversions in a row are not needed unless:
2651 - some conversion is floating-point (overstrict for now), or
2652 - some conversion is a vector (overstrict for now), or
2653 - the intermediate type is narrower than both initial and final types, or
2655 - the intermediate type and innermost type differ in signedness,
2656 and the outermost type is wider than the intermediate, or
2657 - the initial type is a pointer type and the precisions of the
2658 intermediate and final types differ, or
2659 - the final type is a pointer type and the precisions of the
2660 initial and intermediate types differ. */
2661 (if (! inside_float && ! inter_float && ! final_float
2662 && ! inside_vec && ! inter_vec && ! final_vec
2663 && (inter_prec >= inside_prec || inter_prec >= final_prec)
2664 && ! (inside_int && inter_int
2665 && inter_unsignedp != inside_unsignedp
2666 && inter_prec < final_prec)
2667 && ((inter_unsignedp && inter_prec > inside_prec)
2668 == (final_unsignedp && final_prec > inter_prec))
2669 && ! (inside_ptr && inter_prec != final_prec)
2670 && ! (final_ptr && inside_prec != inter_prec))
2673 /* A truncation to an unsigned type (a zero-extension) should be
2674 canonicalized as bitwise and of a mask. */
2675 (if (GIMPLE /* PR70366: doing this in GENERIC breaks -Wconversion. */
2676 && final_int && inter_int && inside_int
2677 && final_prec == inside_prec
2678 && final_prec > inter_prec
2680 (convert (bit_and @0 { wide_int_to_tree
2682 wi::mask (inter_prec, false,
2683 TYPE_PRECISION (inside_type))); })))
2685 /* If we are converting an integer to a floating-point that can
2686 represent it exactly and back to an integer, we can skip the
2687 floating-point conversion. */
2688 (if (GIMPLE /* PR66211 */
2689 && inside_int && inter_float && final_int &&
2690 (unsigned) significand_size (TYPE_MODE (inter_type))
2691 >= inside_prec - !inside_unsignedp)
2694 /* If we have a narrowing conversion to an integral type that is fed by a
2695 BIT_AND_EXPR, we might be able to remove the BIT_AND_EXPR if it merely
2696 masks off bits outside the final type (and nothing else). */
2698 (convert (bit_and @0 INTEGER_CST@1))
2699 (if (INTEGRAL_TYPE_P (type)
2700 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
2701 && TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@0))
2702 && operand_equal_p (@1, build_low_bits_mask (TREE_TYPE (@1),
2703 TYPE_PRECISION (type)), 0))
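/* Illustrative example, assuming 32-bit int x:
   (unsigned char) (x & 0xff)  ->  (unsigned char) x, as the mask only
   clears bits that the narrowing conversion discards anyway.  */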
2707 /* (X /[ex] A) * A -> X. */
2709 (mult (convert1? (exact_div @0 @@1)) (convert2? @1))
2712 /* ((X /[ex] A) +- B) * A --> X +- A * B. */
2713 (for op (plus minus)
2715 (mult (convert1? (op (convert2? (exact_div @0 INTEGER_CST@@1)) INTEGER_CST@2)) @1)
2716 (if (tree_nop_conversion_p (type, TREE_TYPE (@2))
2717 && tree_nop_conversion_p (TREE_TYPE (@0), TREE_TYPE (@2)))
2720 wi::overflow_type overflow;
2721 wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
2722 TYPE_SIGN (type), &overflow);
2724 (if (types_match (type, TREE_TYPE (@2))
2725 && types_match (TREE_TYPE (@0), TREE_TYPE (@2)) && !overflow)
2726 (op @0 { wide_int_to_tree (type, mul); })
2727 (with { tree utype = unsigned_type_for (type); }
2728 (convert (op (convert:utype @0)
2729 (mult (convert:utype @1) (convert:utype @2))))))))))
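/* Illustrative example, the division being known exact:
   (x /[ex] 4 + 1) * 4  ->  x + 4, since the constant product 1 * 4
   cannot overflow.  */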
2731 /* Canonicalization of binary operations. */
2733 /* Convert X + -C into X - C. */
2735 (plus @0 REAL_CST@1)
2736 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
2737 (with { tree tem = const_unop (NEGATE_EXPR, type, @1); }
2738 (if (!TREE_OVERFLOW (tem) || !flag_trapping_math)
2739 (minus @0 { tem; })))))
2741 /* Convert x+x into x*2. */
2744 (if (SCALAR_FLOAT_TYPE_P (type))
2745 (mult @0 { build_real (type, dconst2); })
2746 (if (INTEGRAL_TYPE_P (type))
2747 (mult @0 { build_int_cst (type, 2); }))))
2751 (minus integer_zerop @1)
2754 (pointer_diff integer_zerop @1)
2755 (negate (convert @1)))
2757 /* (ARG0 - ARG1) is the same as (-ARG1 + ARG0). So check whether
2758 ARG0 is zero and X + ARG0 reduces to X, since that would mean
2759 (-ARG1 + ARG0) reduces to -ARG1. */
2761 (minus real_zerop@0 @1)
2762 (if (fold_real_zero_addition_p (type, @0, 0))
2765 /* Transform x * -1 into -x. */
2767 (mult @0 integer_minus_onep)
2770 /* Reassociate (X * CST) * Y to (X * Y) * CST. This does not introduce
2771 signed overflow for CST != 0 && CST != -1. */
2773 (mult:c (mult:s@3 @0 INTEGER_CST@1) @2)
2774 (if (TREE_CODE (@2) != INTEGER_CST
2776 && !integer_zerop (@1) && !integer_minus_onep (@1))
2777 (mult (mult @0 @2) @1)))
2779 /* True if we can easily extract the real and imaginary parts of a complex number.  */
2781 (match compositional_complex
2782 (convert? (complex @0 @1)))
2784 /* COMPLEX_EXPR and REALPART/IMAGPART_EXPR cancellations. */
2786 (complex (realpart @0) (imagpart @0))
2789 (realpart (complex @0 @1))
2792 (imagpart (complex @0 @1))
2795 /* Sometimes we only care about half of a complex expression. */
2797 (realpart (convert?:s (conj:s @0)))
2798 (convert (realpart @0)))
2800 (imagpart (convert?:s (conj:s @0)))
2801 (convert (negate (imagpart @0))))
2802 (for part (realpart imagpart)
2803 (for op (plus minus)
2805 (part (convert?:s@2 (op:s @0 @1)))
2806 (convert (op (part @0) (part @1))))))
2808 (realpart (convert?:s (CEXPI:s @0)))
2811 (imagpart (convert?:s (CEXPI:s @0)))
2814 /* conj(conj(x)) -> x */
2816 (conj (convert? (conj @0)))
2817 (if (tree_nop_conversion_p (TREE_TYPE (@0), type))
2820 /* conj({x,y}) -> {x,-y} */
2822 (conj (convert?:s (complex:s @0 @1)))
2823 (with { tree itype = TREE_TYPE (type); }
2824 (complex (convert:itype @0) (negate (convert:itype @1)))))
2826 /* BSWAP simplifications, transforms checked by gcc.dg/builtin-bswap-8.c. */
2827 (for bswap (BUILT_IN_BSWAP16 BUILT_IN_BSWAP32 BUILT_IN_BSWAP64)
2832 (bswap (bit_not (bswap @0)))
2834 (for bitop (bit_xor bit_ior bit_and)
2836 (bswap (bitop:c (bswap @0) @1))
2837 (bitop @0 (bswap @1)))))
2840 /* Combine COND_EXPRs and VEC_COND_EXPRs. */
2842 /* Simplify constant conditions.
2843 Only optimize constant conditions when the selected branch
2844 has the same type as the COND_EXPR. This avoids optimizing
2845 away "c ? x : throw", where the throw has a void type.
2846 Note that we cannot throw away the fold-const.c variant nor
2847 this one as we depend on doing this transform before possibly
2848 A ? B : B -> B triggers and the fold-const.c one can optimize
2849 0 ? A : B to B even if A has side-effects. Something
2850 genmatch cannot handle. */
2852 (cond INTEGER_CST@0 @1 @2)
2853 (if (integer_zerop (@0))
2854 (if (!VOID_TYPE_P (TREE_TYPE (@2)) || VOID_TYPE_P (type))
2856 (if (!VOID_TYPE_P (TREE_TYPE (@1)) || VOID_TYPE_P (type))
2859 (vec_cond VECTOR_CST@0 @1 @2)
2860 (if (integer_all_onesp (@0))
2862 (if (integer_zerop (@0))
2865 /* Simplification moved from fold_cond_expr_with_comparison.  It may also be extended.  */
2867 /* This pattern implements two kinds of simplification:
Case 1)
2870 (cond (cmp (convert1? x) c1) (convert2? x) c2) -> (minmax (x c)) if:
2871 1) Conversions are type widening from smaller type.
2872 2) Const c1 equals to c2 after canonicalizing comparison.
2873 3) Comparison has tree code LT, LE, GT or GE.
2874 This specific pattern is needed when (cmp (convert x) c) may not
2875 be simplified by comparison patterns because of multiple uses of
2876 x. It also makes sense here because simplifying across multiple
2877 uses of a variable is always beneficial for complicated cases.
Case 2)
2880 (cond (eq (convert1? x) c1) (convert2? x) c2) -> (cond (eq x c1) c1 c2). */
2881 (for cmp (lt le gt ge eq)
2883 (cond (cmp (convert1? @1) INTEGER_CST@3) (convert2? @1) INTEGER_CST@2)
2886 tree from_type = TREE_TYPE (@1);
2887 tree c1_type = TREE_TYPE (@3), c2_type = TREE_TYPE (@2);
2888 enum tree_code code = ERROR_MARK;
2890 if (INTEGRAL_TYPE_P (from_type)
2891 && int_fits_type_p (@2, from_type)
2892 && (types_match (c1_type, from_type)
2893 || (TYPE_PRECISION (c1_type) > TYPE_PRECISION (from_type)
2894 && (TYPE_UNSIGNED (from_type)
2895 || TYPE_SIGN (c1_type) == TYPE_SIGN (from_type))))
2896 && (types_match (c2_type, from_type)
2897 || (TYPE_PRECISION (c2_type) > TYPE_PRECISION (from_type)
2898 && (TYPE_UNSIGNED (from_type)
2899 || TYPE_SIGN (c2_type) == TYPE_SIGN (from_type)))))
2903 if (wi::to_widest (@3) == (wi::to_widest (@2) - 1))
2905 /* X <= Y - 1 equals to X < Y. */
2908 /* X > Y - 1 equals to X >= Y. */
2912 if (wi::to_widest (@3) == (wi::to_widest (@2) + 1))
2914 /* X < Y + 1 equals to X <= Y. */
2917 /* X >= Y + 1 equals to X > Y. */
2921 if (code != ERROR_MARK
2922 || wi::to_widest (@2) == wi::to_widest (@3))
2924 if (cmp == LT_EXPR || cmp == LE_EXPR)
2926 if (cmp == GT_EXPR || cmp == GE_EXPR)
2930 /* Can do A == C1 ? A : C2 -> A == C1 ? C1 : C2? */
2931 else if (int_fits_type_p (@3, from_type))
2935 (if (code == MAX_EXPR)
2936 (convert (max @1 (convert @2)))
2937 (if (code == MIN_EXPR)
2938 (convert (min @1 (convert @2)))
2939 (if (code == EQ_EXPR)
2940 (convert (cond (eq @1 (convert @3))
2941 (convert:from_type @3) (convert:from_type @2)))))))))
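/* Illustrative examples:
     x < 100 ? x : 100   ->  MIN (x, 100)       (Case 1)
     x == 5 ? x : 8      ->  x == 5 ? 5 : 8     (Case 2)  */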
2943 /* (cond (cmp (convert? x) c1) (op x c2) c3) -> (op (minmax x c1) c2) if:
2945 1) OP is PLUS or MINUS.
2946 2) CMP is LT, LE, GT or GE.
2947 3) C3 == (C1 op C2), and computation doesn't have undefined behavior.
2949 This pattern also handles special cases like:
2951 A) Operand x is an unsigned to signed type conversion and c1 is
2952 integer zero. In this case,
2953 (signed type)x < 0 <=> x > MAX_VAL(signed type)
2954 (signed type)x >= 0 <=> x <= MAX_VAL(signed type)
2955 B) Const c1 may not equal to (C3 op' C2). In this case we also
2956 check equality for (c1+1) and (c1-1) by adjusting the comparison code.
2959 TODO: Though signed type is handled by this pattern, it cannot be
2960 simplified at the moment because C standard requires additional
2961 type promotion. In order to match&simplify it here, the IR needs
2962 to be cleaned up by other optimizers, i.e., VRP.  */
2963 (for op (plus minus)
2964 (for cmp (lt le gt ge)
2966 (cond (cmp (convert? @X) INTEGER_CST@1) (op @X INTEGER_CST@2) INTEGER_CST@3)
2967 (with { tree from_type = TREE_TYPE (@X), to_type = TREE_TYPE (@1); }
2968 (if (types_match (from_type, to_type)
2969 /* Check if it is special case A). */
2970 || (TYPE_UNSIGNED (from_type)
2971 && !TYPE_UNSIGNED (to_type)
2972 && TYPE_PRECISION (from_type) == TYPE_PRECISION (to_type)
2973 && integer_zerop (@1)
2974 && (cmp == LT_EXPR || cmp == GE_EXPR)))
2977 wi::overflow_type overflow = wi::OVF_NONE;
2978 enum tree_code code, cmp_code = cmp;
2980 wide_int c1 = wi::to_wide (@1);
2981 wide_int c2 = wi::to_wide (@2);
2982 wide_int c3 = wi::to_wide (@3);
2983 signop sgn = TYPE_SIGN (from_type);
2985 /* Handle special case A), given x of unsigned type:
2986 ((signed type)x < 0) <=> (x > MAX_VAL(signed type))
2987 ((signed type)x >= 0) <=> (x <= MAX_VAL(signed type)) */
2988 if (!types_match (from_type, to_type))
2990 if (cmp_code == LT_EXPR)
2992 if (cmp_code == GE_EXPR)
2994 c1 = wi::max_value (to_type);
2996 /* To simplify this pattern, we require c3 = (c1 op c2). Here we
2997 compute (c3 op' c2) and check if it equals to c1 with op' being
2998 the inverted operator of op. Make sure overflow doesn't happen
2999 if it is undefined. */
3000 if (op == PLUS_EXPR)
3001 real_c1 = wi::sub (c3, c2, sgn, &overflow);
3003 real_c1 = wi::add (c3, c2, sgn, &overflow);
3006 if (!overflow || !TYPE_OVERFLOW_UNDEFINED (from_type))
3008 /* Check if c1 equals to real_c1. Boundary condition is handled
3009 by adjusting comparison operation if necessary. */
3010 if (!wi::cmp (wi::sub (real_c1, 1, sgn, &overflow), c1, sgn)
3013 /* X <= Y - 1 equals to X < Y. */
3014 if (cmp_code == LE_EXPR)
3016 /* X > Y - 1 equals to X >= Y. */
3017 if (cmp_code == GT_EXPR)
3020 if (!wi::cmp (wi::add (real_c1, 1, sgn, &overflow), c1, sgn)
3023 /* X < Y + 1 equals to X <= Y. */
3024 if (cmp_code == LT_EXPR)
3026 /* X >= Y + 1 equals to X > Y. */
3027 if (cmp_code == GE_EXPR)
3030 if (code != cmp_code || !wi::cmp (real_c1, c1, sgn))
3032 if (cmp_code == LT_EXPR || cmp_code == LE_EXPR)
3034 if (cmp_code == GT_EXPR || cmp_code == GE_EXPR)
3039 (if (code == MAX_EXPR)
3040 (op (max @X { wide_int_to_tree (from_type, real_c1); })
3041 { wide_int_to_tree (from_type, c2); })
3042 (if (code == MIN_EXPR)
3043 (op (min @X { wide_int_to_tree (from_type, real_c1); })
3044 { wide_int_to_tree (from_type, c2); })))))))))
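/* Illustrative example: x > 100 ? x + 5 : 105  ->  MAX (x, 100) + 5,
   since 105 == 100 + 5 and the constant computation does not overflow.  */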
3046 (for cnd (cond vec_cond)
3047 /* A ? B : (A ? X : C) -> A ? B : C. */
3049 (cnd @0 (cnd @0 @1 @2) @3)
3052 (cnd @0 @1 (cnd @0 @2 @3))
3054 /* A ? B : (!A ? C : X) -> A ? B : C. */
3055 /* ??? This matches embedded conditions open-coded because genmatch
3056 would generate matching code for conditions in separate stmts only.
3057 The following is still important to merge then and else arm cases
3058 from if-conversion. */
3060 (cnd @0 @1 (cnd @2 @3 @4))
3061 (if (inverse_conditions_p (@0, @2))
3064 (cnd @0 (cnd @1 @2 @3) @4)
3065 (if (inverse_conditions_p (@0, @1))
3068 /* A ? B : B -> B. */
3073 /* !A ? B : C -> A ? C : B. */
3075 (cnd (logical_inverted_value truth_valued_p@0) @1 @2)
3078 /* A + (B vcmp C ? 1 : 0) -> A - (B vcmp C ? -1 : 0), since vector comparisons
3079 return all -1 or all 0 results. */
3080 /* ??? We could instead convert all instances of the vec_cond to negate,
3081 but that isn't necessarily a win on its own. */
3083 (plus:c @3 (view_convert? (vec_cond:s @0 integer_each_onep@1 integer_zerop@2)))
3084 (if (VECTOR_TYPE_P (type)
3085 && known_eq (TYPE_VECTOR_SUBPARTS (type),
3086 TYPE_VECTOR_SUBPARTS (TREE_TYPE (@1)))
3087 && (TYPE_MODE (TREE_TYPE (type))
3088 == TYPE_MODE (TREE_TYPE (TREE_TYPE (@1)))))
3089 (minus @3 (view_convert (vec_cond @0 (negate @1) @2)))))
3091 /* ... likewise A - (B vcmp C ? 1 : 0) -> A + (B vcmp C ? -1 : 0). */
3093 (minus @3 (view_convert? (vec_cond:s @0 integer_each_onep@1 integer_zerop@2)))
3094 (if (VECTOR_TYPE_P (type)
3095 && known_eq (TYPE_VECTOR_SUBPARTS (type),
3096 TYPE_VECTOR_SUBPARTS (TREE_TYPE (@1)))
3097 && (TYPE_MODE (TREE_TYPE (type))
3098 == TYPE_MODE (TREE_TYPE (TREE_TYPE (@1)))))
3099 (plus @3 (view_convert (vec_cond @0 (negate @1) @2)))))
3102 /* Simplifications of comparisons. */
3104 /* See if we can reduce the magnitude of a constant involved in a
3105 comparison by changing the comparison code. This is a canonicalization
3106 formerly done by maybe_canonicalize_comparison_1. */
3110 (cmp @0 INTEGER_CST@1)
3111 (if (tree_int_cst_sgn (@1) == -1)
3112 (acmp @0 { wide_int_to_tree (TREE_TYPE (@1), wi::to_wide (@1) + 1); }))))
3116 (cmp @0 INTEGER_CST@1)
3117 (if (tree_int_cst_sgn (@1) == 1)
3118 (acmp @0 { wide_int_to_tree (TREE_TYPE (@1), wi::to_wide (@1) - 1); }))))
3121 /* We can simplify a logical negation of a comparison to the
3122 inverted comparison. As we cannot compute an expression
3123 operator using invert_tree_comparison we have to simulate
3124 that with expression code iteration. */
3125 (for cmp (tcc_comparison)
3126 icmp (inverted_tcc_comparison)
3127 ncmp (inverted_tcc_comparison_with_nans)
3128 /* Ideally we'd like to combine the following two patterns
3129 and handle some more cases by using
3130 (logical_inverted_value (cmp @0 @1))
3131 here but for that genmatch would need to "inline" that.
3132 For now implement what forward_propagate_comparison did. */
3134 (bit_not (cmp @0 @1))
3135 (if (VECTOR_TYPE_P (type)
3136 || (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) == 1))
3137 /* Comparison inversion may be impossible for trapping math,
3138 invert_tree_comparison will tell us. But we can't use
3139 a computed operator in the replacement tree thus we have
3140 to play the trick below. */
3141 (with { enum tree_code ic = invert_tree_comparison
3142 (cmp, HONOR_NANS (@0)); }
3148 (bit_xor (cmp @0 @1) integer_truep)
3149 (with { enum tree_code ic = invert_tree_comparison
3150 (cmp, HONOR_NANS (@0)); }
3156 /* Transform comparisons of the form X - Y CMP 0 to X CMP Y.
3157 ??? The transformation is valid for the other operators if overflow
3158 is undefined for the type, but performing it here badly interacts
3159 with the transformation in fold_cond_expr_with_comparison which
3160 attempts to synthesize ABS_EXPR.  */
3162 (for sub (minus pointer_diff)
3164 (cmp (sub@2 @0 @1) integer_zerop)
3165 (if (single_use (@2))
3168 /* Transform comparisons of the form X * C1 CMP 0 to X CMP 0 in the
3169 signed arithmetic case. That form is created by the compiler
3170 often enough for folding it to be of value. One example is in
3171 computing loop trip counts after Operator Strength Reduction. */
3172 (for cmp (simple_comparison)
3173 scmp (swapped_simple_comparison)
3175 (cmp (mult@3 @0 INTEGER_CST@1) integer_zerop@2)
3176 /* Handle unfolded multiplication by zero. */
3177 (if (integer_zerop (@1))
3179 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
3180 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
3182 /* If @1 is negative we swap the sense of the comparison. */
3183 (if (tree_int_cst_sgn (@1) < 0)
3187 /* Simplify comparison of something with itself. For IEEE
3188 floating-point, we can only do some of these simplifications. */
3192 (if (! FLOAT_TYPE_P (TREE_TYPE (@0))
3193 || ! HONOR_NANS (@0))
3194 { constant_boolean_node (true, type); }
3195 (if (cmp != EQ_EXPR)
3201 || ! FLOAT_TYPE_P (TREE_TYPE (@0))
3202 || ! HONOR_NANS (@0))
3203 { constant_boolean_node (false, type); })))
3204 (for cmp (unle unge uneq)
3207 { constant_boolean_node (true, type); }))
3208 (for cmp (unlt ungt)
3214 (if (!flag_trapping_math)
3215 { constant_boolean_node (false, type); }))
3217 /* Fold ~X op ~Y as Y op X. */
3218 (for cmp (simple_comparison)
3220 (cmp (bit_not@2 @0) (bit_not@3 @1))
3221 (if (single_use (@2) && single_use (@3))
3224 /* Fold ~X op C as X op' ~C, where op' is the swapped comparison. */
3225 (for cmp (simple_comparison)
3226 scmp (swapped_simple_comparison)
3228 (cmp (bit_not@2 @0) CONSTANT_CLASS_P@1)
3229 (if (single_use (@2)
3230 && (TREE_CODE (@1) == INTEGER_CST || TREE_CODE (@1) == VECTOR_CST))
3231 (scmp @0 (bit_not @1)))))
3233 (for cmp (simple_comparison)
3234 /* Fold (double)float1 CMP (double)float2 into float1 CMP float2. */
3236 (cmp (convert@2 @0) (convert? @1))
3237 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
3238 && (DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@2))
3239 == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@0)))
3240 && (DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@2))
3241 == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1))))
3244 tree type1 = TREE_TYPE (@1);
3245 if (TREE_CODE (@1) == REAL_CST && !DECIMAL_FLOAT_TYPE_P (type1))
3247 REAL_VALUE_TYPE orig = TREE_REAL_CST (@1);
3248 if (TYPE_PRECISION (type1) > TYPE_PRECISION (float_type_node)
3249 && exact_real_truncate (TYPE_MODE (float_type_node), &orig))
3250 type1 = float_type_node;
3251 if (TYPE_PRECISION (type1) > TYPE_PRECISION (double_type_node)
3252 && exact_real_truncate (TYPE_MODE (double_type_node), &orig))
3253 type1 = double_type_node;
3256 = (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (type1)
3257 ? TREE_TYPE (@0) : type1);
3259 (if (TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (newtype))
3260 (cmp (convert:newtype @0) (convert:newtype @1))))))
3264 /* IEEE doesn't distinguish +0 and -0 in comparisons. */
3266 /* a CMP (-0) -> a CMP 0 */
3267 (if (REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (@1)))
3268 (cmp @0 { build_real (TREE_TYPE (@1), dconst0); }))
3269 /* x != NaN is always true, other ops are always false. */
3270 (if (REAL_VALUE_ISNAN (TREE_REAL_CST (@1))
3271 && ! HONOR_SNANS (@1))
3272 { constant_boolean_node (cmp == NE_EXPR, type); })
3273 /* Fold comparisons against infinity. */
3274 (if (REAL_VALUE_ISINF (TREE_REAL_CST (@1))
3275 && MODE_HAS_INFINITIES (TYPE_MODE (TREE_TYPE (@1))))
3278 REAL_VALUE_TYPE max;
3279 enum tree_code code = cmp;
3280 bool neg = REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1));
3282 code = swap_tree_comparison (code);
3285 /* x > +Inf is always false, if we ignore NaNs or exceptions. */
3286 (if (code == GT_EXPR
3287 && !(HONOR_NANS (@0) && flag_trapping_math))
3288 { constant_boolean_node (false, type); })
3289 (if (code == LE_EXPR)
3290 /* x <= +Inf is always true, if we don't care about NaNs. */
3291 (if (! HONOR_NANS (@0))
3292 { constant_boolean_node (true, type); }
3293 /* x <= +Inf is the same as x == x, i.e. !isnan(x), but this loses
3294 an "invalid" exception. */
3295 (if (!flag_trapping_math)
3297 /* x == +Inf and x >= +Inf are always equal to x > DBL_MAX, but
3298 for == this introduces an exception for x a NaN. */
3299 (if ((code == EQ_EXPR && !(HONOR_NANS (@0) && flag_trapping_math))
3301 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
3303 (lt @0 { build_real (TREE_TYPE (@0), max); })
3304 (gt @0 { build_real (TREE_TYPE (@0), max); }))))
3305 /* x < +Inf is always equal to x <= DBL_MAX. */
3306 (if (code == LT_EXPR)
3307 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
3309 (ge @0 { build_real (TREE_TYPE (@0), max); })
3310 (le @0 { build_real (TREE_TYPE (@0), max); }))))
3311 /* x != +Inf is always equal to !(x > DBL_MAX), but this introduces
3312 an exception for x a NaN so use an unordered comparison. */
3313 (if (code == NE_EXPR)
3314 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
3315 (if (! HONOR_NANS (@0))
3317 (ge @0 { build_real (TREE_TYPE (@0), max); })
3318 (le @0 { build_real (TREE_TYPE (@0), max); }))
3320 (unge @0 { build_real (TREE_TYPE (@0), max); })
3321 (unle @0 { build_real (TREE_TYPE (@0), max); }))))))))))
3323 /* If this is a comparison of a real constant with a PLUS_EXPR
3324 or a MINUS_EXPR of a real constant, we can convert it into a
3325 comparison with a revised real constant as long as no overflow
3326 occurs when unsafe_math_optimizations are enabled. */
3327 (if (flag_unsafe_math_optimizations)
3328 (for op (plus minus)
3330 (cmp (op @0 REAL_CST@1) REAL_CST@2)
3333 tree tem = const_binop (op == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR,
3334 TREE_TYPE (@1), @2, @1);
3336 (if (tem && !TREE_OVERFLOW (tem))
3337 (cmp @0 { tem; }))))))
3339 /* Likewise, we can simplify a comparison of a real constant with
3340 a MINUS_EXPR whose first operand is also a real constant, i.e.
3341 (c1 - x) < c2 becomes x > c1-c2. Reordering is allowed on
3342 floating-point types only if -fassociative-math is set. */
3343 (if (flag_associative_math)
3345 (cmp (minus REAL_CST@0 @1) REAL_CST@2)
3346 (with { tree tem = const_binop (MINUS_EXPR, TREE_TYPE (@1), @0, @2); }
3347 (if (tem && !TREE_OVERFLOW (tem))
3348 (cmp { tem; } @1)))))
3350 /* Fold comparisons against built-in math functions. */
3351 (if (flag_unsafe_math_optimizations
3352 && ! flag_errno_math)
3355 (cmp (sq @0) REAL_CST@1)
3357 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
3359 /* sqrt(x) < y is always false, if y is negative. */
3360 (if (cmp == EQ_EXPR || cmp == LT_EXPR || cmp == LE_EXPR)
3361 { constant_boolean_node (false, type); })
3362 /* sqrt(x) > y is always true, if y is negative and we
3363 don't care about NaNs, i.e. negative values of x. */
3364 (if (cmp == NE_EXPR || !HONOR_NANS (@0))
3365 { constant_boolean_node (true, type); })
3366 /* sqrt(x) > y is the same as x >= 0, if y is negative. */
3367 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })))
3368 (if (real_equal (TREE_REAL_CST_PTR (@1), &dconst0))
3370 /* sqrt(x) < 0 is always false. */
3371 (if (cmp == LT_EXPR)
3372 { constant_boolean_node (false, type); })
3373 /* sqrt(x) >= 0 is always true if we don't care about NaNs. */
3374 (if (cmp == GE_EXPR && !HONOR_NANS (@0))
3375 { constant_boolean_node (true, type); })
3376 /* sqrt(x) <= 0 -> x == 0. */
3377 (if (cmp == LE_EXPR)
3379 /* Otherwise sqrt(x) cmp 0 -> x cmp 0. Here cmp can be >=, >,
3380 == or !=. In the last case:
3382 (sqrt(x) != 0) == (NaN != 0) == true == (x != 0)
3384 if x is negative or NaN. Due to -funsafe-math-optimizations,
3385 the results for other x follow from natural arithmetic. */
3387 (if (cmp == GT_EXPR || cmp == GE_EXPR)
3391 real_arithmetic (&c2, MULT_EXPR,
3392 &TREE_REAL_CST (@1), &TREE_REAL_CST (@1));
3393 real_convert (&c2, TYPE_MODE (TREE_TYPE (@0)), &c2);
3395 (if (REAL_VALUE_ISINF (c2))
3396 /* sqrt(x) > y is x == +Inf, when y is very large. */
3397 (if (HONOR_INFINITIES (@0))
3398 (eq @0 { build_real (TREE_TYPE (@0), c2); })
3399 { constant_boolean_node (false, type); })
3400 /* sqrt(x) > c is the same as x > c*c. */
3401 (cmp @0 { build_real (TREE_TYPE (@0), c2); }))))
3402 (if (cmp == LT_EXPR || cmp == LE_EXPR)
3406 real_arithmetic (&c2, MULT_EXPR,
3407 &TREE_REAL_CST (@1), &TREE_REAL_CST (@1));
3408 real_convert (&c2, TYPE_MODE (TREE_TYPE (@0)), &c2);
3410 (if (REAL_VALUE_ISINF (c2))
3412 /* sqrt(x) < y is always true, when y is a very large
3413 value and we don't care about NaNs or Infinities. */
3414 (if (! HONOR_NANS (@0) && ! HONOR_INFINITIES (@0))
3415 { constant_boolean_node (true, type); })
3416 /* sqrt(x) < y is x != +Inf when y is very large and we
3417 don't care about NaNs. */
3418 (if (! HONOR_NANS (@0))
3419 (ne @0 { build_real (TREE_TYPE (@0), c2); }))
3420 /* sqrt(x) < y is x >= 0 when y is very large and we
3421 don't care about Infinities. */
3422 (if (! HONOR_INFINITIES (@0))
3423 (ge @0 { build_real (TREE_TYPE (@0), dconst0); }))
3424 /* sqrt(x) < y is x >= 0 && x != +Inf, when y is large. */
3427 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
3428 (ne @0 { build_real (TREE_TYPE (@0), c2); }))))
3429 /* sqrt(x) < c is the same as x < c*c, if we ignore NaNs. */
3430 (if (! HONOR_NANS (@0))
3431 (cmp @0 { build_real (TREE_TYPE (@0), c2); })
3432 /* sqrt(x) < c is the same as x >= 0 && x < c*c. */
3435 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
3436 (cmp @0 { build_real (TREE_TYPE (@0), c2); })))))))))
3437 /* Transform sqrt(x) cmp sqrt(y) -> x cmp y. */
3439 (cmp (sq @0) (sq @1))
3440 (if (! HONOR_NANS (@0))
3443 /* Optimize various special cases of (FTYPE) N CMP (FTYPE) M. */
3444 (for cmp (lt le eq ne ge gt unordered ordered unlt unle ungt unge uneq ltgt)
3445 icmp (lt le eq ne ge gt unordered ordered lt le gt ge eq ne)
3447 (cmp (float@0 @1) (float @2))
3448 (if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (@0))
3449 && ! DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@0)))
3452 format_helper fmt (REAL_MODE_FORMAT (TYPE_MODE (TREE_TYPE (@0))));
3453 tree type1 = TREE_TYPE (@1);
3454 bool type1_signed_p = TYPE_SIGN (type1) == SIGNED;
3455 tree type2 = TREE_TYPE (@2);
3456 bool type2_signed_p = TYPE_SIGN (type2) == SIGNED;
3458 (if (fmt.can_represent_integral_type_p (type1)
3459 && fmt.can_represent_integral_type_p (type2))
3460 (if (cmp == ORDERED_EXPR || cmp == UNORDERED_EXPR)
3461 { constant_boolean_node (cmp == ORDERED_EXPR, type); }
3462 (if (TYPE_PRECISION (type1) > TYPE_PRECISION (type2)
3463 && type1_signed_p >= type2_signed_p)
3464 (icmp @1 (convert @2))
3465 (if (TYPE_PRECISION (type1) < TYPE_PRECISION (type2)
3466 && type1_signed_p <= type2_signed_p)
3467 (icmp (convert:type2 @1) @2)
3468 (if (TYPE_PRECISION (type1) == TYPE_PRECISION (type2)
3469 && type1_signed_p == type2_signed_p)
3470 (icmp @1 @2))))))))))
3472 /* Optimize various special cases of (FTYPE) N CMP CST. */
3473 (for cmp (lt le eq ne ge gt)
3474 icmp (le le eq ne ge ge)
3476 (cmp (float @0) REAL_CST@1)
3477 (if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (@1))
3478 && ! DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1)))
3481 tree itype = TREE_TYPE (@0);
3482 format_helper fmt (REAL_MODE_FORMAT (TYPE_MODE (TREE_TYPE (@1))));
3483 const REAL_VALUE_TYPE *cst = TREE_REAL_CST_PTR (@1);
3484 /* Be careful to preserve any potential exceptions due to
3485 NaNs. qNaNs are ok in == or != context.
3486 TODO: relax under -fno-trapping-math or
3487 -fno-signaling-nans. */
3489 = real_isnan (cst) && (cst->signalling
3490 || (cmp != EQ_EXPR && cmp != NE_EXPR));
3492 /* TODO: allow non-fitting itype and SNaNs when
3493 -fno-trapping-math. */
3494 (if (fmt.can_represent_integral_type_p (itype) && ! exception_p)
3497 signop isign = TYPE_SIGN (itype);
3498 REAL_VALUE_TYPE imin, imax;
3499 real_from_integer (&imin, fmt, wi::min_value (itype), isign);
3500 real_from_integer (&imax, fmt, wi::max_value (itype), isign);
3502 REAL_VALUE_TYPE icst;
3503 if (cmp == GT_EXPR || cmp == GE_EXPR)
3504 real_ceil (&icst, fmt, cst);
3505 else if (cmp == LT_EXPR || cmp == LE_EXPR)
3506 real_floor (&icst, fmt, cst);
3508 real_trunc (&icst, fmt, cst);
3510 bool cst_int_p = !real_isnan (cst) && real_identical (&icst, cst);
3512 bool overflow_p = false;
3514 = real_to_integer (&icst, &overflow_p, TYPE_PRECISION (itype));
3517 /* Optimize cases when CST is outside of ITYPE's range. */
3518 (if (real_compare (LT_EXPR, cst, &imin))
3519 { constant_boolean_node (cmp == GT_EXPR || cmp == GE_EXPR || cmp == NE_EXPR,
3521 (if (real_compare (GT_EXPR, cst, &imax))
3522 { constant_boolean_node (cmp == LT_EXPR || cmp == LE_EXPR || cmp == NE_EXPR,
3524 /* Remove cast if CST is an integer representable by ITYPE. */
3526 (cmp @0 { gcc_assert (!overflow_p);
3527 wide_int_to_tree (itype, icst_val); })
3529 /* When CST is fractional, optimize
3530 (FTYPE) N == CST -> 0
3531 (FTYPE) N != CST -> 1. */
3532 (if (cmp == EQ_EXPR || cmp == NE_EXPR)
3533 { constant_boolean_node (cmp == NE_EXPR, type); })
3534 /* Otherwise replace with sensible integer constant. */
3537 gcc_checking_assert (!overflow_p);
3539 (icmp @0 { wide_int_to_tree (itype, icst_val); })))))))))
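/* Illustrative examples, assuming "int i":
     (double) i < 1.5    ->  i <= 1   (1.5 is not exactly representable in int)
     (double) i == 1.5   ->  false
     (double) i > 1e20   ->  false    (1e20 is above INT_MAX)  */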
3541 /* Fold A /[ex] B CMP C to A CMP B * C. */
3544 (cmp (exact_div @0 @1) INTEGER_CST@2)
3545 (if (!integer_zerop (@1))
3546 (if (wi::to_wide (@2) == 0)
3548 (if (TREE_CODE (@1) == INTEGER_CST)
3551 wi::overflow_type ovf;
3552 wide_int prod = wi::mul (wi::to_wide (@2), wi::to_wide (@1),
3553 TYPE_SIGN (TREE_TYPE (@1)), &ovf);
3556 { constant_boolean_node (cmp == NE_EXPR, type); }
3557 (cmp @0 { wide_int_to_tree (TREE_TYPE (@0), prod); }))))))))
3558 (for cmp (lt le gt ge)
3560 (cmp (exact_div @0 INTEGER_CST@1) INTEGER_CST@2)
3561 (if (wi::gt_p (wi::to_wide (@1), 0, TYPE_SIGN (TREE_TYPE (@1))))
3564 wi::overflow_type ovf;
3565 wide_int prod = wi::mul (wi::to_wide (@2), wi::to_wide (@1),
3566 TYPE_SIGN (TREE_TYPE (@1)), &ovf);
3569 { constant_boolean_node (wi::lt_p (wi::to_wide (@2), 0,
3570 TYPE_SIGN (TREE_TYPE (@2)))
3571 != (cmp == LT_EXPR || cmp == LE_EXPR), type); }
3572 (cmp @0 { wide_int_to_tree (TREE_TYPE (@0), prod); }))))))
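/* Illustrative examples, /[ex] denoting a division known to be exact:
     (x /[ex] 4) == 5           ->  x == 20
     (x /[ex] 4) == 0x40000000  ->  false for 32-bit int, as the product
                                    4 * 0x40000000 overflows.  */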
3574 /* Unordered tests if either argument is a NaN. */
3576 (bit_ior (unordered @0 @0) (unordered @1 @1))
3577 (if (types_match (@0, @1))
3580 (bit_and (ordered @0 @0) (ordered @1 @1))
3581 (if (types_match (@0, @1))
3584 (bit_ior:c (unordered @0 @0) (unordered:c@2 @0 @1))
3587 (bit_and:c (ordered @0 @0) (ordered:c@2 @0 @1))
3590 /* Simple range test simplifications. */
3591 /* A < B || A >= B -> true. */
3592 (for test1 (lt le le le ne ge)
3593 test2 (ge gt ge ne eq ne)
3595 (bit_ior:c (test1 @0 @1) (test2 @0 @1))
3596 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
3597 || VECTOR_INTEGER_TYPE_P (TREE_TYPE (@0)))
3598 { constant_boolean_node (true, type); })))
3599 /* A < B && A >= B -> false. */
3600 (for test1 (lt lt lt le ne eq)
3601 test2 (ge gt eq gt eq gt)
3603 (bit_and:c (test1 @0 @1) (test2 @0 @1))
3604 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
3605 || VECTOR_INTEGER_TYPE_P (TREE_TYPE (@0)))
3606 { constant_boolean_node (false, type); })))
3608 /* A & (2**N - 1) <= 2**K - 1 -> A & (2**N - 2**K) == 0
3609 A & (2**N - 1) > 2**K - 1 -> A & (2**N - 2**K) != 0
3611 Note that comparisons
3612 A & (2**N - 1) < 2**K -> A & (2**N - 2**K) == 0
3613 A & (2**N - 1) >= 2**K -> A & (2**N - 2**K) != 0
3614 will be canonicalized to above so there's no need to consider them here.  */
3621 (cmp (bit_and@0 @1 INTEGER_CST@2) INTEGER_CST@3)
3622 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)))
3625 tree ty = TREE_TYPE (@0);
3626 unsigned prec = TYPE_PRECISION (ty);
3627 wide_int mask = wi::to_wide (@2, prec);
3628 wide_int rhs = wi::to_wide (@3, prec);
3629 signop sgn = TYPE_SIGN (ty);
3631 (if ((mask & (mask + 1)) == 0 && wi::gt_p (rhs, 0, sgn)
3632 && (rhs & (rhs + 1)) == 0 && wi::ge_p (mask, rhs, sgn))
3633 (eqcmp (bit_and @1 { wide_int_to_tree (ty, mask - rhs); })
3634 { build_zero_cst (ty); }))))))
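/* Illustrative example: (x & 15) <= 3  ->  (x & 12) == 0, i.e. the two
   bits above the low two must be clear.  */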
3636 /* -A CMP -B -> B CMP A. */
3637 (for cmp (tcc_comparison)
3638 scmp (swapped_tcc_comparison)
3640 (cmp (negate @0) (negate @1))
3641 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
3642 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
3643 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
3646 (cmp (negate @0) CONSTANT_CLASS_P@1)
3647 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
3648 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
3649 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
3650 (with { tree tem = const_unop (NEGATE_EXPR, TREE_TYPE (@0), @1); }
3651 (if (tem && !TREE_OVERFLOW (tem))
3652 (scmp @0 { tem; }))))))
3654 /* Convert ABS_EXPR<x> == 0 or ABS_EXPR<x> != 0 to x == 0 or x != 0. */
3657 (op (abs @0) zerop@1)
3660 /* From fold_sign_changed_comparison and fold_widened_comparison.
3661 FIXME: the lack of symmetry is disturbing. */
3662 (for cmp (simple_comparison)
3664 (cmp (convert@0 @00) (convert?@1 @10))
3665 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
3666 /* Disable this optimization if we're casting a function pointer
3667 type on targets that require function pointer canonicalization. */
3668 && !(targetm.have_canonicalize_funcptr_for_compare ()
3669 && ((POINTER_TYPE_P (TREE_TYPE (@00))
3670 && FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@00))))
3671 || (POINTER_TYPE_P (TREE_TYPE (@10))
3672 && FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@10))))))
3674 (if (TYPE_PRECISION (TREE_TYPE (@00)) == TYPE_PRECISION (TREE_TYPE (@0))
3675 && (TREE_CODE (@10) == INTEGER_CST
3677 && (TYPE_UNSIGNED (TREE_TYPE (@00)) == TYPE_UNSIGNED (TREE_TYPE (@0))
3680 && !POINTER_TYPE_P (TREE_TYPE (@00)))
3681 /* ??? The special-casing of INTEGER_CST conversion was in the original
3682 code and is kept here to avoid a spurious overflow flag on the resulting
3683 constant which fold_convert produces. */
3684 (if (TREE_CODE (@1) == INTEGER_CST)
3685 (cmp @00 { force_fit_type (TREE_TYPE (@00), wi::to_widest (@1), 0,
3686 TREE_OVERFLOW (@1)); })
3687 (cmp @00 (convert @1)))
3689 (if (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (TREE_TYPE (@00)))
3690 /* If possible, express the comparison in the shorter mode. */
3691 (if ((cmp == EQ_EXPR || cmp == NE_EXPR
3692 || TYPE_UNSIGNED (TREE_TYPE (@0)) == TYPE_UNSIGNED (TREE_TYPE (@00))
3693 || (!TYPE_UNSIGNED (TREE_TYPE (@0))
3694 && TYPE_UNSIGNED (TREE_TYPE (@00))))
3695 && (types_match (TREE_TYPE (@10), TREE_TYPE (@00))
3696 || ((TYPE_PRECISION (TREE_TYPE (@00))
3697 >= TYPE_PRECISION (TREE_TYPE (@10)))
3698 && (TYPE_UNSIGNED (TREE_TYPE (@00))
3699 == TYPE_UNSIGNED (TREE_TYPE (@10))))
3700 || (TREE_CODE (@10) == INTEGER_CST
3701 && INTEGRAL_TYPE_P (TREE_TYPE (@00))
3702 && int_fits_type_p (@10, TREE_TYPE (@00)))))
3703 (cmp @00 (convert @10))
3704 (if (TREE_CODE (@10) == INTEGER_CST
3705 && INTEGRAL_TYPE_P (TREE_TYPE (@00))
3706 && !int_fits_type_p (@10, TREE_TYPE (@00)))
3709 tree min = lower_bound_in_type (TREE_TYPE (@10), TREE_TYPE (@00));
3710 tree max = upper_bound_in_type (TREE_TYPE (@10), TREE_TYPE (@00));
3711 bool above = integer_nonzerop (const_binop (LT_EXPR, type, max, @10));
3712 bool below = integer_nonzerop (const_binop (LT_EXPR, type, @10, min));
3714 (if (above || below)
3715 (if (cmp == EQ_EXPR || cmp == NE_EXPR)
3716 { constant_boolean_node (cmp == EQ_EXPR ? false : true, type); }
3717 (if (cmp == LT_EXPR || cmp == LE_EXPR)
3718 { constant_boolean_node (above ? true : false, type); }
3719 (if (cmp == GT_EXPR || cmp == GE_EXPR)
3720 { constant_boolean_node (above ? false : true, type); }))))))))))))
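/* Editor's note: illustrative examples for the widened-comparison case
   above, not patterns; the types are assumptions.  For signed char c,

     (int) c == 300    becomes    0   (300 is outside signed char's range)
     (int) c != 300    becomes    1
     (int) c <  300    becomes    1
     (int) c >= 300    becomes    0

   while a constant that does fit the narrow type is simply rewritten in
   that type, e.g. (int) c < 10 becomes c < 10.  */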
3723 /* A local variable can never be pointed to by
3724 the default SSA name of an incoming parameter.
3725 SSA names are canonicalized to the second operand position. */
3727 (cmp addr@0 SSA_NAME@1)
3728 (if (SSA_NAME_IS_DEFAULT_DEF (@1)
3729 && TREE_CODE (SSA_NAME_VAR (@1)) == PARM_DECL)
3730 (with { tree base = get_base_address (TREE_OPERAND (@0, 0)); }
3731 (if (TREE_CODE (base) == VAR_DECL
3732 && auto_var_in_fn_p (base, current_function_decl))
3733 (if (cmp == NE_EXPR)
3734 { constant_boolean_node (true, type); }
3735 { constant_boolean_node (false, type); }))))))
3737 /* Equality compare simplifications from fold_binary */
3740 /* If we have (A | C) == D where C & ~D != 0, convert this into 0.
3741 Similarly for NE_EXPR. */
3743 (cmp (convert?@3 (bit_ior @0 INTEGER_CST@1)) INTEGER_CST@2)
3744 (if (tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0))
3745 && wi::bit_and_not (wi::to_wide (@1), wi::to_wide (@2)) != 0)
3746 { constant_boolean_node (cmp == NE_EXPR, type); }))
3748 /* (X ^ Y) == 0 becomes X == Y, and (X ^ Y) != 0 becomes X != Y. */
3750 (cmp (bit_xor @0 @1) integer_zerop)
3753 /* (X ^ Y) == Y becomes X == 0.
3754 Likewise (X ^ Y) == X becomes Y == 0. */
3756 (cmp:c (bit_xor:c @0 @1) @0)
3757 (cmp @1 { build_zero_cst (TREE_TYPE (@1)); }))
3759 /* (X ^ C1) op C2 can be rewritten as X op (C1 ^ C2). */
3761 (cmp (convert?@3 (bit_xor @0 INTEGER_CST@1)) INTEGER_CST@2)
3762 (if (tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0)))
3763 (cmp @0 (bit_xor @1 (convert @2)))))
3766 (cmp (convert? addr@0) integer_zerop)
3767 (if (tree_single_nonzero_warnv_p (@0, NULL))
3768 { constant_boolean_node (cmp == NE_EXPR, type); })))
3770 /* If we have (A & C) == C where C is a power of 2, convert this into
3771 (A & C) != 0. Similarly for NE_EXPR. */
3775 (cmp (bit_and@2 @0 integer_pow2p@1) @1)
3776 (icmp @2 { build_zero_cst (TREE_TYPE (@0)); })))
3778 /* If we have (A & C) != 0 ? D : 0 where C and D are powers of 2,
3779 convert this into a shift followed by ANDing with D. */
3782 (ne (bit_and @0 integer_pow2p@1) integer_zerop)
3783 INTEGER_CST@2 integer_zerop)
3784 (if (integer_pow2p (@2))
3786 int shift = (wi::exact_log2 (wi::to_wide (@2))
3787 - wi::exact_log2 (wi::to_wide (@1)));
3791 (lshift (convert @0) { build_int_cst (integer_type_node, shift); }) @2)
3793 (convert (rshift @0 { build_int_cst (integer_type_node, -shift); }))
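/* Editor's note: worked examples for the shift-and-AND rewrite above, not
   patterns; the unsigned x and the constants are assumptions (conversions
   elided):

     (x & 4)  != 0 ? 16 : 0    becomes    (x << 2) & 16
     (x & 16) != 0 ? 4  : 0    becomes    (x >> 2) & 4

   i.e. the tested bit C is moved to the position of D by shifting left or
   right by log2 (D) - log2 (C) and then masked with D.  */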
3796 /* If we have (A & C) != 0 where C is the sign bit of A, convert
3797 this into A < 0. Similarly for (A & C) == 0 into A >= 0. */
3801 (cmp (bit_and (convert?@2 @0) integer_pow2p@1) integer_zerop)
3802 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
3803 && type_has_mode_precision_p (TREE_TYPE (@0))
3804 && element_precision (@2) >= element_precision (@0)
3805 && wi::only_sign_bit_p (wi::to_wide (@1), element_precision (@0)))
3806 (with { tree stype = signed_type_for (TREE_TYPE (@0)); }
3807 (ncmp (convert:stype @0) { build_zero_cst (stype); })))))
3809 /* If we have A < 0 ? C : 0 where C is a power of 2, convert
3810 this into a right shift or sign extension followed by ANDing with C. */
3813 (lt @0 integer_zerop)
3814 INTEGER_CST@1 integer_zerop)
3815 (if (integer_pow2p (@1)
3816 && !TYPE_UNSIGNED (TREE_TYPE (@0)))
3818 int shift = element_precision (@0) - wi::exact_log2 (wi::to_wide (@1)) - 1;
3822 (convert (rshift @0 { build_int_cst (integer_type_node, shift); }))
3824 /* Otherwise ctype must be wider than TREE_TYPE (@0) and pure
3825 sign extension followed by AND with C will achieve the effect. */
3826 (bit_and (convert @0) @1)))))
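/* Editor's note: a worked instance of the fold above, not a pattern; the
   32-bit signed x is an assumption (conversions elided):

     x < 0 ? 8 : 0    becomes    (x >> 28) & 8

   since shift = precision - log2 (C) - 1 = 32 - 3 - 1 = 28 places the sign
   bit at bit 3, where the AND with C = 8 extracts it.  */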
3828 /* When the addresses are not directly of decls, compare base and offset.
3829 This implements some remaining parts of fold_comparison address
3830 comparisons, but is still not a complete replacement for it.  Still, it is
3831 good enough to keep fold_stmt from regressing when not dispatching to fold_binary. */
3832 (for cmp (simple_comparison)
3834 (cmp (convert1?@2 addr@0) (convert2? addr@1))
3837 poly_int64 off0, off1;
3838 tree base0 = get_addr_base_and_unit_offset (TREE_OPERAND (@0, 0), &off0);
3839 tree base1 = get_addr_base_and_unit_offset (TREE_OPERAND (@1, 0), &off1);
3840 if (base0 && TREE_CODE (base0) == MEM_REF)
3842 off0 += mem_ref_offset (base0).force_shwi ();
3843 base0 = TREE_OPERAND (base0, 0);
3845 if (base1 && TREE_CODE (base1) == MEM_REF)
3847 off1 += mem_ref_offset (base1).force_shwi ();
3848 base1 = TREE_OPERAND (base1, 0);
3851 (if (base0 && base1)
3855 /* Punt in GENERIC on variables with value expressions;
3856 the value expressions might point to fields/elements
3857 of other vars etc. */
3859 && ((VAR_P (base0) && DECL_HAS_VALUE_EXPR_P (base0))
3860 || (VAR_P (base1) && DECL_HAS_VALUE_EXPR_P (base1))))
3862 else if (decl_in_symtab_p (base0)
3863 && decl_in_symtab_p (base1))
3864 equal = symtab_node::get_create (base0)
3865 ->equal_address_to (symtab_node::get_create (base1));
3866 else if ((DECL_P (base0)
3867 || TREE_CODE (base0) == SSA_NAME
3868 || TREE_CODE (base0) == STRING_CST)
3870 || TREE_CODE (base1) == SSA_NAME
3871 || TREE_CODE (base1) == STRING_CST))
3872 equal = (base0 == base1);
3875 && (cmp == EQ_EXPR || cmp == NE_EXPR
3876 /* If the offsets are equal we can ignore overflow. */
3877 || known_eq (off0, off1)
3878 || TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
3879 /* Or if we compare using pointers to decls or strings. */
3880 || (POINTER_TYPE_P (TREE_TYPE (@2))
3881 && (DECL_P (base0) || TREE_CODE (base0) == STRING_CST))))
3883 (if (cmp == EQ_EXPR && (known_eq (off0, off1) || known_ne (off0, off1)))
3884 { constant_boolean_node (known_eq (off0, off1), type); })
3885 (if (cmp == NE_EXPR && (known_eq (off0, off1) || known_ne (off0, off1)))
3886 { constant_boolean_node (known_ne (off0, off1), type); })
3887 (if (cmp == LT_EXPR && (known_lt (off0, off1) || known_ge (off0, off1)))
3888 { constant_boolean_node (known_lt (off0, off1), type); })
3889 (if (cmp == LE_EXPR && (known_le (off0, off1) || known_gt (off0, off1)))
3890 { constant_boolean_node (known_le (off0, off1), type); })
3891 (if (cmp == GE_EXPR && (known_ge (off0, off1) || known_lt (off0, off1)))
3892 { constant_boolean_node (known_ge (off0, off1), type); })
3893 (if (cmp == GT_EXPR && (known_gt (off0, off1) || known_le (off0, off1)))
3894 { constant_boolean_node (known_gt (off0, off1), type); }))
3896 && DECL_P (base0) && DECL_P (base1)
3897 /* If we compare these as integers, require equal offsets. */
3898 && (!INTEGRAL_TYPE_P (TREE_TYPE (@2))
3899 || known_eq (off0, off1)))
3901 (if (cmp == EQ_EXPR)
3902 { constant_boolean_node (false, type); })
3903 (if (cmp == NE_EXPR)
3904 { constant_boolean_node (true, type); })))))))))
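/* Editor's note: illustrative results of the base-and-offset comparison
   above, not patterns; the declarations are assumptions.  For a file-scope
   char buf[16],

     &buf[2] == &buf[5]    folds to    false   (same base, offsets differ)
     &buf[2] <  &buf[5]    folds to    true    (same base, 2 < 5)

   and an equality between the addresses of two distinct local decls folds
   to false because the bases cannot compare equal.  */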
3906 /* Simplify pointer equality compares using PTA. */
3910 (if (POINTER_TYPE_P (TREE_TYPE (@0))
3911 && ptrs_compare_unequal (@0, @1))
3912 { constant_boolean_node (neeq != EQ_EXPR, type); })))
3914 /* PR70920: Transform (intptr_t)x eq/ne CST to x eq/ne (typeof x) CST.
3915 and (typeof ptr_cst) x eq/ne ptr_cst to x eq/ne (typeof x) CST.
3916 Disable the transform if either operand is pointer to function.
3917 This broke pr22051-2.c for arm where function pointer
3918 canonicalization is not wanted. */
3922 (cmp (convert @0) INTEGER_CST@1)
3923 (if (((POINTER_TYPE_P (TREE_TYPE (@0))
3924 && !FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@0)))
3925 && INTEGRAL_TYPE_P (TREE_TYPE (@1)))
3926 || (INTEGRAL_TYPE_P (TREE_TYPE (@0))
3927 && POINTER_TYPE_P (TREE_TYPE (@1))
3928 && !FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@1)))))
3929 && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
3930 (cmp @0 (convert @1)))))
3932 /* Non-equality compare simplifications from fold_binary */
3933 (for cmp (lt gt le ge)
3934 /* Comparisons with the highest or lowest possible integer of
3935 the specified precision will have known values. */
3937 (cmp (convert?@2 @0) INTEGER_CST@1)
3938 (if ((INTEGRAL_TYPE_P (TREE_TYPE (@1)) || POINTER_TYPE_P (TREE_TYPE (@1)))
3939 && tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0)))
3942 tree arg1_type = TREE_TYPE (@1);
3943 unsigned int prec = TYPE_PRECISION (arg1_type);
3944 wide_int max = wi::max_value (arg1_type);
3945 wide_int signed_max = wi::max_value (prec, SIGNED);
3946 wide_int min = wi::min_value (arg1_type);
3949 (if (wi::to_wide (@1) == max)
3951 (if (cmp == GT_EXPR)
3952 { constant_boolean_node (false, type); })
3953 (if (cmp == GE_EXPR)
3955 (if (cmp == LE_EXPR)
3956 { constant_boolean_node (true, type); })
3957 (if (cmp == LT_EXPR)
3959 (if (wi::to_wide (@1) == min)
3961 (if (cmp == LT_EXPR)
3962 { constant_boolean_node (false, type); })
3963 (if (cmp == LE_EXPR)
3965 (if (cmp == GE_EXPR)
3966 { constant_boolean_node (true, type); })
3967 (if (cmp == GT_EXPR)
3969 (if (wi::to_wide (@1) == max - 1)
3971 (if (cmp == GT_EXPR)
3972 (eq @2 { wide_int_to_tree (TREE_TYPE (@1), wi::to_wide (@1) + 1); }))
3973 (if (cmp == LE_EXPR)
3974 (ne @2 { wide_int_to_tree (TREE_TYPE (@1), wi::to_wide (@1) + 1); }))))
3975 (if (wi::to_wide (@1) == min + 1)
3977 (if (cmp == GE_EXPR)
3978 (ne @2 { wide_int_to_tree (TREE_TYPE (@1), wi::to_wide (@1) - 1); }))
3979 (if (cmp == LT_EXPR)
3980 (eq @2 { wide_int_to_tree (TREE_TYPE (@1), wi::to_wide (@1) - 1); }))))
3981 (if (wi::to_wide (@1) == signed_max
3982 && TYPE_UNSIGNED (arg1_type)
3983 /* We will flip the signedness of the comparison operator
3984 associated with the mode of @1, so the sign bit is
3985 specified by this mode. Check that @1 is the signed
3986 max associated with this sign bit. */
3987 && prec == GET_MODE_PRECISION (SCALAR_INT_TYPE_MODE (arg1_type))
3988 /* signed_type does not work on pointer types. */
3989 && INTEGRAL_TYPE_P (arg1_type))
3990 /* The following case also applies to X < signed_max+1
3991 and X >= signed_max+1 because of previous transformations. */
3992 (if (cmp == LE_EXPR || cmp == GT_EXPR)
3993 (with { tree st = signed_type_for (arg1_type); }
3994 (if (cmp == LE_EXPR)
3995 (ge (convert:st @0) { build_zero_cst (st); })
3996 (lt (convert:st @0) { build_zero_cst (st); }))))))))))
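/* Editor's note: worked examples of the bound comparisons above, not
   patterns; 32-bit int x and unsigned u are assumptions:

     x >  INT_MAX         becomes    false
     x >= INT_MAX         becomes    x == INT_MAX
     x <  INT_MIN         becomes    false
     x >  INT_MAX - 1     becomes    x == INT_MAX
     u <= 0x7fffffffU     becomes    (int) u >= 0

   the last form turns the comparison against the unsigned signed-max into
   a sign-bit test in the corresponding signed type.  */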
3998 (for cmp (unordered ordered unlt unle ungt unge uneq ltgt)
3999 /* If the second operand is NaN, the result is constant. */
4002 (if (REAL_VALUE_ISNAN (TREE_REAL_CST (@1))
4003 && (cmp != LTGT_EXPR || ! flag_trapping_math))
4004 { constant_boolean_node (cmp == ORDERED_EXPR || cmp == LTGT_EXPR
4005 ? false : true, type); })))
4007 /* bool_var != 0 becomes bool_var. */
4009 (ne @0 integer_zerop)
4010 (if (TREE_CODE (TREE_TYPE (@0)) == BOOLEAN_TYPE
4011 && types_match (type, TREE_TYPE (@0)))
4013 /* bool_var == 1 becomes bool_var. */
4015 (eq @0 integer_onep)
4016 (if (TREE_CODE (TREE_TYPE (@0)) == BOOLEAN_TYPE
4017 && types_match (type, TREE_TYPE (@0)))
4020 /* Do not handle bool_var == 0 becomes !bool_var or
4021 bool_var != 1 becomes !bool_var
4022 here because that only is good in assignment context as long
4023 as we require a tcc_comparison in GIMPLE_CONDs where we'd
4024 replace if (x == 0) with tem = ~x; if (tem != 0) which is
4025 clearly less optimal and which we'll transform again in forwprop. */
4027 /* When one argument is a constant, overflow detection can be simplified.
4028 Currently restricted to single use so as not to interfere too much with
4029 ADD_OVERFLOW detection in tree-ssa-math-opts.c.
4030 A + CST CMP A -> A CMP' CST' */
4031 (for cmp (lt le ge gt)
4034 (cmp:c (plus@2 @0 INTEGER_CST@1) @0)
4035 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
4036 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))
4037 && wi::to_wide (@1) != 0
4039 (with { unsigned int prec = TYPE_PRECISION (TREE_TYPE (@0)); }
4040 (out @0 { wide_int_to_tree (TREE_TYPE (@0),
4041 wi::max_value (prec, UNSIGNED)
4042 - wi::to_wide (@1)); })))))
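/* Editor's note: a worked instance of the overflow rewrite above, not a
   pattern; the 32-bit unsigned x is an assumption:

     x + 10 < x    becomes    x > UINT_MAX - 10

   i.e. the wrap-around test is expressed directly as a comparison against
   max - CST, and the remaining orderings map to the matching inverted
   comparisons in the same way.  */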
4044 /* To detect overflow in unsigned A - B, A < B is simpler than A - B > A.
4045 However, the detection logic for SUB_OVERFLOW in tree-ssa-math-opts.c
4046 expects the long form, so we restrict the transformation for now. */
4049 (cmp:c (minus@2 @0 @1) @0)
4050 (if (single_use (@2)
4051 && ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
4052 && TYPE_UNSIGNED (TREE_TYPE (@0))
4053 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
4056 /* Testing for overflow is unnecessary if we already know the result. */
4061 (cmp:c (realpart (IFN_SUB_OVERFLOW@2 @0 @1)) @0)
4062 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
4063 && types_match (TREE_TYPE (@0), TREE_TYPE (@1)))
4064 (out (imagpart @2) { build_zero_cst (TREE_TYPE (@0)); }))))
4069 (cmp:c (realpart (IFN_ADD_OVERFLOW:c@2 @0 @1)) @0)
4070 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
4071 && types_match (TREE_TYPE (@0), TREE_TYPE (@1)))
4072 (out (imagpart @2) { build_zero_cst (TREE_TYPE (@0)); }))))
4074 /* For unsigned operands, -1 / B < A checks whether A * B would overflow.
4075 Simplify it to __builtin_mul_overflow (A, B, <unused>). */
4079 (cmp:c (trunc_div:s integer_all_onesp @1) @0)
4080 (if (TYPE_UNSIGNED (TREE_TYPE (@0)) && !VECTOR_TYPE_P (TREE_TYPE (@0)))
4081 (with { tree t = TREE_TYPE (@0), cpx = build_complex_type (t); }
4082 (out (imagpart (IFN_MUL_OVERFLOW:cpx @0 @1)) { build_zero_cst (t); })))))
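/* Editor's note: an illustrative sketch of the fold above, not a pattern;
   unsigned ints a and b are assumptions:

     (unsigned) -1 / b < a    becomes    imagpart (IFN_MUL_OVERFLOW (a, b)) != 0

   replacing the division by a direct overflow check of a * b.  */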
4084 /* Simplification of math builtins. These rules must all be optimizations
4085 as well as IL simplifications. If there is a possibility that the new
4086 form could be a pessimization, the rule should go in the canonicalization
4087 section that follows this one.
4089 Rules can generally go in this section if they satisfy one of the following conditions:
4092 - the rule describes an identity
4094 - the rule replaces calls with something as simple as addition or multiplication
4097 - the rule contains unary calls only and simplifies the surrounding
4098 arithmetic. (The idea here is to exclude non-unary calls in which
4099 one operand is constant and in which the call is known to be cheap
4100 when the operand has that value.) */
4102 (if (flag_unsafe_math_optimizations)
4103 /* Simplify sqrt(x) * sqrt(x) -> x. */
4105 (mult (SQRT_ALL@1 @0) @1)
4106 (if (!HONOR_SNANS (type))
4109 (for op (plus minus)
4110 /* Simplify (A / C) +- (B / C) -> (A +- B) / C. */
4114 (rdiv (op @0 @2) @1)))
4116 (for cmp (lt le gt ge)
4117 neg_cmp (gt ge lt le)
4118 /* Simplify (x * C1) cmp C2 -> x cmp (C2 / C1), where C1 != 0. */
4120 (cmp (mult @0 REAL_CST@1) REAL_CST@2)
4122 { tree tem = const_binop (RDIV_EXPR, type, @2, @1); }
4124 && !(REAL_VALUE_ISINF (TREE_REAL_CST (tem))
4125 || (real_zerop (tem) && !real_zerop (@1))))
4127 (if (real_less (&dconst0, TREE_REAL_CST_PTR (@1)))
4129 (if (real_less (TREE_REAL_CST_PTR (@1), &dconst0))
4130 (neg_cmp @0 { tem; })))))))
4132 /* Simplify sqrt(x) * sqrt(y) -> sqrt(x*y). */
4133 (for root (SQRT CBRT)
4135 (mult (root:s @0) (root:s @1))
4136 (root (mult @0 @1))))
4138 /* Simplify expN(x) * expN(y) -> expN(x+y). */
4139 (for exps (EXP EXP2 EXP10 POW10)
4141 (mult (exps:s @0) (exps:s @1))
4142 (exps (plus @0 @1))))
4144 /* Simplify a/root(b/c) into a*root(c/b). */
4145 (for root (SQRT CBRT)
4147 (rdiv @0 (root:s (rdiv:s @1 @2)))
4148 (mult @0 (root (rdiv @2 @1)))))
4150 /* Simplify x/expN(y) into x*expN(-y). */
4151 (for exps (EXP EXP2 EXP10 POW10)
4153 (rdiv @0 (exps:s @1))
4154 (mult @0 (exps (negate @1)))))
4156 (for logs (LOG LOG2 LOG10 LOG10)
4157 exps (EXP EXP2 EXP10 POW10)
4158 /* logN(expN(x)) -> x. */
4162 /* expN(logN(x)) -> x. */
4167 /* Optimize logN(func()) for various exponential functions. We
4168 want to determine the value "x" and the power "exponent" in
4169 order to transform logN(x**exponent) into exponent*logN(x). */
4170 (for logs (LOG LOG LOG LOG2 LOG2 LOG2 LOG10 LOG10)
4171 exps (EXP2 EXP10 POW10 EXP EXP10 POW10 EXP EXP2)
4174 (if (SCALAR_FLOAT_TYPE_P (type))
4180 /* Prepare to do logN(exp(exponent)) -> exponent*logN(e). */
4181 x = build_real_truncate (type, dconst_e ());
4184 /* Prepare to do logN(exp2(exponent)) -> exponent*logN(2). */
4185 x = build_real (type, dconst2);
4189 /* Prepare to do logN(exp10(exponent)) -> exponent*logN(10). */
4191 REAL_VALUE_TYPE dconst10;
4192 real_from_integer (&dconst10, VOIDmode, 10, SIGNED);
4193 x = build_real (type, dconst10);
4200 (mult (logs { x; }) @0)))))
4208 (if (SCALAR_FLOAT_TYPE_P (type))
4214 /* Prepare to do logN(sqrt(x)) -> 0.5*logN(x). */
4215 x = build_real (type, dconsthalf);
4218 /* Prepare to do logN(cbrt(x)) -> (1/3)*logN(x). */
4219 x = build_real_truncate (type, dconst_third ());
4225 (mult { x; } (logs @0))))))
4227 /* logN(pow(x,exponent)) -> exponent*logN(x). */
4228 (for logs (LOG LOG2 LOG10)
4232 (mult @1 (logs @0))))
4234 /* pow(C,x) -> exp(log(C)*x) if C > 0,
4235 or if C is a positive power of 2,
4236 pow(C,x) -> exp2(log2(C)*x). */
4244 (pows REAL_CST@0 @1)
4245 (if (real_compare (GT_EXPR, TREE_REAL_CST_PTR (@0), &dconst0)
4246 && real_isfinite (TREE_REAL_CST_PTR (@0))
4247 /* As libmvec doesn't have a vectorized exp2, defer optimizing
4248 the use_exp2 case until after vectorization. It seems actually
4249 beneficial for all constants to postpone this until later,
4250 because exp(log(C)*x), while faster, will have worse precision
4251 and if x folds into a constant too, that is unnecessary pessimization. */
4253 && canonicalize_math_after_vectorization_p ())
4255 const REAL_VALUE_TYPE *const value = TREE_REAL_CST_PTR (@0);
4256 bool use_exp2 = false;
4257 if (targetm.libc_has_function (function_c99_misc)
4258 && value->cl == rvc_normal)
4260 REAL_VALUE_TYPE frac_rvt = *value;
4261 SET_REAL_EXP (&frac_rvt, 1);
4262 if (real_equal (&frac_rvt, &dconst1))
4267 (if (optimize_pow_to_exp (@0, @1))
4268 (exps (mult (logs @0) @1)))
4269 (exp2s (mult (log2s @0) @1)))))))
4272 /* pow(C,x)*expN(y) -> expN(logN(C)*x+y) if C > 0. */
4274 exps (EXP EXP2 EXP10 POW10)
4275 logs (LOG LOG2 LOG10 LOG10)
4277 (mult:c (pows:s REAL_CST@0 @1) (exps:s @2))
4278 (if (real_compare (GT_EXPR, TREE_REAL_CST_PTR (@0), &dconst0)
4279 && real_isfinite (TREE_REAL_CST_PTR (@0)))
4280 (exps (plus (mult (logs @0) @1) @2)))))
4285 exps (EXP EXP2 EXP10 POW10)
4286 /* sqrt(expN(x)) -> expN(x*0.5). */
4289 (exps (mult @0 { build_real (type, dconsthalf); })))
4290 /* cbrt(expN(x)) -> expN(x/3). */
4293 (exps (mult @0 { build_real_truncate (type, dconst_third ()); })))
4294 /* pow(expN(x), y) -> expN(x*y). */
4297 (exps (mult @0 @1))))
4299 /* tan(atan(x)) -> x. */
4306 /* Simplify sin(atan(x)) -> x / sqrt(x*x + 1). */
4310 copysigns (COPYSIGN)
4315 REAL_VALUE_TYPE r_cst;
4316 build_sinatan_real (&r_cst, type);
4317 tree t_cst = build_real (type, r_cst);
4318 tree t_one = build_one_cst (type);
4320 (if (SCALAR_FLOAT_TYPE_P (type))
4321 (cond (le (abs @0) { t_cst; })
4322 (rdiv @0 (sqrts (plus (mult @0 @0) { t_one; })))
4323 (copysigns { t_one; } @0))))))
4325 /* Simplify cos(atan(x)) -> 1 / sqrt(x*x + 1). */
4329 copysigns (COPYSIGN)
4334 REAL_VALUE_TYPE r_cst;
4335 build_sinatan_real (&r_cst, type);
4336 tree t_cst = build_real (type, r_cst);
4337 tree t_one = build_one_cst (type);
4338 tree t_zero = build_zero_cst (type);
4340 (if (SCALAR_FLOAT_TYPE_P (type))
4341 (cond (le (abs @0) { t_cst; })
4342 (rdiv { t_one; } (sqrts (plus (mult @0 @0) { t_one; })))
4343 (copysigns { t_zero; } @0))))))
4345 /* cabs(x+0i) or cabs(0+xi) -> abs(x). */
4347 (CABS (complex:C @0 real_zerop@1))
4350 /* trunc(trunc(x)) -> trunc(x), etc. */
4351 (for fns (TRUNC_ALL FLOOR_ALL CEIL_ALL ROUND_ALL NEARBYINT_ALL RINT_ALL)
4355 /* f(x) -> x if x is integer valued and f does nothing for such values. */
4356 (for fns (TRUNC_ALL FLOOR_ALL CEIL_ALL ROUND_ALL NEARBYINT_ALL RINT_ALL)
4358 (fns integer_valued_real_p@0)
4361 /* hypot(x,0) and hypot(0,x) -> abs(x). */
4363 (HYPOT:c @0 real_zerop@1)
4366 /* pow(1,x) -> 1. */
4368 (POW real_onep@0 @1)
4372 /* copysign(x,x) -> x. */
4373 (COPYSIGN_ALL @0 @0)
4377 /* copysign(x,y) -> fabs(x) if y is nonnegative. */
4378 (COPYSIGN_ALL @0 tree_expr_nonnegative_p@1)
4381 (for scale (LDEXP SCALBN SCALBLN)
4382 /* ldexp(0, x) -> 0. */
4384 (scale real_zerop@0 @1)
4386 /* ldexp(x, 0) -> x. */
4388 (scale @0 integer_zerop@1)
4390 /* ldexp(x, y) -> x if x is +-Inf or NaN. */
4392 (scale REAL_CST@0 @1)
4393 (if (!real_isfinite (TREE_REAL_CST_PTR (@0)))
4396 /* Canonicalization of sequences of math builtins. These rules represent
4397 IL simplifications but are not necessarily optimizations.
4399 The sincos pass is responsible for picking "optimal" implementations
4400 of math builtins, which may be more complicated and can sometimes go
4401 the other way, e.g. converting pow into a sequence of sqrts.
4402 We only want to do these canonicalizations before the pass has run. */
4404 (if (flag_unsafe_math_optimizations && canonicalize_math_p ())
4405 /* Simplify tan(x) * cos(x) -> sin(x). */
4407 (mult:c (TAN:s @0) (COS:s @0))
4410 /* Simplify x * pow(x,c) -> pow(x,c+1). */
4412 (mult:c @0 (POW:s @0 REAL_CST@1))
4413 (if (!TREE_OVERFLOW (@1))
4414 (POW @0 (plus @1 { build_one_cst (type); }))))
4416 /* Simplify sin(x) / cos(x) -> tan(x). */
4418 (rdiv (SIN:s @0) (COS:s @0))
4421 /* Simplify cos(x) / sin(x) -> 1 / tan(x). */
4423 (rdiv (COS:s @0) (SIN:s @0))
4424 (rdiv { build_one_cst (type); } (TAN @0)))
4426 /* Simplify sin(x) / tan(x) -> cos(x). */
4428 (rdiv (SIN:s @0) (TAN:s @0))
4429 (if (! HONOR_NANS (@0)
4430 && ! HONOR_INFINITIES (@0))
4433 /* Simplify tan(x) / sin(x) -> 1.0 / cos(x). */
4435 (rdiv (TAN:s @0) (SIN:s @0))
4436 (if (! HONOR_NANS (@0)
4437 && ! HONOR_INFINITIES (@0))
4438 (rdiv { build_one_cst (type); } (COS @0))))
4440 /* Simplify pow(x,y) * pow(x,z) -> pow(x,y+z). */
4442 (mult (POW:s @0 @1) (POW:s @0 @2))
4443 (POW @0 (plus @1 @2)))
4445 /* Simplify pow(x,y) * pow(z,y) -> pow(x*z,y). */
4447 (mult (POW:s @0 @1) (POW:s @2 @1))
4448 (POW (mult @0 @2) @1))
4450 /* Simplify powi(x,y) * powi(z,y) -> powi(x*z,y). */
4452 (mult (POWI:s @0 @1) (POWI:s @2 @1))
4453 (POWI (mult @0 @2) @1))
4455 /* Simplify pow(x,c) / x -> pow(x,c-1). */
4457 (rdiv (POW:s @0 REAL_CST@1) @0)
4458 (if (!TREE_OVERFLOW (@1))
4459 (POW @0 (minus @1 { build_one_cst (type); }))))
4461 /* Simplify x / pow (y,z) -> x * pow(y,-z). */
4463 (rdiv @0 (POW:s @1 @2))
4464 (mult @0 (POW @1 (negate @2))))
4469 /* sqrt(sqrt(x)) -> pow(x,1/4). */
4472 (pows @0 { build_real (type, dconst_quarter ()); }))
4473 /* sqrt(cbrt(x)) -> pow(x,1/6). */
4476 (pows @0 { build_real_truncate (type, dconst_sixth ()); }))
4477 /* cbrt(sqrt(x)) -> pow(x,1/6). */
4480 (pows @0 { build_real_truncate (type, dconst_sixth ()); }))
4481 /* cbrt(cbrt(x)) -> pow(x,1/9), iff x is nonnegative. */
4483 (cbrts (cbrts tree_expr_nonnegative_p@0))
4484 (pows @0 { build_real_truncate (type, dconst_ninth ()); }))
4485 /* sqrt(pow(x,y)) -> pow(|x|,y*0.5). */
4487 (sqrts (pows @0 @1))
4488 (pows (abs @0) (mult @1 { build_real (type, dconsthalf); })))
4489 /* cbrt(pow(x,y)) -> pow(x,y/3), iff x is nonnegative. */
4491 (cbrts (pows tree_expr_nonnegative_p@0 @1))
4492 (pows @0 (mult @1 { build_real_truncate (type, dconst_third ()); })))
4493 /* pow(sqrt(x),y) -> pow(x,y*0.5). */
4495 (pows (sqrts @0) @1)
4496 (pows @0 (mult @1 { build_real (type, dconsthalf); })))
4497 /* pow(cbrt(x),y) -> pow(x,y/3) iff x is nonnegative. */
4499 (pows (cbrts tree_expr_nonnegative_p@0) @1)
4500 (pows @0 (mult @1 { build_real_truncate (type, dconst_third ()); })))
4501 /* pow(pow(x,y),z) -> pow(x,y*z) iff x is nonnegative. */
4503 (pows (pows tree_expr_nonnegative_p@0 @1) @2)
4504 (pows @0 (mult @1 @2))))
4506 /* cabs(x+xi) -> fabs(x)*sqrt(2). */
4508 (CABS (complex @0 @0))
4509 (mult (abs @0) { build_real_truncate (type, dconst_sqrt2 ()); }))
4511 /* hypot(x,x) -> fabs(x)*sqrt(2). */
4514 (mult (abs @0) { build_real_truncate (type, dconst_sqrt2 ()); }))
4516 /* cexp(x+yi) -> exp(x)*cexpi(y). */
4521 (cexps compositional_complex@0)
4522 (if (targetm.libc_has_function (function_c99_math_complex))
4524 (mult (exps@1 (realpart @0)) (realpart (cexpis:type@2 (imagpart @0))))
4525 (mult @1 (imagpart @2)))))))
4527 (if (canonicalize_math_p ())
4528 /* floor(x) -> trunc(x) if x is nonnegative. */
4529 (for floors (FLOOR_ALL)
4532 (floors tree_expr_nonnegative_p@0)
4535 (match double_value_p
4537 (if (TYPE_MAIN_VARIANT (TREE_TYPE (@0)) == double_type_node)))
4538 (for froms (BUILT_IN_TRUNCL
4550 /* truncl(extend(x)) -> extend(trunc(x)), etc., if x is a double. */
4551 (if (optimize && canonicalize_math_p ())
4553 (froms (convert double_value_p@0))
4554 (convert (tos @0)))))
4556 (match float_value_p
4558 (if (TYPE_MAIN_VARIANT (TREE_TYPE (@0)) == float_type_node)))
4559 (for froms (BUILT_IN_TRUNCL BUILT_IN_TRUNC
4560 BUILT_IN_FLOORL BUILT_IN_FLOOR
4561 BUILT_IN_CEILL BUILT_IN_CEIL
4562 BUILT_IN_ROUNDL BUILT_IN_ROUND
4563 BUILT_IN_NEARBYINTL BUILT_IN_NEARBYINT
4564 BUILT_IN_RINTL BUILT_IN_RINT)
4565 tos (BUILT_IN_TRUNCF BUILT_IN_TRUNCF
4566 BUILT_IN_FLOORF BUILT_IN_FLOORF
4567 BUILT_IN_CEILF BUILT_IN_CEILF
4568 BUILT_IN_ROUNDF BUILT_IN_ROUNDF
4569 BUILT_IN_NEARBYINTF BUILT_IN_NEARBYINTF
4570 BUILT_IN_RINTF BUILT_IN_RINTF)
4571 /* truncl(extend(x)) and trunc(extend(x)) -> extend(truncf(x)), etc.,
4572 if x is a float. */
4573 (if (optimize && canonicalize_math_p ()
4574 && targetm.libc_has_function (function_c99_misc))
4576 (froms (convert float_value_p@0))
4577 (convert (tos @0)))))
4579 (for froms (XFLOORL XCEILL XROUNDL XRINTL)
4580 tos (XFLOOR XCEIL XROUND XRINT)
4581 /* llfloorl(extend(x)) -> llfloor(x), etc., if x is a double. */
4582 (if (optimize && canonicalize_math_p ())
4584 (froms (convert double_value_p@0))
4587 (for froms (XFLOORL XCEILL XROUNDL XRINTL
4588 XFLOOR XCEIL XROUND XRINT)
4589 tos (XFLOORF XCEILF XROUNDF XRINTF)
4590 /* llfloorl(extend(x)) and llfloor(extend(x)) -> llfloorf(x), etc.,
4591 if x is a float. */
4592 (if (optimize && canonicalize_math_p ())
4594 (froms (convert float_value_p@0))
4597 (if (canonicalize_math_p ())
4598 /* xfloor(x) -> fix_trunc(x) if x is nonnegative. */
4599 (for floors (IFLOOR LFLOOR LLFLOOR)
4601 (floors tree_expr_nonnegative_p@0)
4604 (if (canonicalize_math_p ())
4605 /* xfloor(x) -> fix_trunc(x), etc., if x is integer valued. */
4606 (for fns (IFLOOR LFLOOR LLFLOOR
4608 IROUND LROUND LLROUND)
4610 (fns integer_valued_real_p@0)
4612 (if (!flag_errno_math)
4613 /* xrint(x) -> fix_trunc(x), etc., if x is integer valued. */
4614 (for rints (IRINT LRINT LLRINT)
4616 (rints integer_valued_real_p@0)
4619 (if (canonicalize_math_p ())
4620 (for ifn (IFLOOR ICEIL IROUND IRINT)
4621 lfn (LFLOOR LCEIL LROUND LRINT)
4622 llfn (LLFLOOR LLCEIL LLROUND LLRINT)
4623 /* Canonicalize iround (x) to lround (x) on ILP32 targets where
4624 sizeof (int) == sizeof (long). */
4625 (if (TYPE_PRECISION (integer_type_node)
4626 == TYPE_PRECISION (long_integer_type_node))
4629 (lfn:long_integer_type_node @0)))
4630 /* Canonicalize llround (x) to lround (x) on LP64 targets where
4631 sizeof (long long) == sizeof (long). */
4632 (if (TYPE_PRECISION (long_long_integer_type_node)
4633 == TYPE_PRECISION (long_integer_type_node))
4636 (lfn:long_integer_type_node @0)))))
4638 /* cproj(x) -> x if we're ignoring infinities. */
4641 (if (!HONOR_INFINITIES (type))
4644 /* If the real part is inf and the imag part is known to be
4645 nonnegative, return (inf + 0i). */
4647 (CPROJ (complex REAL_CST@0 tree_expr_nonnegative_p@1))
4648 (if (real_isinf (TREE_REAL_CST_PTR (@0)))
4649 { build_complex_inf (type, false); }))
4651 /* If the imag part is inf, return (inf+I*copysign(0,imag)). */
4653 (CPROJ (complex @0 REAL_CST@1))
4654 (if (real_isinf (TREE_REAL_CST_PTR (@1)))
4655 { build_complex_inf (type, TREE_REAL_CST_PTR (@1)->sign); }))
4661 (pows @0 REAL_CST@1)
4663 const REAL_VALUE_TYPE *value = TREE_REAL_CST_PTR (@1);
4664 REAL_VALUE_TYPE tmp;
4667 /* pow(x,0) -> 1. */
4668 (if (real_equal (value, &dconst0))
4669 { build_real (type, dconst1); })
4670 /* pow(x,1) -> x. */
4671 (if (real_equal (value, &dconst1))
4673 /* pow(x,-1) -> 1/x. */
4674 (if (real_equal (value, &dconstm1))
4675 (rdiv { build_real (type, dconst1); } @0))
4676 /* pow(x,0.5) -> sqrt(x). */
4677 (if (flag_unsafe_math_optimizations
4678 && canonicalize_math_p ()
4679 && real_equal (value, &dconsthalf))
4681 /* pow(x,1/3) -> cbrt(x). */
4682 (if (flag_unsafe_math_optimizations
4683 && canonicalize_math_p ()
4684 && (tmp = real_value_truncate (TYPE_MODE (type), dconst_third ()),
4685 real_equal (value, &tmp)))
4688 /* powi(1,x) -> 1. */
4690 (POWI real_onep@0 @1)
4694 (POWI @0 INTEGER_CST@1)
4696 /* powi(x,0) -> 1. */
4697 (if (wi::to_wide (@1) == 0)
4698 { build_real (type, dconst1); })
4699 /* powi(x,1) -> x. */
4700 (if (wi::to_wide (@1) == 1)
4702 /* powi(x,-1) -> 1/x. */
4703 (if (wi::to_wide (@1) == -1)
4704 (rdiv { build_real (type, dconst1); } @0))))
4706 /* Narrowing of arithmetic and logical operations.
4708 These are conceptually similar to the transformations performed for
4709 the C/C++ front-ends by shorten_binary_op and shorten_compare. Long
4710 term we want to move all that code out of the front-ends into here. */
4712 /* If we have a narrowing conversion of an arithmetic operation where
4713 both operands are widening conversions from the same type as the outer
4714 narrowing conversion, then convert the innermost operands to a suitable
4715 unsigned type (to avoid introducing undefined behavior), perform the
4716 operation and convert the result to the desired type. */
4717 (for op (plus minus)
4719 (convert (op:s (convert@2 @0) (convert?@3 @1)))
4720 (if (INTEGRAL_TYPE_P (type)
4721 /* We check for type compatibility between @0 and @1 below,
4722 so there's no need to check that @1/@3 are integral types. */
4723 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
4724 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
4725 /* The precision of the type of each operand must match the
4726 precision of the mode of each operand, similarly for the result. */
4728 && type_has_mode_precision_p (TREE_TYPE (@0))
4729 && type_has_mode_precision_p (TREE_TYPE (@1))
4730 && type_has_mode_precision_p (type)
4731 /* The inner conversion must be a widening conversion. */
4732 && TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (TREE_TYPE (@0))
4733 && types_match (@0, type)
4734 && (types_match (@0, @1)
4735 /* Or the second operand is const integer or converted const
4736 integer from valueize. */
4737 || TREE_CODE (@1) == INTEGER_CST))
4738 (if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
4739 (op @0 (convert @1))
4740 (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
4741 (convert (op (convert:utype @0)
4742 (convert:utype @1))))))))
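/* Editor's note: an illustrative sketch of the narrowing above, not a
   pattern; the types are assumptions.  For unsigned short a and b,

     (unsigned short) ((int) a + (int) b)    becomes    a + b

   computed directly in unsigned short, since that type wraps.  For a
   signed narrow type that does not wrap, the addition is instead carried
   out in the corresponding unsigned type and the result converted back.  */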
4744 /* This is another case of narrowing, specifically when there's an outer
4745 BIT_AND_EXPR which masks off bits outside the type of the innermost
4746 operands. As in the previous case, we have to convert the operands
4747 to unsigned types to avoid introducing undefined behavior for the
4748 arithmetic operation. */
4749 (for op (minus plus)
4751 (bit_and (op:s (convert@2 @0) (convert@3 @1)) INTEGER_CST@4)
4752 (if (INTEGRAL_TYPE_P (type)
4753 /* We check for type compatibility between @0 and @1 below,
4754 so there's no need to check that @1/@3 are integral types. */
4755 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
4756 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
4757 /* The precision of the type of each operand must match the
4758 precision of the mode of each operand, similarly for the result. */
4760 && type_has_mode_precision_p (TREE_TYPE (@0))
4761 && type_has_mode_precision_p (TREE_TYPE (@1))
4762 && type_has_mode_precision_p (type)
4763 /* The inner conversion must be a widening conversion. */
4764 && TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (TREE_TYPE (@0))
4765 && types_match (@0, @1)
4766 && (tree_int_cst_min_precision (@4, TYPE_SIGN (TREE_TYPE (@0)))
4767 <= TYPE_PRECISION (TREE_TYPE (@0)))
4768 && (wi::to_wide (@4)
4769 & wi::mask (TYPE_PRECISION (TREE_TYPE (@0)),
4770 true, TYPE_PRECISION (type))) == 0)
4771 (if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
4772 (with { tree ntype = TREE_TYPE (@0); }
4773 (convert (bit_and (op @0 @1) (convert:ntype @4))))
4774 (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
4775 (convert (bit_and (op (convert:utype @0) (convert:utype @1))
4776 (convert:utype @4))))))))
4778 /* Transform (@0 < @1 and @0 < @2) to use min,
4779 (@0 > @1 and @0 > @2) to use max */
4780 (for logic (bit_and bit_and bit_and bit_and bit_ior bit_ior bit_ior bit_ior)
4781 op (lt le gt ge lt le gt ge )
4782 ext (min min max max max max min min )
4784 (logic (op:cs @0 @1) (op:cs @0 @2))
4785 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
4786 && TREE_CODE (@0) != INTEGER_CST)
4787 (op @0 (ext @1 @2)))))
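/* Editor's note: worked examples of the min/max rewrite above, not
   patterns; an integral x, y, z is an assumption:

     x < y && x < z    becomes    x < MIN (y, z)
     x > y && x > z    becomes    x > MAX (y, z)
     x < y || x < z    becomes    x < MAX (y, z)
     x > y || x > z    becomes    x > MIN (y, z)  */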
4790 /* signbit(x) -> 0 if x is nonnegative. */
4791 (SIGNBIT tree_expr_nonnegative_p@0)
4792 { integer_zero_node; })
4795 /* signbit(x) -> x<0 if x doesn't have signed zeros. */
4797 (if (!HONOR_SIGNED_ZEROS (@0))
4798 (convert (lt @0 { build_real (TREE_TYPE (@0), dconst0); }))))
4800 /* Transform comparisons of the form X +- C1 CMP C2 to X CMP C2 -+ C1. */
4802 (for op (plus minus)
4805 (cmp (op@3 @0 INTEGER_CST@1) INTEGER_CST@2)
4806 (if (!TREE_OVERFLOW (@1) && !TREE_OVERFLOW (@2)
4807 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0))
4808 && !TYPE_OVERFLOW_TRAPS (TREE_TYPE (@0))
4809 && !TYPE_SATURATING (TREE_TYPE (@0)))
4810 (with { tree res = int_const_binop (rop, @2, @1); }
4811 (if (TREE_OVERFLOW (res)
4812 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
4813 { constant_boolean_node (cmp == NE_EXPR, type); }
4814 (if (single_use (@3))
4815 (cmp @0 { TREE_OVERFLOW (res)
4816 ? drop_tree_overflow (res) : res; }))))))))
4817 (for cmp (lt le gt ge)
4818 (for op (plus minus)
4821 (cmp (op@3 @0 INTEGER_CST@1) INTEGER_CST@2)
4822 (if (!TREE_OVERFLOW (@1) && !TREE_OVERFLOW (@2)
4823 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
4824 (with { tree res = int_const_binop (rop, @2, @1); }
4825 (if (TREE_OVERFLOW (res))
4827 fold_overflow_warning (("assuming signed overflow does not occur "
4828 "when simplifying conditional to constant"),
4829 WARN_STRICT_OVERFLOW_CONDITIONAL);
4830 bool less = cmp == LE_EXPR || cmp == LT_EXPR;
4831 /* wi::ges_p (@2, 0) should be sufficient for a signed type. */
4832 bool ovf_high = wi::lt_p (wi::to_wide (@1), 0,
4833 TYPE_SIGN (TREE_TYPE (@1)))
4834 != (op == MINUS_EXPR);
4835 constant_boolean_node (less == ovf_high, type);
4837 (if (single_use (@3))
4840 fold_overflow_warning (("assuming signed overflow does not occur "
4841 "when changing X +- C1 cmp C2 to "
4843 WARN_STRICT_OVERFLOW_COMPARISON);
4845 (cmp @0 { res; })))))))))
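/* Editor's note: worked examples of the transform above, not patterns; a
   signed int x with undefined overflow and no sanitization is an
   assumption:

     x + 5 == 13    becomes    x == 8
     x - 3 >  10    becomes    x >  13

   and when C2 -+ C1 overflows, the comparison folds to a constant (with
   the usual strict-overflow warning for the ordering cases).  */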
4847 /* Canonicalizations of BIT_FIELD_REFs. */
4850 (BIT_FIELD_REF (BIT_FIELD_REF @0 @1 @2) @3 @4)
4851 (BIT_FIELD_REF @0 @3 { const_binop (PLUS_EXPR, bitsizetype, @2, @4); }))
4854 (BIT_FIELD_REF (view_convert @0) @1 @2)
4855 (BIT_FIELD_REF @0 @1 @2))
4858 (BIT_FIELD_REF @0 @1 integer_zerop)
4859 (if (tree_int_cst_equal (@1, TYPE_SIZE (TREE_TYPE (@0))))
4863 (BIT_FIELD_REF @0 @1 @2)
4865 (if (TREE_CODE (TREE_TYPE (@0)) == COMPLEX_TYPE
4866 && tree_int_cst_equal (@1, TYPE_SIZE (TREE_TYPE (TREE_TYPE (@0)))))
4868 (if (integer_zerop (@2))
4869 (view_convert (realpart @0)))
4870 (if (tree_int_cst_equal (@2, TYPE_SIZE (TREE_TYPE (TREE_TYPE (@0)))))
4871 (view_convert (imagpart @0)))))
4872 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
4873 && INTEGRAL_TYPE_P (type)
4874 /* On GIMPLE this should only apply to register arguments. */
4875 && (! GIMPLE || is_gimple_reg (@0))
4876 /* A bit-field-ref that referenced the full argument can be stripped. */
4877 && ((compare_tree_int (@1, TYPE_PRECISION (TREE_TYPE (@0))) == 0
4878 && integer_zerop (@2))
4879 /* Low-parts can be reduced to integral conversions.
4880 ??? The following doesn't work for PDP endian. */
4881 || (BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN
4882 /* Don't even think about BITS_BIG_ENDIAN. */
4883 && TYPE_PRECISION (TREE_TYPE (@0)) % BITS_PER_UNIT == 0
4884 && TYPE_PRECISION (type) % BITS_PER_UNIT == 0
4885 && compare_tree_int (@2, (BYTES_BIG_ENDIAN
4886 ? (TYPE_PRECISION (TREE_TYPE (@0))
4887 - TYPE_PRECISION (type))
4891 /* Simplify vector extracts. */
4894 (BIT_FIELD_REF CONSTRUCTOR@0 @1 @2)
4895 (if (VECTOR_TYPE_P (TREE_TYPE (@0))
4896 && (types_match (type, TREE_TYPE (TREE_TYPE (@0)))
4897 || (VECTOR_TYPE_P (type)
4898 && types_match (TREE_TYPE (type), TREE_TYPE (TREE_TYPE (@0))))))
4901 tree ctor = (TREE_CODE (@0) == SSA_NAME
4902 ? gimple_assign_rhs1 (SSA_NAME_DEF_STMT (@0)) : @0);
4903 tree eltype = TREE_TYPE (TREE_TYPE (ctor));
4904 unsigned HOST_WIDE_INT width = tree_to_uhwi (TYPE_SIZE (eltype));
4905 unsigned HOST_WIDE_INT n = tree_to_uhwi (@1);
4906 unsigned HOST_WIDE_INT idx = tree_to_uhwi (@2);
4909 && (idx % width) == 0
4911 && known_le ((idx + n) / width,
4912 TYPE_VECTOR_SUBPARTS (TREE_TYPE (ctor))))
4917 /* Constructor elements can be subvectors. */
4919 if (CONSTRUCTOR_NELTS (ctor) != 0)
4921 tree cons_elem = TREE_TYPE (CONSTRUCTOR_ELT (ctor, 0)->value);
4922 if (TREE_CODE (cons_elem) == VECTOR_TYPE)
4923 k = TYPE_VECTOR_SUBPARTS (cons_elem);
4925 unsigned HOST_WIDE_INT elt, count, const_k;
4928 /* We keep an exact subset of the constructor elements. */
4929 (if (multiple_p (idx, k, &elt) && multiple_p (n, k, &count))
4930 (if (CONSTRUCTOR_NELTS (ctor) == 0)
4931 { build_constructor (type, NULL); }
4933 (if (elt < CONSTRUCTOR_NELTS (ctor))
4934 (view_convert { CONSTRUCTOR_ELT (ctor, elt)->value; })
4935 { build_zero_cst (type); })
4937 vec<constructor_elt, va_gc> *vals;
4938 vec_alloc (vals, count);
4939 for (unsigned i = 0;
4940 i < count && elt + i < CONSTRUCTOR_NELTS (ctor); ++i)
4941 CONSTRUCTOR_APPEND_ELT (vals, NULL_TREE,
4942 CONSTRUCTOR_ELT (ctor, elt + i)->value);
4943 build_constructor (type, vals);
4945 /* The bitfield references a single constructor element. */
4946 (if (k.is_constant (&const_k)
4947 && idx + n <= (idx / const_k + 1) * const_k)
4949 (if (CONSTRUCTOR_NELTS (ctor) <= idx / const_k)
4950 { build_zero_cst (type); })
4952 (view_convert { CONSTRUCTOR_ELT (ctor, idx / const_k)->value; }))
4953 (BIT_FIELD_REF { CONSTRUCTOR_ELT (ctor, idx / const_k)->value; }
4954 @1 { bitsize_int ((idx % const_k) * width); })))))))))
4956 /* Simplify a bit extraction from a bit insertion for the cases with
4957 the inserted element fully covering the extraction or the insertion
4958 not touching the extraction. */
4960 (BIT_FIELD_REF (bit_insert @0 @1 @ipos) @rsize @rpos)
4963 unsigned HOST_WIDE_INT isize;
4964 if (INTEGRAL_TYPE_P (TREE_TYPE (@1)))
4965 isize = TYPE_PRECISION (TREE_TYPE (@1));
4967 isize = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (@1)));
4970 (if (wi::leu_p (wi::to_wide (@ipos), wi::to_wide (@rpos))
4971 && wi::leu_p (wi::to_wide (@rpos) + wi::to_wide (@rsize),
4972 wi::to_wide (@ipos) + isize))
4973 (BIT_FIELD_REF @1 @rsize { wide_int_to_tree (bitsizetype,
4975 - wi::to_wide (@ipos)); }))
4976 (if (wi::geu_p (wi::to_wide (@ipos),
4977 wi::to_wide (@rpos) + wi::to_wide (@rsize))
4978 || wi::geu_p (wi::to_wide (@rpos),
4979 wi::to_wide (@ipos) + isize))
4980 (BIT_FIELD_REF @0 @rsize @rpos)))))
4982 (if (canonicalize_math_after_vectorization_p ())
4985 (fmas:c (negate @0) @1 @2)
4986 (IFN_FNMA @0 @1 @2))
4988 (fmas @0 @1 (negate @2))
4991 (fmas:c (negate @0) @1 (negate @2))
4992 (IFN_FNMS @0 @1 @2))
4994 (negate (fmas@3 @0 @1 @2))
4995 (if (single_use (@3))
4996 (IFN_FNMS @0 @1 @2))))
4999 (IFN_FMS:c (negate @0) @1 @2)
5000 (IFN_FNMS @0 @1 @2))
5002 (IFN_FMS @0 @1 (negate @2))
5005 (IFN_FMS:c (negate @0) @1 (negate @2))
5006 (IFN_FNMA @0 @1 @2))
5008 (negate (IFN_FMS@3 @0 @1 @2))
5009 (if (single_use (@3))
5010 (IFN_FNMA @0 @1 @2)))
5013 (IFN_FNMA:c (negate @0) @1 @2)
5016 (IFN_FNMA @0 @1 (negate @2))
5017 (IFN_FNMS @0 @1 @2))
5019 (IFN_FNMA:c (negate @0) @1 (negate @2))
5022 (negate (IFN_FNMA@3 @0 @1 @2))
5023 (if (single_use (@3))
5024 (IFN_FMS @0 @1 @2)))
5027 (IFN_FNMS:c (negate @0) @1 @2)
5030 (IFN_FNMS @0 @1 (negate @2))
5031 (IFN_FNMA @0 @1 @2))
5033 (IFN_FNMS:c (negate @0) @1 (negate @2))
5036 (negate (IFN_FNMS@3 @0 @1 @2))
5037 (if (single_use (@3))
5038 (IFN_FMA @0 @1 @2))))
5040 /* POPCOUNT simplifications. */
5041 (for popcount (BUILT_IN_POPCOUNT BUILT_IN_POPCOUNTL BUILT_IN_POPCOUNTLL
5042 BUILT_IN_POPCOUNTIMAX)
5043 /* popcount(X&1) is nop_expr(X&1). */
5046 (if (tree_nonzero_bits (@0) == 1)
5048 /* popcount(X) + popcount(Y) is popcount(X|Y) when X&Y must be zero. */
5050 (plus (popcount:s @0) (popcount:s @1))
5051 (if (wi::bit_and (tree_nonzero_bits (@0), tree_nonzero_bits (@1)) == 0)
5052 (popcount (bit_ior @0 @1))))
5053 /* popcount(X) == 0 is X == 0, and related (in)equalities. */
5054 (for cmp (le eq ne gt)
5057 (cmp (popcount @0) integer_zerop)
5058 (rep @0 { build_zero_cst (TREE_TYPE (@0)); }))))
/* Fold a VEC_COND_EXPR around one of the UNCOND_BINARY operations into the
matching COND_BINARY internal function, i.e. turn
5067 r = c ? a1 op a2 : b;
into a single conditional operation
5069 if the target can do it in one go. This makes the operation conditional
5070 on c, so could drop potentially-trapping arithmetic, but that's a valid
5071 simplification if the result of the operation isn't needed. */
5072 (for uncond_op (UNCOND_BINARY)
5073 cond_op (COND_BINARY)
5075 (vec_cond @0 (view_convert? (uncond_op@4 @1 @2)) @3)
5076 (with { tree op_type = TREE_TYPE (@4); }
5077 (if (element_precision (type) == element_precision (op_type))
5078 (view_convert (cond_op @0 @1 @2 (view_convert:op_type @3))))))
5080 (vec_cond @0 @1 (view_convert? (uncond_op@4 @2 @3)))
5081 (with { tree op_type = TREE_TYPE (@4); }
5082 (if (element_precision (type) == element_precision (op_type))
5083 (view_convert (cond_op (bit_not @0) @2 @3 (view_convert:op_type @1)))))))
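/* Editor's note: an illustrative sketch of the conditional-operation fold
   above, not a pattern; the operand names are assumptions.  With a vector
   mask c,

     c ? a + b : d    becomes    IFN_COND_ADD (c, a, b, d)
     c ? d : a + b    becomes    IFN_COND_ADD (~c, a, b, d)

   provided the element precisions agree, so the addition is performed only
   where the mask selects it and d is used elsewhere.  */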
5085 /* Same for ternary operations. */
5086 (for uncond_op (UNCOND_TERNARY)
5087 cond_op (COND_TERNARY)
5089 (vec_cond @0 (view_convert? (uncond_op@5 @1 @2 @3)) @4)
5090 (with { tree op_type = TREE_TYPE (@5); }
5091 (if (element_precision (type) == element_precision (op_type))
5092 (view_convert (cond_op @0 @1 @2 @3 (view_convert:op_type @4))))))
5094 (vec_cond @0 @1 (view_convert? (uncond_op@5 @2 @3 @4)))
5095 (with { tree op_type = TREE_TYPE (@5); }
5096 (if (element_precision (type) == element_precision (op_type))
5097 (view_convert (cond_op (bit_not @0) @2 @3 @4
5098 (view_convert:op_type @1)))))))
5100 /* Detect cases in which a VEC_COND_EXPR effectively replaces the
5101 "else" value of an IFN_COND_*. */
5102 (for cond_op (COND_BINARY)
5104 (vec_cond @0 (view_convert? (cond_op @0 @1 @2 @3)) @4)
5105 (with { tree op_type = TREE_TYPE (@3); }
5106 (if (element_precision (type) == element_precision (op_type))
5107 (view_convert (cond_op @0 @1 @2 (view_convert:op_type @4))))))
5109 (vec_cond @0 @1 (view_convert? (cond_op @2 @3 @4 @5)))
5110 (with { tree op_type = TREE_TYPE (@5); }
5111 (if (inverse_conditions_p (@0, @2)
5112 && element_precision (type) == element_precision (op_type))
5113 (view_convert (cond_op @2 @3 @4 (view_convert:op_type @1)))))))
5115 /* Same for ternary operations. */
5116 (for cond_op (COND_TERNARY)
5118 (vec_cond @0 (view_convert? (cond_op @0 @1 @2 @3 @4)) @5)
5119 (with { tree op_type = TREE_TYPE (@4); }
5120 (if (element_precision (type) == element_precision (op_type))
5121 (view_convert (cond_op @0 @1 @2 @3 (view_convert:op_type @5))))))
5123 (vec_cond @0 @1 (view_convert? (cond_op @2 @3 @4 @5 @6)))
5124 (with { tree op_type = TREE_TYPE (@6); }
5125 (if (inverse_conditions_p (@0, @2)
5126 && element_precision (type) == element_precision (op_type))
5127 (view_convert (cond_op @2 @3 @4 @5 (view_convert:op_type @1)))))))
5129 /* For pointers @0 and @2 and nonnegative constant offset @1, look for
5132 A: (@0 + @1 < @2) | (@2 + @1 < @0)
5133 B: (@0 + @1 <= @2) | (@2 + @1 <= @0)
5135 If pointers are known not to wrap, B checks whether @1 bytes starting
5136 at @0 and @2 do not overlap, while A tests the same thing for @1 + 1
5137 bytes. A is more efficiently tested as:
5139 A: (sizetype) (@0 + @1 - @2) > @1 * 2
5141 The equivalent expression for B is given by replacing @1 with @1 - 1:
5143 B: (sizetype) (@0 + (@1 - 1) - @2) > (@1 - 1) * 2
5145 @0 and @2 can be swapped in both expressions without changing the result.
5147 The folds rely on sizetype's being unsigned (which is always true)
5148 and on its being the same width as the pointer (which we have to check).
5150 The fold replaces two pointer_plus expressions, two comparisons and
5151 an IOR with a pointer_plus, a pointer_diff, and a comparison, so in
5152 the best case it's a saving of two operations. The A fold retains one
5153 of the original pointer_pluses, so is a win even if both pointer_pluses
5154 are used elsewhere. The B fold is a wash if both pointer_pluses are
5155 used elsewhere, since all we end up doing is replacing a comparison with
5156 a pointer_plus. We do still apply the fold under those circumstances
5157 though, in case applying it to other conditions eventually makes one of the
5158 pointer_pluses dead. */
5159 (for ior (truth_orif truth_or bit_ior)
5162 (ior (cmp:cs (pointer_plus@3 @0 INTEGER_CST@1) @2)
5163 (cmp:cs (pointer_plus@4 @2 @1) @0))
5164 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
5165 && TYPE_OVERFLOW_WRAPS (sizetype)
5166 && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (sizetype))
5167 /* Calculate the rhs constant. */
5168 (with { offset_int off = wi::to_offset (@1) - (cmp == LE_EXPR ? 1 : 0);
5169 offset_int rhs = off * 2; }
5170 /* Always fails for negative values. */
5171 (if (wi::min_precision (rhs, UNSIGNED) <= TYPE_PRECISION (sizetype))
5172 /* Since the order of @0 and @2 doesn't matter, let tree_swap_operands_p
5173 pick a canonical order. This increases the chances of using the
5174 same pointer_plus in multiple checks. */
5175 (with { bool swap_p = tree_swap_operands_p (@0, @2);
5176 tree rhs_tree = wide_int_to_tree (sizetype, rhs); }
5177 (if (cmp == LT_EXPR)
5178 (gt (convert:sizetype
5179 (pointer_diff:ssizetype { swap_p ? @4 : @3; }
5180 { swap_p ? @0 : @2; }))
5182 (gt (convert:sizetype
5183 (pointer_diff:ssizetype
5184 (pointer_plus { swap_p ? @2 : @0; }
5185 { wide_int_to_tree (sizetype, off); })
5186 { swap_p ? @0 : @2; }))
5187 { rhs_tree; })))))))))
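/* Editor's note: a worked instance of form A above, not a pattern; p and q
   are assumed non-wrapping pointers and 16 a nonnegative constant:

     p + 16 < q || q + 16 < p    becomes    (sizetype) (p + 16 - q) > 32

   when |p - q| <= 16 the left-hand side lands in [0, 32] and the test
   fails; otherwise it either exceeds 32 or wraps to a huge unsigned value.
   Form B uses the constants 15 and 30 in the same way.  */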