/* Match-and-simplify patterns for shared GENERIC and GIMPLE folding.
   This file is consumed by genmatch which produces gimple-match.cc
   and generic-match.cc from it.

   Copyright (C) 2014-2023 Free Software Foundation, Inc.
   Contributed by Richard Biener <rguenther@suse.de>
   and Prathamesh Kulkarni <bilbotheelffriend@gmail.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* Generic tree predicates we inherit.  */
(define_predicates
   integer_onep integer_zerop integer_all_onesp integer_minus_onep
   integer_each_onep integer_truep integer_nonzerop
   real_zerop real_onep real_minus_onep
   initializer_each_zero_or_onep
   tree_expr_nonnegative_p
   bitmask_inv_cst_vector_p)
(define_operator_list tcc_comparison
  lt le eq ne ge gt unordered ordered unlt unle ungt unge uneq ltgt)
(define_operator_list inverted_tcc_comparison
  ge gt ne eq lt le ordered unordered ge gt le lt ltgt uneq)
(define_operator_list inverted_tcc_comparison_with_nans
  unge ungt ne eq unlt unle ordered unordered ge gt le lt ltgt uneq)
(define_operator_list swapped_tcc_comparison
  gt ge eq ne le lt unordered ordered ungt unge unlt unle uneq ltgt)
(define_operator_list simple_comparison         lt le eq ne ge gt)
(define_operator_list swapped_simple_comparison gt ge eq ne le lt)
(define_operator_list BSWAP BUILT_IN_BSWAP16 BUILT_IN_BSWAP32
            BUILT_IN_BSWAP64 BUILT_IN_BSWAP128)

#include "cfn-operators.pd"
/* Define operand lists for math rounding functions {,i,l,ll}FN,
   where the versions prefixed with "i" return an int, those prefixed with
   "l" return a long and those prefixed with "ll" return a long long.

   Also define operand lists:

     X<FN>F for all float functions, in the order i, l, ll
     X<FN> for all double functions, in the same order
     X<FN>L for all long double functions, in the same order.  */
#define DEFINE_INT_AND_FLOAT_ROUND_FN(FN) \
  (define_operator_list X##FN##F BUILT_IN_I##FN##F \
                                 BUILT_IN_L##FN##F \
                                 BUILT_IN_LL##FN##F) \
  (define_operator_list X##FN BUILT_IN_I##FN \
                              BUILT_IN_L##FN \
                              BUILT_IN_LL##FN) \
  (define_operator_list X##FN##L BUILT_IN_I##FN##L \
                                 BUILT_IN_L##FN##L \
                                 BUILT_IN_LL##FN##L)

DEFINE_INT_AND_FLOAT_ROUND_FN (FLOOR)
DEFINE_INT_AND_FLOAT_ROUND_FN (CEIL)
DEFINE_INT_AND_FLOAT_ROUND_FN (ROUND)
DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
/* Unary operations and their associated IFN_COND_* function.  */
(define_operator_list UNCOND_UNARY
  negate)
(define_operator_list COND_UNARY
  IFN_COND_NEG)

/* Binary operations and their associated IFN_COND_* function.  */
(define_operator_list UNCOND_BINARY
  plus minus
  mult trunc_div trunc_mod rdiv
  min max
  IFN_FMIN IFN_FMAX
  bit_and bit_ior bit_xor
  lshift rshift)
(define_operator_list COND_BINARY
  IFN_COND_ADD IFN_COND_SUB
  IFN_COND_MUL IFN_COND_DIV IFN_COND_MOD IFN_COND_RDIV
  IFN_COND_MIN IFN_COND_MAX
  IFN_COND_FMIN IFN_COND_FMAX
  IFN_COND_AND IFN_COND_IOR IFN_COND_XOR
  IFN_COND_SHL IFN_COND_SHR)

/* Same for ternary operations.  */
(define_operator_list UNCOND_TERNARY
  IFN_FMA IFN_FMS IFN_FNMA IFN_FNMS)
(define_operator_list COND_TERNARY
  IFN_COND_FMA IFN_COND_FMS IFN_COND_FNMA IFN_COND_FNMS)
/* __atomic_fetch_or_*, __atomic_fetch_xor_*, __atomic_xor_fetch_*  */
(define_operator_list ATOMIC_FETCH_OR_XOR_N
  BUILT_IN_ATOMIC_FETCH_OR_1 BUILT_IN_ATOMIC_FETCH_OR_2
  BUILT_IN_ATOMIC_FETCH_OR_4 BUILT_IN_ATOMIC_FETCH_OR_8
  BUILT_IN_ATOMIC_FETCH_OR_16
  BUILT_IN_ATOMIC_FETCH_XOR_1 BUILT_IN_ATOMIC_FETCH_XOR_2
  BUILT_IN_ATOMIC_FETCH_XOR_4 BUILT_IN_ATOMIC_FETCH_XOR_8
  BUILT_IN_ATOMIC_FETCH_XOR_16
  BUILT_IN_ATOMIC_XOR_FETCH_1 BUILT_IN_ATOMIC_XOR_FETCH_2
  BUILT_IN_ATOMIC_XOR_FETCH_4 BUILT_IN_ATOMIC_XOR_FETCH_8
  BUILT_IN_ATOMIC_XOR_FETCH_16)
/* __sync_fetch_and_or_*, __sync_fetch_and_xor_*, __sync_xor_and_fetch_*  */
(define_operator_list SYNC_FETCH_OR_XOR_N
  BUILT_IN_SYNC_FETCH_AND_OR_1 BUILT_IN_SYNC_FETCH_AND_OR_2
  BUILT_IN_SYNC_FETCH_AND_OR_4 BUILT_IN_SYNC_FETCH_AND_OR_8
  BUILT_IN_SYNC_FETCH_AND_OR_16
  BUILT_IN_SYNC_FETCH_AND_XOR_1 BUILT_IN_SYNC_FETCH_AND_XOR_2
  BUILT_IN_SYNC_FETCH_AND_XOR_4 BUILT_IN_SYNC_FETCH_AND_XOR_8
  BUILT_IN_SYNC_FETCH_AND_XOR_16
  BUILT_IN_SYNC_XOR_AND_FETCH_1 BUILT_IN_SYNC_XOR_AND_FETCH_2
  BUILT_IN_SYNC_XOR_AND_FETCH_4 BUILT_IN_SYNC_XOR_AND_FETCH_8
  BUILT_IN_SYNC_XOR_AND_FETCH_16)
/* __atomic_fetch_and_*.  */
(define_operator_list ATOMIC_FETCH_AND_N
  BUILT_IN_ATOMIC_FETCH_AND_1 BUILT_IN_ATOMIC_FETCH_AND_2
  BUILT_IN_ATOMIC_FETCH_AND_4 BUILT_IN_ATOMIC_FETCH_AND_8
  BUILT_IN_ATOMIC_FETCH_AND_16)
/* __sync_fetch_and_and_*.  */
(define_operator_list SYNC_FETCH_AND_AND_N
  BUILT_IN_SYNC_FETCH_AND_AND_1 BUILT_IN_SYNC_FETCH_AND_AND_2
  BUILT_IN_SYNC_FETCH_AND_AND_4 BUILT_IN_SYNC_FETCH_AND_AND_8
  BUILT_IN_SYNC_FETCH_AND_AND_16)
/* With nop_convert? combine convert? and view_convert? in one pattern
   plus conditionalize on tree_nop_conversion_p conversions.  */
(match (nop_convert @0)
 (convert @0)
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))))
(match (nop_convert @0)
 (view_convert @0)
 (if (VECTOR_TYPE_P (type) && VECTOR_TYPE_P (TREE_TYPE (@0))
      && known_eq (TYPE_VECTOR_SUBPARTS (type),
                   TYPE_VECTOR_SUBPARTS (TREE_TYPE (@0)))
      && tree_nop_conversion_p (TREE_TYPE (type), TREE_TYPE (TREE_TYPE (@0))))))
/* Transform the likes of (char) ABS_EXPR <(int) x> into (char) ABSU_EXPR <x>.
   ABSU_EXPR returns the unsigned absolute value of its operand, and the
   operand of the ABSU_EXPR has the corresponding signed type.  */
(simplify
 (abs (convert @0))
 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
      && !TYPE_UNSIGNED (TREE_TYPE (@0))
      && element_precision (type) > element_precision (TREE_TYPE (@0)))
  (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
   (convert (absu:utype @0)))))
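
/* Illustrative note (not part of the upstream pattern set): ABSU avoids
   the one undefined case of signed abs.  Assuming 32-bit int,
     abs (INT_MIN)   -> signed overflow, undefined behavior
     absu (INT_MIN)  -> 2147483648u, well defined
   so rewriting into the unsigned form is safe wherever the wider result
   is immediately truncated again.  */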
/* Optimize (X + (X >> (prec - 1))) ^ (X >> (prec - 1)) into abs (X).  */
(simplify
 (bit_xor:c (plus:c @0 (rshift@2 @0 INTEGER_CST@1)) @2)
 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
      && !TYPE_UNSIGNED (TREE_TYPE (@0))
      && wi::to_widest (@1) == element_precision (TREE_TYPE (@0)) - 1)
  (abs @0)))
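
/* Worked example (illustrative only), assuming 8-bit signed X = -5:
     X           = 0xFB
     X >> 7      = 0xFF  (-1, the sign mask)
     X + (X>>7)  = 0xFA  (-6)
     0xFA ^ 0xFF = 0x05  = 5 = abs (-5)
   For nonnegative X the shift yields 0 and both operations are no-ops.  */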
/* Simplifications of operations with one constant operand and
   simplifications to constants or single values.  */

(for op (plus pointer_plus minus bit_ior bit_xor)
  (simplify
    (op @0 integer_zerop)
    (non_lvalue @0)))

/* 0 +p index -> (type)index */
(simplify
 (pointer_plus integer_zerop @1)
 (non_lvalue (convert @1)))

/* ptr - 0 -> (type)ptr */
(simplify
 (pointer_diff @0 integer_zerop)
 (convert @0))
/* See if ARG1 is zero and X + ARG1 reduces to X.
   Likewise if the operands are reversed.  */
(simplify
 (plus:c @0 real_zerop@1)
 (if (fold_real_zero_addition_p (type, @0, @1, 0))
  (non_lvalue @0)))

/* See if ARG1 is zero and X - ARG1 reduces to X.  */
(simplify
 (minus @0 real_zerop@1)
 (if (fold_real_zero_addition_p (type, @0, @1, 1))
  (non_lvalue @0)))

/* Even if fold_real_zero_addition_p can't simplify X + 0.0
   into X, we can optimize (X + 0.0) + 0.0 or (X + 0.0) - 0.0
   or (X - 0.0) + 0.0 into X + 0.0 and (X - 0.0) - 0.0 into X - 0.0
   if not -frounding-math.  For sNaNs the first operation would raise
   exceptions but turn the result into qNaN, so the second operation
   would not raise it.  */
(for inner_op (plus minus)
 (for outer_op (plus minus)
  (simplify
   (outer_op (inner_op@3 @0 REAL_CST@1) REAL_CST@2)
   (if (real_zerop (@1)
        && real_zerop (@2)
        && !HONOR_SIGN_DEPENDENT_ROUNDING (type))
    (with { bool inner_plus = ((inner_op == PLUS_EXPR)
                               ^ REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (@1)));
            bool outer_plus
              = ((outer_op == PLUS_EXPR)
                 ^ REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (@2))); }
     (if (outer_plus && !inner_plus)
      (outer_op @0 @2)
      @3))))))
/* Simplify x - x.
   This is unsafe for certain floats even in non-IEEE formats.
   In IEEE, it is unsafe because it does wrong for NaNs.
   PR middle-end/98420: x - x may be -0.0 with FE_DOWNWARD.
   Also note that operand_equal_p is always false if an operand
   is volatile.  */
(simplify
 (minus @0 @0)
 (if (!FLOAT_TYPE_P (type)
      || (!tree_expr_maybe_nan_p (@0)
          && !tree_expr_maybe_infinite_p (@0)
          && (!HONOR_SIGN_DEPENDENT_ROUNDING (type)
              || !HONOR_SIGNED_ZEROS (type))))
  { build_zero_cst (type); }))
(simplify
 (pointer_diff @@0 @0)
 { build_zero_cst (type); })
(simplify
 (mult @0 integer_zerop@1)
 @1)

/* -x == x -> x == 0 */
(for cmp (eq ne)
 (simplify
  (cmp:c @0 (negate @0))
  (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
   (cmp @0 { build_zero_cst (TREE_TYPE (@0)); }))))
/* Maybe fold x * 0 to 0.  The expressions aren't the same
   when x is NaN, since x * 0 is also NaN.  Nor are they the
   same in modes with signed zeros, since multiplying a
   negative value by 0 gives -0, not +0.  Nor when x is +-Inf,
   since x * 0 is NaN.  */
(simplify
 (mult @0 real_zerop@1)
 (if (!tree_expr_maybe_nan_p (@0)
      && (!HONOR_NANS (type) || !tree_expr_maybe_infinite_p (@0))
      && (!HONOR_SIGNED_ZEROS (type) || tree_expr_nonnegative_p (@0)))
  @1))
/* In IEEE floating point, x*1 is not equivalent to x for snans.
   Likewise for complex arithmetic with signed zeros.  */
(simplify
 (mult @0 real_onep)
 (if (!tree_expr_maybe_signaling_nan_p (@0)
      && (!HONOR_SIGNED_ZEROS (type)
          || !COMPLEX_FLOAT_TYPE_P (type)))
  (non_lvalue @0)))

/* Transform x * -1.0 into -x.  */
(simplify
 (mult @0 real_minus_onep)
 (if (!tree_expr_maybe_signaling_nan_p (@0)
      && (!HONOR_SIGNED_ZEROS (type)
          || !COMPLEX_FLOAT_TYPE_P (type)))
  (negate @0)))
/* Transform x * { 0 or 1, 0 or 1, ... } into x & { 0 or -1, 0 or -1, ... },
   unless the target has native support for the former but not the latter.  */
(simplify
 (mult @0 VECTOR_CST@1)
 (if (initializer_each_zero_or_onep (@1)
      && !HONOR_SNANS (type)
      && !HONOR_SIGNED_ZEROS (type))
  (with { tree itype = FLOAT_TYPE_P (type) ? unsigned_type_for (type) : type; }
   (if (itype
        && (!VECTOR_MODE_P (TYPE_MODE (type))
            || (VECTOR_MODE_P (TYPE_MODE (itype))
                && optab_handler (and_optab,
                                  TYPE_MODE (itype)) != CODE_FOR_nothing)))
    (view_convert (bit_and:itype (view_convert @0)
                                 (ne @1 { build_zero_cst (type); })))))))
/* In SWAR (SIMD within a register) code a signed comparison of packed data
   can be constructed with a particular combination of shift, bitwise and,
   and multiplication by constants.  If that code is vectorized we can
   convert this pattern into a more efficient vector comparison.  */
(simplify
 (mult (bit_and (rshift @0 uniform_integer_cst_p@1)
                uniform_integer_cst_p@2)
       uniform_integer_cst_p@3)
 (with {
   tree rshift_cst = uniform_integer_cst_p (@1);
   tree bit_and_cst = uniform_integer_cst_p (@2);
   tree mult_cst = uniform_integer_cst_p (@3);
  }
  /* Make sure we're working with vectors and uniform vector constants.  */
  (if (VECTOR_TYPE_P (type)
       && tree_fits_uhwi_p (rshift_cst)
       && tree_fits_uhwi_p (mult_cst)
       && tree_fits_uhwi_p (bit_and_cst))
   /* Compute what constants would be needed for this to represent a packed
      comparison based on the shift amount denoted by RSHIFT_CST.  */
   (with {
     HOST_WIDE_INT vec_elem_bits = vector_element_bits (type);
     poly_int64 vec_nelts = TYPE_VECTOR_SUBPARTS (type);
     poly_int64 vec_bits = vec_elem_bits * vec_nelts;
     unsigned HOST_WIDE_INT cmp_bits_i, bit_and_i, mult_i;
     unsigned HOST_WIDE_INT target_mult_i, target_bit_and_i;
     cmp_bits_i = tree_to_uhwi (rshift_cst) + 1;
     mult_i = tree_to_uhwi (mult_cst);
     target_mult_i = (HOST_WIDE_INT_1U << cmp_bits_i) - 1;
     bit_and_i = tree_to_uhwi (bit_and_cst);
     target_bit_and_i = 0;

     /* The bit pattern in BIT_AND_I should be a mask for the least
        significant bit of each packed element that is CMP_BITS wide.  */
     for (unsigned i = 0; i < vec_elem_bits / cmp_bits_i; i++)
       target_bit_and_i = (target_bit_and_i << cmp_bits_i) | 1U;
    }
    (if ((exact_log2 (cmp_bits_i)) >= 0
         && cmp_bits_i < HOST_BITS_PER_WIDE_INT
         && multiple_p (vec_bits, cmp_bits_i)
         && vec_elem_bits <= HOST_BITS_PER_WIDE_INT
         && target_mult_i == mult_i
         && target_bit_and_i == bit_and_i)
     /* Compute the vector shape for the comparison and check if the target is
        able to expand the comparison with that type.  */
     (with {
       /* We're doing a signed comparison.  */
       tree cmp_type = build_nonstandard_integer_type (cmp_bits_i, 0);
       poly_int64 vector_type_nelts = exact_div (vec_bits, cmp_bits_i);
       tree vec_cmp_type = build_vector_type (cmp_type, vector_type_nelts);
       tree vec_truth_type = truth_type_for (vec_cmp_type);
       tree zeros = build_zero_cst (vec_cmp_type);
       tree ones = build_all_ones_cst (vec_cmp_type);
      }
      (if (expand_vec_cmp_expr_p (vec_cmp_type, vec_truth_type, LT_EXPR)
           && expand_vec_cond_expr_p (vec_cmp_type, vec_truth_type, LT_EXPR))
       (view_convert:type (vec_cond (lt:vec_truth_type
                                     (view_convert:vec_cmp_type @0)
                                     { zeros; })
                           { ones; } { zeros; })))))))))
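
/* Illustrative note (not part of the upstream pattern set): with 8-bit
   lanes packed into a 32-bit scalar, a SWAR "sign bit set" test reads
     ((x >> 7) & 0x01010101) * 0xFF
   i.e. rshift_cst == 7, bit_and_cst == 0x01010101, mult_cst == 0xFF,
   which matches cmp_bits_i == 8, target_bit_and_i == 0x01010101 and
   target_mult_i == 0xFF above; once vectorized this becomes a plain
   signed x < 0 comparison on a vector of 8-bit elements.  */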
(for cmp (gt ge lt le)
     outp (convert convert negate negate)
     outn (negate negate convert convert)
 /* Transform X * (X > 0.0 ? 1.0 : -1.0) into abs(X). */
 /* Transform X * (X >= 0.0 ? 1.0 : -1.0) into abs(X). */
 /* Transform X * (X < 0.0 ? 1.0 : -1.0) into -abs(X). */
 /* Transform X * (X <= 0.0 ? 1.0 : -1.0) into -abs(X). */
 (simplify
  (mult:c @0 (cond (cmp @0 real_zerop) real_onep@1 real_minus_onep))
  (if (!tree_expr_maybe_nan_p (@0) && !HONOR_SIGNED_ZEROS (type))
   (outp (abs @0))))
 /* Transform X * (X > 0.0 ? -1.0 : 1.0) into -abs(X). */
 /* Transform X * (X >= 0.0 ? -1.0 : 1.0) into -abs(X). */
 /* Transform X * (X < 0.0 ? -1.0 : 1.0) into abs(X). */
 /* Transform X * (X <= 0.0 ? -1.0 : 1.0) into abs(X). */
 (simplify
  (mult:c @0 (cond (cmp @0 real_zerop) real_minus_onep real_onep@1))
  (if (!tree_expr_maybe_nan_p (@0) && !HONOR_SIGNED_ZEROS (type))
   (outn (abs @0)))))
/* Transform X * copysign (1.0, X) into abs(X).  */
(simplify
 (mult:c @0 (COPYSIGN_ALL real_onep @0))
 (if (!tree_expr_maybe_nan_p (@0) && !HONOR_SIGNED_ZEROS (type))
  (abs @0)))

/* Transform X * copysign (1.0, -X) into -abs(X).  */
(simplify
 (mult:c @0 (COPYSIGN_ALL real_onep (negate @0)))
 (if (!tree_expr_maybe_nan_p (@0) && !HONOR_SIGNED_ZEROS (type))
  (negate (abs @0))))

/* Transform copysign (CST, X) into copysign (ABS(CST), X).  */
(simplify
 (COPYSIGN_ALL REAL_CST@0 @1)
 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@0)))
  (COPYSIGN_ALL (negate @0) @1)))
/* Transform c ? x * copysign (1, y) : z to c ? x ^ signs(y) : z.
   tree-ssa-math-opts.cc does the corresponding optimization for
   unconditional multiplications (via xorsign).  */
(simplify
 (IFN_COND_MUL:c @0 @1 (IFN_COPYSIGN real_onep @2) @3)
 (with { tree signs = sign_mask_for (type); }
  (if (signs)
   (with { tree inttype = TREE_TYPE (signs); }
    (view_convert:type
     (IFN_COND_XOR:inttype @0
      (view_convert:inttype @1)
      (bit_and (view_convert:inttype @2) { signs; })
      (view_convert:inttype @3)))))))
/* (x >= 0 ? x : 0) + (x <= 0 ? -x : 0) -> abs x.  */
(simplify
 (plus:c (max @0 integer_zerop) (max (negate @0) integer_zerop))
 (abs @0))

/* X * 1, X / 1 -> X.  */
(for op (mult trunc_div ceil_div floor_div round_div exact_div)
  (simplify
    (op @0 integer_onep)
    (non_lvalue @0)))
/* (A / (1 << B)) -> (A >> B).
   Only for unsigned A.  For signed A, this would not preserve rounding
   toward zero.
   For example: (-1 / ( 1 << B)) != -1 >> B.
   Also handle widening conversions, like:
   (A / (unsigned long long) (1U << B)) -> (A >> B)
   and
   (A / (unsigned long long) (1 << B)) -> (A >> B).
   If the left shift is signed, it can be done only if the upper bits
   of A starting from shift's type sign bit are zero, as
   (unsigned long long) (1 << 31) is -2147483648ULL, not 2147483648ULL,
   so it is valid only if A >> 31 is zero.  */
(simplify
 (trunc_div (convert?@0 @3) (convert2? (lshift integer_onep@1 @2)))
 (if ((TYPE_UNSIGNED (type) || tree_expr_nonnegative_p (@0))
      && (!VECTOR_TYPE_P (type)
          || target_supports_op_p (type, RSHIFT_EXPR, optab_vector)
          || target_supports_op_p (type, RSHIFT_EXPR, optab_scalar))
      && (useless_type_conversion_p (type, TREE_TYPE (@1))
          || (element_precision (type) >= element_precision (TREE_TYPE (@1))
              && (TYPE_UNSIGNED (TREE_TYPE (@1))
                  || (element_precision (type)
                      == element_precision (TREE_TYPE (@1)))
                  || (INTEGRAL_TYPE_P (type)
                      && (tree_nonzero_bits (@0)
                          & wi::mask (element_precision (TREE_TYPE (@1)) - 1,
                                      true,
                                      element_precision (type))) == 0)))))
  (if (!VECTOR_TYPE_P (type)
       && useless_type_conversion_p (TREE_TYPE (@3), TREE_TYPE (@1))
       && element_precision (TREE_TYPE (@3)) < element_precision (type))
   (convert (rshift @3 @2))
   (rshift @0 @2))))
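
/* Worked counterexample for the signedness restriction (illustrative
   only): with 32-bit int,
     -1 / (1 << 1)  ==  -1 / 2  ==  0    (truncating division)
     -1 >> 1        ==  -1               (arithmetic shift rounds down)
   so the rewrite is only applied when A is unsigned or provably
   nonnegative.  */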
/* Preserve explicit divisions by 0: the C++ front-end wants to detect
   undefined behavior in constexpr evaluation, and assuming that the division
   traps enables better optimizations than these anyway.  */
(for div (trunc_div ceil_div floor_div round_div exact_div)
 /* 0 / X is always zero.  */
 (simplify
  (div integer_zerop@0 @1)
  /* But not for 0 / 0 so that we can get the proper warnings and errors.  */
  (if (!integer_zerop (@1))
   @0))
 /* X / -1 is -X.  */
 (simplify
  (div @0 integer_minus_onep@1)
  (if (!TYPE_UNSIGNED (type))
   (negate @0)))
 /* X / bool_range_Y is X.  */
 (simplify
  (div @0 SSA_NAME@1)
  (if (INTEGRAL_TYPE_P (type)
       && ssa_name_has_boolean_range (@1)
       && !flag_non_call_exceptions)
   @0))
 /* X / X is one.  */
 (simplify
  (div @0 @0)
  /* But not for 0 / 0 so that we can get the proper warnings and errors.
     And not for _Fract types where we can't build 1.  */
  (if (!ALL_FRACT_MODE_P (TYPE_MODE (type))
       && !integer_zerop (@0)
       && (!flag_non_call_exceptions || tree_expr_nonzero_p (@0)))
   { build_one_cst (type); }))
 /* X / abs (X) is X < 0 ? -1 : 1.  */
 (simplify
  (div:C @0 (abs @0))
  (if (INTEGRAL_TYPE_P (type)
       && TYPE_OVERFLOW_UNDEFINED (type)
       && !integer_zerop (@0)
       && (!flag_non_call_exceptions || tree_expr_nonzero_p (@0)))
   (cond (lt @0 { build_zero_cst (type); })
         { build_minus_one_cst (type); } { build_one_cst (type); })))
 /* X / -X is -1.  */
 (simplify
  (div:C @0 (negate @0))
  (if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
       && TYPE_OVERFLOW_UNDEFINED (type)
       && !integer_zerop (@0)
       && (!flag_non_call_exceptions || tree_expr_nonzero_p (@0)))
   { build_minus_one_cst (type); })))
/* For unsigned integral types, FLOOR_DIV_EXPR is the same as
   TRUNC_DIV_EXPR.  Rewrite into the latter in this case.  Similarly
   for MOD instead of DIV.  */
(for floor_divmod (floor_div floor_mod)
     trunc_divmod (trunc_div trunc_mod)
 (simplify
  (floor_divmod @0 @1)
  (if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
       && TYPE_UNSIGNED (type))
   (trunc_divmod @0 @1))))
/* 1 / X -> X == 1 for unsigned integer X.
   1 / X -> X >= -1 && X <= 1 ? X : 0 for signed integer X.
   But not for 1 / 0 so that we can get proper warnings and errors,
   and not for 1-bit integers as they are edge cases better handled
   elsewhere.  */
(simplify
 (trunc_div integer_onep@0 @1)
 (if (INTEGRAL_TYPE_P (type)
      && TYPE_PRECISION (type) > 1
      && !integer_zerop (@1)
      && (!flag_non_call_exceptions || tree_expr_nonzero_p (@1)))
  (if (TYPE_UNSIGNED (type))
   (convert (eq:boolean_type_node @1 { build_one_cst (type); }))
   (with { tree utype = unsigned_type_for (type); }
    (cond (le (plus (convert:utype @1) { build_one_cst (utype); })
              { build_int_cst (utype, 2); })
          @1 { build_zero_cst (type); })))))
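
/* Illustrative note (not part of the upstream pattern set): for signed X
   the result above tests X in [-1, 1] with a single unsigned compare,
     (unsigned) X + 1 <= 2
   which maps X == -1 -> 0, X == 0 -> 1, X == 1 -> 2 and everything else
   to a larger unsigned value, so 1 / X folds to X for X in {-1, 1} and
   to 0 otherwise (1 / 0 is undefined anyway).  */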
/* Combine two successive divisions.  Note that combining ceil_div
   and floor_div is trickier and combining round_div even more so.  */
(for div (trunc_div exact_div)
 (simplify
  (div (div@3 @0 INTEGER_CST@1) INTEGER_CST@2)
  (with {
    wi::overflow_type overflow;
    wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
                            TYPE_SIGN (type), &overflow);
   }
   (if (div == EXACT_DIV_EXPR
        || optimize_successive_divisions_p (@2, @3))
    (if (!overflow)
     (div @0 { wide_int_to_tree (type, mul); })
     (if (TYPE_UNSIGNED (type)
          || mul != wi::min_value (TYPE_PRECISION (type), SIGNED))
      { build_zero_cst (type); }))))))
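
/* Worked example (illustrative only), 32-bit int:
     (x / 10) / 20       ->  x / 200        (10 * 20 does not overflow)
     (x / 100000) / 100000 -> 0             (10^10 overflows the type, but
                                             |x / 100000| < 100000 always,
                                             so the combined quotient is 0)
   the signed-minimum product is excluded because negating it is not
   representable.  */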
/* Combine successive multiplications.  Similar to above, but handling
   overflow is different.  */
(simplify
 (mult (mult @0 INTEGER_CST@1) INTEGER_CST@2)
 (with {
   wi::overflow_type overflow;
   wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
                           TYPE_SIGN (type), &overflow);
  }
  /* Skip folding on overflow: the only special case is @1 * @2 == -INT_MIN,
     otherwise undefined overflow implies that @0 must be zero.  */
  (if (!overflow || TYPE_OVERFLOW_WRAPS (type))
   (mult @0 { wide_int_to_tree (type, mul); }))))
/* Similar to above, but there could be an extra add/sub between
   successive multiplications.  */
(simplify
 (mult (plus:s (mult:s@4 @0 INTEGER_CST@1) INTEGER_CST@2) INTEGER_CST@3)
 (with {
   bool overflowed = true;
   wi::overflow_type ovf1, ovf2;
   wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@3),
                           TYPE_SIGN (type), &ovf1);
   wide_int add = wi::mul (wi::to_wide (@2), wi::to_wide (@3),
                           TYPE_SIGN (type), &ovf2);
   if (TYPE_OVERFLOW_UNDEFINED (type))
    {
#if GIMPLE
      value_range vr0;
      if (ovf1 == wi::OVF_NONE && ovf2 == wi::OVF_NONE
          && get_global_range_query ()->range_of_expr (vr0, @4)
          && !vr0.varying_p () && !vr0.undefined_p ())
        {
          wide_int wmin0 = vr0.lower_bound ();
          wide_int wmax0 = vr0.upper_bound ();
          wmin0 = wi::mul (wmin0, wi::to_wide (@3), TYPE_SIGN (type), &ovf1);
          wmax0 = wi::mul (wmax0, wi::to_wide (@3), TYPE_SIGN (type), &ovf2);
          if (ovf1 == wi::OVF_NONE && ovf2 == wi::OVF_NONE)
            {
              wi::add (wmin0, add, TYPE_SIGN (type), &ovf1);
              wi::add (wmax0, add, TYPE_SIGN (type), &ovf2);
            }
          if (ovf1 == wi::OVF_NONE && ovf2 == wi::OVF_NONE)
            overflowed = false;
        }
#endif
    }
   else
    overflowed = false;
  }
  /* Skip folding on overflow.  */
  (if (!overflowed)
   (plus (mult @0 { wide_int_to_tree (type, mul); })
         { wide_int_to_tree (type, add); }))))
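
/* Illustrative note (not part of the upstream pattern set): the algebra
   being applied is distribution over the middle addition,
     (x * C1 + C2) * C3  ==  x * (C1 * C3) + (C2 * C3)
   e.g. (x * 4 + 3) * 2 == x * 8 + 6; the range-query code above only
   exists to prove that neither product can overflow when overflow is
   undefined for the type.  */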
/* Similar to above, but a multiplication between successive additions.  */
(simplify
 (plus (mult:s (plus:s @0 INTEGER_CST@1) INTEGER_CST@2) INTEGER_CST@3)
 (with {
   bool overflowed = true;
   wi::overflow_type ovf1;
   wi::overflow_type ovf2;
   wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
                           TYPE_SIGN (type), &ovf1);
   wide_int add = wi::add (mul, wi::to_wide (@3),
                           TYPE_SIGN (type), &ovf2);
   if (TYPE_OVERFLOW_UNDEFINED (type))
    {
#if GIMPLE
      value_range vr0;
      if (ovf1 == wi::OVF_NONE && ovf2 == wi::OVF_NONE
          && get_global_range_query ()->range_of_expr (vr0, @0)
          && !vr0.varying_p () && !vr0.undefined_p ())
        {
          wide_int wmin0 = vr0.lower_bound ();
          wide_int wmax0 = vr0.upper_bound ();
          wmin0 = wi::mul (wmin0, wi::to_wide (@2), TYPE_SIGN (type), &ovf1);
          wmax0 = wi::mul (wmax0, wi::to_wide (@2), TYPE_SIGN (type), &ovf2);
          if (ovf1 == wi::OVF_NONE && ovf2 == wi::OVF_NONE)
            {
              wi::add (wmin0, mul, TYPE_SIGN (type), &ovf1);
              wi::add (wmax0, mul, TYPE_SIGN (type), &ovf2);
            }
          if (ovf1 == wi::OVF_NONE && ovf2 == wi::OVF_NONE)
            overflowed = false;
        }
#endif
    }
   else
    overflowed = false;
  }
  /* Skip folding on overflow.  */
  (if (!overflowed)
   (plus (mult @0 @2) { wide_int_to_tree (type, add); }))))
/* Optimize A / A to 1.0 if we don't care about
   NaNs or Infinities.  */
(simplify
 (rdiv @0 @0)
 (if (FLOAT_TYPE_P (type)
      && ! HONOR_NANS (type)
      && ! HONOR_INFINITIES (type))
  { build_one_cst (type); }))

/* Optimize -A / A to -1.0 if we don't care about
   NaNs or Infinities.  */
(simplify
 (rdiv:C @0 (negate @0))
 (if (FLOAT_TYPE_P (type)
      && ! HONOR_NANS (type)
      && ! HONOR_INFINITIES (type))
  { build_minus_one_cst (type); }))

/* PR71078: x / abs(x) -> copysign (1.0, x) */
(simplify
 (rdiv:C (convert? @0) (convert? (abs @0)))
  (if (SCALAR_FLOAT_TYPE_P (type)
       && ! HONOR_NANS (type)
       && ! HONOR_INFINITIES (type))
   (switch
    (if (types_match (type, float_type_node))
     (BUILT_IN_COPYSIGNF { build_one_cst (type); } (convert @0)))
    (if (types_match (type, double_type_node))
     (BUILT_IN_COPYSIGN { build_one_cst (type); } (convert @0)))
    (if (types_match (type, long_double_type_node))
     (BUILT_IN_COPYSIGNL { build_one_cst (type); } (convert @0))))))
/* In IEEE floating point, x/1 is not equivalent to x for snans.  */
(simplify
 (rdiv @0 real_onep)
 (if (!tree_expr_maybe_signaling_nan_p (@0))
  (non_lvalue @0)))

/* In IEEE floating point, x/-1 is not equivalent to -x for snans.  */
(simplify
 (rdiv @0 real_minus_onep)
 (if (!tree_expr_maybe_signaling_nan_p (@0))
  (negate @0)))

(if (flag_reciprocal_math)
 /* Convert (A/B)/C to A/(B*C).  */
 (simplify
  (rdiv (rdiv:s @0 @1) @2)
  (rdiv @0 (mult @1 @2)))

 /* Canonicalize x / (C1 * y) to (x * C2) / y.  */
 (simplify
  (rdiv @0 (mult:s @1 REAL_CST@2))
  (with
   { tree tem = const_binop (RDIV_EXPR, type, build_one_cst (type), @2); }
   (if (tem)
    (rdiv (mult @0 { tem; } ) @1))))

 /* Convert A/(B/C) to (A/B)*C  */
 (simplify
  (rdiv @0 (rdiv:s @1 @2))
  (mult (rdiv @0 @1) @2)))

/* Simplify x / (- y) to -x / y.  */
(simplify
 (rdiv @0 (negate @1))
 (rdiv (negate @0) @1))
(if (flag_unsafe_math_optimizations)
 /* Simplify (C / x op 0.0) to x op 0.0 for C != 0, C != Inf/Nan.
    Since C / x may underflow to zero, do this only for unsafe math.  */
 (for op (lt le gt ge)
      neg_op (gt ge lt le)
  (simplify
   (op (rdiv REAL_CST@0 @1) real_zerop@2)
   (if (!HONOR_SIGNED_ZEROS (@1) && !HONOR_INFINITIES (@1))
    (switch
     (if (real_less (&dconst0, TREE_REAL_CST_PTR (@0)))
      (op @1 @2))
     /* For C < 0, use the inverted operator.  */
     (if (real_less (TREE_REAL_CST_PTR (@0), &dconst0))
      (neg_op @1 @2)))))))
/* Optimize (X & (-A)) / A where A is a power of 2, to X >> log2(A) */
(for div (trunc_div ceil_div floor_div round_div exact_div)
 (simplify
  (div (convert? (bit_and @0 INTEGER_CST@1)) INTEGER_CST@2)
  (if (integer_pow2p (@2)
       && tree_int_cst_sgn (@2) > 0
       && tree_nop_conversion_p (type, TREE_TYPE (@0))
       && wi::to_wide (@2) + wi::to_wide (@1) == 0)
   (rshift (convert @0)
           { build_int_cst (integer_type_node,
                            wi::exact_log2 (wi::to_wide (@2))); }))))
/* If ARG1 is a constant, we can convert this to a multiply by the
   reciprocal.  This does not have the same rounding properties,
   so only do this if -freciprocal-math.  We can actually
   always safely do it if ARG1 is a power of two, but it's hard to
   tell if it is or not in a portable manner.  */
(for cst (REAL_CST COMPLEX_CST VECTOR_CST)
 (simplify
  (rdiv @0 cst@1)
  (if (optimize)
   (if (flag_reciprocal_math
        && !real_zerop (@1))
    (with
     { tree tem = const_binop (RDIV_EXPR, type, build_one_cst (type), @1); }
     (if (tem)
      (mult @0 { tem; } )))
    (if (cst != COMPLEX_CST)
     (with { tree inverse = exact_inverse (type, @1); }
      (if (inverse)
       (mult @0 { inverse; } ))))))))
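
/* Illustrative note (not part of the upstream pattern set):
     x / 4.0  ->  x * 0.25       is exact, since 1/4 is representable
     x / 3.0  ->  x * (1.0/3.0)  rounds 1/3 and can be off by an ulp,
   which is why the general rewrite is gated on -freciprocal-math and
   only exact_inverse results are used otherwise.  */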
(for mod (ceil_mod floor_mod round_mod trunc_mod)
 /* 0 % X is always zero.  */
 (simplify
  (mod integer_zerop@0 @1)
  /* But not for 0 % 0 so that we can get the proper warnings and errors.  */
  (if (!integer_zerop (@1))
   @0))
 /* X % 1 is always zero.  */
 (simplify
  (mod @0 integer_onep)
  { build_zero_cst (type); })
 /* X % -1 is zero.  */
 (simplify
  (mod @0 integer_minus_onep@1)
  (if (!TYPE_UNSIGNED (type))
   { build_zero_cst (type); }))
 /* X % X is zero.  */
 (simplify
  (mod @0 @0)
  /* But not for 0 % 0 so that we can get the proper warnings and errors.  */
  (if (!integer_zerop (@0))
   { build_zero_cst (type); }))
 /* (X % Y) % Y is just X % Y.  */
 (simplify
  (mod (mod@2 @0 @1) @1)
  @2)
 /* From extract_muldiv_1: (X * C1) % C2 is zero if C1 is a multiple of C2.  */
 (simplify
  (mod (mult @0 INTEGER_CST@1) INTEGER_CST@2)
  (if (ANY_INTEGRAL_TYPE_P (type)
       && TYPE_OVERFLOW_UNDEFINED (type)
       && wi::multiple_of_p (wi::to_wide (@1), wi::to_wide (@2),
                             TYPE_SIGN (type)))
   { build_zero_cst (type); }))
 /* For (X % C) == 0, if X is signed and C is power of 2, use unsigned
    modulo and comparison, since it is simpler and equivalent.  */
 (for cmp (eq ne)
  (simplify
   (cmp (mod @0 integer_pow2p@2) integer_zerop@1)
   (if (!TYPE_UNSIGNED (TREE_TYPE (@0)))
    (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
     (cmp (mod (convert:utype @0) (convert:utype @2)) (convert:utype @1)))))))
/* X % -C is the same as X % C.  */
(simplify
 (trunc_mod @0 INTEGER_CST@1)
 (if (TYPE_SIGN (type) == SIGNED
      && !TREE_OVERFLOW (@1)
      && wi::neg_p (wi::to_wide (@1))
      && !TYPE_OVERFLOW_TRAPS (type)
      /* Avoid this transformation if C is INT_MIN, i.e. C == -C.  */
      && !sign_bit_p (@1, @1))
  (trunc_mod @0 (negate @1))))
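
/* Worked example (illustrative only): truncating modulo takes the sign
   of the dividend, never the divisor, so e.g.
      7 % -5 ==  2  ==   7 % 5
     -7 % -5 == -2  ==  -7 % 5
   C == INT_MIN is excluded because -INT_MIN is not representable.  */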
/* X % -Y is the same as X % Y.  */
(simplify
 (trunc_mod @0 (convert? (negate @1)))
 (if (INTEGRAL_TYPE_P (type)
      && !TYPE_UNSIGNED (type)
      && !TYPE_OVERFLOW_TRAPS (type)
      && tree_nop_conversion_p (type, TREE_TYPE (@1))
      /* Avoid this transformation if X might be INT_MIN or
         Y might be -1, because we would then change valid
         INT_MIN % -(-1) into invalid INT_MIN % -1.  */
      && (expr_not_equal_to (@0, wi::to_wide (TYPE_MIN_VALUE (type)))
          || expr_not_equal_to (@1, wi::minus_one (TYPE_PRECISION
                                                   (TREE_TYPE (@1))))))
  (trunc_mod @0 (convert @1))))

/* X - (X / Y) * Y is the same as X % Y.  */
(simplify
 (minus (convert1? @0) (convert2? (mult:c (trunc_div @@0 @@1) @1)))
 (if (INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
  (convert (trunc_mod @0 @1))))

/* x * (1 + y / x) - y -> x - y % x */
(simplify
 (minus (mult:cs @0 (plus:s (trunc_div:s @1 @0) integer_onep)) @1)
 (if (INTEGRAL_TYPE_P (type))
  (minus @0 (trunc_mod @1 @0))))
/* Optimize TRUNC_MOD_EXPR by a power of two into a BIT_AND_EXPR,
   i.e. "X % C" into "X & (C - 1)", if X and C are positive.
   Also optimize A % (C << N) where C is a power of 2,
   to A & ((C << N) - 1).
   Also optimize "A shift (B % C)", if C is a power of 2, to
   "A shift (B & (C - 1))".  SHIFT here includes "<<" and ">>", and
   (B % C) is assumed to be nonnegative, as shifting by negative
   amounts would be UB.  */
(match (power_of_two_cand @1)
 INTEGER_CST@1)
(match (power_of_two_cand @1)
 (lshift INTEGER_CST@1 @2))
(for mod (trunc_mod floor_mod)
 (for shift (lshift rshift)
  (simplify
   (shift @0 (mod @1 (power_of_two_cand@2 @3)))
   (if (integer_pow2p (@3) && tree_int_cst_sgn (@3) > 0)
    (shift @0 (bit_and @1 (minus @2 { build_int_cst (TREE_TYPE (@2),
                                                     1); }))))))
 (simplify
  (mod @0 (convert? (power_of_two_cand@1 @2)))
  (if ((TYPE_UNSIGNED (type) || tree_expr_nonnegative_p (@0))
       /* Allow any integral conversions of the divisor, except
          conversion from narrower signed to wider unsigned type
          where, if @1 were a negative power of two, the divisor
          would not be a power of two.  */
       && INTEGRAL_TYPE_P (type)
       && INTEGRAL_TYPE_P (TREE_TYPE (@1))
       && (TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@1))
           || TYPE_UNSIGNED (TREE_TYPE (@1))
           || !TYPE_UNSIGNED (type))
       && integer_pow2p (@2) && tree_int_cst_sgn (@2) > 0)
   (with { tree utype = TREE_TYPE (@1);
           if (!TYPE_OVERFLOW_WRAPS (utype))
             utype = unsigned_type_for (utype); }
    (bit_and @0 (convert (minus (convert:utype @1)
                                { build_one_cst (utype); })))))))
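
/* Worked example (illustrative only): for unsigned x,
     x % 8          ->  x & 7
   and for a runtime power of two,
     x % (4u << n)  ->  x & ((4u << n) - 1)
   which is the (lshift INTEGER_CST) arm of power_of_two_cand.  */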
/* Simplify (unsigned t * 2)/2 -> unsigned t & 0x7FFFFFFF.  */
(simplify
 (trunc_div (mult @0 integer_pow2p@1) @1)
 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) && TYPE_UNSIGNED (TREE_TYPE (@0)))
  (bit_and @0 { wide_int_to_tree
                (type, wi::mask (TYPE_PRECISION (type)
                                 - wi::exact_log2 (wi::to_wide (@1)),
                                 false, TYPE_PRECISION (type))); })))

/* Simplify (unsigned t / 2) * 2 -> unsigned t & ~1.  */
(simplify
 (mult (trunc_div @0 integer_pow2p@1) @1)
 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) && TYPE_UNSIGNED (TREE_TYPE (@0)))
  (bit_and @0 (negate @1))))
/* Simplify (t * 2) / 2 -> t.  */
(for div (trunc_div ceil_div floor_div round_div exact_div)
 (simplify
  (div (mult:c @0 @1) @1)
  (if (ANY_INTEGRAL_TYPE_P (type))
   (if (TYPE_OVERFLOW_UNDEFINED (type))
    @0
#if GIMPLE
    (with
     {
       bool overflowed = true;
       value_range vr0, vr1;
       if (INTEGRAL_TYPE_P (type)
           && get_global_range_query ()->range_of_expr (vr0, @0)
           && get_global_range_query ()->range_of_expr (vr1, @1)
           && !vr0.varying_p () && !vr0.undefined_p ()
           && !vr1.varying_p () && !vr1.undefined_p ())
         {
           wide_int wmin0 = vr0.lower_bound ();
           wide_int wmax0 = vr0.upper_bound ();
           wide_int wmin1 = vr1.lower_bound ();
           wide_int wmax1 = vr1.upper_bound ();
           /* If the multiplication can't overflow/wrap around, then
              it can be optimized too.  */
           wi::overflow_type min_ovf, max_ovf;
           wi::mul (wmin0, wmin1, TYPE_SIGN (type), &min_ovf);
           wi::mul (wmax0, wmax1, TYPE_SIGN (type), &max_ovf);
           if (min_ovf == wi::OVF_NONE && max_ovf == wi::OVF_NONE)
             {
               wi::mul (wmin0, wmax1, TYPE_SIGN (type), &min_ovf);
               wi::mul (wmax0, wmin1, TYPE_SIGN (type), &max_ovf);
               if (min_ovf == wi::OVF_NONE && max_ovf == wi::OVF_NONE)
                 overflowed = false;
             }
         }
     }
     (if (!overflowed)
      @0))
#endif
   ))))
(for op (negate abs)
 /* Simplify cos(-x) and cos(|x|) -> cos(x).  Similarly for cosh.  */
 (for coss (COS COSH)
  (simplify
   (coss (op @0))
   (coss @0)))
 /* Simplify pow(-x, y) and pow(|x|,y) -> pow(x,y) if y is an even integer.  */
 (for pows (POW)
  (simplify
   (pows (op @0) REAL_CST@1)
   (with { HOST_WIDE_INT n; }
    (if (real_isinteger (&TREE_REAL_CST (@1), &n) && (n & 1) == 0)
     (pows @0 @1)))))
 /* Likewise for powi.  */
 (for pows (POWI)
  (simplify
   (pows (op @0) INTEGER_CST@1)
   (if ((wi::to_wide (@1) & 1) == 0)
    (pows @0 @1))))
 /* Strip negate and abs from both operands of hypot.  */
 (for hypots (HYPOT)
  (simplify
   (hypots (op @0) @1)
   (hypots @0 @1))
  (simplify
   (hypots @0 (op @1))
   (hypots @0 @1)))
 /* copysign(-x, y) and copysign(abs(x), y) -> copysign(x, y).  */
 (for copysigns (COPYSIGN_ALL)
  (simplify
   (copysigns (op @0) @1)
   (copysigns @0 @1))))

/* abs(x)*abs(x) -> x*x.  Should be valid for all types.  */
(simplify
 (mult (abs@1 @0) @1)
 (mult @0 @0))

/* Convert absu(x)*absu(x) -> x*x.  */
(simplify
 (mult (absu@1 @0) @1)
 (mult (convert@2 @0) @2))

/* cos(copysign(x, y)) -> cos(x).  Similarly for cosh.  */
(for coss (COS COSH)
     copysigns (COPYSIGN)
 (simplify
  (coss (copysigns @0 @1))
  (coss @0)))

/* pow(copysign(x, y), z) -> pow(x, z) if z is an even integer.  */
(for pows (POW)
     copysigns (COPYSIGN)
 (simplify
  (pows (copysigns @0 @2) REAL_CST@1)
  (with { HOST_WIDE_INT n; }
   (if (real_isinteger (&TREE_REAL_CST (@1), &n) && (n & 1) == 0)
    (pows @0 @1)))))
/* Likewise for powi.  */
(for pows (POWI)
     copysigns (COPYSIGN)
 (simplify
  (pows (copysigns @0 @2) INTEGER_CST@1)
  (if ((wi::to_wide (@1) & 1) == 0)
   (pows @0 @1))))

(for hypots (HYPOT)
     copysigns (COPYSIGN)
 /* hypot(copysign(x, y), z) -> hypot(x, z).  */
 (simplify
  (hypots (copysigns @0 @1) @2)
  (hypots @0 @2))
 /* hypot(x, copysign(y, z)) -> hypot(x, y).  */
 (simplify
  (hypots @0 (copysigns @1 @2))
  (hypots @0 @1)))

/* copysign(x, CST) -> [-]abs (x).  */
(for copysigns (COPYSIGN_ALL)
 (simplify
  (copysigns @0 REAL_CST@1)
  (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
   (negate (abs @0))
   (abs @0))))

/* copysign(copysign(x, y), z) -> copysign(x, z).  */
(for copysigns (COPYSIGN_ALL)
 (simplify
  (copysigns (copysigns @0 @1) @2)
  (copysigns @0 @2)))

/* copysign(x,y)*copysign(x,y) -> x*x.  */
(for copysigns (COPYSIGN_ALL)
 (simplify
  (mult (copysigns@2 @0 @1) @2)
  (mult @0 @0)))

/* ccos(-x) -> ccos(x).  Similarly for ccosh.  */
(for ccoss (CCOS CCOSH)
 (simplify
  (ccoss (negate @0))
  (ccoss @0)))

/* cabs(-x) and cabs(conj(x)) -> cabs(x).  */
(for ops (conj negate)
 (for cabss (CABS)
  (simplify
   (cabss (ops @0))
   (cabss @0))))
/* Fold (a * (1 << b)) into (a << b)  */
(simplify
 (mult:c @0 (convert? (lshift integer_onep@1 @2)))
 (if (! FLOAT_TYPE_P (type)
      && tree_nop_conversion_p (type, TREE_TYPE (@1)))
  (lshift @0 @2)))

/* Shifts by constants distribute over several binary operations,
   hence (X << C) + (Y << C) can be simplified to (X + Y) << C.  */
(for op (plus minus)
  (simplify
    (op (lshift:s @0 @1) (lshift:s @2 @1))
    (if (INTEGRAL_TYPE_P (type)
         && TYPE_OVERFLOW_WRAPS (type)
         && !TYPE_SATURATING (type))
      (lshift (op @0 @2) @1))))

(for op (bit_and bit_ior bit_xor)
  (simplify
    (op (lshift:s @0 @1) (lshift:s @2 @1))
    (if (INTEGRAL_TYPE_P (type))
      (lshift (op @0 @2) @1)))
  (simplify
    (op (rshift:s @0 @1) (rshift:s @2 @1))
    (if (INTEGRAL_TYPE_P (type))
      (rshift (op @0 @2) @1))))
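
/* Illustrative note (not part of the upstream pattern set): the
   plus/minus form requires wrapping overflow because, e.g. with
   32-bit int,
     (x << 1) + (y << 1) == (x + y) << 1
   holds modulo 2^32 for all x and y, but x + y may overflow in cases
   where the original shifts do not, so with undefined overflow the
   rewrite could introduce UB; bitwise operations have no such issue.  */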
/* Fold (1 << (C - x)) where C = precision(type) - 1
   into ((1 << C) >> x).  */
(simplify
 (lshift integer_onep@0 (minus@1 INTEGER_CST@2 @3))
 (if (INTEGRAL_TYPE_P (type)
      && wi::eq_p (wi::to_wide (@2), TYPE_PRECISION (type) - 1)
      && single_use (@1))
  (if (TYPE_UNSIGNED (type))
   (rshift (lshift @0 @2) @3)
   (with
    { tree utype = unsigned_type_for (type); }
    (convert (rshift (lshift (convert:utype @0) @2) @3))))))

/* Fold ((type)(a<0)) << SIGNBITOFA into ((type)a) & signbit.  */
(simplify
 (lshift (convert (lt @0 integer_zerop@1)) INTEGER_CST@2)
 (if (TYPE_SIGN (TREE_TYPE (@0)) == SIGNED
      && wi::eq_p (wi::to_wide (@2), TYPE_PRECISION (TREE_TYPE (@0)) - 1))
  (with { wide_int wone = wi::one (TYPE_PRECISION (type)); }
   (bit_and (convert @0)
            { wide_int_to_tree (type,
                                wi::lshift (wone, wi::to_wide (@2))); }))))
/* Fold (-x >> C) into -(x > 0) where C = precision(type) - 1.  */
(for cst (INTEGER_CST VECTOR_CST)
 (simplify
  (rshift (negate:s @0) cst@1)
  (if (!TYPE_UNSIGNED (type)
       && TYPE_OVERFLOW_UNDEFINED (type))
   (with { tree stype = TREE_TYPE (@1);
           tree bt = truth_type_for (type);
           tree zeros = build_zero_cst (type);
           tree cst = NULL_TREE; }
    (switch
     /* Handle scalar case.  */
     (if (INTEGRAL_TYPE_P (type)
          /* If we apply the rule to the scalar type before vectorization
             we will enforce the result of the comparison being a bool
             which will require an extra AND on the result that will be
             indistinguishable from when the user did actually want 0
             or 1 as the result so it can't be removed.  */
          && canonicalize_math_after_vectorization_p ()
          && wi::eq_p (wi::to_wide (@1), TYPE_PRECISION (type) - 1))
      (negate (convert (gt @0 { zeros; }))))
     /* Handle vector case.  */
     (if (VECTOR_INTEGER_TYPE_P (type)
          /* First check whether the target has the same mode for vector
             comparison results as its operands do.  */
          && TYPE_MODE (bt) == TYPE_MODE (type)
          /* Then check to see if the target is able to expand the comparison
             with the given type later on, otherwise we may ICE.  */
          && expand_vec_cmp_expr_p (type, bt, GT_EXPR)
          && (cst = uniform_integer_cst_p (@1)) != NULL
          && wi::eq_p (wi::to_wide (cst), element_precision (type) - 1))
      (view_convert (gt:bt @0 { zeros; }))))))))
/* Fold (C1/X)*C2 into (C1*C2)/X.  */
(simplify
 (mult (rdiv@3 REAL_CST@0 @1) REAL_CST@2)
 (if (flag_associative_math
      && single_use (@3))
  (with
   { tree tem = const_binop (MULT_EXPR, type, @0, @2); }
   (if (tem)
    (rdiv { tem; } @1)))))

/* Simplify ~X & X as zero.  */
(simplify
 (bit_and:c (convert? @0) (convert? (bit_not @0)))
 { build_zero_cst (type); })

/* PR71636: Transform x & ((1U << b) - 1) -> x & ~(~0U << b);  */
(simplify
 (bit_and:c @0 (plus:s (lshift:s integer_onep @1) integer_minus_onep))
 (if (TYPE_UNSIGNED (type))
  (bit_and @0 (bit_not (lshift { build_all_ones_cst (type); } @1)))))
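
/* Worked example (illustrative only), b == 3, 32-bit unsigned:
     (1u << 3) - 1  ==  0x00000007
     ~(~0u << 3)    ==  ~0xFFFFFFF8  ==  0x00000007
   so both mask expressions select the low b bits; the second form
   avoids the addition.  */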
(for bitop (bit_and bit_ior)
     cmp (eq ne)
 /* PR35691: Transform
    (x == 0 & y == 0) -> (x | typeof(x)(y)) == 0.
    (x != 0 | y != 0) -> (x | typeof(x)(y)) != 0.  */
 (simplify
  (bitop (cmp @0 integer_zerop@2) (cmp @1 integer_zerop))
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && INTEGRAL_TYPE_P (TREE_TYPE (@1))
       && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
   (cmp (bit_ior @0 (convert @1)) @2)))
 /* Transform:
    (x == -1 & y == -1) -> (x & typeof(x)(y)) == -1.
    (x != -1 | y != -1) -> (x & typeof(x)(y)) != -1.  */
 (simplify
  (bitop (cmp @0 integer_all_onesp@2) (cmp @1 integer_all_onesp))
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && INTEGRAL_TYPE_P (TREE_TYPE (@1))
       && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
   (cmp (bit_and @0 (convert @1)) @2))))
/* Fold (A & ~B) - (A & B) into (A ^ B) - B.  */
(simplify
 (minus (bit_and:cs @0 (bit_not @1)) (bit_and:cs @0 @1))
 (minus (bit_xor @0 @1) @1))
(simplify
 (minus (bit_and:s @0 INTEGER_CST@2) (bit_and:s @0 INTEGER_CST@1))
 (if (~wi::to_wide (@2) == wi::to_wide (@1))
  (minus (bit_xor @0 @1) @1)))

/* Fold (A & B) - (A & ~B) into B - (A ^ B).  */
(simplify
 (minus (bit_and:cs @0 @1) (bit_and:cs @0 (bit_not @1)))
 (minus @1 (bit_xor @0 @1)))

/* Simplify (X & ~Y) |^+ (~X & Y) -> X ^ Y.  */
(for op (bit_ior bit_xor plus)
 (simplify
  (op (bit_and:c @0 (bit_not @1)) (bit_and:c (bit_not @0) @1))
  (bit_xor @0 @1))
 (simplify
  (op:c (bit_and @0 INTEGER_CST@2) (bit_and (bit_not @0) INTEGER_CST@1))
  (if (~wi::to_wide (@2) == wi::to_wide (@1))
   (bit_xor @0 @1))))
/* PR53979: Transform ((a ^ b) | a) -> (a | b) */
(simplify
 (bit_ior:c (bit_xor:c @0 @1) @0)
 (bit_ior @0 @1))

/* (a & ~b) | (a ^ b)  -->  a ^ b  */
(simplify
 (bit_ior:c (bit_and:c @0 (bit_not @1)) (bit_xor:c@2 @0 @1))
 @2)

/* (a & ~b) ^ ~a  -->  ~(a & b)  */
(simplify
 (bit_xor:c (bit_and:cs @0 (bit_not @1)) (bit_not @0))
 (bit_not (bit_and @0 @1)))

/* (~a & b) ^ a  -->  (a | b)  */
(simplify
 (bit_xor:c (bit_and:cs (bit_not @0) @1) @0)
 (bit_ior @0 @1))

/* (a | b) & ~(a ^ b)  -->  a & b  */
(simplify
 (bit_and:c (bit_ior @0 @1) (bit_not (bit_xor:c @0 @1)))
 (bit_and @0 @1))

/* a | ~(a ^ b)  -->  a | ~b  */
(simplify
 (bit_ior:c @0 (bit_not:s (bit_xor:c @0 @1)))
 (bit_ior @0 (bit_not @1)))

/* (a | b) | (a &^ b)  -->  a | b  */
(for op (bit_and bit_xor)
 (simplify
  (bit_ior:c (bit_ior@2 @0 @1) (op:c @0 @1))
  @2))

/* (a & b) | ~(a ^ b)  -->  ~(a ^ b)  */
(simplify
 (bit_ior:c (bit_and:c @0 @1) (bit_not@2 (bit_xor @0 @1)))
 @2)

/* ~(~a & b)  -->  a | ~b  */
(simplify
 (bit_not (bit_and:cs (bit_not @0) @1))
 (bit_ior @0 (bit_not @1)))

/* ~(~a | b) --> a & ~b */
(simplify
 (bit_not (bit_ior:cs (bit_not @0) @1))
 (bit_and @0 (bit_not @1)))

/* (a ^ b) & ((b ^ c) ^ a) --> (a ^ b) & ~c */
(simplify
 (bit_and:c (bit_xor:c@3 @0 @1) (bit_xor:cs (bit_xor:cs @1 @2) @0))
 (bit_and @3 (bit_not @2)))

/* (a ^ b) | ((b ^ c) ^ a) --> (a ^ b) | c */
(simplify
 (bit_ior:c (bit_xor:c@3 @0 @1) (bit_xor:c (bit_xor:c @1 @2) @0))
 (bit_ior @3 @2))
/* (~X | C) ^ D -> (X | C) ^ (~D ^ C) if (~D ^ C) can be simplified.  */
(simplify
 (bit_xor:c (bit_ior:cs (bit_not:s @0) @1) @2)
 (bit_xor (bit_ior @0 @1) (bit_xor! (bit_not! @2) @1)))

/* (~X & C) ^ D -> (X & C) ^ (D ^ C) if (D ^ C) can be simplified.  */
(simplify
 (bit_xor:c (bit_and:cs (bit_not:s @0) @1) @2)
 (bit_xor (bit_and @0 @1) (bit_xor! @2 @1)))

/* Simplify (~X & Y) to X ^ Y if we know that (X & ~Y) is 0.  */
(simplify
 (bit_and (bit_not SSA_NAME@0) INTEGER_CST@1)
 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
      && wi::bit_and_not (get_nonzero_bits (@0), wi::to_wide (@1)) == 0)
  (bit_xor @0 @1)))
/* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
   ((A & N) + B) & M -> (A + B) & M
   Similarly if (N & M) == 0,
   ((A | N) + B) & M -> (A + B) & M
   and for - instead of + (or unary - instead of +)
   and/or ^ instead of |.
   If B is constant and (B & M) == 0, fold into A & M.  */
(for op (plus minus)
 (for bitop (bit_and bit_ior bit_xor)
  (simplify
   (bit_and (op:s (bitop:s@0 @3 INTEGER_CST@4) @1) INTEGER_CST@2)
   (with
    { tree pmop[2];
      tree utype = fold_bit_and_mask (TREE_TYPE (@0), @2, op, @0, bitop,
                                      @3, @4, @1, ERROR_MARK, NULL_TREE,
                                      NULL_TREE, pmop); }
    (if (utype)
     (convert (bit_and (op (convert:utype { pmop[0]; })
                           (convert:utype { pmop[1]; }))
                       (convert:utype @2))))))
  (simplify
   (bit_and (op:s @0 (bitop:s@1 @3 INTEGER_CST@4)) INTEGER_CST@2)
   (with
    { tree pmop[2];
      tree utype = fold_bit_and_mask (TREE_TYPE (@0), @2, op, @0, ERROR_MARK,
                                      NULL_TREE, NULL_TREE, @1, bitop, @3,
                                      @4, pmop); }
    (if (utype)
     (convert (bit_and (op (convert:utype { pmop[0]; })
                           (convert:utype { pmop[1]; }))
                       (convert:utype @2)))))))
 (simplify
  (bit_and (op:s @0 @1) INTEGER_CST@2)
  (with
   { tree pmop[2];
     tree utype = fold_bit_and_mask (TREE_TYPE (@0), @2, op, @0, ERROR_MARK,
                                     NULL_TREE, NULL_TREE, @1, ERROR_MARK,
                                     NULL_TREE, NULL_TREE, pmop); }
   (if (utype)
    (convert (bit_and (op (convert:utype { pmop[0]; })
                          (convert:utype { pmop[1]; }))
                      (convert:utype @2)))))))
(for bitop (bit_and bit_ior bit_xor)
 (simplify
  (bit_and (negate:s (bitop:s@0 @2 INTEGER_CST@3)) INTEGER_CST@1)
  (with
   { tree pmop[2];
     tree utype = fold_bit_and_mask (TREE_TYPE (@0), @1, NEGATE_EXPR, @0,
                                     bitop, @2, @3, NULL_TREE, ERROR_MARK,
                                     NULL_TREE, NULL_TREE, pmop); }
   (if (utype)
    (convert (bit_and (negate (convert:utype { pmop[0]; }))
                      (convert:utype @1)))))))
/* X % Y is smaller than Y.  */
(for cmp (lt ge)
 (simplify
  (cmp (trunc_mod @0 @1) @1)
  (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
   { constant_boolean_node (cmp == LT_EXPR, type); })))
(for cmp (gt le)
 (simplify
  (cmp @1 (trunc_mod @0 @1))
  (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
   { constant_boolean_node (cmp == GT_EXPR, type); })))
/* x | ~0 -> ~0  */
(simplify
 (bit_ior @0 integer_all_onesp@1)
 @1)

/* x | 0 -> x  */
(simplify
 (bit_ior @0 integer_zerop)
 @0)

/* x & 0 -> 0  */
(simplify
 (bit_and @0 integer_zerop@1)
 @1)

/* ~x | x -> -1 */
/* ~x ^ x -> -1 */
(for op (bit_ior bit_xor)
 (simplify
  (op:c (convert? @0) (convert? (bit_not @0)))
  (convert { build_all_ones_cst (TREE_TYPE (@0)); })))

/* x ^ x -> 0 */
(simplify
 (bit_xor @0 @0)
 { build_zero_cst (type); })

/* Canonicalize X ^ ~0 to ~X.  */
(simplify
 (bit_xor @0 integer_all_onesp@1)
 (bit_not @0))

/* x & ~0 -> x  */
(simplify
 (bit_and @0 integer_all_onesp)
 (non_lvalue @0))

/* x & x -> x,  x | x -> x  */
(for bitop (bit_and bit_ior)
 (simplify
  (bitop @0 @0)
  (non_lvalue @0)))

/* x & C -> x if we know that x & ~C == 0.  */
#if GIMPLE
(simplify
 (bit_and SSA_NAME@0 INTEGER_CST@1)
 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
      && wi::bit_and_not (get_nonzero_bits (@0), wi::to_wide (@1)) == 0)
  @0))
#endif
/* ~(~X - Y) -> X + Y and ~(~X + Y) -> X - Y.  */
(simplify
 (bit_not (minus (bit_not @0) @1))
 (plus @0 @1))
(simplify
 (bit_not (plus:c (bit_not @0) @1))
 (minus @0 @1))
/* (~X - ~Y) -> Y - X.  */
(simplify
 (minus (bit_not @0) (bit_not @1))
 (if (!TYPE_OVERFLOW_SANITIZED (type))
  (with { tree utype = unsigned_type_for (type); }
   (convert (minus (convert:utype @1) (convert:utype @0))))))

/* ~(X - Y) -> ~X + Y.  */
(simplify
 (bit_not (minus:s @0 @1))
 (plus (bit_not @0) @1))
(simplify
 (bit_not (plus:s @0 INTEGER_CST@1))
 (if ((INTEGRAL_TYPE_P (type)
       && TYPE_UNSIGNED (type))
      || (!TYPE_OVERFLOW_SANITIZED (type)
          && may_negate_without_overflow_p (@1)))
  (plus (bit_not @0) { const_unop (NEGATE_EXPR, type, @1); })))
/* ~X + Y -> (Y - X) - 1.  */
(simplify
 (plus:c (bit_not @0) @1)
 (if (ANY_INTEGRAL_TYPE_P (type)
      && TYPE_OVERFLOW_WRAPS (type)
      /* -1 - X is folded to ~X, so we'd recurse endlessly.  */
      && !integer_all_onesp (@1))
  (plus (minus @1 @0) { build_minus_one_cst (type); })
  (if (INTEGRAL_TYPE_P (type)
       && TREE_CODE (@1) == INTEGER_CST
       && wi::to_wide (@1) != wi::min_value (TYPE_PRECISION (type),
                                             SIGNED))
   (minus (plus @1 { build_minus_one_cst (type); }) @0))))
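
/* Illustrative derivation (not part of the upstream pattern set): in
   two's complement ~X == -X - 1, hence
     ~X + Y == (Y - X) - 1
   e.g. X = 3, Y = 10: ~3 == -4 and -4 + 10 == 6 == (10 - 3) - 1.
   Wrapping overflow is required since Y - X may wrap in cases where
   ~X + Y does not.  */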
/* ~(X >> Y) -> ~X >> Y if ~X can be simplified.  */
(simplify
 (bit_not (rshift:s @0 @1))
 (if (!TYPE_UNSIGNED (TREE_TYPE (@0)))
  (rshift (bit_not! @0) @1)
  /* For logical right shifts, this is possible only if @0 doesn't
     have MSB set and the logical right shift is changed into
     arithmetic shift.  */
  (if (INTEGRAL_TYPE_P (type)
       && !wi::neg_p (tree_nonzero_bits (@0)))
   (with { tree stype = signed_type_for (TREE_TYPE (@0)); }
    (convert (rshift (bit_not! (convert:stype @0)) @1))))))

/* x + (x & 1) -> (x + 1) & ~1 */
(simplify
 (plus:c @0 (bit_and:s @0 integer_onep@1))
 (bit_and (plus @0 @1) (bit_not @1)))
/* x & ~(x & y) -> x & ~y */
/* x | ~(x | y) -> x | ~y  */
(for bitop (bit_and bit_ior)
 (simplify
  (bitop:c @0 (bit_not (bitop:cs @0 @1)))
  (bitop @0 (bit_not @1))))

/* (~x & y) | ~(x | y) -> ~x */
(simplify
 (bit_ior:c (bit_and:c (bit_not@2 @0) @1) (bit_not (bit_ior:c @0 @1)))
 @2)

/* (x | y) ^ (x | ~y) -> ~x */
(simplify
 (bit_xor:c (bit_ior:c @0 @1) (bit_ior:c @0 (bit_not @1)))
 (bit_not @0))

/* (x & y) | ~(x | y) -> ~(x ^ y) */
(simplify
 (bit_ior:c (bit_and:s @0 @1) (bit_not:s (bit_ior:s @0 @1)))
 (bit_not (bit_xor @0 @1)))

/* (~x | y) ^ (x ^ y) -> x | ~y */
(simplify
 (bit_xor:c (bit_ior:cs (bit_not @0) @1) (bit_xor:s @0 @1))
 (bit_ior @0 (bit_not @1)))

/* (x ^ y) | ~(x | y) -> ~(x & y) */
(simplify
 (bit_ior:c (bit_xor:s @0 @1) (bit_not:s (bit_ior:s @0 @1)))
 (bit_not (bit_and @0 @1)))

/* (x | y) & ~x -> y & ~x */
/* (x & y) | ~x -> y | ~x */
(for bitop (bit_and bit_ior)
     rbitop (bit_ior bit_and)
 (simplify
  (bitop:c (rbitop:c @0 @1) (bit_not@2 @0))
  (bitop @1 @2)))
/* (x & y) ^ (x | y) -> x ^ y */
(simplify
 (bit_xor:c (bit_and @0 @1) (bit_ior @0 @1))
 (bit_xor @0 @1))

/* (x ^ y) ^ (x | y) -> x & y */
(simplify
 (bit_xor:c (bit_xor @0 @1) (bit_ior @0 @1))
 (bit_and @0 @1))

/* (x & y) + (x ^ y) -> x | y */
/* (x & y) | (x ^ y) -> x | y */
/* (x & y) ^ (x ^ y) -> x | y */
(for op (plus bit_ior bit_xor)
 (simplify
  (op:c (bit_and @0 @1) (bit_xor @0 @1))
  (bit_ior @0 @1)))

/* (x & y) + (x | y) -> x + y */
(simplify
 (plus:c (bit_and @0 @1) (bit_ior @0 @1))
 (plus @0 @1))

/* (x + y) - (x | y) -> x & y */
(simplify
 (minus (plus @0 @1) (bit_ior @0 @1))
 (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type)
      && !TYPE_SATURATING (type))
  (bit_and @0 @1)))

/* (x + y) - (x & y) -> x | y */
(simplify
 (minus (plus @0 @1) (bit_and @0 @1))
 (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type)
      && !TYPE_SATURATING (type))
  (bit_ior @0 @1)))
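
/* Illustrative note (not part of the upstream pattern set): these follow
   from the carry-free decomposition of addition,
     x + y == (x & y) + (x | y) == (x ^ y) + 2 * (x & y)
   e.g. x = 6, y = 3: (6 & 3) + (6 | 3) == 2 + 7 == 9 == 6 + 3,
   so subtracting (x | y) from x + y leaves exactly x & y.  */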
/* (x | y) - y -> (x & ~y) */
(simplify
 (minus (bit_ior:cs @0 @1) @1)
 (bit_and @0 (bit_not @1)))

/* (x | y) - (x ^ y) -> x & y */
(simplify
 (minus (bit_ior @0 @1) (bit_xor @0 @1))
 (bit_and @0 @1))

/* (x | y) - (x & y) -> x ^ y */
(simplify
 (minus (bit_ior @0 @1) (bit_and @0 @1))
 (bit_xor @0 @1))

/* (x | y) & ~(x & y) -> x ^ y */
(simplify
 (bit_and:c (bit_ior @0 @1) (bit_not (bit_and @0 @1)))
 (bit_xor @0 @1))

/* (x | y) & (~x ^ y) -> x & y */
(simplify
 (bit_and:c (bit_ior:c @0 @1) (bit_xor:c @1 (bit_not @0)))
 (bit_and @0 @1))

/* (~x | y) & (x | ~y) -> ~(x ^ y) */
(simplify
 (bit_and (bit_ior:cs (bit_not @0) @1) (bit_ior:cs @0 (bit_not @1)))
 (bit_not (bit_xor @0 @1)))

/* (~x | y) ^ (x | ~y) -> x ^ y */
(simplify
 (bit_xor (bit_ior:c (bit_not @0) @1) (bit_ior:c @0 (bit_not @1)))
 (bit_xor @0 @1))
/* ((x & y) - (x | y)) - 1 -> ~(x ^ y) */
(simplify
 (plus (nop_convert1? (minus@2 (nop_convert2? (bit_and:c @0 @1))
                               (nop_convert2? (bit_ior @0 @1))))
       integer_all_onesp)
 (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type)
      && !TYPE_SATURATING (type) && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2))
      && !TYPE_OVERFLOW_TRAPS (TREE_TYPE (@2))
      && !TYPE_SATURATING (TREE_TYPE (@2)))
  (bit_not (convert (bit_xor @0 @1)))))
(simplify
 (minus (nop_convert1? (plus@2 (nop_convert2? (bit_and:c @0 @1))
                               integer_all_onesp))
        (nop_convert3? (bit_ior @0 @1)))
 (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type)
      && !TYPE_SATURATING (type) && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2))
      && !TYPE_OVERFLOW_TRAPS (TREE_TYPE (@2))
      && !TYPE_SATURATING (TREE_TYPE (@2)))
  (bit_not (convert (bit_xor @0 @1)))))
(simplify
 (minus (nop_convert1? (bit_and @0 @1))
        (nop_convert2? (plus@2 (nop_convert3? (bit_ior:c @0 @1))
                               integer_onep)))
 (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type)
      && !TYPE_SATURATING (type) && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2))
      && !TYPE_OVERFLOW_TRAPS (TREE_TYPE (@2))
      && !TYPE_SATURATING (TREE_TYPE (@2)))
  (bit_not (convert (bit_xor @0 @1)))))
/* ~x & ~y -> ~(x | y)
   ~x | ~y -> ~(x & y) */
(for op (bit_and bit_ior)
     rop (bit_ior bit_and)
 (simplify
  (op (convert1? (bit_not @0)) (convert2? (bit_not @1)))
  (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
       && element_precision (type) <= element_precision (TREE_TYPE (@1)))
   (bit_not (rop (convert @0) (convert @1))))))
/* If we are XORing or adding two BIT_AND_EXPR's, both of which are and'ing
   with a constant, and the two constants have no bits in common,
   we should treat this as a BIT_IOR_EXPR since this may produce more
   simplifications.  */
(for op (bit_xor plus)
 (simplify
  (op (convert1? (bit_and@4 @0 INTEGER_CST@1))
      (convert2? (bit_and@5 @2 INTEGER_CST@3)))
  (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
       && tree_nop_conversion_p (type, TREE_TYPE (@2))
       && (wi::to_wide (@1) & wi::to_wide (@3)) == 0)
   (bit_ior (convert @4) (convert @5)))))
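
/* Illustrative note (not part of the upstream pattern set): when the
   masks are disjoint no bit position can carry, so + and ^ agree
   with |, e.g.
     (x & 0x0F) + (y & 0xF0) == (x & 0x0F) | (y & 0xF0)
   for all x and y, since (0x0F & 0xF0) == 0.  */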
/* (X | Y) ^ X -> Y & ~X.  */
(simplify
 (bit_xor:c (convert1? (bit_ior:c @@0 @1)) (convert2? @0))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (convert (bit_and @1 (bit_not @0)))))

/* Convert ~X ^ ~Y to X ^ Y.  */
(simplify
 (bit_xor (convert1? (bit_not @0)) (convert2? (bit_not @1)))
 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
      && element_precision (type) <= element_precision (TREE_TYPE (@1)))
  (bit_xor (convert @0) (convert @1))))

/* Convert ~X ^ C to X ^ ~C.  */
(simplify
 (bit_xor (convert? (bit_not @0)) INTEGER_CST@1)
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (bit_xor (convert @0) (bit_not @1))))

/* Fold (X & Y) ^ Y and (X ^ Y) & Y as ~X & Y.  */
(for opo (bit_and bit_xor)
     opi (bit_xor bit_and)
 (simplify
  (opo:c (opi:cs @0 @1) @1)
  (bit_and (bit_not @0) @1)))
/* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both
   operands are another bit-wise operation with a common input.  If so,
   distribute the bit operations to save an operation and possibly two if
   constants are involved.  For example, convert
     (A | B) & (A | C) into A | (B & C)
   Further simplification will occur if B and C are constants.  */
(for op (bit_and bit_ior bit_xor)
     rop (bit_ior bit_and bit_and)
 (simplify
  (op (convert? (rop:c @@0 @1)) (convert? (rop:c @0 @2)))
  (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
       && tree_nop_conversion_p (type, TREE_TYPE (@2)))
   (rop (convert @0) (op (convert @1) (convert @2))))))
/* Some simple reassociation for bit operations, also handled in reassoc.  */
/* (X & Y) & Y -> X & Y
   (X | Y) | Y -> X | Y  */
(for op (bit_and bit_ior)
 (simplify
  (op:c (convert1?@2 (op:c @0 @@1)) (convert2? @1))
  @2))
/* (X ^ Y) ^ Y -> X  */
(simplify
 (bit_xor:c (convert1? (bit_xor:c @0 @@1)) (convert2? @1))
 (convert @0))
/* (X & Y) & (X & Z) -> (X & Y) & Z
   (X | Y) | (X | Z) -> (X | Y) | Z  */
(for op (bit_and bit_ior)
 (simplify
  (op (convert1?@3 (op:c@4 @0 @1)) (convert2?@5 (op:c@6 @0 @2)))
  (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
       && tree_nop_conversion_p (type, TREE_TYPE (@2)))
   (if (single_use (@5) && single_use (@6))
    (op @3 (convert @2))
    (if (single_use (@3) && single_use (@4))
     (op (convert @1) @5))))))
/* (X ^ Y) ^ (X ^ Z) -> Y ^ Z  */
(simplify
 (bit_xor (convert1? (bit_xor:c @0 @1)) (convert2? (bit_xor:c @0 @2)))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
      && tree_nop_conversion_p (type, TREE_TYPE (@2)))
  (bit_xor (convert @1) (convert @2))))
/* Convert abs (abs (X)) into abs (X).
   Also absu (absu (X)) into absu (X).  */
(simplify
 (abs (abs@1 @0))
 @1)

(simplify
 (absu (convert@2 (absu@1 @0)))
 (if (tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@1)))
  @1))

/* Convert abs[u] (-X) -> abs[u] (X).  */
(simplify
 (abs (negate @0))
 (abs @0))
(simplify
 (absu (negate @0))
 (absu @0))

/* Convert abs[u] (X) where X is nonnegative -> (X).  */
(simplify
 (abs tree_expr_nonnegative_p@0)
 @0)
(simplify
 (absu tree_expr_nonnegative_p@0)
 (convert @0))

/* Simplify (-(X < 0) | 1) * X into abs (X) or absu(X).  */
(simplify
 (mult:c (nop_convert1?
          (bit_ior (nop_convert2? (negate (convert? (lt @0 integer_zerop))))
                   integer_onep))
         (nop_convert3? @0))
 (if (INTEGRAL_TYPE_P (type)
      && INTEGRAL_TYPE_P (TREE_TYPE (@0))
      && !TYPE_UNSIGNED (TREE_TYPE (@0)))
  (if (TYPE_UNSIGNED (type))
   (absu:type @0)
   (abs @0))))
/* A few cases of fold-const.cc negate_expr_p predicate.  */
(match negate_expr_p
 INTEGER_CST
 (if ((INTEGRAL_TYPE_P (type)
       && TYPE_UNSIGNED (type))
      || (!TYPE_OVERFLOW_SANITIZED (type)
          && may_negate_without_overflow_p (t)))))
(match negate_expr_p
 FIXED_CST)
(match negate_expr_p
 (negate @0)
 (if (!TYPE_OVERFLOW_SANITIZED (type))))
(match negate_expr_p
 REAL_CST
 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (t)))))
/* VECTOR_CST handling of non-wrapping types would recurse in unsupported
   ways.  */
(match negate_expr_p
 VECTOR_CST
 (if (FLOAT_TYPE_P (TREE_TYPE (type)) || TYPE_OVERFLOW_WRAPS (type))))
(match negate_expr_p
 (minus @0 @1)
 (if ((ANY_INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_WRAPS (type))
      || (FLOAT_TYPE_P (type)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (type)
          && !HONOR_SIGNED_ZEROS (type)))))
/* (-A) * (-B) -> A * B  */
(simplify
 (mult:c (convert1? (negate @0)) (convert2? negate_expr_p@1))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
      && tree_nop_conversion_p (type, TREE_TYPE (@1)))
  (mult (convert @0) (convert (negate @1)))))

/* -(A + B) -> (-B) - A.  */
(simplify
 (negate (plus:c @0 negate_expr_p@1))
 (if (!HONOR_SIGN_DEPENDENT_ROUNDING (type)
      && !HONOR_SIGNED_ZEROS (type))
  (minus (negate @1) @0)))

/* -(A - B) -> B - A.  */
(simplify
 (negate (minus @0 @1))
 (if ((ANY_INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_SANITIZED (type))
      || (FLOAT_TYPE_P (type)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (type)
          && !HONOR_SIGNED_ZEROS (type)))
  (minus @1 @0)))
(simplify
 (negate (pointer_diff @0 @1))
 (if (TYPE_OVERFLOW_UNDEFINED (type))
  (pointer_diff @1 @0)))
/* A - B -> A + (-B) if B is easily negatable.  */
(simplify
 (minus @0 negate_expr_p@1)
 (if (!FIXED_POINT_TYPE_P (type))
  (plus @0 (negate @1))))

/* 1 - a is a ^ 1 if a had a bool range.  */
/* This is only enabled for gimple as sometimes
   cfun is not set for the function which contains
   the SSA_NAME (e.g. while IPA passes are happening,
   fold might be called).  */
#if GIMPLE
(simplify
 (minus integer_onep@0 SSA_NAME@1)
 (if (INTEGRAL_TYPE_P (type)
      && ssa_name_has_boolean_range (@1))
  (bit_xor @1 @0)))
#endif
/* Other simplifications of negation (c.f. fold_negate_expr_1).  */
(simplify
 (negate (mult:c@0 @1 negate_expr_p@2))
 (if (! TYPE_UNSIGNED (type)
      && ! HONOR_SIGN_DEPENDENT_ROUNDING (type)
      && single_use (@0))
  (mult @1 (negate @2))))

(simplify
 (negate (rdiv@0 @1 negate_expr_p@2))
 (if (! HONOR_SIGN_DEPENDENT_ROUNDING (type)
      && single_use (@0))
  (rdiv @1 (negate @2))))

(simplify
 (negate (rdiv@0 negate_expr_p@1 @2))
 (if (! HONOR_SIGN_DEPENDENT_ROUNDING (type)
      && single_use (@0))
  (rdiv (negate @1) @2)))

/* Fold -((int)x >> (prec - 1)) into (unsigned)x >> (prec - 1).  */
(simplify
 (negate (convert? (rshift @0 INTEGER_CST@1)))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
      && wi::to_wide (@1) == element_precision (type) - 1)
  (with { tree stype = TREE_TYPE (@0);
          tree ntype = TYPE_UNSIGNED (stype) ? signed_type_for (stype)
                                             : unsigned_type_for (stype); }
   (if (VECTOR_TYPE_P (type))
    (view_convert (rshift (view_convert:ntype @0) @1))
    (convert (rshift (convert:ntype @0) @1))))))
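
/* Worked example (illustrative only), 32-bit int x:
     x >> 31            is 0 for x >= 0 and -1 for x < 0 (arithmetic shift)
     -(x >> 31)         is therefore 0 or 1
     (unsigned) x >> 31 is the sign bit, also 0 or 1
   so the negation folds away by switching the signedness of the shift.  */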
1841 /* Try to fold (type) X op CST -> (type) (X op ((type-x) CST))
1843 For bitwise binary operations apply operand conversions to the
1844 binary operation result instead of to the operands. This allows
1845 to combine successive conversions and bitwise binary operations.
1846 We combine the above two cases by using a conditional convert. */
1847 (for bitop (bit_and bit_ior bit_xor)
1849 (bitop (convert@2 @0) (convert?@3 @1))
1850 (if (((TREE_CODE (@1) == INTEGER_CST
1851 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
1852 && (int_fits_type_p (@1, TREE_TYPE (@0))
1853 || tree_nop_conversion_p (TREE_TYPE (@0), type)))
1854 || types_match (@0, @1))
1855 && !POINTER_TYPE_P (TREE_TYPE (@0))
1856 && !VECTOR_TYPE_P (TREE_TYPE (@0))
1857 && TREE_CODE (TREE_TYPE (@0)) != OFFSET_TYPE
1858 /* ??? This transform conflicts with fold-const.cc doing
1859 Convert (T)(x & c) into (T)x & (T)c, if c is an integer
1860 constant (if x has signed type, the sign bit cannot be set
1861 in c). This folds extension into the BIT_AND_EXPR.
1862 Restrict it to GIMPLE to avoid endless recursions. */
1863 && (bitop != BIT_AND_EXPR || GIMPLE)
1864 && (/* That's a good idea if the conversion widens the operand, thus
1865 after hoisting the conversion the operation will be narrower.
1866 It is also good if the conversion is a nop, as it moves the
1867 conversion to one side, allowing the conversions to be combined. */
1868 TYPE_PRECISION (TREE_TYPE (@0)) < TYPE_PRECISION (type)
1869 /* The conversion check for being a nop can only be done at the gimple
1870 level as fold_binary has some re-association code which can conflict
1871 with this if there is a "constant" which is not a full INTEGER_CST. */
1872 || (GIMPLE && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (type))
1873 /* It's also a good idea if the conversion is to a non-integer
1874 mode. */
1875 || GET_MODE_CLASS (TYPE_MODE (type)) != MODE_INT
1876 /* Or if the precision of TO is not the same as the precision
1877 of its mode. */
1878 || !type_has_mode_precision_p (type)
1879 /* In GIMPLE, getting rid of 2 conversions for one new results
1880 in smaller IL. */
1882 && TREE_CODE (@1) != INTEGER_CST
1883 && tree_nop_conversion_p (type, TREE_TYPE (@0))
1885 && single_use (@3))))
1886 (convert (bitop @0 (convert @1)))))
1887 /* In GIMPLE, getting rid of 2 conversions for one new results
1888 in smaller IL. */
1890 (convert (bitop:cs@2 (nop_convert:s @0) @1))
1892 && TREE_CODE (@1) != INTEGER_CST
1893 && tree_nop_conversion_p (type, TREE_TYPE (@2))
1894 && types_match (type, @0)
1895 && !POINTER_TYPE_P (TREE_TYPE (@0))
1896 && TREE_CODE (TREE_TYPE (@0)) != OFFSET_TYPE)
1897 (bitop @0 (convert @1)))))
1899 (for bitop (bit_and bit_ior)
1900 rbitop (bit_ior bit_and)
1901 /* (x | y) & x -> x */
1902 /* (x & y) | x -> x */
1904 (bitop:c (rbitop:c @0 @1) @0)
1906 /* (~x | y) & x -> x & y */
1907 /* (~x & y) | x -> x | y */
1909 (bitop:c (rbitop:c (bit_not @0) @1) @0)
1912 /* ((x | y) & z) | x -> (z & y) | x */
1914 (bit_ior:c (bit_and:cs (bit_ior:cs @0 @1) @2) @0)
1915 (bit_ior (bit_and @2 @1) @0))
1917 /* (x | CST1) & CST2 -> (x & CST2) | (CST1 & CST2) */
1919 (bit_and (bit_ior @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
1920 (bit_ior (bit_and @0 @2) (bit_and! @1 @2)))
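/* Sketch of the distribution above with invented constants:

     unsigned f (unsigned x)
     {
       return (x | 0xf0) & 0xff0;   // expected: (x & 0xff0) | 0xf0
     }

   since 0xf0 & 0xff0 folds to the constant 0xf0.  */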
1922 /* Combine successive equal operations with constants. */
1923 (for bitop (bit_and bit_ior bit_xor)
1925 (bitop (bitop @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
1926 (if (!CONSTANT_CLASS_P (@0))
1927 /* This is the canonical form regardless of whether (bitop @1 @2) can be
1928 folded to a constant. */
1929 (bitop @0 (bitop! @1 @2))
1930 /* In this case we have three constants and (bitop @0 @1) doesn't fold
1931 to a constant. This can happen if @0 or @1 is a POLY_INT_CST and if
1932 the values involved are such that the operation can't be decided at
1933 compile time. Try folding one of @0 or @1 with @2 to see whether
1934 that combination can be decided at compile time.
1936 Keep the existing form if both folds fail, to avoid endless
1938 (with { tree cst1 = const_binop (bitop, type, @0, @2); }
1940 (bitop @1 { cst1; })
1941 (with { tree cst2 = const_binop (bitop, type, @1, @2); }
1943 (bitop @0 { cst2; }))))))))
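/* The common case of the canonicalization above, with made-up constants:

     unsigned f (unsigned x)
     {
       return (x | 0x3) | 0xc;   // expected to combine into x | 0xf
     }

   The POLY_INT_CST fallback only matters on variable-length vector
   targets where the constant fold can fail.  */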
1945 /* Try simple folding for X op !X, and X op X with the help
1946 of the truth_valued_p and logical_inverted_value predicates. */
1947 (match truth_valued_p
1949 (if (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) == 1)))
1950 (for op (tcc_comparison truth_and truth_andif truth_or truth_orif truth_xor)
1951 (match truth_valued_p
1953 (match truth_valued_p
1956 (match (logical_inverted_value @0)
1958 (match (logical_inverted_value @0)
1959 (bit_not truth_valued_p@0))
1960 (match (logical_inverted_value @0)
1961 (eq @0 integer_zerop))
1962 (match (logical_inverted_value @0)
1963 (ne truth_valued_p@0 integer_truep))
1964 (match (logical_inverted_value @0)
1965 (bit_xor truth_valued_p@0 integer_truep))
1969 (bit_and:c @0 (logical_inverted_value @0))
1970 { build_zero_cst (type); })
1971 /* X | !X and X ^ !X -> 1 if X is truth-valued. */
1972 (for op (bit_ior bit_xor)
1974 (op:c truth_valued_p@0 (logical_inverted_value @0))
1975 { constant_boolean_node (true, type); }))
1976 /* X ==/!= !X is false/true. */
1979 (op:c truth_valued_p@0 (logical_inverted_value @0))
1980 { constant_boolean_node (op == NE_EXPR ? true : false, type); }))
1984 (bit_not (bit_not @0))
1987 /* zero_one_valued_p will match when a value is known to be either
1988 0 or 1, including the constants 0 and 1.
1989 Signed 1-bit types include -1, so they cannot match here. */
1990 (match zero_one_valued_p
1992 (if (INTEGRAL_TYPE_P (type)
1993 && (TYPE_UNSIGNED (type)
1994 || TYPE_PRECISION (type) > 1)
1995 && wi::leu_p (tree_nonzero_bits (@0), 1))))
1996 (match zero_one_valued_p
1998 (if (INTEGRAL_TYPE_P (type)
1999 && (TYPE_UNSIGNED (type)
2000 || TYPE_PRECISION (type) > 1))))
2002 /* Transform { 0 or 1 } * { 0 or 1 } into { 0 or 1 } & { 0 or 1 }. */
2004 (mult zero_one_valued_p@0 zero_one_valued_p@1)
2005 (if (INTEGRAL_TYPE_P (type))
2008 (for cmp (tcc_comparison)
2009 icmp (inverted_tcc_comparison)
2010 /* Fold (((a < b) & c) | ((a >= b) & d)) into (a < b ? c : d) & 1. */
2013 (bit_and:c (convert? (cmp@0 @01 @02)) @3)
2014 (bit_and:c (convert? (icmp@4 @01 @02)) @5))
2015 (if (INTEGRAL_TYPE_P (type)
2016 /* The scalar version has to be canonicalized after vectorization
2017 because it turns unconditional loads into conditional ones, which
2018 means we lose vectorization because the loads may trap. */
2019 && canonicalize_math_after_vectorization_p ())
2020 (bit_and (cond @0 @3 @5) { build_one_cst (type); })))
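/* A possible source form for the scalar fold above (names invented).
   It is only applied after vectorization, per the condition above:

     int f (int a, int b, int c, int d)
     {
       return ((a < b) & c) | ((a >= b) & d);   // -> (a < b ? c : d) & 1
     }
*/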
2022 /* Fold ((-(a < b) & c) | (-(a >= b) & d)) into a < b ? c : d. This is
2023 canonicalized further and we recognize the conditional form:
2024 (a < b ? c : 0) | (a >= b ? d : 0) into a < b ? c : d. */
2027 (cond (cmp@0 @01 @02) @3 zerop)
2028 (cond (icmp@4 @01 @02) @5 zerop))
2029 (if (INTEGRAL_TYPE_P (type)
2030 /* The scalar version has to be canonicalized after vectorization
2031 because it turns unconditional loads into conditional ones, which
2032 means we lose vectorization because the loads may trap. */
2033 && canonicalize_math_after_vectorization_p ())
2036 /* Vector fold (((a < b) & c) | ((a >= b) & d)) into a < b ? c : d
2037 and ((~(a < b) & c) | (~(a >= b) & d)) into a < b ? c : d. */
2040 (bit_and:c (vec_cond:s (cmp@0 @6 @7) @4 @5) @2)
2041 (bit_and:c (vec_cond:s (icmp@1 @6 @7) @4 @5) @3))
2042 (if (integer_zerop (@5))
2044 (if (integer_onep (@4))
2045 (bit_and (vec_cond @0 @2 @3) @4))
2046 (if (integer_minus_onep (@4))
2047 (vec_cond @0 @2 @3)))
2048 (if (integer_zerop (@4))
2050 (if (integer_onep (@5))
2051 (bit_and (vec_cond @0 @3 @2) @5))
2052 (if (integer_minus_onep (@5))
2053 (vec_cond @0 @3 @2))))))
2055 /* Scalar vectorized fold ((-(a < b) & c) | (-(a >= b) & d))
2056 into a < b ? c : d. */
2059 (vec_cond:s (cmp@0 @4 @5) @2 integer_zerop)
2060 (vec_cond:s (icmp@1 @4 @5) @3 integer_zerop))
2061 (vec_cond @0 @2 @3)))
2063 /* Transform X & -Y into X * Y when Y is { 0 or 1 }. */
2065 (bit_and:c (convert? (negate zero_one_valued_p@0)) @1)
2066 (if (INTEGRAL_TYPE_P (type)
2067 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
2068 && TREE_CODE (TREE_TYPE (@0)) != BOOLEAN_TYPE
2069 /* Sign extending of the neg or a truncation of the neg
2070 is needed. */
2071 && (!TYPE_UNSIGNED (TREE_TYPE (@0))
2072 || TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@0))))
2073 (mult (convert @0) @1)))
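/* Hedged sketch of the X & -Y -> X * Y transform, assuming y is known
   to be 0 or 1 (here because it is a comparison result):

     int f (int x, int a, int b)
     {
       int y = a == b;    // y is zero_one_valued_p
       return x & -y;     // expected to become x * y
     }
*/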
2075 /* Narrow integer multiplication by a zero_one_valued_p operand.
2076 Multiplication by [0,1] is guaranteed not to overflow. */
2078 (convert (mult@0 zero_one_valued_p@1 INTEGER_CST@2))
2079 (if (INTEGRAL_TYPE_P (type)
2080 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
2081 && TYPE_PRECISION (type) < TYPE_PRECISION (TREE_TYPE (@0)))
2082 (mult (convert @1) (convert @2))))
2084 /* (X << C) != 0 can be simplified to X, when X is zero_one_valued_p.
2085 Check that the shift is well-defined (C is less than TYPE_PRECISION)
2086 as some targets (such as x86's SSE) may return zero for larger C. */
2088 (ne (lshift zero_one_valued_p@0 INTEGER_CST@1) integer_zerop@2)
2089 (if (tree_fits_shwi_p (@1)
2090 && tree_to_shwi (@1) > 0
2091 && tree_to_shwi (@1) < TYPE_PRECISION (TREE_TYPE (@0)))
2094 /* (X << C) == 0 can be simplified to X == 0, when X is zero_one_valued_p.
2095 Check that the shift is well-defined (C is less than TYPE_PRECISION)
2096 as some targets (such as x86's SSE) may return zero for larger C. */
2098 (eq (lshift zero_one_valued_p@0 INTEGER_CST@1) integer_zerop@2)
2099 (if (tree_fits_shwi_p (@1)
2100 && tree_to_shwi (@1) > 0
2101 && tree_to_shwi (@1) < TYPE_PRECISION (TREE_TYPE (@0)))
2104 /* Convert ~ (-A) to A - 1. */
2106 (bit_not (convert? (negate @0)))
2107 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
2108 || !TYPE_UNSIGNED (TREE_TYPE (@0)))
2109 (convert (minus @0 { build_each_one_cst (TREE_TYPE (@0)); }))))
2111 /* Convert - (~A) to A + 1. */
2113 (negate (nop_convert? (bit_not @0)))
2114 (plus (view_convert @0) { build_each_one_cst (type); }))
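/* The two's-complement identities behind the previous two folds,
   sketched with invented names:

     int f (int a) { return ~(-a); }   // expected: a - 1
     int g (int a) { return -(~a); }   // expected: a + 1

   Both follow from -a == ~a + 1.  */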
2116 /* (a & b) ^ (a == b) -> !(a | b) */
2117 /* (a & b) == (a ^ b) -> !(a | b) */
2118 (for first_op (bit_xor eq)
2119 second_op (eq bit_xor)
2121 (first_op:c (bit_and:c truth_valued_p@0 truth_valued_p@1) (second_op:c @0 @1))
2122 (bit_not (bit_ior @0 @1))))
2124 /* Convert ~ (A - 1) or ~ (A + -1) to -A. */
2126 (bit_not (convert? (minus @0 integer_each_onep)))
2127 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
2128 || !TYPE_UNSIGNED (TREE_TYPE (@0)))
2129 (convert (negate @0))))
2131 (bit_not (convert? (plus @0 integer_all_onesp)))
2132 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
2133 || !TYPE_UNSIGNED (TREE_TYPE (@0)))
2134 (convert (negate @0))))
2136 /* Part of convert ~(X ^ Y) to ~X ^ Y or X ^ ~Y if ~X or ~Y simplify. */
2138 (bit_not (convert? (bit_xor @0 INTEGER_CST@1)))
2139 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
2140 (convert (bit_xor @0 (bit_not @1)))))
2142 (bit_not (convert? (bit_xor:c (bit_not @0) @1)))
2143 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
2144 (convert (bit_xor @0 @1))))
2146 /* Otherwise prefer ~(X ^ Y) to ~X ^ Y as more canonical. */
2148 (bit_xor:c (nop_convert?:s (bit_not:s @0)) @1)
2149 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
2150 (bit_not (bit_xor (view_convert @0) @1))))
2152 /* ~(a ^ b) is a == b for truth valued a and b. */
2154 (bit_not (bit_xor:s truth_valued_p@0 truth_valued_p@1))
2155 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
2156 && TYPE_PRECISION (TREE_TYPE (@0)) == 1)
2157 (convert (eq @0 @1))))
2159 /* (~a) == b is a ^ b for truth valued a and b. */
2161 (eq:c (bit_not:s truth_valued_p@0) truth_valued_p@1)
2162 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
2163 && TYPE_PRECISION (TREE_TYPE (@0)) == 1)
2164 (convert (bit_xor @0 @1))))
2166 /* (x & ~m) | (y & m) -> ((x ^ y) & m) ^ x */
2168 (bit_ior:c (bit_and:cs @0 (bit_not @2)) (bit_and:cs @1 @2))
2169 (bit_xor (bit_and (bit_xor @0 @1) @2) @0))
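/* The masked-merge identity above in C form (a made-up example):

     unsigned bit_select (unsigned x, unsigned y, unsigned m)
     {
       return (x & ~m) | (y & m);   // -> ((x ^ y) & m) ^ x
     }

   The rewritten form uses m once and avoids the negation.  */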
2171 /* Fold A - (A & B) into ~B & A. */
2173 (minus (convert1? @0) (convert2?:s (bit_and:cs @@0 @1)))
2174 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
2175 && tree_nop_conversion_p (type, TREE_TYPE (@1)))
2176 (convert (bit_and (bit_not @1) @0))))
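/* Hypothetical instance of the previous fold:

     unsigned f (unsigned a, unsigned b)
     {
       return a - (a & b);   // expected to fold to a & ~b
     }

   Valid because the set bits of a & b are a subset of those of a.  */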
2178 /* (m1 CMP m2) * d -> (m1 CMP m2) ? d : 0 */
2179 (if (!canonicalize_math_p ())
2180 (for cmp (tcc_comparison)
2182 (mult:c (convert (cmp@0 @1 @2)) @3)
2183 (if (INTEGRAL_TYPE_P (type)
2184 && INTEGRAL_TYPE_P (TREE_TYPE (@0)))
2185 (cond @0 @3 { build_zero_cst (type); })))
2186 /* (-(m1 CMP m2)) & d -> (m1 CMP m2) ? d : 0 */
2188 (bit_and:c (negate (convert (cmp@0 @1 @2))) @3)
2189 (if (INTEGRAL_TYPE_P (type)
2190 && INTEGRAL_TYPE_P (TREE_TYPE (@0)))
2191 (cond @0 @3 { build_zero_cst (type); })))
2195 /* For integral types with undefined overflow and C != 0 fold
2196 x * C EQ/NE y * C into x EQ/NE y. */
2199 (cmp (mult:c @0 @1) (mult:c @2 @1))
2200 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
2201 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
2202 && tree_expr_nonzero_p (@1))
2205 /* For integral types with wrapping overflow and C odd fold
2206 x * C EQ/NE y * C into x EQ/NE y. */
2209 (cmp (mult @0 INTEGER_CST@1) (mult @2 @1))
2210 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
2211 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))
2212 && (TREE_INT_CST_LOW (@1) & 1) != 0)
2215 /* For integral types with undefined overflow and C != 0 fold
2216 x * C RELOP y * C into:
2218 x RELOP y for nonnegative C
2219 y RELOP x for negative C */
2220 (for cmp (lt gt le ge)
2222 (cmp (mult:c @0 @1) (mult:c @2 @1))
2223 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
2224 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
2225 (if (tree_expr_nonnegative_p (@1) && tree_expr_nonzero_p (@1))
2227 (if (TREE_CODE (@1) == INTEGER_CST
2228 && wi::neg_p (wi::to_wide (@1), TYPE_SIGN (TREE_TYPE (@1))))
2231 /* (X - 1U) <= INT_MAX-1U into (int) X > 0. */
2235 (cmp (plus @0 integer_minus_onep@1) INTEGER_CST@2)
2236 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
2237 && TYPE_UNSIGNED (TREE_TYPE (@0))
2238 && TYPE_PRECISION (TREE_TYPE (@0)) > 1
2239 && (wi::to_wide (@2)
2240 == wi::max_value (TYPE_PRECISION (TREE_TYPE (@0)), SIGNED) - 1))
2241 (with { tree stype = signed_type_for (TREE_TYPE (@0)); }
2242 (icmp (convert:stype @0) { build_int_cst (stype, 0); })))))
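/* Assuming 32-bit unsigned, a sketch of the range-test rewrite above:

     int f (unsigned x)
     {
       return x - 1u <= 0x7ffffffeu;   // expected: (int) x > 0
     }

   Both sides are true exactly for x in [1, 0x7fffffff].  */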
2244 /* X / 4 < Y / 4 iff X < Y when the division is known to be exact. */
2245 (for cmp (simple_comparison)
2247 (cmp (convert?@3 (exact_div @0 INTEGER_CST@2)) (convert? (exact_div @1 @2)))
2248 (if (element_precision (@3) >= element_precision (@0)
2249 && types_match (@0, @1))
2250 (if (wi::lt_p (wi::to_wide (@2), 0, TYPE_SIGN (TREE_TYPE (@2))))
2251 (if (!TYPE_UNSIGNED (TREE_TYPE (@3)))
2253 (if (tree_expr_nonzero_p (@0) && tree_expr_nonzero_p (@1))
2256 tree utype = unsigned_type_for (TREE_TYPE (@0));
2258 (cmp (convert:utype @1) (convert:utype @0)))))
2259 (if (wi::gt_p (wi::to_wide (@2), 1, TYPE_SIGN (TREE_TYPE (@2))))
2260 (if (TYPE_UNSIGNED (TREE_TYPE (@0)) || !TYPE_UNSIGNED (TREE_TYPE (@3)))
2264 tree utype = unsigned_type_for (TREE_TYPE (@0));
2266 (cmp (convert:utype @0) (convert:utype @1)))))))))
2268 /* X / C1 op C2 into a simple range test. */
2269 (for cmp (simple_comparison)
2271 (cmp (trunc_div:s @0 INTEGER_CST@1) INTEGER_CST@2)
2272 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
2273 && integer_nonzerop (@1)
2274 && !TREE_OVERFLOW (@1)
2275 && !TREE_OVERFLOW (@2))
2276 (with { tree lo, hi; bool neg_overflow;
2277 enum tree_code code = fold_div_compare (cmp, @1, @2, &lo, &hi,
2280 (if (code == LT_EXPR || code == GE_EXPR)
2281 (if (TREE_OVERFLOW (lo))
2282 { build_int_cst (type, (code == LT_EXPR) ^ neg_overflow); }
2283 (if (code == LT_EXPR)
2286 (if (code == LE_EXPR || code == GT_EXPR)
2287 (if (TREE_OVERFLOW (hi))
2288 { build_int_cst (type, (code == LE_EXPR) ^ neg_overflow); }
2289 (if (code == LE_EXPR)
2293 { build_int_cst (type, code == NE_EXPR); })
2294 (if (code == EQ_EXPR && !hi)
2296 (if (code == EQ_EXPR && !lo)
2298 (if (code == NE_EXPR && !hi)
2300 (if (code == NE_EXPR && !lo)
2303 { build_range_check (UNKNOWN_LOCATION, type, @0, code == EQ_EXPR,
2307 tree etype = range_check_type (TREE_TYPE (@0));
2310 hi = fold_convert (etype, hi);
2311 lo = fold_convert (etype, lo);
2312 hi = const_binop (MINUS_EXPR, etype, hi, lo);
2315 (if (etype && hi && !TREE_OVERFLOW (hi))
2316 (if (code == EQ_EXPR)
2317 (le (minus (convert:etype @0) { lo; }) { hi; })
2318 (gt (minus (convert:etype @0) { lo; }) { hi; })))))))))
2320 /* X + Z < Y + Z is the same as X < Y when there is no overflow. */
2321 (for op (lt le ge gt)
2323 (op (plus:c @0 @2) (plus:c @1 @2))
2324 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2325 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
2328 /* As a special case, X + C < Y + C is the same as (signed) X < (signed) Y
2329 when C is an unsigned integer constant with only the MSB set, and X and
2330 Y have types of equal or lower integer conversion rank than C's. */
2331 (for op (lt le ge gt)
2333 (op (plus @1 INTEGER_CST@0) (plus @2 @0))
2334 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
2335 && TYPE_UNSIGNED (TREE_TYPE (@0))
2336 && wi::only_sign_bit_p (wi::to_wide (@0)))
2337 (with { tree stype = signed_type_for (TREE_TYPE (@0)); }
2338 (op (convert:stype @1) (convert:stype @2))))))
2340 /* For equality and subtraction, this is also true with wrapping overflow. */
2341 (for op (eq ne minus)
2343 (op (plus:c @0 @2) (plus:c @1 @2))
2344 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2345 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
2346 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
2349 /* X - Z < Y - Z is the same as X < Y when there is no overflow. */
2350 (for op (lt le ge gt)
2352 (op (minus @0 @2) (minus @1 @2))
2353 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2354 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
2356 /* For equality and subtraction, this is also true with wrapping overflow. */
2357 (for op (eq ne minus)
2359 (op (minus @0 @2) (minus @1 @2))
2360 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2361 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
2362 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
2364 /* And for pointers... */
2365 (for op (simple_comparison)
2367 (op (pointer_diff@3 @0 @2) (pointer_diff @1 @2))
2368 (if (!TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
2371 (minus (pointer_diff@3 @0 @2) (pointer_diff @1 @2))
2372 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@3))
2373 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
2374 (pointer_diff @0 @1)))
2376 /* Z - X < Z - Y is the same as Y < X when there is no overflow. */
2377 (for op (lt le ge gt)
2379 (op (minus @2 @0) (minus @2 @1))
2380 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2381 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
2383 /* For equality and subtraction, this is also true with wrapping overflow. */
2384 (for op (eq ne minus)
2386 (op (minus @2 @0) (minus @2 @1))
2387 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2388 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
2389 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
2391 /* And for pointers... */
2392 (for op (simple_comparison)
2394 (op (pointer_diff@3 @2 @0) (pointer_diff @2 @1))
2395 (if (!TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
2398 (minus (pointer_diff@3 @2 @0) (pointer_diff @2 @1))
2399 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@3))
2400 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
2401 (pointer_diff @1 @0)))
2403 /* X + Y < Y is the same as X < 0 when there is no overflow. */
2404 (for op (lt le gt ge)
2406 (op:c (plus:c@2 @0 @1) @1)
2407 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2408 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
2409 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0))
2410 && (CONSTANT_CLASS_P (@0) || single_use (@2)))
2411 (op @0 { build_zero_cst (TREE_TYPE (@0)); }))))
2412 /* For equality, this is also true with wrapping overflow. */
2415 (op:c (nop_convert?@3 (plus:c@2 @0 (convert1? @1))) (convert2? @1))
2416 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2417 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
2418 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
2419 && (CONSTANT_CLASS_P (@0) || (single_use (@2) && single_use (@3)))
2420 && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@2))
2421 && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@1)))
2422 (op @0 { build_zero_cst (TREE_TYPE (@0)); })))
2424 (op:c (nop_convert?@3 (pointer_plus@2 (convert1? @0) @1)) (convert2? @0))
2425 (if (tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0))
2426 && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0))
2427 && (CONSTANT_CLASS_P (@1) || (single_use (@2) && single_use (@3))))
2428 (op @1 { build_zero_cst (TREE_TYPE (@1)); }))))
2430 /* (&a + b) !=/== (&a[1] + c) -> (&a[0] - &a[1]) + b !=/== c */
2433 (neeq:c ADDR_EXPR@0 (pointer_plus @2 @3))
2434 (with { poly_int64 diff; tree inner_type = TREE_TYPE (@3);}
2435 (if (ptr_difference_const (@0, @2, &diff))
2436 (neeq { build_int_cst_type (inner_type, diff); } @3))))
2438 (neeq (pointer_plus ADDR_EXPR@0 @1) (pointer_plus ADDR_EXPR@2 @3))
2439 (with { poly_int64 diff; tree inner_type = TREE_TYPE (@1);}
2440 (if (ptr_difference_const (@0, @2, &diff))
2441 (neeq (plus { build_int_cst_type (inner_type, diff); } @1) @3)))))
2443 /* X - Y < X is the same as Y > 0 when there is no overflow.
2444 For equality, this is also true with wrapping overflow. */
2445 (for op (simple_comparison)
2447 (op:c @0 (minus@2 @0 @1))
2448 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2449 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
2450 || ((op == EQ_EXPR || op == NE_EXPR)
2451 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
2452 && (CONSTANT_CLASS_P (@1) || single_use (@2)))
2453 (op @1 { build_zero_cst (TREE_TYPE (@1)); }))))
2455 /* Transform:
2456 (X / Y) == 0 -> X < Y if X, Y are unsigned.
2457 (X / Y) != 0 -> X >= Y, if X, Y are unsigned. */
2461 (cmp (trunc_div @0 @1) integer_zerop)
2462 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
2463 /* Complex ==/!= is allowed, but not </>=. */
2464 && TREE_CODE (TREE_TYPE (@0)) != COMPLEX_TYPE
2465 && (VECTOR_TYPE_P (type) || !VECTOR_TYPE_P (TREE_TYPE (@0))))
2468 /* X == C - X can never be true if C is odd. */
2471 (cmp:c (convert? @0) (convert1? (minus INTEGER_CST@1 (convert2? @0))))
2472 (if (TREE_INT_CST_LOW (@1) & 1)
2473 { constant_boolean_node (cmp == NE_EXPR, type); })))
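/* Why X == C - X can never hold for odd C: X + X is always even, even
   with wrapping arithmetic, so it cannot equal an odd constant.

     int f (int x)
     {
       return x == 7 - x;   // expected to fold to 0
     }
*/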
2475 /* Arguments on which one can call get_nonzero_bits to get the bits
2476 possibly set. */
2477 (match with_possible_nonzero_bits
2479 (match with_possible_nonzero_bits
2481 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))))
2482 /* Slightly extended version, do not make it recursive to keep it cheap. */
2483 (match (with_possible_nonzero_bits2 @0)
2484 with_possible_nonzero_bits@0)
2485 (match (with_possible_nonzero_bits2 @0)
2486 (bit_and:c with_possible_nonzero_bits@0 @2))
2488 /* Same for bits that are known to be set, but we do not have
2489 an equivalent to get_nonzero_bits yet. */
2490 (match (with_certain_nonzero_bits2 @0)
2492 (match (with_certain_nonzero_bits2 @0)
2493 (bit_ior @1 INTEGER_CST@0))
2495 /* X == C (or X & Z == Y | C) is impossible if ~nonzero(X) & C != 0. */
2498 (cmp:c (with_possible_nonzero_bits2 @0) (with_certain_nonzero_bits2 @1))
2499 (if (wi::bit_and_not (wi::to_wide (@1), get_nonzero_bits (@0)) != 0)
2500 { constant_boolean_node (cmp == NE_EXPR, type); })))
2502 /* ((X inner_op C0) outer_op C1)
2503 With X being a tree where value_range has reasoned certain bits to always be
2504 zero throughout its computed value range,
2505 inner_op = {|,^}, outer_op = {|,^} and inner_op != outer_op,
2506 and with zero_mask having 1's for all bits that are sure to be 0 in X
2507 and 0's otherwise:
2508 if (inner_op == '^') C0 &= ~C1;
2509 if ((C0 & ~zero_mask) == 0) then emit (X outer_op (C0 outer_op C1))
2510 if ((C1 & ~zero_mask) == 0) then emit (X inner_op (C0 outer_op C1)) */
2512 (for inner_op (bit_ior bit_xor)
2513 outer_op (bit_xor bit_ior)
2516 (inner_op:s @2 INTEGER_CST@0) INTEGER_CST@1)
2520 wide_int zero_mask_not;
2524 if (TREE_CODE (@2) == SSA_NAME)
2525 zero_mask_not = get_nonzero_bits (@2);
2529 if (inner_op == BIT_XOR_EXPR)
2531 C0 = wi::bit_and_not (wi::to_wide (@0), wi::to_wide (@1));
2532 cst_emit = C0 | wi::to_wide (@1);
2536 C0 = wi::to_wide (@0);
2537 cst_emit = C0 ^ wi::to_wide (@1);
2540 (if (!fail && (C0 & zero_mask_not) == 0)
2541 (outer_op @2 { wide_int_to_tree (type, cst_emit); })
2542 (if (!fail && (wi::to_wide (@1) & zero_mask_not) == 0)
2543 (inner_op @2 { wide_int_to_tree (type, cst_emit); }))))))
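/* A worked instance of the mask reasoning above (numbers invented).
   If value-range analysis proves x <= 0xf, then zero_mask_not == 0xf,
   and for (x | 0x30) ^ 0x50 we have C0 == 0x30 with
   (C0 & zero_mask_not) == 0, so it is safe to emit x ^ (0x30 ^ 0x50):

     // assuming the ranger knows 0 <= x <= 15
     unsigned f (unsigned x) { return (x | 0x30) ^ 0x50; }  // -> x ^ 0x60
*/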
2545 /* Associate (p +p off1) +p off2 as (p +p (off1 + off2)). */
2547 (pointer_plus (pointer_plus:s @0 @1) @3)
2548 (pointer_plus @0 (plus @1 @3)))
2551 (pointer_plus (convert:s (pointer_plus:s @0 @1)) @3)
2552 (convert:type (pointer_plus @0 (plus @1 @3))))
2555 /* Pattern match
2556 tem1 = (long) ptr1;
2557 tem2 = (long) ptr2;
2558 tem3 = tem2 - tem1;
2559 tem4 = (unsigned long) tem3;
2560 tem5 = ptr1 + tem4;
2561 and produce
2562 tem5 = ptr2; */
2564 (pointer_plus @0 (convert?@2 (minus@3 (convert @1) (convert @0))))
2565 /* Conditionally look through a sign-changing conversion. */
2566 (if (TYPE_PRECISION (TREE_TYPE (@2)) == TYPE_PRECISION (TREE_TYPE (@3))
2567 && ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@1)))
2568 || (GENERIC && type == TREE_TYPE (@1))))
2571 (pointer_plus @0 (convert?@2 (pointer_diff@3 @1 @@0)))
2572 (if (TYPE_PRECISION (TREE_TYPE (@2)) >= TYPE_PRECISION (TREE_TYPE (@3)))
2575 /* Pattern match
2576 tem = (sizetype) ptr;
2577 tem = tem & algn;
2578 tem = -tem;
2579 ... = ptr p+ tem;
2580 and produce the simpler and easier to analyze with respect to alignment
2581 ... = ptr & ~algn; */
2583 (pointer_plus @0 (negate (bit_and (convert @0) INTEGER_CST@1)))
2584 (with { tree algn = wide_int_to_tree (TREE_TYPE (@0), ~wi::to_wide (@1)); }
2585 (bit_and @0 { algn; })))
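/* A hedged sketch of the source idiom the pattern above targets
   (names invented; unsigned long stands in for a pointer-sized integer):

     char *align_down (char *ptr)
     {
       unsigned long tem = (unsigned long) ptr;
       return ptr - (tem & 15);   // expected: the equivalent of ptr & ~15
     }
*/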
2587 /* Try folding difference of addresses. */
2589 (minus (convert ADDR_EXPR@0) (convert (pointer_plus @1 @2)))
2590 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
2591 (with { poly_int64 diff; }
2592 (if (ptr_difference_const (@0, @1, &diff))
2593 (minus { build_int_cst_type (type, diff); } (convert @2))))))
2595 (minus (convert (pointer_plus @0 @2)) (convert ADDR_EXPR@1))
2596 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
2597 (with { poly_int64 diff; }
2598 (if (ptr_difference_const (@0, @1, &diff))
2599 (plus (convert @2) { build_int_cst_type (type, diff); })))))
2601 (minus (convert ADDR_EXPR@0) (convert @1))
2602 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
2603 (with { poly_int64 diff; }
2604 (if (ptr_difference_const (@0, @1, &diff))
2605 { build_int_cst_type (type, diff); }))))
2607 (minus (convert @0) (convert ADDR_EXPR@1))
2608 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
2609 (with { poly_int64 diff; }
2610 (if (ptr_difference_const (@0, @1, &diff))
2611 { build_int_cst_type (type, diff); }))))
2613 (pointer_diff (convert?@2 ADDR_EXPR@0) (convert1?@3 @1))
2614 (if (tree_nop_conversion_p (TREE_TYPE(@2), TREE_TYPE (@0))
2615 && tree_nop_conversion_p (TREE_TYPE(@3), TREE_TYPE (@1)))
2616 (with { poly_int64 diff; }
2617 (if (ptr_difference_const (@0, @1, &diff))
2618 { build_int_cst_type (type, diff); }))))
2620 (pointer_diff (convert?@2 @0) (convert1?@3 ADDR_EXPR@1))
2621 (if (tree_nop_conversion_p (TREE_TYPE(@2), TREE_TYPE (@0))
2622 && tree_nop_conversion_p (TREE_TYPE(@3), TREE_TYPE (@1)))
2623 (with { poly_int64 diff; }
2624 (if (ptr_difference_const (@0, @1, &diff))
2625 { build_int_cst_type (type, diff); }))))
2627 /* (&a+b) - (&a[1] + c) -> sizeof(a[0]) + (b - c) */
2629 (pointer_diff (pointer_plus ADDR_EXPR@0 @1) (pointer_plus ADDR_EXPR@2 @3))
2630 (with { poly_int64 diff; }
2631 (if (ptr_difference_const (@0, @2, &diff))
2632 (plus { build_int_cst_type (type, diff); } (convert (minus @1 @3))))))
2633 /* (p + b) - &p->d -> offsetof (*p, d) + b */
2635 (pointer_diff (pointer_plus @0 @1) ADDR_EXPR@2)
2636 (with { poly_int64 diff; }
2637 (if (ptr_difference_const (@0, @2, &diff))
2638 (plus { build_int_cst_type (type, diff); } (convert @1)))))
2640 (pointer_diff ADDR_EXPR@0 (pointer_plus @1 @2))
2641 (with { poly_int64 diff; }
2642 (if (ptr_difference_const (@0, @1, &diff))
2643 (minus { build_int_cst_type (type, diff); } (convert @2)))))
2645 /* Canonicalize (T *)(ptr - ptr-cst) to &MEM[ptr + -ptr-cst]. */
2647 (convert (pointer_diff @0 INTEGER_CST@1))
2648 (if (POINTER_TYPE_P (type))
2649 { build_fold_addr_expr_with_type
2650 (build2 (MEM_REF, char_type_node, @0,
2651 wide_int_to_tree (ptr_type_node, wi::neg (wi::to_wide (@1)))),
2654 /* If arg0 is derived from the address of an object or function, we may
2655 be able to fold this expression using the object or function's
2656 alignment. */
2658 (bit_and (convert? @0) INTEGER_CST@1)
2659 (if (POINTER_TYPE_P (TREE_TYPE (@0))
2660 && tree_nop_conversion_p (type, TREE_TYPE (@0)))
2664 unsigned HOST_WIDE_INT bitpos;
2665 get_pointer_alignment_1 (@0, &align, &bitpos);
2667 (if (wi::ltu_p (wi::to_wide (@1), align / BITS_PER_UNIT))
2668 { wide_int_to_tree (type, (wi::to_wide (@1)
2669 & (bitpos / BITS_PER_UNIT))); }))))
2673 (if (INTEGRAL_TYPE_P (type)
2674 && wi::eq_p (wi::to_wide (t), wi::min_value (type)))))
2678 (if (INTEGRAL_TYPE_P (type)
2679 && wi::eq_p (wi::to_wide (t), wi::max_value (type)))))
2681 /* x > y && x != XXX_MIN --> x > y
2682 x > y && x == XXX_MIN --> false. */
2685 (bit_and:c (gt:c@2 @0 @1) (eqne @0 min_value))
2687 (if (eqne == EQ_EXPR)
2688 { constant_boolean_node (false, type); })
2689 (if (eqne == NE_EXPR)
2693 /* x < y && x != XXX_MAX --> x < y
2694 x < y && x == XXX_MAX --> false. */
2697 (bit_and:c (lt:c@2 @0 @1) (eqne @0 max_value))
2699 (if (eqne == EQ_EXPR)
2700 { constant_boolean_node (false, type); })
2701 (if (eqne == NE_EXPR)
2705 /* x <= y && x == XXX_MIN --> x == XXX_MIN. */
2707 (bit_and:c (le:c @0 @1) (eq@2 @0 min_value))
2710 /* x >= y && x == XXX_MAX --> x == XXX_MAX. */
2712 (bit_and:c (ge:c @0 @1) (eq@2 @0 max_value))
2715 /* x > y || x != XXX_MIN --> x != XXX_MIN. */
2717 (bit_ior:c (gt:c @0 @1) (ne@2 @0 min_value))
2720 /* x <= y || x != XXX_MIN --> true. */
2722 (bit_ior:c (le:c @0 @1) (ne @0 min_value))
2723 { constant_boolean_node (true, type); })
2725 /* x <= y || x == XXX_MIN --> x <= y. */
2727 (bit_ior:c (le:c@2 @0 @1) (eq @0 min_value))
2730 /* x < y || x != XXX_MAX --> x != XXX_MAX. */
2732 (bit_ior:c (lt:c @0 @1) (ne@2 @0 max_value))
2735 /* x >= y || x != XXX_MAX --> true
2736 x >= y || x == XXX_MAX --> x >= y. */
2739 (bit_ior:c (ge:c@2 @0 @1) (eqne @0 max_value))
2741 (if (eqne == EQ_EXPR)
2743 (if (eqne == NE_EXPR)
2744 { constant_boolean_node (true, type); }))))
2746 /* y == XXX_MIN || x < y --> x <= y - 1 */
2748 (bit_ior:c (eq:s @1 min_value) (lt:cs @0 @1))
2749 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
2750 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1)))
2751 (le @0 (minus @1 { build_int_cst (TREE_TYPE (@1), 1); }))))
2753 /* y != XXX_MIN && x >= y --> x > y - 1 */
2755 (bit_and:c (ne:s @1 min_value) (ge:cs @0 @1))
2756 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
2757 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1)))
2758 (gt @0 (minus @1 { build_int_cst (TREE_TYPE (@1), 1); }))))
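/* Sketch of the previous fold with unsigned operands, whose minimum
   value is 0 and which wrap on overflow:

     int f (unsigned x, unsigned y)
     {
       return y != 0u && x >= y;   // expected: x > y - 1
     }

   The y != 0 guard makes the wrapping y - 1 safe to compare against.  */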
2760 /* Convert (X == CST1) && (X OP2 CST2) to a known value
2761 based on CST1 OP2 CST2. Similarly for (X != CST1). */
2764 (for code2 (eq ne lt gt le ge)
2766 (bit_and:c (code1@3 @0 INTEGER_CST@1) (code2@4 @0 INTEGER_CST@2))
2769 int cmp = tree_int_cst_compare (@1, @2);
2773 case EQ_EXPR: val = (cmp == 0); break;
2774 case NE_EXPR: val = (cmp != 0); break;
2775 case LT_EXPR: val = (cmp < 0); break;
2776 case GT_EXPR: val = (cmp > 0); break;
2777 case LE_EXPR: val = (cmp <= 0); break;
2778 case GE_EXPR: val = (cmp >= 0); break;
2779 default: gcc_unreachable ();
2783 (if (code1 == EQ_EXPR && val) @3)
2784 (if (code1 == EQ_EXPR && !val) { constant_boolean_node (false, type); })
2785 (if (code1 == NE_EXPR && !val) @4))))))
2787 /* Convert (X OP1 CST1) && (X OP2 CST2). */
2789 (for code1 (lt le gt ge)
2790 (for code2 (lt le gt ge)
2792 (bit_and (code1:c@3 @0 INTEGER_CST@1) (code2:c@4 @0 INTEGER_CST@2))
2795 int cmp = tree_int_cst_compare (@1, @2);
2798 /* Choose the more restrictive of two < or <= comparisons. */
2799 (if ((code1 == LT_EXPR || code1 == LE_EXPR)
2800 && (code2 == LT_EXPR || code2 == LE_EXPR))
2801 (if ((cmp < 0) || (cmp == 0 && code1 == LT_EXPR))
2804 /* Likewise choose the more restrictive of two > or >= comparisons. */
2805 (if ((code1 == GT_EXPR || code1 == GE_EXPR)
2806 && (code2 == GT_EXPR || code2 == GE_EXPR))
2807 (if ((cmp > 0) || (cmp == 0 && code1 == GT_EXPR))
2810 /* Check for singleton ranges. */
2812 && ((code1 == LE_EXPR && code2 == GE_EXPR)
2813 || (code1 == GE_EXPR && code2 == LE_EXPR)))
2815 /* Check for disjoint ranges. */
2817 && (code1 == LT_EXPR || code1 == LE_EXPR)
2818 && (code2 == GT_EXPR || code2 == GE_EXPR))
2819 { constant_boolean_node (false, type); })
2821 && (code1 == GT_EXPR || code1 == GE_EXPR)
2822 && (code2 == LT_EXPR || code2 == LE_EXPR))
2823 { constant_boolean_node (false, type); })
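/* Invented examples of the cases handled above:

     x < 3 && x < 5    ->  x < 3    (more restrictive bound)
     x <= 3 && x >= 3  ->  x == 3   (singleton range)
     x < 3 && x > 5    ->  false    (disjoint ranges)

   e.g.

     int f (int x) { return x < 3 && x > 5; }   // expected to fold to 0
*/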
2826 /* Convert (X == CST1) || (X OP2 CST2) to a known value
2827 based on CST1 OP2 CST2. Similarly for (X != CST1). */
2830 (for code2 (eq ne lt gt le ge)
2832 (bit_ior:c (code1@3 @0 INTEGER_CST@1) (code2@4 @0 INTEGER_CST@2))
2835 int cmp = tree_int_cst_compare (@1, @2);
2839 case EQ_EXPR: val = (cmp == 0); break;
2840 case NE_EXPR: val = (cmp != 0); break;
2841 case LT_EXPR: val = (cmp < 0); break;
2842 case GT_EXPR: val = (cmp > 0); break;
2843 case LE_EXPR: val = (cmp <= 0); break;
2844 case GE_EXPR: val = (cmp >= 0); break;
2845 default: gcc_unreachable ();
2849 (if (code1 == EQ_EXPR && val) @4)
2850 (if (code1 == NE_EXPR && val) { constant_boolean_node (true, type); })
2851 (if (code1 == NE_EXPR && !val) @3))))))
2853 /* Convert (X OP1 CST1) || (X OP2 CST2). */
2855 (for code1 (lt le gt ge)
2856 (for code2 (lt le gt ge)
2858 (bit_ior (code1@3 @0 INTEGER_CST@1) (code2@4 @0 INTEGER_CST@2))
2861 int cmp = tree_int_cst_compare (@1, @2);
2864 /* Choose the more restrictive of two < or <= comparisons. */
2865 (if ((code1 == LT_EXPR || code1 == LE_EXPR)
2866 && (code2 == LT_EXPR || code2 == LE_EXPR))
2867 (if ((cmp < 0) || (cmp == 0 && code1 == LT_EXPR))
2870 /* Likewise choose the more restrictive of two > or >= comparisons. */
2871 (if ((code1 == GT_EXPR || code1 == GE_EXPR)
2872 && (code2 == GT_EXPR || code2 == GE_EXPR))
2873 (if ((cmp > 0) || (cmp == 0 && code1 == GT_EXPR))
2876 /* Check for singleton ranges. */
2878 && ((code1 == LT_EXPR && code2 == GT_EXPR)
2879 || (code1 == GT_EXPR && code2 == LT_EXPR)))
2881 /* Check for disjoint ranges. */
2883 && (code1 == LT_EXPR || code1 == LE_EXPR)
2884 && (code2 == GT_EXPR || code2 == GE_EXPR))
2885 { constant_boolean_node (true, type); })
2887 && (code1 == GT_EXPR || code1 == GE_EXPR)
2888 && (code2 == LT_EXPR || code2 == LE_EXPR))
2889 { constant_boolean_node (true, type); })
2892 /* We can't reassociate at all for saturating types. */
2893 (if (!TYPE_SATURATING (type))
2895 /* Contract negates. */
2896 /* A + (-B) -> A - B */
2898 (plus:c @0 (convert? (negate @1)))
2899 /* Apply STRIP_NOPS on the negate. */
2900 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
2901 && !TYPE_OVERFLOW_SANITIZED (type))
2905 if (INTEGRAL_TYPE_P (type)
2906 && TYPE_OVERFLOW_WRAPS (type) != TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1)))
2907 t1 = TYPE_OVERFLOW_WRAPS (type) ? type : TREE_TYPE (@1);
2909 (convert (minus (convert:t1 @0) (convert:t1 @1))))))
2910 /* A - (-B) -> A + B */
2912 (minus @0 (convert? (negate @1)))
2913 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
2914 && !TYPE_OVERFLOW_SANITIZED (type))
2918 if (INTEGRAL_TYPE_P (type)
2919 && TYPE_OVERFLOW_WRAPS (type) != TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1)))
2920 t1 = TYPE_OVERFLOW_WRAPS (type) ? type : TREE_TYPE (@1);
2922 (convert (plus (convert:t1 @0) (convert:t1 @1))))))
2923 /* -(T)(-A) -> (T)A
2924 Sign-extension is ok except for INT_MIN, which thankfully cannot
2925 happen without overflow. */
2927 (negate (convert (negate @1)))
2928 (if (INTEGRAL_TYPE_P (type)
2929 && (TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@1))
2930 || (!TYPE_UNSIGNED (TREE_TYPE (@1))
2931 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))))
2932 && !TYPE_OVERFLOW_SANITIZED (type)
2933 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@1)))
2936 (negate (convert negate_expr_p@1))
2937 (if (SCALAR_FLOAT_TYPE_P (type)
2938 && ((DECIMAL_FLOAT_TYPE_P (type)
2939 == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1))
2940 && TYPE_PRECISION (type) >= TYPE_PRECISION (TREE_TYPE (@1)))
2941 || !HONOR_SIGN_DEPENDENT_ROUNDING (type)))
2942 (convert (negate @1))))
2944 (negate (nop_convert? (negate @1)))
2945 (if (!TYPE_OVERFLOW_SANITIZED (type)
2946 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@1)))
2949 /* We can't reassociate floating-point unless -fassociative-math is set,
2950 nor fixed-point plus or minus, because of saturation to +-Inf. */
2951 (if ((!FLOAT_TYPE_P (type) || flag_associative_math)
2952 && !FIXED_POINT_TYPE_P (type))
2954 /* Match patterns that allow contracting a plus-minus pair
2955 irrespective of overflow issues. */
2956 /* (A +- B) - A -> +- B */
2957 /* (A +- B) -+ B -> A */
2958 /* A - (A +- B) -> -+ B */
2959 /* A +- (B -+ A) -> +- B */
2961 (minus (nop_convert1? (plus:c (nop_convert2? @0) @1)) @0)
2964 (minus (nop_convert1? (minus (nop_convert2? @0) @1)) @0)
2965 (if (!ANY_INTEGRAL_TYPE_P (type)
2966 || TYPE_OVERFLOW_WRAPS (type))
2967 (negate (view_convert @1))
2968 (view_convert (negate @1))))
2970 (plus:c (nop_convert1? (minus @0 (nop_convert2? @1))) @1)
2973 (minus @0 (nop_convert1? (plus:c (nop_convert2? @0) @1)))
2974 (if (!ANY_INTEGRAL_TYPE_P (type)
2975 || TYPE_OVERFLOW_WRAPS (type))
2976 (negate (view_convert @1))
2977 (view_convert (negate @1))))
2979 (minus @0 (nop_convert1? (minus (nop_convert2? @0) @1)))
2981 /* (A +- B) + (C - A) -> C +- B */
2982 /* (A + B) - (A - C) -> B + C */
2983 /* More cases are handled with comparisons. */
2985 (plus:c (plus:c @0 @1) (minus @2 @0))
2988 (plus:c (minus @0 @1) (minus @2 @0))
2991 (plus:c (pointer_diff @0 @1) (pointer_diff @2 @0))
2992 (if (TYPE_OVERFLOW_UNDEFINED (type)
2993 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0)))
2994 (pointer_diff @2 @1)))
2996 (minus (plus:c @0 @1) (minus @0 @2))
2999 /* (A +- CST1) +- CST2 -> A + CST3
3000 Use view_convert because it is safe for vectors and equivalent for
3001 scalars. */
3002 (for outer_op (plus minus)
3003 (for inner_op (plus minus)
3004 neg_inner_op (minus plus)
3006 (outer_op (nop_convert? (inner_op @0 CONSTANT_CLASS_P@1))
3008 /* If one of the types wraps, use that one. */
3009 (if (!ANY_INTEGRAL_TYPE_P (type) || TYPE_OVERFLOW_WRAPS (type))
3010 /* If all 3 captures are CONSTANT_CLASS_P, punt, as we might recurse
3011 forever if something doesn't simplify into a constant. */
3012 (if (!CONSTANT_CLASS_P (@0))
3013 (if (outer_op == PLUS_EXPR)
3014 (plus (view_convert @0) (inner_op! @2 (view_convert @1)))
3015 (minus (view_convert @0) (neg_inner_op! @2 (view_convert @1)))))
3016 (if (!ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
3017 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
3018 (if (outer_op == PLUS_EXPR)
3019 (view_convert (plus @0 (inner_op! (view_convert @2) @1)))
3020 (view_convert (minus @0 (neg_inner_op! (view_convert @2) @1))))
3021 /* If the constant operation overflows we cannot do the transform
3022 directly as we would introduce undefined overflow, for example
3023 with (a - 1) + INT_MIN. */
3024 (if (types_match (type, @0) && !TYPE_OVERFLOW_SANITIZED (type))
3025 (with { tree cst = const_binop (outer_op == inner_op
3026 ? PLUS_EXPR : MINUS_EXPR,
3029 (if (INTEGRAL_TYPE_P (type) && !TREE_OVERFLOW (cst))
3030 (inner_op @0 { cst; } )
3031 /* X+INT_MAX+1 is X-INT_MIN. */
3032 (if (INTEGRAL_TYPE_P (type)
3033 && wi::to_wide (cst) == wi::min_value (type))
3034 (neg_inner_op @0 { wide_int_to_tree (type, wi::to_wide (cst)); })
3035 /* Last resort, use some unsigned type. */
3036 (with { tree utype = unsigned_type_for (type); }
3038 (view_convert (inner_op
3039 (view_convert:utype @0)
3041 { TREE_OVERFLOW (cst)
3042 ? drop_tree_overflow (cst) : cst; })))))))))))))))
3044 /* (CST1 - A) +- CST2 -> CST3 - A */
3045 (for outer_op (plus minus)
3047 (outer_op (nop_convert? (minus CONSTANT_CLASS_P@1 @0)) CONSTANT_CLASS_P@2)
3048 /* If one of the types wraps, use that one. */
3049 (if (!ANY_INTEGRAL_TYPE_P (type) || TYPE_OVERFLOW_WRAPS (type))
3050 /* If all 3 captures are CONSTANT_CLASS_P, punt, as we might recurse
3051 forever if something doesn't simplify into a constant. */
3052 (if (!CONSTANT_CLASS_P (@0))
3053 (minus (outer_op! (view_convert @1) @2) (view_convert @0)))
3054 (if (!ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
3055 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
3056 (view_convert (minus (outer_op! @1 (view_convert @2)) @0))
3057 (if (types_match (type, @0) && !TYPE_OVERFLOW_SANITIZED (type))
3058 (with { tree cst = const_binop (outer_op, type, @1, @2); }
3059 (if (cst && !TREE_OVERFLOW (cst))
3060 (minus { cst; } @0))))))))
3062 /* CST1 - (CST2 - A) -> CST3 + A
3063 Use view_convert because it is safe for vectors and equivalent for
3064 scalars. */
3066 (minus CONSTANT_CLASS_P@1 (nop_convert? (minus CONSTANT_CLASS_P@2 @0)))
3067 /* If one of the types wraps, use that one. */
3068 (if (!ANY_INTEGRAL_TYPE_P (type) || TYPE_OVERFLOW_WRAPS (type))
3069 /* If all 3 captures are CONSTANT_CLASS_P, punt, as we might recurse
3070 forever if something doesn't simplify into a constant. */
3071 (if (!CONSTANT_CLASS_P (@0))
3072 (plus (view_convert @0) (minus! @1 (view_convert @2))))
3073 (if (!ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
3074 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
3075 (view_convert (plus @0 (minus! (view_convert @1) @2)))
3076 (if (types_match (type, @0) && !TYPE_OVERFLOW_SANITIZED (type))
3077 (with { tree cst = const_binop (MINUS_EXPR, type, @1, @2); }
3078 (if (cst && !TREE_OVERFLOW (cst))
3079 (plus { cst; } @0)))))))
3081 /* ((T)(A)) + CST -> (T)(A + CST) */
3084 (plus (convert:s SSA_NAME@0) INTEGER_CST@1)
3085 (if (TREE_CODE (TREE_TYPE (@0)) == INTEGER_TYPE
3086 && TREE_CODE (type) == INTEGER_TYPE
3087 && TYPE_PRECISION (type) > TYPE_PRECISION (TREE_TYPE (@0))
3088 && int_fits_type_p (@1, TREE_TYPE (@0)))
3089 /* Perform binary operation inside the cast if the constant fits
3090 and (A + CST)'s range does not overflow. */
3093 wi::overflow_type min_ovf = wi::OVF_OVERFLOW,
3094 max_ovf = wi::OVF_OVERFLOW;
3095 tree inner_type = TREE_TYPE (@0);
3098 = wide_int::from (wi::to_wide (@1), TYPE_PRECISION (inner_type),
3099 TYPE_SIGN (inner_type));
3102 if (get_global_range_query ()->range_of_expr (vr, @0)
3103 && !vr.varying_p () && !vr.undefined_p ())
3105 wide_int wmin0 = vr.lower_bound ();
3106 wide_int wmax0 = vr.upper_bound ();
3107 wi::add (wmin0, w1, TYPE_SIGN (inner_type), &min_ovf);
3108 wi::add (wmax0, w1, TYPE_SIGN (inner_type), &max_ovf);
3111 (if (min_ovf == wi::OVF_NONE && max_ovf == wi::OVF_NONE)
3112 (convert (plus @0 { wide_int_to_tree (TREE_TYPE (@0), w1); } )))
3116 /* ((T)(A + CST1)) + CST2 -> (T)(A) + (T)CST1 + CST2 */
3118 (for op (plus minus)
3120 (plus (convert:s (op:s @0 INTEGER_CST@1)) INTEGER_CST@2)
3121 (if (TREE_CODE (TREE_TYPE (@0)) == INTEGER_TYPE
3122 && TREE_CODE (type) == INTEGER_TYPE
3123 && TYPE_PRECISION (type) > TYPE_PRECISION (TREE_TYPE (@0))
3124 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
3125 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0))
3126 && TYPE_OVERFLOW_WRAPS (type))
3127 (plus (convert @0) (op @2 (convert @1))))))
3130 /* (T)(A) +- (T)(B) -> (T)(A +- B) only when (A +- B) could be simplified
3131 to a simple value. */
3132 (for op (plus minus)
3134 (op (convert @0) (convert @1))
3135 (if (INTEGRAL_TYPE_P (type)
3136 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
3137 && TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@0))
3138 && types_match (TREE_TYPE (@0), TREE_TYPE (@1))
3139 && !TYPE_OVERFLOW_TRAPS (type)
3140 && !TYPE_OVERFLOW_SANITIZED (type))
3141 (convert (op! @0 @1)))))
3143 /* ~A + A -> -1 */
3145 (plus:c (convert? (bit_not @0)) (convert? @0))
3146 (if (!TYPE_OVERFLOW_TRAPS (type))
3147 (convert { build_all_ones_cst (TREE_TYPE (@0)); })))
3149 /* ~A + 1 -> -A */
3151 (plus (convert? (bit_not @0)) integer_each_onep)
3152 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
3153 (negate (convert @0))))
3155 /* -A - 1 -> ~A */
3157 (minus (convert? (negate @0)) integer_each_onep)
3158 (if (!TYPE_OVERFLOW_TRAPS (type)
3159 && TREE_CODE (type) != COMPLEX_TYPE
3160 && tree_nop_conversion_p (type, TREE_TYPE (@0)))
3161 (bit_not (convert @0))))
3163 /* -1 - A -> ~A */
3165 (minus integer_all_onesp @0)
3166 (if (TREE_CODE (type) != COMPLEX_TYPE)
3169 /* (T)(P + A) - (T)P -> (T) A */
3171 (minus (convert (plus:c @@0 @1))
3173 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
3174 /* For integer types, if A has a smaller type
3175 than T the result depends on the possible
3176 overflow in P + A.
3177 E.g. T=size_t, A=(unsigned)429497295, P>0.
3178 However, if an overflow in P + A would cause
3179 undefined behavior, we can assume that there
3180 is no overflow. */
3181 || (INTEGRAL_TYPE_P (TREE_TYPE (@1))
3182 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))))
3185 (minus (convert (pointer_plus @@0 @1))
3187 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
3188 /* For pointer types, if the conversion of A to the
3189 final type requires a sign- or zero-extension,
3190 then we have to punt - it is not defined which
3191 one we do. */
3192 || (POINTER_TYPE_P (TREE_TYPE (@0))
3193 && TREE_CODE (@1) == INTEGER_CST
3194 && tree_int_cst_sign_bit (@1) == 0))
3197 (pointer_diff (pointer_plus @@0 @1) @0)
3198 /* The second argument of pointer_plus must be interpreted as signed, and
3199 thus sign-extended if necessary. */
3200 (with { tree stype = signed_type_for (TREE_TYPE (@1)); }
3201 /* Use view_convert instead of convert here: POINTER_PLUS_EXPR's
3202 second arg is unsigned even when we need to consider it as signed,
3203 and we don't want to diagnose overflow here. */
3204 (convert (view_convert:stype @1))))
3206 /* (T)P - (T)(P + A) -> -(T) A */
3208 (minus (convert? @0)
3209 (convert (plus:c @@0 @1)))
3210 (if (INTEGRAL_TYPE_P (type)
3211 && TYPE_OVERFLOW_UNDEFINED (type)
3212 /* For integer literals, using an intermediate unsigned type to avoid
3213 an overflow at run time is counter-productive because it introduces
3214 spurious overflows at compile time, in the form of TREE_OVERFLOW on
3215 the result, which may be problematic in GENERIC for some front-ends:
3216 (T)P - (T)(P + 4) -> (T)(-(U)4) -> (T)(4294967292) -> -4(OVF)
3217 so we use the direct path for them. */
3218 && TREE_CODE (@1) != INTEGER_CST
3219 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
3220 (with { tree utype = unsigned_type_for (type); }
3221 (convert (negate (convert:utype @1))))
3222 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
3223 /* For integer types, if A has a smaller type
3224 than T the result depends on the possible
3225 overflow in P + A.
3226 E.g. T=size_t, A=(unsigned)429497295, P>0.
3227 However, if an overflow in P + A would cause
3228 undefined behavior, we can assume that there
3229 is no overflow. */
3230 || (INTEGRAL_TYPE_P (TREE_TYPE (@1))
3231 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))))
3232 (negate (convert @1)))))
3235 (convert (pointer_plus @@0 @1)))
3236 (if (INTEGRAL_TYPE_P (type)
3237 && TYPE_OVERFLOW_UNDEFINED (type)
3238 /* See above the rationale for this condition. */
3239 && TREE_CODE (@1) != INTEGER_CST
3240 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
3241 (with { tree utype = unsigned_type_for (type); }
3242 (convert (negate (convert:utype @1))))
3243 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
3244 /* For pointer types, if the conversion of A to the
3245 final type requires a sign- or zero-extension,
3246 then we have to punt - it is not defined which
3247 one we do. */
3248 || (POINTER_TYPE_P (TREE_TYPE (@0))
3249 && TREE_CODE (@1) == INTEGER_CST
3250 && tree_int_cst_sign_bit (@1) == 0))
3251 (negate (convert @1)))))
3253 (pointer_diff @0 (pointer_plus @@0 @1))
3254 /* The second argument of pointer_plus must be interpreted as signed, and
3255 thus sign-extended if necessary. */
3256 (with { tree stype = signed_type_for (TREE_TYPE (@1)); }
3257 /* Use view_convert instead of convert here: POINTER_PLUS_EXPR's
3258 second arg is unsigned even when we need to consider it as signed,
3259 and we don't want to diagnose overflow here. */
3260 (negate (convert (view_convert:stype @1)))))
3262 /* (T)(P + A) - (T)(P + B) -> (T)A - (T)B */
3264 (minus (convert (plus:c @@0 @1))
3265 (convert (plus:c @0 @2)))
3266 (if (INTEGRAL_TYPE_P (type)
3267 && TYPE_OVERFLOW_UNDEFINED (type)
3268 && element_precision (type) <= element_precision (TREE_TYPE (@1))
3269 && element_precision (type) <= element_precision (TREE_TYPE (@2)))
3270 (with { tree utype = unsigned_type_for (type); }
3271 (convert (minus (convert:utype @1) (convert:utype @2))))
3272 (if (((element_precision (type) <= element_precision (TREE_TYPE (@1)))
3273 == (element_precision (type) <= element_precision (TREE_TYPE (@2))))
3274 && (element_precision (type) <= element_precision (TREE_TYPE (@1))
3275 /* For integer types, if A has a smaller type
3276 than T the result depends on the possible
3277 overflow in P + A.
3278 E.g. T=size_t, A=(unsigned)429497295, P>0.
3279 However, if an overflow in P + A would cause
3280 undefined behavior, we can assume that there
3281 is no overflow. */
3282 || (INTEGRAL_TYPE_P (TREE_TYPE (@1))
3283 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
3284 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))
3285 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@2)))))
3286 (minus (convert @1) (convert @2)))))
3288 (minus (convert (pointer_plus @@0 @1))
3289 (convert (pointer_plus @0 @2)))
3290 (if (INTEGRAL_TYPE_P (type)
3291 && TYPE_OVERFLOW_UNDEFINED (type)
3292 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
3293 (with { tree utype = unsigned_type_for (type); }
3294 (convert (minus (convert:utype @1) (convert:utype @2))))
3295 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
3296 /* For pointer types, if the conversion of A to the
3297 final type requires a sign- or zero-extension,
3298 then we have to punt - it is not defined which
3299 one we do. */
3300 || (POINTER_TYPE_P (TREE_TYPE (@0))
3301 && TREE_CODE (@1) == INTEGER_CST
3302 && tree_int_cst_sign_bit (@1) == 0
3303 && TREE_CODE (@2) == INTEGER_CST
3304 && tree_int_cst_sign_bit (@2) == 0))
3305 (minus (convert @1) (convert @2)))))
3307 (pointer_diff (pointer_plus @0 @2) (pointer_plus @1 @2))
3308 (pointer_diff @0 @1))
3310 (pointer_diff (pointer_plus @@0 @1) (pointer_plus @0 @2))
3311 /* The second argument of pointer_plus must be interpreted as signed, and
3312 thus sign-extended if necessary. */
3313 (with { tree stype = signed_type_for (TREE_TYPE (@1)); }
3314 /* Use view_convert instead of convert here: POINTER_PLUS_EXPR's
3315 second arg is unsigned even when we need to consider it as signed,
3316 and we don't want to diagnose overflow here. */
3317 (minus (convert (view_convert:stype @1))
3318 (convert (view_convert:stype @2)))))))
3320 /* (A * C) +- (B * C) -> (A+-B) * C and (A * C) +- A -> A * (C+-1).
3321 Modeled after fold_plusminus_mult_expr. */
3322 (if (!TYPE_SATURATING (type)
3323 && (!FLOAT_TYPE_P (type) || flag_associative_math))
3324 (for plusminus (plus minus)
3326 (plusminus (mult:cs@3 @0 @1) (mult:cs@4 @0 @2))
3327 (if (!ANY_INTEGRAL_TYPE_P (type)
3328 || TYPE_OVERFLOW_WRAPS (type)
3329 || (INTEGRAL_TYPE_P (type)
3330 && tree_expr_nonzero_p (@0)
3331 && expr_not_equal_to (@0, wi::minus_one (TYPE_PRECISION (type)))))
3332 (if (single_use (@3) || single_use (@4))
3333 /* If @1 +- @2 is constant require a hard single-use on either
3334 original operand (but not on both). */
3335 (mult (plusminus @1 @2) @0)
3336 (mult! (plusminus @1 @2) @0)
3338 /* We cannot generate constant 1 for fract. */
3339 (if (!ALL_FRACT_MODE_P (TYPE_MODE (type)))
3341 (plusminus @0 (mult:c@3 @0 @2))
3342 (if ((!ANY_INTEGRAL_TYPE_P (type)
3343 || TYPE_OVERFLOW_WRAPS (type)
3344 /* For @0 + @0*@2 this transformation would introduce UB
3345 (where there was none before) for @0 in [-1,0] and @2 max.
3346 For @0 - @0*@2 this transformation would introduce UB
3347 for @0 0 and @2 in [min,min+1] or @0 -1 and @2 min+1. */
3348 || (INTEGRAL_TYPE_P (type)
3349 && ((tree_expr_nonzero_p (@0)
3350 && expr_not_equal_to (@0,
3351 wi::minus_one (TYPE_PRECISION (type))))
3352 || (plusminus == PLUS_EXPR
3353 ? expr_not_equal_to (@2,
3354 wi::max_value (TYPE_PRECISION (type), SIGNED))
3355 /* Let's ignore the @0 -1 and @2 min case. */
3356 : (expr_not_equal_to (@2,
3357 wi::min_value (TYPE_PRECISION (type), SIGNED))
3358 && expr_not_equal_to (@2,
3359 wi::min_value (TYPE_PRECISION (type), SIGNED)
3362 (mult (plusminus { build_one_cst (type); } @2) @0)))
3364 (plusminus (mult:c@3 @0 @2) @0)
3365 (if ((!ANY_INTEGRAL_TYPE_P (type)
3366 || TYPE_OVERFLOW_WRAPS (type)
3367 /* For @0*@2 + @0 this transformation would introduce UB
3368 (where there was none before) for @0 in [-1,0] and @2 max.
3369 For @0*@2 - @0 this transformation would introduce UB
3370 for @0 0 and @2 min. */
3371 || (INTEGRAL_TYPE_P (type)
3372 && ((tree_expr_nonzero_p (@0)
3373 && (plusminus == MINUS_EXPR
3374 || expr_not_equal_to (@0,
3375 wi::minus_one (TYPE_PRECISION (type)))))
3376 || expr_not_equal_to (@2,
3377 (plusminus == PLUS_EXPR
3378 ? wi::max_value (TYPE_PRECISION (type), SIGNED)
3379 : wi::min_value (TYPE_PRECISION (type), SIGNED))))))
3381 (mult (plusminus @2 { build_one_cst (type); }) @0))))))
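/* Sketch of the factoring above, using unsigned so wrapping overflow
   makes the transform unconditionally valid (names invented):

     unsigned f (unsigned a, unsigned b, unsigned c)
     {
       return a * c + b * c;   // expected: (a + b) * c
     }
*/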
3384 /* Canonicalize X + (X << C) into X * (1 + (1 << C)) and
3385 (X << C1) + (X << C2) into X * ((1 << C1) + (1 << C2)). */
3387 (plus:c @0 (lshift:s @0 INTEGER_CST@1))
3388 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
3389 && tree_fits_uhwi_p (@1)
3390 && tree_to_uhwi (@1) < element_precision (type)
3391 && (INTEGRAL_TYPE_P (TREE_TYPE (@0))
3392 || optab_handler (smul_optab,
3393 TYPE_MODE (type)) != CODE_FOR_nothing))
3394 (with { tree t = type;
3395 if (!TYPE_OVERFLOW_WRAPS (t)) t = unsigned_type_for (t);
3396 wide_int w = wi::set_bit_in_zero (tree_to_uhwi (@1),
3397 element_precision (type));
3399 tree cst = wide_int_to_tree (VECTOR_TYPE_P (t) ? TREE_TYPE (t)
3401 cst = build_uniform_cst (t, cst); }
3402 (convert (mult (convert:t @0) { cst; })))))
3404 (plus (lshift:s @0 INTEGER_CST@1) (lshift:s @0 INTEGER_CST@2))
3405 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
3406 && tree_fits_uhwi_p (@1)
3407 && tree_to_uhwi (@1) < element_precision (type)
3408 && tree_fits_uhwi_p (@2)
3409 && tree_to_uhwi (@2) < element_precision (type)
3410 && (INTEGRAL_TYPE_P (TREE_TYPE (@0))
3411 || optab_handler (smul_optab,
3412 TYPE_MODE (type)) != CODE_FOR_nothing))
3413 (with { tree t = type;
3414 if (!TYPE_OVERFLOW_WRAPS (t)) t = unsigned_type_for (t);
3415 unsigned int prec = element_precision (type);
3416 wide_int w = wi::set_bit_in_zero (tree_to_uhwi (@1), prec);
3417 w += wi::set_bit_in_zero (tree_to_uhwi (@2), prec);
3418 tree cst = wide_int_to_tree (VECTOR_TYPE_P (t) ? TREE_TYPE (t)
3420 cst = build_uniform_cst (t, cst); }
3421 (convert (mult (convert:t @0) { cst; })))))
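/* The shift-add canonicalizations above in C form (made-up values):

     unsigned f (unsigned x) { return x + (x << 3); }          // -> x * 9
     unsigned g (unsigned x) { return (x << 2) + (x << 4); }   // -> x * 20
*/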
3424 /* Canonicalize (X*C1)|(X*C2) and (X*C1)^(X*C2) to (C1+C2)*X when
3425 tree_nonzero_bits allows IOR and XOR to be treated like PLUS.
3426 Likewise, handle (X<<C3) and X as legitimate variants of X*C. */
3427 (for op (bit_ior bit_xor)
3429 (op (mult:s@0 @1 INTEGER_CST@2)
3430 (mult:s@3 @1 INTEGER_CST@4))
3431 (if (INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_WRAPS (type)
3432 && (tree_nonzero_bits (@0) & tree_nonzero_bits (@3)) == 0)
3434 { wide_int_to_tree (type, wi::to_wide (@2) + wi::to_wide (@4)); })))
3436 (op:c (mult:s@0 @1 INTEGER_CST@2)
3437 (lshift:s@3 @1 INTEGER_CST@4))
3438 (if (INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_WRAPS (type)
3439 && tree_int_cst_sgn (@4) > 0
3440 && (tree_nonzero_bits (@0) & tree_nonzero_bits (@3)) == 0)
3441 (with { wide_int wone = wi::one (TYPE_PRECISION (type));
3442 wide_int c = wi::add (wi::to_wide (@2),
3443 wi::lshift (wone, wi::to_wide (@4))); }
3444 (mult @1 { wide_int_to_tree (type, c); }))))
3446 (op:c (mult:s@0 @1 INTEGER_CST@2)
3448 (if (INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_WRAPS (type)
3449 && (tree_nonzero_bits (@0) & tree_nonzero_bits (@1)) == 0)
3451 { wide_int_to_tree (type,
3452 wi::add (wi::to_wide (@2), 1)); })))
3454 (op (lshift:s@0 @1 INTEGER_CST@2)
3455 (lshift:s@3 @1 INTEGER_CST@4))
3456 (if (INTEGRAL_TYPE_P (type)
3457 && tree_int_cst_sgn (@2) > 0
3458 && tree_int_cst_sgn (@4) > 0
3459 && (tree_nonzero_bits (@0) & tree_nonzero_bits (@3)) == 0)
3460 (with { tree t = type;
3461 if (!TYPE_OVERFLOW_WRAPS (t))
3462 t = unsigned_type_for (t);
3463 wide_int wone = wi::one (TYPE_PRECISION (t));
3464 wide_int c = wi::add (wi::lshift (wone, wi::to_wide (@2)),
3465 wi::lshift (wone, wi::to_wide (@4))); }
3466 (convert (mult:t (convert:t @1) { wide_int_to_tree (t,c); })))))
3468 (op:c (lshift:s@0 @1 INTEGER_CST@2)
3470 (if (INTEGRAL_TYPE_P (type)
3471 && tree_int_cst_sgn (@2) > 0
3472 && (tree_nonzero_bits (@0) & tree_nonzero_bits (@1)) == 0)
3473 (with { tree t = type;
3474 if (!TYPE_OVERFLOW_WRAPS (t))
3475 t = unsigned_type_for (t);
3476 wide_int wone = wi::one (TYPE_PRECISION (t));
3477 wide_int c = wi::add (wi::lshift (wone, wi::to_wide (@2)), wone); }
3478 (convert (mult:t (convert:t @1) { wide_int_to_tree (t, c); }))))))
3480 /* Simplifications of MIN_EXPR, MAX_EXPR, fmin() and fmax(). */
3482 (for minmax (min max)
3486 /* For fmin() and fmax(), skip folding when both are sNaN. */
3487 (for minmax (FMIN_ALL FMAX_ALL)
3490 (if (!tree_expr_maybe_signaling_nan_p (@0))
3492 /* min(max(x,y),y) -> y. */
3494 (min:c (max:c @0 @1) @1)
3496 /* max(min(x,y),y) -> y. */
3498 (max:c (min:c @0 @1) @1)
3500 /* max(a,-a) -> abs(a). */
3502 (max:c @0 (negate @0))
3503 (if (TREE_CODE (type) != COMPLEX_TYPE
3504 && (! ANY_INTEGRAL_TYPE_P (type)
3505 || TYPE_OVERFLOW_UNDEFINED (type)))
3507 /* min(a,-a) -> -abs(a). */
3509 (min:c @0 (negate @0))
3510 (if (TREE_CODE (type) != COMPLEX_TYPE
3511 && (! ANY_INTEGRAL_TYPE_P (type)
3512 || TYPE_OVERFLOW_UNDEFINED (type)))
3515 (simplify
3516 (min @0 @1)
3517 (if (INTEGRAL_TYPE_P (type)
3518 && TYPE_MIN_VALUE (type)
3519 && operand_equal_p (@1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST))
3520 @1
3521 (if (INTEGRAL_TYPE_P (type)
3522 && TYPE_MAX_VALUE (type)
3523 && operand_equal_p (@1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST))
3524 @0)))
3525 (simplify
3526 (max @0 @1)
3528 (if (INTEGRAL_TYPE_P (type)
3529 && TYPE_MAX_VALUE (type)
3530 && operand_equal_p (@1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST))
3531 @1
3532 (if (INTEGRAL_TYPE_P (type)
3533 && TYPE_MIN_VALUE (type)
3534 && operand_equal_p (@1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST))
3535 @0)))
3537 /* max (a, a + CST) -> a + CST where CST is positive. */
3538 /* max (a, a + CST) -> a where CST is negative. */
3539 (simplify
3540 (max:c @0 (plus@2 @0 INTEGER_CST@1))
3541 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
3542 (if (tree_int_cst_sgn (@1) > 0)
3543 @2
3544 @0)))
3546 /* min (a, a + CST) -> a where CST is positive. */
3547 /* min (a, a + CST) -> a + CST where CST is negative. */
3548 (simplify
3549 (min:c @0 (plus@2 @0 INTEGER_CST@1))
3550 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
3551 (if (tree_int_cst_sgn (@1) > 0)
3552 @0
3553 @2)))
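/* e.g. with undefined signed overflow, min (a, a + 4) folds to a and
   min (a, a - 2) folds to a - 2.  */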
3555 /* Simplify min (&var[off0], &var[off1]) etc. depending on whether
3556 the addresses are known to be less, equal or greater. */
3557 (for minmax (min max)
3560 (minmax (convert1?@2 addr@0) (convert2?@3 addr@1))
3563 poly_int64 off0, off1;
3565 int equal = address_compare (cmp, TREE_TYPE (@2), @0, @1, base0, base1,
3566 off0, off1, GENERIC);
3569 (if (minmax == MIN_EXPR)
3570 (if (known_le (off0, off1))
3572 (if (known_gt (off0, off1))
3574 (if (known_ge (off0, off1))
3576 (if (known_lt (off0, off1))
3579 /* (convert (minmax ((convert (x) c)))) -> minmax (x c) if x is promoted
3580 and the outer convert demotes the expression back to x's type. */
3581 (for minmax (min max)
3582 (simplify
3583 (convert (minmax@0 (convert @1) INTEGER_CST@2))
3584 (if (INTEGRAL_TYPE_P (type)
3585 && types_match (@1, type) && int_fits_type_p (@2, type)
3586 && TYPE_SIGN (TREE_TYPE (@0)) == TYPE_SIGN (type)
3587 && TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (type))
3588 (minmax @1 (convert @2)))))
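/* e.g. (int) MIN <(long) i, 100L> with 32-bit int i demotes to
   MIN <i, 100>, since 100 fits the narrower type and the signs agree.  */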
3590 (for minmax (FMIN_ALL FMAX_ALL)
3591 /* If either argument is NaN and the other one is not sNaN, return the
3592 other one. Avoid the transformation if we get (and honor) a signalling NaN. */
3593 (simplify
3594 (minmax:c @0 REAL_CST@1)
3595 (if (real_isnan (TREE_REAL_CST_PTR (@1))
3596 && (!HONOR_SNANS (@1) || !TREE_REAL_CST (@1).signalling)
3597 && !tree_expr_maybe_signaling_nan_p (@0))
3598 @0))
3599 /* Convert fmin/fmax to MIN_EXPR/MAX_EXPR. C99 requires these
3600 functions to return the numeric arg if the other one is NaN.
3601 MIN and MAX don't honor that, so only transform if -ffinite-math-only
3602 is set. C99 doesn't require -0.0 to be handled, so we don't have to
3603 worry about it either. */
3604 (if (flag_finite_math_only)
3605 (simplify
3606 (FMIN_ALL @0 @1)
3607 (min @0 @1))
3608 (simplify
3609 (FMAX_ALL @0 @1)
3610 (max @0 @1)))
3611 /* min (-A, -B) -> -max (A, B) */
3612 (for minmax (min max FMIN_ALL FMAX_ALL)
3613 maxmin (max min FMAX_ALL FMIN_ALL)
3614 (simplify
3615 (minmax (negate:s@2 @0) (negate:s@3 @1))
3616 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
3617 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
3618 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
3619 (negate (maxmin @0 @1)))))
3620 /* MIN (~X, ~Y) -> ~MAX (X, Y)
3621 MAX (~X, ~Y) -> ~MIN (X, Y) */
3622 (for minmax (min max)
3623 maxmin (max min)
3624 (simplify
3625 (minmax (bit_not:s@2 @0) (bit_not:s@3 @1))
3626 (bit_not (maxmin @0 @1))))
3628 /* MIN (X, Y) == X -> X <= Y */
3629 (for minmax (min min max max)
3630 cmp (eq ne eq ne )
3631 out (le gt ge lt )
3632 (simplify
3633 (cmp:c (minmax:c @0 @1) @0)
3634 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)))
3635 (out @0 @1))))
3636 /* MIN (X, 5) == 0 -> X == 0
3637 MIN (X, 5) == 7 -> false */
3638 (for cmp (eq ne)
3639 (simplify
3640 (cmp (min @0 INTEGER_CST@1) INTEGER_CST@2)
3641 (if (wi::lt_p (wi::to_wide (@1), wi::to_wide (@2),
3642 TYPE_SIGN (TREE_TYPE (@0))))
3643 { constant_boolean_node (cmp == NE_EXPR, type); }
3644 (if (wi::gt_p (wi::to_wide (@1), wi::to_wide (@2),
3645 TYPE_SIGN (TREE_TYPE (@0))))
3646 (cmp @0 @2))))
3648 (simplify
3649 (cmp (max @0 INTEGER_CST@1) INTEGER_CST@2)
3650 (if (wi::gt_p (wi::to_wide (@1), wi::to_wide (@2),
3651 TYPE_SIGN (TREE_TYPE (@0))))
3652 { constant_boolean_node (cmp == NE_EXPR, type); }
3653 (if (wi::lt_p (wi::to_wide (@1), wi::to_wide (@2),
3654 TYPE_SIGN (TREE_TYPE (@0))))
3655 (cmp @0 @2)))))
3656 /* MIN (X, C1) < C2 -> X < C2 || C1 < C2 */
3657 (for minmax (min min max max min min max max )
3658 cmp (lt le gt ge gt ge lt le )
3659 comb (bit_ior bit_ior bit_ior bit_ior bit_and bit_and bit_and bit_and)
3660 (simplify
3661 (cmp (minmax @0 INTEGER_CST@1) INTEGER_CST@2)
3662 (comb (cmp @0 @2) (cmp @1 @2))))
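/* e.g. min (x, 10) < 5 becomes x < 5 || 10 < 5, whose constant half
   folds away, leaving x < 5.  */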
3664 /* X <= MAX(X, Y) -> true
3665 X > MAX(X, Y) -> false
3666 X >= MIN(X, Y) -> true
3667 X < MIN(X, Y) -> false */
3668 (for minmax (min min max max )
3669 cmp (ge lt le gt )
3670 (simplify
3671 (cmp @0 (minmax:c @0 @1))
3672 { constant_boolean_node (cmp == GE_EXPR || cmp == LE_EXPR, type); } ))
3674 /* Undo fancy ways of writing max/min or other ?: expressions, like
3675 a - ((a - b) & -(a < b)) and a - (a - b) * (a < b) into (a < b) ? b : a.
3676 People normally use ?: and that is what we actually try to optimize. */
3677 /* Transform A + (B-A)*cmp into cmp ? B : A. */
3678 (simplify
3679 (plus:c @0 (mult:c (minus @1 @0) zero_one_valued_p@2))
3680 (if (INTEGRAL_TYPE_P (type)
3681 && (GIMPLE || !TREE_SIDE_EFFECTS (@1)))
3682 (cond (convert:boolean_type_node @2) @1 @0)))
3683 /* Transform A - (A-B)*cmp into cmp ? B : A. */
3684 (simplify
3685 (minus @0 (mult:c (minus @0 @1) zero_one_valued_p@2))
3686 (if (INTEGRAL_TYPE_P (type)
3687 && (GIMPLE || !TREE_SIDE_EFFECTS (@1)))
3688 (cond (convert:boolean_type_node @2) @1 @0)))
3689 /* Transform A ^ (A^B)*cmp into cmp ? B : A. */
3690 (simplify
3691 (bit_xor:c @0 (mult:c (bit_xor:c @0 @1) zero_one_valued_p@2))
3692 (if (INTEGRAL_TYPE_P (type)
3693 && (GIMPLE || !TREE_SIDE_EFFECTS (@1)))
3694 (cond (convert:boolean_type_node @2) @1 @0)))
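/* e.g. a + (b - a) * (a < b) becomes (a < b) ? b : a, which later
   phases can recognize as max (a, b).  */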
3696 /* (x <= 0 ? -x : 0) -> max(-x, 0). */
3697 (simplify
3698 (cond (le @0 integer_zerop@1) (negate@2 @0) integer_zerop@1)
3699 (max @2 @1))
3701 /* (zero_one == 0) ? y : z <op> y -> ((typeof(y))zero_one * z) <op> y */
3702 (for op (bit_xor bit_ior plus)
3703 (simplify
3704 (cond (eq zero_one_valued_p@0
3705 integer_zerop)
3706 @1
3707 (op:c @2 @1))
3708 (if (INTEGRAL_TYPE_P (type)
3709 && TYPE_PRECISION (type) > 1
3710 && (INTEGRAL_TYPE_P (TREE_TYPE (@0))))
3711 (op (mult (convert:type @0) @2) @1))))
3713 /* (zero_one != 0) ? z <op> y : y -> ((typeof(y))zero_one * z) <op> y */
3714 (for op (bit_xor bit_ior plus)
3715 (simplify
3716 (cond (ne zero_one_valued_p@0
3717 integer_zerop)
3718 (op:c @2 @1)
3719 @1)
3720 (if (INTEGRAL_TYPE_P (type)
3721 && TYPE_PRECISION (type) > 1
3722 && (INTEGRAL_TYPE_P (TREE_TYPE (@0))))
3723 (op (mult (convert:type @0) @2) @1))))
3725 /* Simplifications of shift and rotates. */
3727 (for rotate (lrotate rrotate)
3728 (simplify
3729 (rotate integer_all_onesp@0 @1)
3730 @0))
3732 /* Optimize -1 >> x for arithmetic right shifts. */
3733 (simplify
3734 (rshift integer_all_onesp@0 @1)
3735 (if (!TYPE_UNSIGNED (type))
3736 @0))
3738 /* Optimize (x >> c) << c into x & (-1<<c). */
3739 (simplify
3740 (lshift (nop_convert? (rshift @0 INTEGER_CST@1)) @1)
3741 (if (wi::ltu_p (wi::to_wide (@1), element_precision (type)))
3742 /* It doesn't matter if the right shift is arithmetic or logical. */
3743 (bit_and (view_convert @0) (lshift { build_minus_one_cst (type); } @1))))
3745 (simplify
3746 (lshift (convert (convert@2 (rshift @0 INTEGER_CST@1))) @1)
3747 (if (wi::ltu_p (wi::to_wide (@1), element_precision (type))
3748 /* Allow intermediate conversion to integral type with whatever sign, as
3749 long as the low TYPE_PRECISION (type)
3750 - TYPE_PRECISION (TREE_TYPE (@2)) bits are preserved. */
3751 && INTEGRAL_TYPE_P (type)
3752 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
3753 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
3754 && TYPE_PRECISION (type) == TYPE_PRECISION (TREE_TYPE (@0))
3755 && (TYPE_PRECISION (TREE_TYPE (@2)) >= TYPE_PRECISION (type)
3756 || wi::geu_p (wi::to_wide (@1),
3757 TYPE_PRECISION (type)
3758 - TYPE_PRECISION (TREE_TYPE (@2)))))
3759 (bit_and (convert @0) (lshift { build_minus_one_cst (type); } @1))))
3761 /* Optimize (x << c) >> c into x & ((unsigned)-1 >> c) for unsigned
3762 types. */
3763 (simplify
3764 (rshift (lshift @0 INTEGER_CST@1) @1)
3765 (if (TYPE_UNSIGNED (type)
3766 && (wi::ltu_p (wi::to_wide (@1), element_precision (type))))
3767 (bit_and @0 (rshift { build_minus_one_cst (type); } @1))))
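/* e.g. for 32-bit unsigned x, (x << 3) >> 3 becomes x & (-1U >> 3),
   i.e. x & 0x1fffffff.  */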
3769 /* Optimize x >> x into 0 */
3770 (simplify
3771 (rshift @0 @0)
3772 { build_zero_cst (type); })
3774 (for shiftrotate (lrotate rrotate lshift rshift)
3775 (simplify
3776 (shiftrotate @0 integer_zerop)
3777 (non_lvalue @0))
3778 (simplify
3779 (shiftrotate integer_zerop@0 @1)
3780 @0)
3781 /* Prefer vector1 << scalar to vector1 << vector2
3782 if vector2 is uniform. */
3783 (for vec (VECTOR_CST CONSTRUCTOR)
3784 (simplify
3785 (shiftrotate @0 vec@1)
3786 (with { tree tem = uniform_vector_p (@1); }
3787 (if (tem)
3788 (shiftrotate @0 { tem; }))))))
3790 /* Simplify X << Y where Y's low width bits are 0 to X, as the only
3791 valid Y is 0. Similarly for X >> Y. */
3793 (for shift (lshift rshift)
3794 (simplify
3795 (shift @0 SSA_NAME@1)
3796 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1)))
3797 (with {
3798 int width = ceil_log2 (element_precision (TREE_TYPE (@0)));
3799 int prec = TYPE_PRECISION (TREE_TYPE (@1));
3800 }
3801 (if ((get_nonzero_bits (@1) & wi::mask (width, false, prec)) == 0)
3802 @0)))))
3805 /* Rewrite an LROTATE_EXPR by a constant into an
3806 RROTATE_EXPR by a new constant. */
3807 (simplify
3808 (lrotate @0 INTEGER_CST@1)
3809 (rrotate @0 { const_binop (MINUS_EXPR, TREE_TYPE (@1),
3810 build_int_cst (TREE_TYPE (@1),
3811 element_precision (type)), @1); }))
3813 /* Turn (a OP c1) OP c2 into a OP (c1+c2). */
3814 (for op (lrotate rrotate rshift lshift)
3815 (simplify
3816 (op (op @0 INTEGER_CST@1) INTEGER_CST@2)
3817 (with { unsigned int prec = element_precision (type); }
3818 (if (wi::ge_p (wi::to_wide (@1), 0, TYPE_SIGN (TREE_TYPE (@1)))
3819 && wi::lt_p (wi::to_wide (@1), prec, TYPE_SIGN (TREE_TYPE (@1)))
3820 && wi::ge_p (wi::to_wide (@2), 0, TYPE_SIGN (TREE_TYPE (@2)))
3821 && wi::lt_p (wi::to_wide (@2), prec, TYPE_SIGN (TREE_TYPE (@2))))
3822 (with { unsigned int low = (tree_to_uhwi (@1)
3823 + tree_to_uhwi (@2)); }
3824 /* Deal with a OP (c1 + c2) being undefined but (a OP c1) OP c2
3825 being well defined. */
3826 (if (low >= prec)
3827 (if (op == LROTATE_EXPR || op == RROTATE_EXPR)
3828 (op @0 { build_int_cst (TREE_TYPE (@1), low % prec); })
3829 (if (TYPE_UNSIGNED (type) || op == LSHIFT_EXPR)
3830 { build_zero_cst (type); }
3831 (op @0 { build_int_cst (TREE_TYPE (@1), prec - 1); })))
3832 (op @0 { build_int_cst (TREE_TYPE (@1), low); })))))))
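/* e.g. (x >> 2) >> 3 becomes x >> 5, while (x << 30) << 4 in a 32-bit
   unsigned type becomes 0 because the combined count reaches the
   precision.  */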
3835 /* Simplify (CST << x) & 1 to 0 if CST is even or to x == 0 if it is odd. */
3836 (simplify
3837 (bit_and (lshift INTEGER_CST@1 @0) integer_onep)
3838 (if ((wi::to_wide (@1) & 1) != 0)
3839 (convert (eq:boolean_type_node @0 { build_zero_cst (TREE_TYPE (@0)); }))
3840 { build_zero_cst (type); }))
3842 /* Simplify ((C << x) & D) != 0 where C and D are power of two constants,
3843 either to false if D is smaller (unsigned comparison) than C, or to
3844 x == log2 (D) - log2 (C). Similarly for right shifts. */
3845 (for cmp (ne eq)
3846 icmp (eq ne)
3847 (simplify
3848 (cmp (bit_and (lshift integer_pow2p@1 @0) integer_pow2p@2) integer_zerop)
3849 (with { int c1 = wi::clz (wi::to_wide (@1));
3850 int c2 = wi::clz (wi::to_wide (@2)); }
3851 (if (c1 < c2)
3852 { constant_boolean_node (cmp == NE_EXPR ? false : true, type); }
3853 (icmp @0 { build_int_cst (TREE_TYPE (@0), c1 - c2); }))))
3854 (simplify
3855 (cmp (bit_and (rshift integer_pow2p@1 @0) integer_pow2p@2) integer_zerop)
3856 (if (tree_int_cst_sgn (@1) > 0)
3857 (with { int c1 = wi::clz (wi::to_wide (@1));
3858 int c2 = wi::clz (wi::to_wide (@2)); }
3859 (if (c1 > c2)
3860 { constant_boolean_node (cmp == NE_EXPR ? false : true, type); }
3861 (icmp @0 { build_int_cst (TREE_TYPE (@0), c2 - c1); }))))))
3863 /* (CST1 << A) == CST2 -> A == ctz (CST2) - ctz (CST1)
3864 (CST1 << A) != CST2 -> A != ctz (CST2) - ctz (CST1)
3865 if CST2 != 0. */
3866 (for cmp (eq ne)
3867 (simplify
3868 (cmp (lshift INTEGER_CST@0 @1) INTEGER_CST@2)
3869 (with { int cand = wi::ctz (wi::to_wide (@2)) - wi::ctz (wi::to_wide (@0)); }
3870 (if (cand < 0
3871 || (!integer_zerop (@2)
3872 && wi::lshift (wi::to_wide (@0), cand) != wi::to_wide (@2)))
3873 { constant_boolean_node (cmp == NE_EXPR, type); }
3874 (if (!integer_zerop (@2)
3875 && wi::lshift (wi::to_wide (@0), cand) == wi::to_wide (@2))
3876 (cmp @1 { build_int_cst (TREE_TYPE (@1), cand); }))))))
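/* e.g. (2 << x) == 16 becomes x == 3, and (2 << x) == 7 folds to false
   since no shift count can produce 7 from 2.  */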
3878 /* Fold ((X << C1) & C2) cmp C3 into (X & (C2 >> C1)) cmp (C3 >> C1)
3879 ((X >> C1) & C2) cmp C3 into (X & (C2 << C1)) cmp (C3 << C1). */
3880 (for cmp (ne eq)
3881 (simplify
3882 (cmp (bit_and:s (lshift:s @0 INTEGER_CST@1) INTEGER_CST@2) INTEGER_CST@3)
3883 (if (tree_fits_shwi_p (@1)
3884 && tree_to_shwi (@1) > 0
3885 && tree_to_shwi (@1) < TYPE_PRECISION (TREE_TYPE (@0)))
3886 (if (tree_to_shwi (@1) > wi::ctz (wi::to_wide (@3)))
3887 { constant_boolean_node (cmp == NE_EXPR, type); }
3888 (with { wide_int c1 = wi::to_wide (@1);
3889 wide_int c2 = wi::lrshift (wi::to_wide (@2), c1);
3890 wide_int c3 = wi::lrshift (wi::to_wide (@3), c1); }
3891 (cmp (bit_and @0 { wide_int_to_tree (TREE_TYPE (@0), c2); })
3892 { wide_int_to_tree (TREE_TYPE (@0), c3); })))))
3893 (simplify
3894 (cmp (bit_and:s (rshift:s @0 INTEGER_CST@1) INTEGER_CST@2) INTEGER_CST@3)
3895 (if (tree_fits_shwi_p (@1)
3896 && tree_to_shwi (@1) > 0
3897 && tree_to_shwi (@1) < TYPE_PRECISION (TREE_TYPE (@0)))
3898 (with { tree t0 = TREE_TYPE (@0);
3899 unsigned int prec = TYPE_PRECISION (t0);
3900 wide_int c1 = wi::to_wide (@1);
3901 wide_int c2 = wi::to_wide (@2);
3902 wide_int c3 = wi::to_wide (@3);
3903 wide_int sb = wi::set_bit_in_zero (prec - 1, prec); }
3904 (if ((c2 & c3) != c3)
3905 { constant_boolean_node (cmp == NE_EXPR, type); }
3906 (if (TYPE_UNSIGNED (t0))
3907 (if ((c3 & wi::arshift (sb, c1 - 1)) != 0)
3908 { constant_boolean_node (cmp == NE_EXPR, type); }
3909 (cmp (bit_and @0 { wide_int_to_tree (t0, c2 << c1); })
3910 { wide_int_to_tree (t0, c3 << c1); }))
3911 (with { wide_int smask = wi::arshift (sb, c1); }
3912 (switch
3913 (if ((c2 & smask) == 0)
3914 (cmp (bit_and @0 { wide_int_to_tree (t0, c2 << c1); })
3915 { wide_int_to_tree (t0, c3 << c1); }))
3916 (if ((c3 & smask) == 0)
3917 (cmp (bit_and @0 { wide_int_to_tree (t0, (c2 << c1) | sb); })
3918 { wide_int_to_tree (t0, c3 << c1); }))
3919 (if ((c2 & smask) != (c3 & smask))
3920 { constant_boolean_node (cmp == NE_EXPR, type); })
3921 (cmp (bit_and @0 { wide_int_to_tree (t0, (c2 << c1) | sb); })
3922 { wide_int_to_tree (t0, (c3 << c1) | sb); })))))))))
3924 /* Fold (X << C1) & C2 into (X << C1) & (C2 | ((1 << C1) - 1))
3925 (X >> C1) & C2 into (X >> C1) & (C2 | ~((type) -1 >> C1))
3926 if the new mask might be further optimized. */
3927 (for shift (lshift rshift)
3928 (simplify
3929 (bit_and (convert?:s@4 (shift:s@5 (convert1?@3 @0) INTEGER_CST@1))
3930 INTEGER_CST@2)
3931 (if (tree_nop_conversion_p (TREE_TYPE (@4), TREE_TYPE (@5))
3932 && TYPE_PRECISION (type) <= HOST_BITS_PER_WIDE_INT
3933 && tree_fits_uhwi_p (@1)
3934 && tree_to_uhwi (@1) > 0
3935 && tree_to_uhwi (@1) < TYPE_PRECISION (type))
3936 (with
3937 {
3938 unsigned int shiftc = tree_to_uhwi (@1);
3939 unsigned HOST_WIDE_INT mask = TREE_INT_CST_LOW (@2);
3940 unsigned HOST_WIDE_INT newmask, zerobits = 0;
3941 tree shift_type = TREE_TYPE (@3);
3944 if (shift == LSHIFT_EXPR)
3945 zerobits = ((HOST_WIDE_INT_1U << shiftc) - 1);
3946 else if (shift == RSHIFT_EXPR
3947 && type_has_mode_precision_p (shift_type))
3949 prec = TYPE_PRECISION (TREE_TYPE (@3));
3951 /* See if more bits can be proven as zero because of
3952 zero extension. */
3953 if (@3 != @0
3954 && TYPE_UNSIGNED (TREE_TYPE (@0)))
3956 tree inner_type = TREE_TYPE (@0);
3957 if (type_has_mode_precision_p (inner_type)
3958 && TYPE_PRECISION (inner_type) < prec)
3960 prec = TYPE_PRECISION (inner_type);
3961 /* See if we can shorten the right shift. */
3962 if (shiftc < prec)
3963 shift_type = inner_type;
3964 /* Otherwise X >> C1 is all zeros, so we'll optimize
3965 it into (X, 0) later on by making sure zerobits
3966 is all ones. */
3967 }
3968 }
3969 zerobits = HOST_WIDE_INT_M1U;
3970 if (shiftc < prec)
3971 {
3972 zerobits >>= HOST_BITS_PER_WIDE_INT - shiftc;
3973 zerobits <<= prec - shiftc;
3974 }
3975 /* For arithmetic shift, if the sign bit could be set, zerobits
3976 can actually contain sign bits, so no transformation is
3977 possible, unless MASK masks them all away. In that
3978 case the shift needs to be converted into logical shift. */
3979 if (!TYPE_UNSIGNED (TREE_TYPE (@3))
3980 && prec == TYPE_PRECISION (TREE_TYPE (@3)))
3982 if ((mask & zerobits) == 0)
3983 shift_type = unsigned_type_for (TREE_TYPE (@3));
3989 /* ((X << 16) & 0xff00) is (X, 0). */
3990 (if ((mask & zerobits) == mask)
3991 { build_int_cst (type, 0); }
3992 (with { newmask = mask | zerobits; }
3993 (if (newmask != mask && (newmask & (newmask + 1)) == 0)
3996 /* Only do the transformation if NEWMASK is some integer
3997 mode's mask. */
3998 for (prec = BITS_PER_UNIT;
3999 prec < HOST_BITS_PER_WIDE_INT; prec <<= 1)
4000 if (newmask == (HOST_WIDE_INT_1U << prec) - 1)
4001 break;
4002 }
4003 (if (prec < HOST_BITS_PER_WIDE_INT
4004 || newmask == HOST_WIDE_INT_M1U)
4005 (with
4006 { tree newmaskt = build_int_cst_type (TREE_TYPE (@2), newmask); }
4007 (if (!tree_int_cst_equal (newmaskt, @2))
4008 (if (shift_type != TREE_TYPE (@3))
4009 (bit_and (convert (shift:shift_type (convert @3) @1)) { newmaskt; })
4010 (bit_and @4 { newmaskt; })))))))))))))
4012 /* ((1 << n) & M) != 0 -> n == log2 (M) */
4013 (for cmp (ne eq)
4014 icmp (eq ne)
4015 (simplify
4016 (cmp
4017 (bit_and
4018 (nop_convert? (lshift integer_onep @0)) integer_pow2p@1) integer_zerop)
4019 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)))
4020 (icmp @0 { wide_int_to_tree (TREE_TYPE (@0),
4021 wi::exact_log2 (wi::to_wide (@1))); }))))
4023 /* Fold (X {&,^,|} C2) << C1 into (X << C1) {&,^,|} (C2 << C1)
4024 (X {&,^,|} C2) >> C1 into (X >> C1) & (C2 >> C1). */
4025 (for shift (lshift rshift)
4026 (for bit_op (bit_and bit_xor bit_ior)
4027 (simplify
4028 (shift (convert?:s (bit_op:s @0 INTEGER_CST@2)) INTEGER_CST@1)
4029 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
4030 (with { tree mask = int_const_binop (shift, fold_convert (type, @2), @1); }
4032 (bit_op (shift (convert @0) @1) { mask; })))))))
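/* e.g. (x & 5) << 2 becomes (x << 2) & 20, exposing the shift for
   further combining.  */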
4034 /* ~(~X >> Y) -> X >> Y (for arithmetic shift). */
4035 (simplify
4036 (bit_not (convert1?:s (rshift:s (convert2?@0 (bit_not @1)) @2)))
4037 (if (!TYPE_UNSIGNED (TREE_TYPE (@0))
4038 && (element_precision (TREE_TYPE (@0))
4039 <= element_precision (TREE_TYPE (@1))
4040 || !TYPE_UNSIGNED (TREE_TYPE (@1))))
4041 (with
4042 { tree shift_type = TREE_TYPE (@0); }
4043 (convert (rshift (convert:shift_type @1) @2)))))
4045 /* ~(~X >>r Y) -> X >>r Y
4046 ~(~X <<r Y) -> X <<r Y */
4047 (for rotate (lrotate rrotate)
4048 (simplify
4049 (bit_not (convert1?:s (rotate:s (convert2?@0 (bit_not @1)) @2)))
4050 (if ((element_precision (TREE_TYPE (@0))
4051 <= element_precision (TREE_TYPE (@1))
4052 || !TYPE_UNSIGNED (TREE_TYPE (@1)))
4053 && (element_precision (type) <= element_precision (TREE_TYPE (@0))
4054 || !TYPE_UNSIGNED (TREE_TYPE (@0))))
4055 (with
4056 { tree rotate_type = TREE_TYPE (@0); }
4057 (convert (rotate (convert:rotate_type @1) @2))))))
4059 (for cmp (eq ne)
4060 (for rotate (lrotate rrotate)
4061 invrot (rrotate lrotate)
4062 /* (X >>r Y) cmp (Z >>r Y) may simplify to X cmp Y. */
4063 (simplify
4064 (cmp (rotate @1 @0) (rotate @2 @0))
4065 (cmp @1 @2))
4066 /* (X >>r C1) cmp C2 may simplify to X cmp C3. */
4067 (simplify
4068 (cmp (rotate @0 INTEGER_CST@1) INTEGER_CST@2)
4069 (cmp @0 { const_binop (invrot, TREE_TYPE (@0), @2, @1); }))
4070 /* (X >>r Y) cmp C where C is 0 or ~0, may simplify to X cmp C. */
4071 (simplify
4072 (cmp (rotate @0 @1) INTEGER_CST@2)
4073 (if (integer_zerop (@2) || integer_all_onesp (@2))
4074 (cmp @0 @2)))))
4076 /* Narrow a lshift by constant. */
4077 (simplify
4078 (convert (lshift:s@0 @1 INTEGER_CST@2))
4079 (if (INTEGRAL_TYPE_P (type)
4080 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
4081 && !integer_zerop (@2)
4082 && TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@0)))
4083 (if (TYPE_PRECISION (type) == TYPE_PRECISION (TREE_TYPE (@0))
4084 || wi::ltu_p (wi::to_wide (@2), TYPE_PRECISION (type)))
4085 (lshift (convert @1) @2)
4086 (if (wi::ltu_p (wi::to_wide (@2), TYPE_PRECISION (TREE_TYPE (@0))))
4087 { build_zero_cst (type); }))))
4089 /* Simplifications of conversions. */
4091 /* Basic strip-useless-type-conversions / strip_nops. */
4092 (for cvt (convert view_convert float fix_trunc)
4093 (simplify
4094 (cvt @0)
4095 (if ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@0)))
4096 || (GENERIC && type == TREE_TYPE (@0)))
4097 @0)))
4099 /* Contract view-conversions. */
4100 (simplify
4101 (view_convert (view_convert @0))
4102 (view_convert @0))
4104 /* For integral conversions with the same precision or pointer
4105 conversions use a NOP_EXPR instead. */
4106 (simplify
4107 (view_convert @0)
4108 (if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
4109 && (INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
4110 && TYPE_PRECISION (type) == TYPE_PRECISION (TREE_TYPE (@0)))
4111 (convert @0)))
4113 /* Strip inner integral conversions that do not change precision or size, or
4114 zero-extend while keeping the same size (for bool-to-char). */
4115 (simplify
4116 (view_convert (convert@0 @1))
4117 (if ((INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
4118 && (INTEGRAL_TYPE_P (TREE_TYPE (@1)) || POINTER_TYPE_P (TREE_TYPE (@1)))
4119 && TYPE_SIZE (TREE_TYPE (@0)) == TYPE_SIZE (TREE_TYPE (@1))
4120 && (TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1))
4121 || (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (TREE_TYPE (@1))
4122 && TYPE_UNSIGNED (TREE_TYPE (@1)))))
4123 (view_convert @1)))
4125 /* Simplify a view-converted empty or single-element constructor. */
4126 (simplify
4127 (view_convert CONSTRUCTOR@0)
4128 (with
4129 { tree ctor = (TREE_CODE (@0) == SSA_NAME
4130 ? gimple_assign_rhs1 (SSA_NAME_DEF_STMT (@0)) : @0); }
4131 (switch
4132 (if (CONSTRUCTOR_NELTS (ctor) == 0)
4133 { build_zero_cst (type); })
4134 (if (CONSTRUCTOR_NELTS (ctor) == 1
4135 && VECTOR_TYPE_P (TREE_TYPE (ctor))
4136 && operand_equal_p (TYPE_SIZE (type),
4137 TYPE_SIZE (TREE_TYPE
4138 (CONSTRUCTOR_ELT (ctor, 0)->value))))
4139 (view_convert { CONSTRUCTOR_ELT (ctor, 0)->value; })))))
4141 /* Re-association barriers around constants and other re-association
4142 barriers can be removed. */
4143 (simplify
4144 (paren CONSTANT_CLASS_P@0)
4145 @0)
4146 (simplify
4147 (paren (paren@1 @0))
4148 @1)
4150 /* Handle cases of two conversions in a row. */
4151 (for ocvt (convert float fix_trunc)
4152 (for icvt (convert float)
4153 (simplify
4154 (ocvt (icvt@1 @0))
4155 (with
4156 {
4157 tree inside_type = TREE_TYPE (@0);
4158 tree inter_type = TREE_TYPE (@1);
4159 int inside_int = INTEGRAL_TYPE_P (inside_type);
4160 int inside_ptr = POINTER_TYPE_P (inside_type);
4161 int inside_float = FLOAT_TYPE_P (inside_type);
4162 int inside_vec = VECTOR_TYPE_P (inside_type);
4163 unsigned int inside_prec = element_precision (inside_type);
4164 int inside_unsignedp = TYPE_UNSIGNED (inside_type);
4165 int inter_int = INTEGRAL_TYPE_P (inter_type);
4166 int inter_ptr = POINTER_TYPE_P (inter_type);
4167 int inter_float = FLOAT_TYPE_P (inter_type);
4168 int inter_vec = VECTOR_TYPE_P (inter_type);
4169 unsigned int inter_prec = element_precision (inter_type);
4170 int inter_unsignedp = TYPE_UNSIGNED (inter_type);
4171 int final_int = INTEGRAL_TYPE_P (type);
4172 int final_ptr = POINTER_TYPE_P (type);
4173 int final_float = FLOAT_TYPE_P (type);
4174 int final_vec = VECTOR_TYPE_P (type);
4175 unsigned int final_prec = element_precision (type);
4176 int final_unsignedp = TYPE_UNSIGNED (type);
4177 }
4178 (switch
4179 /* In addition to the cases of two conversions in a row
4180 handled below, if we are converting something to its own
4181 type via an object of identical or wider precision, neither
4182 conversion is needed. */
4183 (if (((GIMPLE && useless_type_conversion_p (type, inside_type))
4184 || (GENERIC
4185 && TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (inside_type)))
4186 && (((inter_int || inter_ptr) && final_int)
4187 || (inter_float && final_float))
4188 && inter_prec >= final_prec)
4189 (ocvt @0))
4191 /* Likewise, if the intermediate and initial types are either both
4192 float or both integer, we don't need the middle conversion if the
4193 former is wider than the latter and doesn't change the signedness
4194 (for integers). Avoid this if the final type is a pointer since
4195 then we sometimes need the middle conversion. */
4196 (if (((inter_int && inside_int) || (inter_float && inside_float))
4197 && (final_int || final_float)
4198 && inter_prec >= inside_prec
4199 && (inter_float || inter_unsignedp == inside_unsignedp))
4200 (ocvt @0))
4202 /* If we have a sign-extension of a zero-extended value, we can
4203 replace that by a single zero-extension. Likewise if the
4204 final conversion does not change precision we can drop the
4205 intermediate conversion. */
4206 (if (inside_int && inter_int && final_int
4207 && ((inside_prec < inter_prec && inter_prec < final_prec
4208 && inside_unsignedp && !inter_unsignedp)
4209 || final_prec == inter_prec))
4210 (ocvt @0))
4212 /* Two conversions in a row are not needed unless:
4213 - some conversion is floating-point (overstrict for now), or
4214 - some conversion is a vector (overstrict for now), or
4215 - the intermediate type is narrower than both initial and
4216 final, or
4217 - the intermediate type and innermost type differ in signedness,
4218 and the outermost type is wider than the intermediate, or
4219 - the initial type is a pointer type and the precisions of the
4220 intermediate and final types differ, or
4221 - the final type is a pointer type and the precisions of the
4222 initial and intermediate types differ. */
4223 (if (! inside_float && ! inter_float && ! final_float
4224 && ! inside_vec && ! inter_vec && ! final_vec
4225 && (inter_prec >= inside_prec || inter_prec >= final_prec)
4226 && ! (inside_int && inter_int
4227 && inter_unsignedp != inside_unsignedp
4228 && inter_prec < final_prec)
4229 && ((inter_unsignedp && inter_prec > inside_prec)
4230 == (final_unsignedp && final_prec > inter_prec))
4231 && ! (inside_ptr && inter_prec != final_prec)
4232 && ! (final_ptr && inside_prec != inter_prec))
4233 (ocvt @0))
4235 /* A truncation to an unsigned type (a zero-extension) should be
4236 canonicalized as bitwise and of a mask. */
4237 (if (GIMPLE /* PR70366: doing this in GENERIC breaks -Wconversion. */
4238 && final_int && inter_int && inside_int
4239 && final_prec == inside_prec
4240 && final_prec > inter_prec
4241 && ! inter_unsignedp)
4242 (convert (bit_and @0 { wide_int_to_tree
4243 (inside_type,
4244 wi::mask (inter_prec, false,
4245 TYPE_PRECISION (inside_type))); })))
4247 /* If we are converting an integer to a floating-point that can
4248 represent it exactly and back to an integer, we can skip the
4249 floating-point conversion. */
4250 (if (GIMPLE /* PR66211 */
4251 && inside_int && inter_float && final_int &&
4252 (unsigned) significand_size (TYPE_MODE (inter_type))
4253 >= inside_prec - !inside_unsignedp)
4254 (ocvt @0)))))))
4256 /* (float_type)(integer_type) x -> trunc (x) if the type of x matches
4257 float_type. Only do the transformation if we do not need to preserve
4258 trapping behaviour, so require !flag_trapping_math. */
4260 (simplify
4261 (float (fix_trunc @0))
4262 (if (!flag_trapping_math
4263 && types_match (type, TREE_TYPE (@0))
4264 && direct_internal_fn_supported_p (IFN_TRUNC, type,
4265 OPTIMIZE_FOR_BOTH))
4266 (IFN_TRUNC @0)))
4269 /* If we have a narrowing conversion to an integral type that is fed by a
4270 BIT_AND_EXPR, we might be able to remove the BIT_AND_EXPR if it merely
4271 masks off bits outside the final type (and nothing else). */
4272 (simplify
4273 (convert (bit_and @0 INTEGER_CST@1))
4274 (if (INTEGRAL_TYPE_P (type)
4275 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
4276 && TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@0))
4277 && operand_equal_p (@1, build_low_bits_mask (TREE_TYPE (@1),
4278 TYPE_PRECISION (type)), 0))
4279 (convert @0)))
4282 /* (X /[ex] A) * A -> X. */
4283 (simplify
4284 (mult (convert1? (exact_div @0 @@1)) (convert2? @1))
4285 (convert @0))
4287 /* Simplify (A / B) * B + (A % B) -> A. */
4288 (for div (trunc_div ceil_div floor_div round_div)
4289 mod (trunc_mod ceil_mod floor_mod round_mod)
4290 (simplify
4291 (plus:c (mult:c (div @0 @1) @1) (mod @0 @1))
4292 @0))
4294 /* x / y * y == x -> x % y == 0. */
4295 (simplify
4296 (eq:c (mult:c (trunc_div:s @0 @1) @1) @0)
4297 (if (TREE_CODE (TREE_TYPE (@0)) != COMPLEX_TYPE)
4298 (eq (trunc_mod @0 @1) { build_zero_cst (TREE_TYPE (@0)); })))
4300 /* ((X /[ex] A) +- B) * A --> X +- A * B. */
4301 (for op (plus minus)
4302 (simplify
4303 (mult (convert1? (op (convert2? (exact_div @0 INTEGER_CST@@1)) INTEGER_CST@2)) @1)
4304 (if (tree_nop_conversion_p (type, TREE_TYPE (@2))
4305 && tree_nop_conversion_p (TREE_TYPE (@0), TREE_TYPE (@2)))
4306 (with
4307 {
4308 wi::overflow_type overflow;
4309 wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
4310 TYPE_SIGN (type), &overflow);
4311 }
4312 (if (types_match (type, TREE_TYPE (@2))
4313 && types_match (TREE_TYPE (@0), TREE_TYPE (@2)) && !overflow)
4314 (op @0 { wide_int_to_tree (type, mul); })
4315 (with { tree utype = unsigned_type_for (type); }
4316 (convert (op (convert:utype @0)
4317 (mult (convert:utype @1) (convert:utype @2))))))))))
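/* e.g. ((x /[ex] 4) + 1) * 4 becomes x + 4; the exact division
   guarantees the multiplication reconstructs x without rounding.  */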
4319 /* Canonicalization of binary operations. */
4321 /* Convert X + -C into X - C. */
4322 (simplify
4323 (plus @0 REAL_CST@1)
4324 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
4325 (with { tree tem = const_unop (NEGATE_EXPR, type, @1); }
4326 (if (!TREE_OVERFLOW (tem) || !flag_trapping_math)
4327 (minus @0 { tem; })))))
4329 /* Convert x+x into x*2. */
4330 (simplify
4331 (plus @0 @0)
4332 (if (SCALAR_FLOAT_TYPE_P (type))
4333 (mult @0 { build_real (type, dconst2); })
4334 (if (INTEGRAL_TYPE_P (type))
4335 (mult @0 { build_int_cst (type, 2); }))))
4337 /* 0 - X -> -X. */
4338 (simplify
4339 (minus integer_zerop @1)
4340 (negate @1))
4341 (simplify
4342 (pointer_diff integer_zerop @1)
4343 (negate (convert @1)))
4345 /* (ARG0 - ARG1) is the same as (-ARG1 + ARG0). So check whether
4346 ARG0 is zero and X + ARG0 reduces to X, since that would mean
4347 (-ARG1 + ARG0) reduces to -ARG1. */
4348 (simplify
4349 (minus real_zerop@0 @1)
4350 (if (fold_real_zero_addition_p (type, @1, @0, 0))
4351 (negate @1)))
4353 /* Transform x * -1 into -x. */
4354 (simplify
4355 (mult @0 integer_minus_onep)
4356 (negate @0))
4358 /* Reassociate (X * CST) * Y to (X * Y) * CST. This does not introduce
4359 signed overflow for CST != 0 && CST != -1. */
4360 (simplify
4361 (mult:c (mult:s@3 @0 INTEGER_CST@1) @2)
4362 (if (TREE_CODE (@2) != INTEGER_CST
4363 && single_use (@3)
4364 && !integer_zerop (@1) && !integer_minus_onep (@1))
4365 (mult (mult @0 @2) @1)))
4367 /* True if we can easily extract the real and imaginary parts of a complex
4368 number. */
4369 (match compositional_complex
4370 (convert? (complex @0 @1)))
4372 /* COMPLEX_EXPR and REALPART/IMAGPART_EXPR cancellations. */
4373 (simplify
4374 (complex (realpart @0) (imagpart @0))
4375 @0)
4376 (simplify
4377 (realpart (complex @0 @1))
4378 @0)
4379 (simplify
4380 (imagpart (complex @0 @1))
4381 @1)
4383 /* Sometimes we only care about half of a complex expression. */
4384 (simplify
4385 (realpart (convert?:s (conj:s @0)))
4386 (convert (realpart @0)))
4387 (simplify
4388 (imagpart (convert?:s (conj:s @0)))
4389 (convert (negate (imagpart @0))))
4390 (for part (realpart imagpart)
4391 (for op (plus minus)
4392 (simplify
4393 (part (convert?:s@2 (op:s @0 @1)))
4394 (convert (op (part @0) (part @1))))))
4395 (simplify
4396 (realpart (convert?:s (CEXPI:s @0)))
4397 (convert (COS @0)))
4398 (simplify
4399 (imagpart (convert?:s (CEXPI:s @0)))
4400 (convert (SIN @0)))
4402 /* conj(conj(x)) -> x */
4403 (simplify
4404 (conj (convert? (conj @0)))
4405 (if (tree_nop_conversion_p (TREE_TYPE (@0), type))
4406 (convert @0)))
4408 /* conj({x,y}) -> {x,-y} */
4409 (simplify
4410 (conj (convert?:s (complex:s @0 @1)))
4411 (with { tree itype = TREE_TYPE (type); }
4412 (complex (convert:itype @0) (negate (convert:itype @1)))))
4414 /* BSWAP simplifications, transforms checked by gcc.dg/builtin-bswap-8.c. */
4415 (for bswap (BSWAP)
4416 (simplify
4417 (bswap (bswap @0))
4418 @0)
4419 (simplify
4420 (bswap (bit_not (bswap @0)))
4421 (bit_not @0))
4422 (for bitop (bit_xor bit_ior bit_and)
4423 (simplify
4424 (bswap (bitop:c (bswap @0) @1))
4425 (bitop @0 (bswap @1))))
4426 (for cmp (eq ne)
4427 (simplify
4428 (cmp (bswap@2 @0) (bswap @1))
4429 (with { tree ctype = TREE_TYPE (@2); }
4430 (cmp (convert:ctype @0) (convert:ctype @1))))
4431 (simplify
4432 (cmp (bswap @0) INTEGER_CST@1)
4433 (with { tree ctype = TREE_TYPE (@1); }
4434 (cmp (convert:ctype @0) (bswap! @1)))))
4435 /* (bswap(x) >> C1) & C2 can sometimes be simplified to (x >> C3) & C2. */
4436 (simplify
4437 (bit_and (convert1? (rshift@0 (convert2? (bswap@4 @1)) INTEGER_CST@2))
4438 INTEGER_CST@3)
4439 (if (BITS_PER_UNIT == 8
4440 && tree_fits_uhwi_p (@2)
4441 && tree_fits_uhwi_p (@3))
4442 (with
4443 {
4444 unsigned HOST_WIDE_INT prec = TYPE_PRECISION (TREE_TYPE (@4));
4445 unsigned HOST_WIDE_INT bits = tree_to_uhwi (@2);
4446 unsigned HOST_WIDE_INT mask = tree_to_uhwi (@3);
4447 unsigned HOST_WIDE_INT lo = bits & 7;
4448 unsigned HOST_WIDE_INT hi = bits - lo;
4451 && mask < (256u>>lo)
4452 && bits < TYPE_PRECISION (TREE_TYPE(@0)))
4453 (with { unsigned HOST_WIDE_INT ns = (prec - (hi + 8)) + lo; }
4455 (bit_and (convert @1) @3)
4456 (with
4457 {
4458 tree utype = unsigned_type_for (TREE_TYPE (@1));
4459 tree nst = build_int_cst (integer_type_node, ns);
4460 }
4461 (bit_and (convert (rshift:utype (convert:utype @1) {nst;})) @3))))))))
4462 /* bswap(x) >> C1 can sometimes be simplified to (T)x >> C2. */
4463 (simplify
4464 (rshift (convert? (bswap@2 @0)) INTEGER_CST@1)
4465 (if (BITS_PER_UNIT == 8
4466 && CHAR_TYPE_SIZE == 8
4467 && tree_fits_uhwi_p (@1))
4468 (with
4469 {
4470 unsigned HOST_WIDE_INT prec = TYPE_PRECISION (TREE_TYPE (@2));
4471 unsigned HOST_WIDE_INT bits = tree_to_uhwi (@1);
4472 /* If the bswap was extended before the original shift, this
4473 byte (shift) has the sign of the extension, not the sign of
4474 the original shift. */
4475 tree st = TYPE_PRECISION (type) > prec ? TREE_TYPE (@2) : type;
4476 }
4477 /* Special case: logical right shift of sign-extended bswap.
4478 (unsigned)(short)bswap16(x)>>12 is (unsigned)((short)x<<8)>>12. */
4479 (if (TYPE_PRECISION (type) > prec
4480 && !TYPE_UNSIGNED (TREE_TYPE (@2))
4481 && TYPE_UNSIGNED (type)
4482 && bits < prec && bits + 8 >= prec)
4483 (with { tree nst = build_int_cst (integer_type_node, prec - 8); }
4484 (rshift (convert (lshift:st (convert:st @0) {nst;})) @1))
4485 (if (bits + 8 == prec)
4486 (if (TYPE_UNSIGNED (st))
4487 (convert (convert:unsigned_char_type_node @0))
4488 (convert (convert:signed_char_type_node @0)))
4489 (if (bits < prec && bits + 8 > prec)
4490 (with
4491 {
4492 tree nst = build_int_cst (integer_type_node, bits & 7);
4493 tree bt = TYPE_UNSIGNED (st) ? unsigned_char_type_node
4494 : signed_char_type_node;
4495 }
4496 (convert (rshift:bt (convert:bt @0) {nst;})))))))))
4497 /* bswap(x) & C1 can sometimes be simplified to (x >> C2) & C1. */
4498 (simplify
4499 (bit_and (convert? (bswap@2 @0)) INTEGER_CST@1)
4500 (if (BITS_PER_UNIT == 8
4501 && tree_fits_uhwi_p (@1)
4502 && tree_to_uhwi (@1) < 256)
4503 (with
4504 {
4505 unsigned HOST_WIDE_INT prec = TYPE_PRECISION (TREE_TYPE (@2));
4506 tree utype = unsigned_type_for (TREE_TYPE (@0));
4507 tree nst = build_int_cst (integer_type_node, prec - 8);
4508 }
4509 (bit_and (convert (rshift:utype (convert:utype @0) {nst;})) @1)))))
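/* e.g. for a 32-bit x, bswap32 (x) & 0xff becomes (x >> 24) & 0xff:
   the low byte of the byte-swapped value is the original high byte.  */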
4512 /* Combine COND_EXPRs and VEC_COND_EXPRs. */
4514 /* Simplify constant conditions.
4515 Only optimize constant conditions when the selected branch
4516 has the same type as the COND_EXPR. This avoids optimizing
4517 away "c ? x : throw", where the throw has a void type.
4518 Note that we cannot throw away the fold-const.cc variant nor
4519 this one as we depend on doing this transform before possibly
4520 A ? B : B -> B triggers and the fold-const.cc one can optimize
4521 0 ? A : B to B even if A has side-effects. Something
4522 genmatch cannot handle. */
4523 (simplify
4524 (cond INTEGER_CST@0 @1 @2)
4525 (if (integer_zerop (@0))
4526 (if (!VOID_TYPE_P (TREE_TYPE (@2)) || VOID_TYPE_P (type))
4527 @2)
4528 (if (!VOID_TYPE_P (TREE_TYPE (@1)) || VOID_TYPE_P (type))
4529 @1)))
4530 (simplify
4531 (vec_cond VECTOR_CST@0 @1 @2)
4532 (if (integer_all_onesp (@0))
4533 @1
4534 (if (integer_zerop (@0))
4535 @2)))
4537 /* Sink unary operations to branches, but only if we do fold both. */
4538 (for op (negate bit_not abs absu)
4539 (simplify
4540 (op (vec_cond:s @0 @1 @2))
4541 (vec_cond @0 (op! @1) (op! @2))))
4543 /* Sink binary operation to branches, but only if we can fold it. */
4544 (for op (tcc_comparison plus minus mult bit_and bit_ior bit_xor
4545 lshift rshift rdiv trunc_div ceil_div floor_div round_div
4546 trunc_mod ceil_mod floor_mod round_mod min max)
4547 /* (c ? a : b) op (c ? d : e) --> c ? (a op d) : (b op e) */
4548 (simplify
4549 (op (vec_cond:s @0 @1 @2) (vec_cond:s @0 @3 @4))
4550 (vec_cond @0 (op! @1 @3) (op! @2 @4)))
4552 /* (c ? a : b) op d --> c ? (a op d) : (b op d) */
4553 (simplify
4554 (op (vec_cond:s @0 @1 @2) @3)
4555 (vec_cond @0 (op! @1 @3) (op! @2 @3)))
4556 (simplify
4557 (op @3 (vec_cond:s @0 @1 @2))
4558 (vec_cond @0 (op! @3 @1) (op! @3 @2))))
4561 (match (nop_atomic_bit_test_and_p @0 @1 @4)
4562 (bit_and (convert?@4 (ATOMIC_FETCH_OR_XOR_N @2 INTEGER_CST@0 @3))
4565 int ibit = tree_log2 (@0);
4566 int ibit2 = tree_log2 (@1);
4570 && TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@0))))))
4572 (match (nop_atomic_bit_test_and_p @0 @1 @3)
4573 (bit_and (convert?@3 (SYNC_FETCH_OR_XOR_N @2 INTEGER_CST@0))
4576 int ibit = tree_log2 (@0);
4577 int ibit2 = tree_log2 (@1);
4581 && TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@0))))))
4583 (match (nop_atomic_bit_test_and_p @0 @0 @4)
4586 (ATOMIC_FETCH_OR_XOR_N @2 (nop_convert? (lshift@0 integer_onep@5 @6)) @3))
4588 (if (TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@0)))))
4590 (match (nop_atomic_bit_test_and_p @0 @0 @4)
4593 (SYNC_FETCH_OR_XOR_N @2 (nop_convert? (lshift@0 integer_onep@3 @5))))
4595 (if (TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@0)))))
4597 (match (nop_atomic_bit_test_and_p @0 @1 @3)
4598 (bit_and@4 (convert?@3 (ATOMIC_FETCH_AND_N @2 INTEGER_CST@0 @5))
4601 int ibit = wi::exact_log2 (wi::zext (wi::bit_not (wi::to_wide (@0)),
4602 TYPE_PRECISION(type)));
4603 int ibit2 = tree_log2 (@1);
4607 && TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@0))))))
4609 (match (nop_atomic_bit_test_and_p @0 @1 @3)
4611 (convert?@3 (SYNC_FETCH_AND_AND_N @2 INTEGER_CST@0))
4614 int ibit = wi::exact_log2 (wi::zext (wi::bit_not (wi::to_wide (@0)),
4615 TYPE_PRECISION(type)));
4616 int ibit2 = tree_log2 (@1);
4620 && TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@0))))))
4622 (match (nop_atomic_bit_test_and_p @4 @0 @3)
4625 (ATOMIC_FETCH_AND_N @2 (nop_convert?@4 (bit_not (lshift@0 integer_onep@6 @7))) @5))
4627 (if (TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@4)))))
4629 (match (nop_atomic_bit_test_and_p @4 @0 @3)
4632 (SYNC_FETCH_AND_AND_N @2 (nop_convert?@4 (bit_not (lshift@0 integer_onep@6 @7)))))
4634 (if (TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@4)))))
4638 /* (v ? w : 0) ? a : b is just (v & w) ? a : b
4639 Currently disabled after pass lvec because ARM understands
4640 VEC_COND_EXPR<v==w,-1,0> but not a plain v==w fed to BIT_IOR_EXPR. */
4641 (simplify
4642 (vec_cond (vec_cond:s @0 @3 integer_zerop) @1 @2)
4643 (if (optimize_vectors_before_lowering_p () && types_match (@0, @3))
4644 (vec_cond (bit_and @0 @3) @1 @2)))
4645 (simplify
4646 (vec_cond (vec_cond:s @0 integer_all_onesp @3) @1 @2)
4647 (if (optimize_vectors_before_lowering_p () && types_match (@0, @3))
4648 (vec_cond (bit_ior @0 @3) @1 @2)))
4649 (simplify
4650 (vec_cond (vec_cond:s @0 integer_zerop @3) @1 @2)
4651 (if (optimize_vectors_before_lowering_p () && types_match (@0, @3))
4652 (vec_cond (bit_ior @0 (bit_not @3)) @2 @1)))
4653 (simplify
4654 (vec_cond (vec_cond:s @0 @3 integer_all_onesp) @1 @2)
4655 (if (optimize_vectors_before_lowering_p () && types_match (@0, @3))
4656 (vec_cond (bit_and @0 (bit_not @3)) @2 @1)))
4658 /* c1 ? c2 ? a : b : b --> (c1 & c2) ? a : b */
4659 (simplify
4660 (vec_cond @0 (vec_cond:s @1 @2 @3) @3)
4661 (if (optimize_vectors_before_lowering_p () && types_match (@0, @1))
4662 (vec_cond (bit_and @0 @1) @2 @3)))
4663 (simplify
4664 (vec_cond @0 @2 (vec_cond:s @1 @2 @3))
4665 (if (optimize_vectors_before_lowering_p () && types_match (@0, @1))
4666 (vec_cond (bit_ior @0 @1) @2 @3)))
4667 (simplify
4668 (vec_cond @0 (vec_cond:s @1 @2 @3) @2)
4669 (if (optimize_vectors_before_lowering_p () && types_match (@0, @1))
4670 (vec_cond (bit_ior (bit_not @0) @1) @2 @3)))
4671 (simplify
4672 (vec_cond @0 @3 (vec_cond:s @1 @2 @3))
4673 (if (optimize_vectors_before_lowering_p () && types_match (@0, @1))
4674 (vec_cond (bit_and (bit_not @0) @1) @2 @3)))
4676 /* Canonicalize mask ? { 0, ... } : { -1, ...} to ~mask if the mask
4677 types are compatible. */
4678 (simplify
4679 (vec_cond @0 VECTOR_CST@1 VECTOR_CST@2)
4680 (if (VECTOR_BOOLEAN_TYPE_P (type)
4681 && types_match (type, TREE_TYPE (@0)))
4682 (if (integer_zerop (@1) && integer_all_onesp (@2))
4683 (bit_not @0)
4684 (if (integer_all_onesp (@1) && integer_zerop (@2))
4685 @0)))
4687 /* A few simplifications of "a ? CST1 : CST2". */
4688 /* NOTE: Only do this on gimple as the if-chain-to-switch
4689 optimization depends on the gimple to have if statements in it. */
4690 #if GIMPLE
4691 (simplify
4692 (cond @0 INTEGER_CST@1 INTEGER_CST@2)
4693 (switch
4694 (if (integer_zerop (@2))
4696 /* a ? 1 : 0 -> a if 0 and 1 are integral types. */
4697 (if (integer_onep (@1))
4698 (convert (convert:boolean_type_node @0)))
4699 /* a ? powerof2cst : 0 -> a << (log2(powerof2cst)) */
4700 (if (INTEGRAL_TYPE_P (type) && integer_pow2p (@1))
4701 (with {
4702 tree shift = build_int_cst (integer_type_node, tree_log2 (@1));
4703 }
4704 (lshift (convert (convert:boolean_type_node @0)) { shift; })))
4705 /* a ? -1 : 0 -> -a. No need to check the TYPE_PRECISION not being 1
4706 here as the powerof2cst case above will handle that case correctly. */
4707 (if (INTEGRAL_TYPE_P (type) && integer_all_onesp (@1))
4709 auto prec = TYPE_PRECISION (type);
4710 auto unsign = TYPE_UNSIGNED (type);
4711 tree inttype = build_nonstandard_integer_type (prec, unsign);
4713 (convert (negate (convert:inttype (convert:boolean_type_node @0))))))))
4714 (if (integer_zerop (@1))
4715 (with {
4716 tree booltrue = constant_boolean_node (true, boolean_type_node);
4717 }
4718 (switch
4719 /* a ? 0 : 1 -> !a. */
4720 (if (integer_onep (@2))
4721 (convert (bit_xor (convert:boolean_type_node @0) { booltrue; } )))
4722 /* a ? powerof2cst : 0 -> (!a) << (log2(powerof2cst)) */
4723 (if (INTEGRAL_TYPE_P (type) && integer_pow2p (@2))
4724 (with {
4725 tree shift = build_int_cst (integer_type_node, tree_log2 (@2));
4726 }
4727 (lshift (convert (bit_xor (convert:boolean_type_node @0) { booltrue; } ))
4728 { shift; })))
4729 /* a ? -1 : 0 -> -(!a). No need to check the TYPE_PRECISION not being 1
4730 here as the powerof2cst case above will handle that case correctly. */
4731 (if (INTEGRAL_TYPE_P (type) && integer_all_onesp (@2))
4733 auto prec = TYPE_PRECISION (type);
4734 auto unsign = TYPE_UNSIGNED (type);
4735 tree inttype = build_nonstandard_integer_type (prec, unsign);
4740 (bit_xor (convert:boolean_type_node @0) { booltrue; } )
4752 (simplify
4753 (cond @0 zero_one_valued_p@1 zero_one_valued_p@2)
4754 (switch
4755 /* bool0 ? bool1 : 0 -> bool0 & bool1 */
4756 (if (integer_zerop (@2))
4757 (bit_and (convert @0) @1))
4758 /* bool0 ? 0 : bool2 -> (bool0^1) & bool2 */
4759 (if (integer_zerop (@1))
4760 (bit_and (bit_xor (convert @0) { build_one_cst (type); } ) @2))
4761 /* bool0 ? 1 : bool2 -> bool0 | bool2 */
4762 (if (integer_onep (@1))
4763 (bit_ior (convert @0) @2))
4764 /* bool0 ? bool1 : 1 -> (bool0^1) | bool1 */
4765 (if (integer_onep (@2))
4766 (bit_ior (bit_xor (convert @0) @2) @1))
4771 # x_5 in range [cst1, cst2] where cst2 = cst1 + 1
4772 x_5 op cstN ? cst4 : cst3
4773 # op is == or != and N is 1 or 2
4774 to r_6 = x_5 + (min (cst3, cst4) - cst1) or
4775 r_6 = (min (cst3, cst4) + cst1) - x_5 depending on op, N and which
4776 of cst3 and cst4 is smaller.
4777 This was originally done by two_value_replacement in phiopt (PR 88676). */
4778 (for eqne (eq ne)
4779 (simplify
4780 (cond (eqne SSA_NAME@0 INTEGER_CST@1) INTEGER_CST@2 INTEGER_CST@3)
4781 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
4782 && INTEGRAL_TYPE_P (type)
4783 && (wi::to_widest (@2) + 1 == wi::to_widest (@3)
4784 || wi::to_widest (@2) == wi::to_widest (@3) + 1))
4785 (with {
4786 value_range r;
4787 get_range_query (cfun)->range_of_expr (r, @0);
4788 if (r.undefined_p ())
4789 r.set_varying (TREE_TYPE (@0));
4791 wide_int min = r.lower_bound ();
4792 wide_int max = r.upper_bound ();
4793 }
4794 (if (min + 1 == max
4795 && (wi::to_wide (@1) == min
4796 || wi::to_wide (@1) == max))
4797 (with {
4798 tree arg0 = @2, arg1 = @3;
4799 tree type1;
4800 if ((eqne == EQ_EXPR) ^ (wi::to_wide (@1) == min))
4801 std::swap (arg0, arg1);
4802 if (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (type))
4803 type1 = TREE_TYPE (@0);
4804 else
4805 type1 = type;
4806 auto prec = TYPE_PRECISION (type1);
4807 auto unsign = TYPE_UNSIGNED (type1);
4808 type1 = build_nonstandard_integer_type (prec, unsign);
4809 min = wide_int::from (min, prec,
4810 TYPE_SIGN (TREE_TYPE (@0)));
4811 wide_int a = wide_int::from (wi::to_wide (arg0), prec,
4812 TYPE_SIGN (TREE_TYPE (arg0)));
4813 enum tree_code code;
4814 wi::overflow_type ovf;
4815 if (tree_int_cst_lt (arg0, arg1))
4821 /* lhs is known to be in range [min, min+1] and we want to add a
4822 to it. Check if that operation can overflow for those 2 values
4823 and if yes, force unsigned type. */
4824 wi::add (min + (wi::neg_p (a) ? 0 : 1), a, SIGNED, &ovf);
4825 if (ovf)
4826 type1 = unsigned_type_for (type1);
4835 /* lhs is known to be in range [min, min+1] and we want to subtract
4836 it from a. Check if that operation can overflow for those 2
4837 values and if yes, force unsigned type. */
4838 wi::sub (a, min + (wi::neg_p (min) ? 0 : 1), SIGNED, &ovf);
4839 if (ovf)
4840 type1 = unsigned_type_for (type1);
4843 tree arg = wide_int_to_tree (type1, a);
4844 }
4845 (if (code == PLUS_EXPR)
4846 (convert (plus (convert:type1 @0) { arg; }))
4847 (convert (minus { arg; } (convert:type1 @0)))
4857 (simplify
4858 (convert (cond@0 @1 INTEGER_CST@2 INTEGER_CST@3))
4859 (if (INTEGRAL_TYPE_P (type)
4860 && INTEGRAL_TYPE_P (TREE_TYPE (@0)))
4861 (cond @1 (convert @2) (convert @3))))
4863 /* Simplification moved from fold_cond_expr_with_comparison. It may also
4864 be extended. */
4865 /* This pattern implements two kinds of simplification:
4868 (cond (cmp (convert1? x) c1) (convert2? x) c2) -> (minmax (x c)) if:
4869 1) Conversions are type widening from smaller type.
4870 2) Const c1 equals to c2 after canonicalizing comparison.
4871 3) Comparison has tree code LT, LE, GT or GE.
4872 This specific pattern is needed when (cmp (convert x) c) may not
4873 be simplified by comparison patterns because of multiple uses of
4874 x. It also makes sense here because simplifying across multiple
4875 referred var is always beneficial for complicated cases.
4878 (cond (eq (convert1? x) c1) (convert2? x) c2) -> (cond (eq x c1) c1 c2). */
4879 (for cmp (lt le gt ge eq ne)
4880 (simplify
4881 (cond (cmp (convert1? @1) INTEGER_CST@3) (convert2? @1) INTEGER_CST@2)
4882 (with
4883 {
4884 tree from_type = TREE_TYPE (@1);
4885 tree c1_type = TREE_TYPE (@3), c2_type = TREE_TYPE (@2);
4886 enum tree_code code = ERROR_MARK;
4888 if (INTEGRAL_TYPE_P (from_type)
4889 && int_fits_type_p (@2, from_type)
4890 && (types_match (c1_type, from_type)
4891 || (TYPE_PRECISION (c1_type) > TYPE_PRECISION (from_type)
4892 && (TYPE_UNSIGNED (from_type)
4893 || TYPE_SIGN (c1_type) == TYPE_SIGN (from_type))))
4894 && (types_match (c2_type, from_type)
4895 || (TYPE_PRECISION (c2_type) > TYPE_PRECISION (from_type)
4896 && (TYPE_UNSIGNED (from_type)
4897 || TYPE_SIGN (c2_type) == TYPE_SIGN (from_type)))))
4898 {
4899 if (cmp != EQ_EXPR)
4900 code = minmax_from_comparison (cmp, @1, @3, @1, @2);
4901 /* Can do A == C1 ? A : C2 -> A == C1 ? C1 : C2? */
4902 else if (int_fits_type_p (@3, from_type))
4903 code = EQ_EXPR;
4904 }
4905 }
4906 (if (code == MAX_EXPR)
4907 (convert (max @1 (convert @2)))
4908 (if (code == MIN_EXPR)
4909 (convert (min @1 (convert @2)))
4910 (if (code == EQ_EXPR)
4911 (convert (cond (eq @1 (convert @3))
4912 (convert:from_type @3) (convert:from_type @2)))))))))
4914 /* (cond (cmp (convert? x) c1) (op x c2) c3) -> (op (minmax x c1) c2) if:
4916 1) OP is PLUS or MINUS.
4917 2) CMP is LT, LE, GT or GE.
4918 3) C3 == (C1 op C2), and computation doesn't have undefined behavior.
4920 This pattern also handles special cases like:
4922 A) Operand x is a unsigned to signed type conversion and c1 is
4923 integer zero. In this case,
4924 (signed type)x < 0 <=> x > MAX_VAL(signed type)
4925 (signed type)x >= 0 <=> x <= MAX_VAL(signed type)
4926 B) Const c1 may not equal to (C3 op' C2). In this case we also
4927 check equality for (c1+1) and (c1-1) by adjusting comparison
4928 operation.
4930 TODO: Though signed type is handled by this pattern, it cannot be
4931 simplified at the moment because C standard requires additional
4932 type promotion. In order to match&simplify it here, the IR needs
4933 to be cleaned up by other optimizers, i.e, VRP. */
4934 (for op (plus minus)
4935 (for cmp (lt le gt ge)
4936 (simplify
4937 (cond (cmp (convert? @X) INTEGER_CST@1) (op @X INTEGER_CST@2) INTEGER_CST@3)
4938 (with { tree from_type = TREE_TYPE (@X), to_type = TREE_TYPE (@1); }
4939 (if (types_match (from_type, to_type)
4940 /* Check if it is special case A). */
4941 || (TYPE_UNSIGNED (from_type)
4942 && !TYPE_UNSIGNED (to_type)
4943 && TYPE_PRECISION (from_type) == TYPE_PRECISION (to_type)
4944 && integer_zerop (@1)
4945 && (cmp == LT_EXPR || cmp == GE_EXPR)))
4946 (with
4947 {
4948 wi::overflow_type overflow = wi::OVF_NONE;
4949 enum tree_code code, cmp_code = cmp;
4950 wide_int real_c1;
4951 wide_int c1 = wi::to_wide (@1);
4952 wide_int c2 = wi::to_wide (@2);
4953 wide_int c3 = wi::to_wide (@3);
4954 signop sgn = TYPE_SIGN (from_type);
4956 /* Handle special case A), given x of unsigned type:
4957 ((signed type)x < 0) <=> (x > MAX_VAL(signed type))
4958 ((signed type)x >= 0) <=> (x <= MAX_VAL(signed type)) */
4959 if (!types_match (from_type, to_type))
4960 {
4961 if (cmp_code == LT_EXPR)
4962 cmp_code = GT_EXPR;
4963 if (cmp_code == GE_EXPR)
4964 cmp_code = LE_EXPR;
4965 c1 = wi::max_value (to_type);
4966 }
4967 /* To simplify this pattern, we require c3 = (c1 op c2). Here we
4968 compute (c3 op' c2) and check if it equals to c1 with op' being
4969 the inverted operator of op. Make sure overflow doesn't happen
4970 if it is undefined. */
4971 if (op == PLUS_EXPR)
4972 real_c1 = wi::sub (c3, c2, sgn, &overflow);
4973 else
4974 real_c1 = wi::add (c3, c2, sgn, &overflow);
4976 code = cmp_code;
4977 if (!overflow || !TYPE_OVERFLOW_UNDEFINED (from_type))
4978 {
4979 /* Check if c1 equals to real_c1. Boundary condition is handled
4980 by adjusting comparison operation if necessary. */
4981 if (!wi::cmp (wi::sub (real_c1, 1, sgn, &overflow), c1, sgn)
4982 && !overflow)
4983 {
4984 /* X <= Y - 1 equals to X < Y. */
4985 if (cmp_code == LE_EXPR)
4986 code = LT_EXPR;
4987 /* X > Y - 1 equals to X >= Y. */
4988 if (cmp_code == GT_EXPR)
4989 code = GE_EXPR;
4990 }
4991 if (!wi::cmp (wi::add (real_c1, 1, sgn, &overflow), c1, sgn)
4992 && !overflow)
4993 {
4994 /* X < Y + 1 equals to X <= Y. */
4995 if (cmp_code == LT_EXPR)
4996 code = LE_EXPR;
4997 /* X >= Y + 1 equals to X > Y. */
4998 if (cmp_code == GE_EXPR)
4999 code = GT_EXPR;
5000 }
5001 if (code != cmp_code || !wi::cmp (real_c1, c1, sgn))
5002 {
5003 if (cmp_code == LT_EXPR || cmp_code == LE_EXPR)
5004 code = MIN_EXPR;
5005 if (cmp_code == GT_EXPR || cmp_code == GE_EXPR)
5006 code = MAX_EXPR;
5007 }
5008 }
5009 }
5010 (if (code == MAX_EXPR)
5011 (op (max @X { wide_int_to_tree (from_type, real_c1); })
5012 { wide_int_to_tree (from_type, c2); })
5013 (if (code == MIN_EXPR)
5014 (op (min @X { wide_int_to_tree (from_type, real_c1); })
5015 { wide_int_to_tree (from_type, c2); })))))))))
5018 /* A >= B ? A : B -> max (A, B) and friends. The code is still
5019 in fold_cond_expr_with_comparison for GENERIC folding with
5020 some extra constraints. */
5021 (for cmp (eq ne le lt unle unlt ge gt unge ungt uneq ltgt)
5022 (simplify
5023 (cond (cmp:c (nop_convert1?@c0 @0) (nop_convert2?@c1 @1))
5024 (convert3? @0) (convert4? @1))
5025 (if (!HONOR_SIGNED_ZEROS (type)
5026 && (/* Allow widening conversions of the compare operands as data. */
5027 (INTEGRAL_TYPE_P (type)
5028 && types_match (TREE_TYPE (@c0), TREE_TYPE (@0))
5029 && types_match (TREE_TYPE (@c1), TREE_TYPE (@1))
5030 && TYPE_PRECISION (TREE_TYPE (@0)) <= TYPE_PRECISION (type)
5031 && TYPE_PRECISION (TREE_TYPE (@1)) <= TYPE_PRECISION (type))
5032 /* Or sign conversions for the comparison. */
5033 || (types_match (type, TREE_TYPE (@0))
5034 && types_match (type, TREE_TYPE (@1)))))
5035 (switch
5036 (if (cmp == EQ_EXPR)
5037 (if (VECTOR_TYPE_P (type))
5038 (view_convert @c1)
5039 (convert @c1)))
5040 (if (cmp == NE_EXPR)
5041 (if (VECTOR_TYPE_P (type))
5042 (view_convert @c0)
5043 (convert @c0)))
5044 (if (cmp == LE_EXPR || cmp == UNLE_EXPR || cmp == LT_EXPR || cmp == UNLT_EXPR)
5045 (if (!HONOR_NANS (type))
5046 (if (VECTOR_TYPE_P (type))
5047 (view_convert (min @c0 @c1))
5048 (convert (min @c0 @c1)))))
5049 (if (cmp == GE_EXPR || cmp == UNGE_EXPR || cmp == GT_EXPR || cmp == UNGT_EXPR)
5050 (if (!HONOR_NANS (type))
5051 (if (VECTOR_TYPE_P (type))
5052 (view_convert (max @c0 @c1))
5053 (convert (max @c0 @c1)))))
5054 (if (cmp == UNEQ_EXPR)
5055 (if (!HONOR_NANS (type))
5056 (if (VECTOR_TYPE_P (type))
5057 (view_convert @c1)
5058 (convert @c1))))
5059 (if (cmp == LTGT_EXPR)
5060 (if (!HONOR_NANS (type))
5061 (if (VECTOR_TYPE_P (type))
5062 (view_convert @c0)
5063 (convert @c0))))))))
5066 /* These were part of minmax phiopt. */
5067 /* Optimize (a CMP b) ? minmax<a, c> : minmax<b, c>
5068 to minmax<min/max<a, b>, c> */
5069 (for minmax (min max)
5070 (for cmp (lt le gt ge ne)
5071 (simplify
5072 (cond (cmp @1 @3) (minmax:c @1 @4) (minmax:c @2 @4))
5073 (with
5074 {
5075 tree_code code = minmax_from_comparison (cmp, @1, @2, @1, @3);
5076 }
5077 (if (code == MIN_EXPR)
5078 (minmax (min @1 @2) @4)
5079 (if (code == MAX_EXPR)
5080 (minmax (max @1 @2) @4)))))))
5082 /* Optimize (a CMP CST1) ? max<a,CST2> : a */
5083 (for cmp (gt ge lt le)
5084 minmax (min min max max)
5085 (simplify
5086 (cond (cmp @0 @1) (minmax:c@2 @0 @3) @4)
5087 (with
5088 {
5089 tree_code code = minmax_from_comparison (cmp, @0, @1, @0, @4);
5090 }
5091 (if ((cmp == LT_EXPR || cmp == LE_EXPR)
5092 && code == MIN_EXPR
5093 && integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node, @3, @1)))
5094 (min @2 @4)
5095 (if ((cmp == GT_EXPR || cmp == GE_EXPR)
5096 && code == MAX_EXPR
5097 && integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node, @3, @1)))
5098 (max @2 @4))))))
5100 /* X != C1 ? -X : C2 simplifies to -X when -C1 == C2. */
5101 (simplify
5102 (cond (ne @0 INTEGER_CST@1) (negate@3 @0) INTEGER_CST@2)
5103 (if (!TYPE_SATURATING (type)
5104 && (TYPE_OVERFLOW_WRAPS (type)
5105 || !wi::only_sign_bit_p (wi::to_wide (@1)))
5106 && wi::eq_p (wi::neg (wi::to_wide (@1)), wi::to_wide (@2)))
5107 @3))
5109 /* X != C1 ? ~X : C2 simplifies to ~X when ~C1 == C2. */
5110 (simplify
5111 (cond (ne @0 INTEGER_CST@1) (bit_not@3 @0) INTEGER_CST@2)
5112 (if (wi::eq_p (wi::bit_not (wi::to_wide (@1)), wi::to_wide (@2)))
5113 @3))
5115 /* (X + 1) > Y ? -X : 1 simplifies to X >= Y ? -X : 1 when
5116 X is unsigned, as when X + 1 overflows, X is -1, so -X == 1. */
5117 (simplify
5118 (cond (gt (plus @0 integer_onep) @1) (negate @0) integer_onep@2)
5119 (if (TYPE_UNSIGNED (type))
5120 (cond (ge @0 @1) (negate @0) @2)))
5122 (for cnd (cond vec_cond)
5123 /* A ? B : (A ? X : C) -> A ? B : C. */
5124 (simplify
5125 (cnd @0 (cnd @0 @1 @2) @3)
5126 (cnd @0 @1 @3))
5127 (simplify
5128 (cnd @0 @1 (cnd @0 @2 @3))
5129 (cnd @0 @1 @3))
5130 /* A ? B : (!A ? C : X) -> A ? B : C. */
5131 /* ??? This matches embedded conditions open-coded because genmatch
5132 would generate matching code for conditions in separate stmts only.
5133 The following is still important to merge then and else arm cases
5134 from if-conversion. */
5135 (simplify
5136 (cnd @0 @1 (cnd @2 @3 @4))
5137 (if (inverse_conditions_p (@0, @2))
5138 (cnd @0 @1 @3)))
5139 (simplify
5140 (cnd @0 (cnd @1 @2 @3) @4)
5141 (if (inverse_conditions_p (@0, @1))
5142 (cnd @0 @3 @4)))
5144 /* A ? B : B -> B. */
5145 (simplify
5146 (cnd @0 @1 @1)
5147 @1)
5149 /* !A ? B : C -> A ? C : B. */
5150 (simplify
5151 (cnd (logical_inverted_value truth_valued_p@0) @1 @2)
5152 (cnd @0 @2 @1)))
5154 /* abs/negative simplifications moved from fold_cond_expr_with_comparison,
5155 Need to handle (A - B) case as fold_cond_expr_with_comparison does.
5156 Need to handle UN* comparisons.
5158 None of these transformations work for modes with signed
5159 zeros. If A is +/-0, the first two transformations will
5160 change the sign of the result (from +0 to -0, or vice
5161 versa). The last four will fix the sign of the result,
5162 even though the original expressions could be positive or
5163 negative, depending on the sign of A.
5165 Note that all these transformations are correct if A is
5166 NaN, since the two alternatives (A and -A) are also NaNs. */
5168 (for cnd (cond vec_cond)
5169 /* A == 0 ? A : -A same as -A */
5170 (for cmp (eq uneq)
5171 (simplify
5172 (cnd (cmp @0 zerop) @0 (negate@1 @0))
5173 (if (!HONOR_SIGNED_ZEROS (type))
5174 @1))
5175 (simplify
5176 (cnd (cmp @0 zerop) zerop (negate@1 @0))
5177 (if (!HONOR_SIGNED_ZEROS (type))
5178 @1)))
5180 /* A != 0 ? A : -A same as A */
5181 (for cmp (ne ltgt)
5182 (simplify
5183 (cnd (cmp @0 zerop) @0 (negate @0))
5184 (if (!HONOR_SIGNED_ZEROS (type))
5185 @0))
5186 (simplify
5187 (cnd (cmp @0 zerop) @0 integer_zerop)
5188 (if (!HONOR_SIGNED_ZEROS (type))
5189 @0)))
5191 /* A >=/> 0 ? A : -A same as abs (A) */
5192 (for cmp (ge gt)
5193 (simplify
5194 (cnd (cmp @0 zerop) @0 (negate @0))
5195 (if (!HONOR_SIGNED_ZEROS (type)
5196 && !TYPE_UNSIGNED (type))
5197 (abs @0))))
5198 /* A <=/< 0 ? A : -A same as -abs (A) */
5199 (for cmp (le lt)
5200 (simplify
5201 (cnd (cmp @0 zerop) @0 (negate @0))
5202 (if (!HONOR_SIGNED_ZEROS (type)
5203 && !TYPE_UNSIGNED (type))
5204 (if (ANY_INTEGRAL_TYPE_P (type)
5205 && !TYPE_OVERFLOW_WRAPS (type))
5206 (with {
5207 tree utype = unsigned_type_for (type);
5208 }
5209 (convert (negate (absu:utype @0))))
5210 (negate (abs @0)))))
5214 /* -(type)!A -> (type)A - 1. */
5215 (simplify
5216 (negate (convert?:s (logical_inverted_value:s @0)))
5217 (if (INTEGRAL_TYPE_P (type)
5218 && TREE_CODE (type) != BOOLEAN_TYPE
5219 && TYPE_PRECISION (type) > 1
5220 && TREE_CODE (@0) == SSA_NAME
5221 && ssa_name_has_boolean_range (@0))
5222 (plus (convert:type @0) { build_all_ones_cst (type); })))
5224 /* A + (B vcmp C ? 1 : 0) -> A - (B vcmp C ? -1 : 0), since vector comparisons
5225 return all -1 or all 0 results. */
5226 /* ??? We could instead convert all instances of the vec_cond to negate,
5227 but that isn't necessarily a win on its own. */
5228 (simplify
5229 (plus:c @3 (view_convert? (vec_cond:s @0 integer_each_onep@1 integer_zerop@2)))
5230 (if (VECTOR_TYPE_P (type)
5231 && known_eq (TYPE_VECTOR_SUBPARTS (type),
5232 TYPE_VECTOR_SUBPARTS (TREE_TYPE (@1)))
5233 && (TYPE_MODE (TREE_TYPE (type))
5234 == TYPE_MODE (TREE_TYPE (TREE_TYPE (@1)))))
5235 (minus @3 (view_convert (vec_cond @0 (negate @1) @2)))))
5237 /* ... likewise A - (B vcmp C ? 1 : 0) -> A + (B vcmp C ? -1 : 0). */
5238 (simplify
5239 (minus @3 (view_convert? (vec_cond:s @0 integer_each_onep@1 integer_zerop@2)))
5240 (if (VECTOR_TYPE_P (type)
5241 && known_eq (TYPE_VECTOR_SUBPARTS (type),
5242 TYPE_VECTOR_SUBPARTS (TREE_TYPE (@1)))
5243 && (TYPE_MODE (TREE_TYPE (type))
5244 == TYPE_MODE (TREE_TYPE (TREE_TYPE (@1)))))
5245 (plus @3 (view_convert (vec_cond @0 (negate @1) @2)))))
5248 /* Simplifications of comparisons. */
5250 /* See if we can reduce the magnitude of a constant involved in a
5251 comparison by changing the comparison code. This is a canonicalization
5252 formerly done by maybe_canonicalize_comparison_1. */
5256 (cmp @0 uniform_integer_cst_p@1)
5257 (with { tree cst = uniform_integer_cst_p (@1); }
5258 (if (tree_int_cst_sgn (cst) == -1)
5259 (acmp @0 { build_uniform_cst (TREE_TYPE (@1),
5260 wide_int_to_tree (TREE_TYPE (cst),
5261 wi::to_wide (cst) + 1)); })))))
5266 (cmp @0 uniform_integer_cst_p@1)
5267 (with { tree cst = uniform_integer_cst_p (@1); }
5268 (if (tree_int_cst_sgn (cst) == 1)
5269 (acmp @0 { build_uniform_cst (TREE_TYPE (@1),
5270 wide_int_to_tree (TREE_TYPE (cst),
5271 wi::to_wide (cst) - 1)); })))))
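/* e.g. x <= -5 becomes x < -4 and x >= 8 becomes x > 7; in each case the
   replacement constant is one step closer to zero.  */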
5273 /* We can simplify a logical negation of a comparison to the
5274 inverted comparison. As we cannot compute an expression
5275 operator using invert_tree_comparison we have to simulate
5276 that with expression code iteration. */
5277 (for cmp (tcc_comparison)
5278 icmp (inverted_tcc_comparison)
5279 ncmp (inverted_tcc_comparison_with_nans)
5280 /* Ideally we'd like to combine the following two patterns
5281 and handle some more cases by using
5282 (logical_inverted_value (cmp @0 @1))
5283 here but for that genmatch would need to "inline" that.
5284 For now implement what forward_propagate_comparison did. */
5285 (simplify
5286 (bit_not (cmp @0 @1))
5287 (if (VECTOR_TYPE_P (type)
5288 || (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) == 1))
5289 /* Comparison inversion may be impossible for trapping math,
5290 invert_tree_comparison will tell us. But we can't use
5291 a computed operator in the replacement tree thus we have
5292 to play the trick below. */
5293 (with { enum tree_code ic = invert_tree_comparison
5294 (cmp, HONOR_NANS (@0)); }
5300 (bit_xor (cmp @0 @1) integer_truep)
5301 (with { enum tree_code ic = invert_tree_comparison
5302 (cmp, HONOR_NANS (@0)); }
5307 /* The following bits are handled by fold_binary_op_with_conditional_arg. */
5309 (ne (cmp@2 @0 @1) integer_zerop)
5310 (if (types_match (type, TREE_TYPE (@2)))
5311 @2))
5312 (simplify
5313 (eq (cmp@2 @0 @1) integer_truep)
5314 (if (types_match (type, TREE_TYPE (@2)))
5315 @2))
5316 (simplify
5317 (ne (cmp@2 @0 @1) integer_truep)
5318 (if (types_match (type, TREE_TYPE (@2)))
5319 (with { enum tree_code ic = invert_tree_comparison
5320 (cmp, HONOR_NANS (@0)); }
5326 (eq (cmp@2 @0 @1) integer_zerop)
5327 (if (types_match (type, TREE_TYPE (@2)))
5328 (with { enum tree_code ic = invert_tree_comparison
5329 (cmp, HONOR_NANS (@0)); }
5335 /* Transform comparisons of the form X - Y CMP 0 to X CMP Y.
5336 ??? The transformation is valid for the other operators if overflow
5337 is undefined for the type, but performing it here badly interacts
5338 with the transformation in fold_cond_expr_with_comparison which
5339 attempts to synthesize ABS_EXPR. */
5341 (for sub (minus pointer_diff)
5343 (cmp (sub@2 @0 @1) integer_zerop)
5344 (if (single_use (@2))
5347 /* Simplify (x < 0) ^ (y < 0) to (x ^ y) < 0 and
5348 (x >= 0) ^ (y >= 0) to (x ^ y) < 0. */
5351 (bit_xor (cmp:s @0 integer_zerop) (cmp:s @1 integer_zerop))
5352 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
5353 && !TYPE_UNSIGNED (TREE_TYPE (@0))
5354 && types_match (TREE_TYPE (@0), TREE_TYPE (@1)))
5355 (lt (bit_xor @0 @1) { build_zero_cst (TREE_TYPE (@0)); }))))
5356 /* Simplify (x < 0) ^ (y >= 0) to (x ^ y) >= 0 and
5357 (x >= 0) ^ (y < 0) to (x ^ y) >= 0. */
5359 (bit_xor:c (lt:s @0 integer_zerop) (ge:s @1 integer_zerop))
5360 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
5361 && !TYPE_UNSIGNED (TREE_TYPE (@0))
5362 && types_match (TREE_TYPE (@0), TREE_TYPE (@1)))
5363 (ge (bit_xor @0 @1) { build_zero_cst (TREE_TYPE (@0)); })))
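/* These hold because the sign bit of x ^ y is the XOR of the two sign bits.
   E.g. with 8-bit x = 0xFD (-3) and y = 0x05, x ^ y = 0xF8 has the sign bit
   set exactly because the signs of x and y differ. */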
5365 /* Transform comparisons of the form X * C1 CMP 0 to X CMP 0 in the
5366 signed arithmetic case. That form is created by the compiler
5367 often enough for folding it to be of value. One example is in
5368 computing loop trip counts after Operator Strength Reduction. */
5369 (for cmp (simple_comparison)
5370 scmp (swapped_simple_comparison)
5372 (cmp (mult@3 @0 INTEGER_CST@1) integer_zerop@2)
5373 /* Handle unfolded multiplication by zero. */
5374 (if (integer_zerop (@1))
5375 (cmp @1 @2)
5376 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
5377 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
5379 /* If @1 is negative we swap the sense of the comparison. */
5380 (if (tree_int_cst_sgn (@1) < 0)
5381 (scmp @0 @2)
5382 (cmp @0 @2))))))
5384 /* For integral types with undefined overflow fold
5385 x * C1 == C2 into x == C2 / C1 or false.
5386 If overflow wraps and C1 is odd, simplify to x == C2 / C1 in the ring
5387 Z / 2^n Z. */
5388 (for cmp (eq ne)
5389 (simplify
5390 (cmp (mult @0 INTEGER_CST@1) INTEGER_CST@2)
5391 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
5392 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
5393 && wi::to_wide (@1) != 0)
5394 (with { widest_int quot; }
5395 (if (wi::multiple_of_p (wi::to_widest (@2), wi::to_widest (@1),
5396 TYPE_SIGN (TREE_TYPE (@0)), &quot))
5397 (cmp @0 { wide_int_to_tree (TREE_TYPE (@0), quot); })
5398 { constant_boolean_node (cmp == NE_EXPR, type); }))
5399 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
5400 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))
5401 && (wi::bit_and (wi::to_wide (@1), 1) == 1))
5404 tree itype = TREE_TYPE (@0);
5405 int p = TYPE_PRECISION (itype);
5406 wide_int m = wi::one (p + 1) << p;
5407 wide_int a = wide_int::from (wi::to_wide (@1), p + 1, UNSIGNED);
5408 wide_int i = wide_int::from (wi::mod_inv (a, m),
5409 p, TYPE_SIGN (itype));
5410 wide_int_to_tree (itype, wi::mul (i, wi::to_wide (@2)));
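/* Worked example (8-bit wrapping arithmetic): x * 3 == 5 becomes x == 87,
   since 171 is the modular inverse of 3 (3 * 171 = 513 = 1 mod 256) and
   5 * 171 mod 256 = 87. */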
5413 /* Simplify comparison of something with itself. For IEEE
5414 floating-point, we can only do some of these simplifications. */
5418 (if (! FLOAT_TYPE_P (TREE_TYPE (@0))
5419 || ! tree_expr_maybe_nan_p (@0))
5420 { constant_boolean_node (true, type); }
5421 (if (cmp != EQ_EXPR
5422 /* With -ftrapping-math conversion to EQ loses an exception. */
5423 && (! FLOAT_TYPE_P (TREE_TYPE (@0))
5424 || ! flag_trapping_math))
5425 (eq @0 @0)))))
5426 (for cmp (ne gt lt)
5427 (simplify
5428 (cmp @0 @0)
5429 (if (cmp != NE_EXPR
5430 || ! FLOAT_TYPE_P (TREE_TYPE (@0))
5431 || ! tree_expr_maybe_nan_p (@0))
5432 { constant_boolean_node (false, type); })))
5433 (for cmp (unle unge uneq)
5436 { constant_boolean_node (true, type); }))
5437 (for cmp (unlt ungt)
5443 (if (!flag_trapping_math || !tree_expr_maybe_nan_p (@0))
5444 { constant_boolean_node (false, type); }))
5446 /* x == ~x -> false */
5447 /* x != ~x -> true */
5448 (for cmp (eq ne)
5449 (simplify
5450 (cmp:c @0 (bit_not @0))
5451 { constant_boolean_node (cmp == NE_EXPR, type); }))
5453 /* Fold ~X op ~Y as Y op X. */
5454 (for cmp (simple_comparison)
5456 (cmp (bit_not@2 @0) (bit_not@3 @1))
5457 (if (single_use (@2) && single_use (@3))
5458 (cmp @1 @0))))
5460 /* Fold ~X op C as X op' ~C, where op' is the swapped comparison. */
5461 (for cmp (simple_comparison)
5462 scmp (swapped_simple_comparison)
5464 (cmp (bit_not@2 @0) CONSTANT_CLASS_P@1)
5465 (if (single_use (@2)
5466 && (TREE_CODE (@1) == INTEGER_CST || TREE_CODE (@1) == VECTOR_CST))
5467 (scmp @0 (bit_not @1)))))
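/* E.g. ~x < 5 becomes x > ~5, i.e. x > -6 for signed x, since
   ~x = -x - 1 reverses the ordering. */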
5469 (for cmp (simple_comparison)
5472 /* IEEE doesn't distinguish +0 and -0 in comparisons. */
5474 /* a CMP (-0) -> a CMP 0 */
5475 (if (REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (@1)))
5476 (cmp @0 { build_real (TREE_TYPE (@1), dconst0); }))
5477 /* (-0) CMP b -> 0 CMP b. */
5478 (if (TREE_CODE (@0) == REAL_CST
5479 && REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (@0)))
5480 (cmp { build_real (TREE_TYPE (@0), dconst0); } @1))
5481 /* x != NaN is always true, other ops are always false. */
5482 (if (REAL_VALUE_ISNAN (TREE_REAL_CST (@1))
5483 && (cmp == EQ_EXPR || cmp == NE_EXPR || !flag_trapping_math)
5484 && !tree_expr_signaling_nan_p (@1)
5485 && !tree_expr_maybe_signaling_nan_p (@0))
5486 { constant_boolean_node (cmp == NE_EXPR, type); })
5487 /* NaN != y is always true, other ops are always false. */
5488 (if (TREE_CODE (@0) == REAL_CST
5489 && REAL_VALUE_ISNAN (TREE_REAL_CST (@0))
5490 && (cmp == EQ_EXPR || cmp == NE_EXPR || !flag_trapping_math)
5491 && !tree_expr_signaling_nan_p (@0)
5492 && !tree_expr_signaling_nan_p (@1))
5493 { constant_boolean_node (cmp == NE_EXPR, type); })
5494 /* Fold comparisons against infinity. */
5495 (if (REAL_VALUE_ISINF (TREE_REAL_CST (@1))
5496 && MODE_HAS_INFINITIES (TYPE_MODE (TREE_TYPE (@1))))
5499 REAL_VALUE_TYPE max;
5500 enum tree_code code = cmp;
5501 bool neg = REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1));
5503 code = swap_tree_comparison (code);
5506 /* x > +Inf is always false, if we ignore NaNs or exceptions. */
5507 (if (code == GT_EXPR
5508 && !(HONOR_NANS (@0) && flag_trapping_math))
5509 { constant_boolean_node (false, type); })
5510 (if (code == LE_EXPR)
5511 /* x <= +Inf is always true, if we don't care about NaNs. */
5512 (if (! HONOR_NANS (@0))
5513 { constant_boolean_node (true, type); }
5514 /* x <= +Inf is the same as x == x, i.e. !isnan(x), but this loses
5515 an "invalid" exception. */
5516 (if (!flag_trapping_math)
5517 (eq @0 @0))))
5518 /* x == +Inf and x >= +Inf are always equal to x > DBL_MAX, but
5519 for == this introduces an exception for x a NaN. */
5520 (if ((code == EQ_EXPR && !(HONOR_NANS (@0) && flag_trapping_math))
5521 || code == GE_EXPR)
5522 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
5523 (if (neg)
5524 (lt @0 { build_real (TREE_TYPE (@0), max); })
5525 (gt @0 { build_real (TREE_TYPE (@0), max); }))))
5526 /* x < +Inf is always equal to x <= DBL_MAX. */
5527 (if (code == LT_EXPR)
5528 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
5529 (if (neg)
5530 (ge @0 { build_real (TREE_TYPE (@0), max); })
5531 (le @0 { build_real (TREE_TYPE (@0), max); }))))
5532 /* x != +Inf is always equal to !(x > DBL_MAX), but this introduces
5533 an exception for x a NaN so use an unordered comparison. */
5534 (if (code == NE_EXPR)
5535 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
5536 (if (! HONOR_NANS (@0))
5537 (if (neg)
5538 (ge @0 { build_real (TREE_TYPE (@0), max); })
5539 (le @0 { build_real (TREE_TYPE (@0), max); }))
5540 (if (neg)
5541 (unge @0 { build_real (TREE_TYPE (@0), max); })
5542 (unle @0 { build_real (TREE_TYPE (@0), max); }))))))))))
5544 /* If this is a comparison of a real constant with a PLUS_EXPR
5545 or a MINUS_EXPR of a real constant, we can convert it into a
5546 comparison with a revised real constant as long as no overflow
5547 occurs when unsafe_math_optimizations are enabled. */
5548 (if (flag_unsafe_math_optimizations)
5549 (for op (plus minus)
5551 (cmp (op @0 REAL_CST@1) REAL_CST@2)
5554 tree tem = const_binop (op == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR,
5555 TREE_TYPE (@1), @2, @1);
5557 (if (tem && !TREE_OVERFLOW (tem))
5558 (cmp @0 { tem; }))))))
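/* E.g. x + 2.0 < 5.0 becomes x < 3.0. The fold stays behind
   flag_unsafe_math_optimizations because the recomputed constant may round
   differently from the original two-step computation. */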
5560 /* Likewise, we can simplify a comparison of a real constant with
5561 a MINUS_EXPR whose first operand is also a real constant, i.e.
5562 (c1 - x) < c2 becomes x > c1-c2. Reordering is allowed on
5563 floating-point types only if -fassociative-math is set. */
5564 (if (flag_associative_math)
5566 (cmp (minus REAL_CST@0 @1) REAL_CST@2)
5567 (with { tree tem = const_binop (MINUS_EXPR, TREE_TYPE (@1), @0, @2); }
5568 (if (tem && !TREE_OVERFLOW (tem))
5569 (cmp { tem; } @1)))))
5571 /* Fold comparisons against built-in math functions. */
5572 (if (flag_unsafe_math_optimizations && ! flag_errno_math)
5575 (cmp (sq @0) REAL_CST@1)
5577 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
5579 /* sqrt(x) < y is always false, if y is negative. */
5580 (if (cmp == EQ_EXPR || cmp == LT_EXPR || cmp == LE_EXPR)
5581 { constant_boolean_node (false, type); })
5582 /* sqrt(x) > y is always true, if y is negative and we
5583 don't care about NaNs, i.e. negative values of x. */
5584 (if (cmp == NE_EXPR || !HONOR_NANS (@0))
5585 { constant_boolean_node (true, type); })
5586 /* sqrt(x) > y is the same as x >= 0, if y is negative. */
5587 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })))
5588 (if (real_equal (TREE_REAL_CST_PTR (@1), &dconst0))
5590 /* sqrt(x) < 0 is always false. */
5591 (if (cmp == LT_EXPR)
5592 { constant_boolean_node (false, type); })
5593 /* sqrt(x) >= 0 is always true if we don't care about NaNs. */
5594 (if (cmp == GE_EXPR && !HONOR_NANS (@0))
5595 { constant_boolean_node (true, type); })
5596 /* sqrt(x) <= 0 -> x == 0. */
5597 (if (cmp == LE_EXPR)
5598 (eq @0 @1))
5599 /* Otherwise sqrt(x) cmp 0 -> x cmp 0. Here cmp can be >=, >,
5600 == or !=. In the last case:
5602 (sqrt(x) != 0) == (NaN != 0) == true == (x != 0)
5604 if x is negative or NaN. Due to -funsafe-math-optimizations,
5605 the results for other x follow from natural arithmetic. */
5607 (if ((cmp == LT_EXPR
5611 && !REAL_VALUE_ISNAN (TREE_REAL_CST (@1))
5612 /* Give up for -frounding-math. */
5613 && !HONOR_SIGN_DEPENDENT_ROUNDING (TREE_TYPE (@0)))
5617 enum tree_code ncmp = cmp;
5618 const real_format *fmt
5619 = REAL_MODE_FORMAT (TYPE_MODE (TREE_TYPE (@0)));
5620 real_arithmetic (&c2, MULT_EXPR,
5621 &TREE_REAL_CST (@1), &TREE_REAL_CST (@1));
5622 real_convert (&c2, fmt, &c2);
5623 /* See PR91734: if c2 is inexact and sqrt(c2) < c (or sqrt(c2) >= c),
5624 then change LT_EXPR into LE_EXPR or GE_EXPR into GT_EXPR. */
5625 if (!REAL_VALUE_ISINF (c2))
5627 tree c3 = fold_const_call (CFN_SQRT, TREE_TYPE (@0),
5628 build_real (TREE_TYPE (@0), c2));
5629 if (c3 == NULL_TREE || TREE_CODE (c3) != REAL_CST)
5630 ncmp = ERROR_MARK;
5631 else if ((cmp == LT_EXPR || cmp == GE_EXPR)
5632 && real_less (&TREE_REAL_CST (c3), &TREE_REAL_CST (@1)))
5633 ncmp = cmp == LT_EXPR ? LE_EXPR : GT_EXPR;
5634 else if ((cmp == LE_EXPR || cmp == GT_EXPR)
5635 && real_less (&TREE_REAL_CST (@1), &TREE_REAL_CST (c3)))
5636 ncmp = cmp == LE_EXPR ? LT_EXPR : GE_EXPR;
5639 /* With rounding to even, sqrt of up to 3 different values
5640 gives the same normal result, so in some cases c2 needs
5641 to be adjusted. */
5642 REAL_VALUE_TYPE c2alt, tow;
5643 if (cmp == LT_EXPR || cmp == GE_EXPR)
5647 real_nextafter (&c2alt, fmt, &c2, &tow);
5648 real_convert (&c2alt, fmt, &c2alt);
5649 if (REAL_VALUE_ISINF (c2alt))
5653 c3 = fold_const_call (CFN_SQRT, TREE_TYPE (@0),
5654 build_real (TREE_TYPE (@0), c2alt));
5655 if (c3 == NULL_TREE || TREE_CODE (c3) != REAL_CST)
5656 ncmp = ERROR_MARK;
5657 else if (real_equal (&TREE_REAL_CST (c3),
5658 &TREE_REAL_CST (@1)))
5664 (if (cmp == GT_EXPR || cmp == GE_EXPR)
5665 (if (REAL_VALUE_ISINF (c2))
5666 /* sqrt(x) > y is x == +Inf, when y is very large. */
5667 (if (HONOR_INFINITIES (@0))
5668 (eq @0 { build_real (TREE_TYPE (@0), c2); })
5669 { constant_boolean_node (false, type); })
5670 /* sqrt(x) > c is the same as x > c*c. */
5671 (if (ncmp != ERROR_MARK)
5672 (if (ncmp == GE_EXPR)
5673 (ge @0 { build_real (TREE_TYPE (@0), c2); })
5674 (gt @0 { build_real (TREE_TYPE (@0), c2); }))))
5675 /* else if (cmp == LT_EXPR || cmp == LE_EXPR) */
5676 (if (REAL_VALUE_ISINF (c2))
5678 /* sqrt(x) < y is always true, when y is a very large
5679 value and we don't care about NaNs or Infinities. */
5680 (if (! HONOR_NANS (@0) && ! HONOR_INFINITIES (@0))
5681 { constant_boolean_node (true, type); })
5682 /* sqrt(x) < y is x != +Inf when y is very large and we
5683 don't care about NaNs. */
5684 (if (! HONOR_NANS (@0))
5685 (ne @0 { build_real (TREE_TYPE (@0), c2); }))
5686 /* sqrt(x) < y is x >= 0 when y is very large and we
5687 don't care about Infinities. */
5688 (if (! HONOR_INFINITIES (@0))
5689 (ge @0 { build_real (TREE_TYPE (@0), dconst0); }))
5690 /* sqrt(x) < y is x >= 0 && x != +Inf, when y is large. */
5693 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
5694 (ne @0 { build_real (TREE_TYPE (@0), c2); }))))
5695 /* sqrt(x) < c is the same as x < c*c, if we ignore NaNs. */
5696 (if (ncmp != ERROR_MARK && ! HONOR_NANS (@0))
5697 (if (ncmp == LT_EXPR)
5698 (lt @0 { build_real (TREE_TYPE (@0), c2); })
5699 (le @0 { build_real (TREE_TYPE (@0), c2); }))
5700 /* sqrt(x) < c is the same as x >= 0 && x < c*c. */
5701 (if (ncmp != ERROR_MARK && GENERIC)
5702 (if (ncmp == LT_EXPR)
5704 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
5705 (lt @0 { build_real (TREE_TYPE (@0), c2); }))
5707 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
5708 (le @0 { build_real (TREE_TYPE (@0), c2); })))))))))))
5709 /* Transform sqrt(x) cmp sqrt(y) -> x cmp y. */
5711 (cmp (sq @0) (sq @1))
5712 (if (! HONOR_NANS (@0))
5713 (cmp @0 @1))))
5715 /* Optimize various special cases of (FTYPE) N CMP (FTYPE) M. */
5716 (for cmp (lt le eq ne ge gt unordered ordered unlt unle ungt unge uneq ltgt)
5717 icmp (lt le eq ne ge gt unordered ordered lt le gt ge eq ne)
5719 (cmp (float@0 @1) (float @2))
5720 (if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (@0))
5721 && ! DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@0)))
5724 format_helper fmt (REAL_MODE_FORMAT (TYPE_MODE (TREE_TYPE (@0))));
5725 tree type1 = TREE_TYPE (@1);
5726 bool type1_signed_p = TYPE_SIGN (type1) == SIGNED;
5727 tree type2 = TREE_TYPE (@2);
5728 bool type2_signed_p = TYPE_SIGN (type2) == SIGNED;
5730 (if (fmt.can_represent_integral_type_p (type1)
5731 && fmt.can_represent_integral_type_p (type2))
5732 (if (cmp == ORDERED_EXPR || cmp == UNORDERED_EXPR)
5733 { constant_boolean_node (cmp == ORDERED_EXPR, type); }
5734 (if (TYPE_PRECISION (type1) > TYPE_PRECISION (type2)
5735 && type1_signed_p >= type2_signed_p)
5736 (icmp @1 (convert @2))
5737 (if (TYPE_PRECISION (type1) < TYPE_PRECISION (type2)
5738 && type1_signed_p <= type2_signed_p)
5739 (icmp (convert:type2 @1) @2)
5740 (if (TYPE_PRECISION (type1) == TYPE_PRECISION (type2)
5741 && type1_signed_p == type2_signed_p)
5742 (icmp @1 @2))))))))))
5744 /* Optimize various special cases of (FTYPE) N CMP CST. */
5745 (for cmp (lt le eq ne ge gt)
5746 icmp (le le eq ne ge ge)
5748 (cmp (float @0) REAL_CST@1)
5749 (if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (@1))
5750 && ! DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1)))
5753 tree itype = TREE_TYPE (@0);
5754 format_helper fmt (REAL_MODE_FORMAT (TYPE_MODE (TREE_TYPE (@1))));
5755 const REAL_VALUE_TYPE *cst = TREE_REAL_CST_PTR (@1);
5756 /* Be careful to preserve any potential exceptions due to
5757 NaNs. qNaNs are ok in == or != context.
5758 TODO: relax under -fno-trapping-math or
5759 -fno-signaling-nans. */
5760 bool exception_p
5761 = real_isnan (cst) && (cst->signalling
5762 || (cmp != EQ_EXPR && cmp != NE_EXPR));
5764 /* TODO: allow non-fitting itype and SNaNs when
5765 -fno-trapping-math. */
5766 (if (fmt.can_represent_integral_type_p (itype) && ! exception_p)
5769 signop isign = TYPE_SIGN (itype);
5770 REAL_VALUE_TYPE imin, imax;
5771 real_from_integer (&imin, fmt, wi::min_value (itype), isign);
5772 real_from_integer (&imax, fmt, wi::max_value (itype), isign);
5774 REAL_VALUE_TYPE icst;
5775 if (cmp == GT_EXPR || cmp == GE_EXPR)
5776 real_ceil (&icst, fmt, cst);
5777 else if (cmp == LT_EXPR || cmp == LE_EXPR)
5778 real_floor (&icst, fmt, cst);
5780 real_trunc (&icst, fmt, cst);
5782 bool cst_int_p = !real_isnan (cst) && real_identical (&icst, cst);
5784 bool overflow_p = false;
5785 wide_int icst_val
5786 = real_to_integer (&icst, &overflow_p, TYPE_PRECISION (itype));
5789 /* Optimize cases when CST is outside of ITYPE's range. */
5790 (if (real_compare (LT_EXPR, cst, &imin))
5791 { constant_boolean_node (cmp == GT_EXPR || cmp == GE_EXPR || cmp == NE_EXPR,
5793 (if (real_compare (GT_EXPR, cst, &imax))
5794 { constant_boolean_node (cmp == LT_EXPR || cmp == LE_EXPR || cmp == NE_EXPR,
5796 /* Remove cast if CST is an integer representable by ITYPE. */
5797 (if (cst_int_p)
5798 (cmp @0 { gcc_assert (!overflow_p);
5799 wide_int_to_tree (itype, icst_val); })
5801 /* When CST is fractional, optimize
5802 (FTYPE) N == CST -> 0
5803 (FTYPE) N != CST -> 1. */
5804 (if (cmp == EQ_EXPR || cmp == NE_EXPR)
5805 { constant_boolean_node (cmp == NE_EXPR, type); })
5806 /* Otherwise replace with sensible integer constant. */
5809 gcc_checking_assert (!overflow_p);
5811 (icmp @0 { wide_int_to_tree (itype, icst_val); })))))))))
5813 /* Fold A /[ex] B CMP C to A CMP B * C. */
5816 (cmp (exact_div @0 @1) INTEGER_CST@2)
5817 (if (!integer_zerop (@1))
5818 (if (wi::to_wide (@2) == 0)
5820 (if (TREE_CODE (@1) == INTEGER_CST)
5823 wi::overflow_type ovf;
5824 wide_int prod = wi::mul (wi::to_wide (@2), wi::to_wide (@1),
5825 TYPE_SIGN (TREE_TYPE (@1)), &ovf);
5828 { constant_boolean_node (cmp == NE_EXPR, type); }
5829 (cmp @0 { wide_int_to_tree (TREE_TYPE (@0), prod); }))))))))
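/* E.g. A /[ex] 4 == 3 becomes A == 12 (the division is known exact);
   when the product B * C overflows, the equality can never hold, hence
   the constant result. */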
5830 (for cmp (lt le gt ge)
5832 (cmp (exact_div @0 INTEGER_CST@1) INTEGER_CST@2)
5833 (if (wi::gt_p (wi::to_wide (@1), 0, TYPE_SIGN (TREE_TYPE (@1))))
5836 wi::overflow_type ovf;
5837 wide_int prod = wi::mul (wi::to_wide (@2), wi::to_wide (@1),
5838 TYPE_SIGN (TREE_TYPE (@1)), &ovf);
5841 { constant_boolean_node (wi::lt_p (wi::to_wide (@2), 0,
5842 TYPE_SIGN (TREE_TYPE (@2)))
5843 != (cmp == LT_EXPR || cmp == LE_EXPR), type); }
5844 (cmp @0 { wide_int_to_tree (TREE_TYPE (@0), prod); }))))))
5846 /* Fold (size_t)(A /[ex] B) CMP C to (size_t)A CMP (size_t)B * C or A CMP' 0.
5848 For small C (less than max/B), this is (size_t)A CMP (size_t)B * C.
5849 For large C (more than min/B+2^size), this is also true, with the
5850 multiplication computed modulo 2^size.
5851 For intermediate C, this just tests the sign of A. */
5852 (for cmp (lt le gt ge)
5855 (cmp (convert (exact_div @0 INTEGER_CST@1)) INTEGER_CST@2)
5856 (if (tree_nop_conversion_p (TREE_TYPE (@0), TREE_TYPE (@2))
5857 && TYPE_UNSIGNED (TREE_TYPE (@2)) && !TYPE_UNSIGNED (TREE_TYPE (@0))
5858 && wi::gt_p (wi::to_wide (@1), 0, TYPE_SIGN (TREE_TYPE (@1))))
5861 tree utype = TREE_TYPE (@2);
5862 wide_int denom = wi::to_wide (@1);
5863 wide_int right = wi::to_wide (@2);
5864 wide_int smax = wi::sdiv_trunc (wi::max_value (TREE_TYPE (@0)), denom);
5865 wide_int smin = wi::sdiv_trunc (wi::min_value (TREE_TYPE (@0)), denom);
5866 bool small = wi::leu_p (right, smax);
5867 bool large = wi::geu_p (right, smin);
5869 (if (small || large)
5870 (cmp (convert:utype @0) (mult @2 (convert @1)))
5871 (cmp2 @0 { build_zero_cst (TREE_TYPE (@0)); }))))))
5873 /* Unordered tests if either argument is a NaN. */
5875 (bit_ior (unordered @0 @0) (unordered @1 @1))
5876 (if (types_match (@0, @1))
5877 (unordered @0 @1)))
5878 (simplify
5879 (bit_and (ordered @0 @0) (ordered @1 @1))
5880 (if (types_match (@0, @1))
5881 (ordered @0 @1)))
5883 (bit_ior:c (unordered @0 @0) (unordered:c@2 @0 @1))
5884 @2)
5885 (simplify
5886 (bit_and:c (ordered @0 @0) (ordered:c@2 @0 @1))
5887 @2)
5889 /* Simple range test simplifications. */
5890 /* A < B || A >= B -> true. */
5891 (for test1 (lt le le le ne ge)
5892 test2 (ge gt ge ne eq ne)
5894 (bit_ior:c (test1 @0 @1) (test2 @0 @1))
5895 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
5896 || VECTOR_INTEGER_TYPE_P (TREE_TYPE (@0)))
5897 { constant_boolean_node (true, type); })))
5898 /* A < B && A >= B -> false. */
5899 (for test1 (lt lt lt le ne eq)
5900 test2 (ge gt eq gt eq gt)
5902 (bit_and:c (test1 @0 @1) (test2 @0 @1))
5903 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
5904 || VECTOR_INTEGER_TYPE_P (TREE_TYPE (@0)))
5905 { constant_boolean_node (false, type); })))
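/* The integral restriction matters for the disjunction form: with
   floating-point operands a NaN makes both A < B and A >= B false,
   so the result would not be true. */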
5907 /* A & (2**N - 1) <= 2**K - 1 -> A & (2**N - 2**K) == 0
5908 A & (2**N - 1) > 2**K - 1 -> A & (2**N - 2**K) != 0
5910 Note that comparisons
5911 A & (2**N - 1) < 2**K -> A & (2**N - 2**K) == 0
5912 A & (2**N - 1) >= 2**K -> A & (2**N - 2**K) != 0
5913 will be canonicalized to above so there's no need to
5914 consider them here. */
5917 (for cmp (le gt)
5918 eqcmp (eq ne)
5919 (simplify
5920 (cmp (bit_and@0 @1 INTEGER_CST@2) INTEGER_CST@3)
5921 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)))
5924 tree ty = TREE_TYPE (@0);
5925 unsigned prec = TYPE_PRECISION (ty);
5926 wide_int mask = wi::to_wide (@2, prec);
5927 wide_int rhs = wi::to_wide (@3, prec);
5928 signop sgn = TYPE_SIGN (ty);
5930 (if ((mask & (mask + 1)) == 0 && wi::gt_p (rhs, 0, sgn)
5931 && (rhs & (rhs + 1)) == 0 && wi::ge_p (mask, rhs, sgn))
5932 (eqcmp (bit_and @1 { wide_int_to_tree (ty, mask - rhs); })
5933 { build_zero_cst (ty); }))))))
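/* Worked example: (A & 15) <= 3 becomes (A & 12) == 0. Mask 15 is 2**4 - 1
   and rhs 3 is 2**2 - 1, and A & 15 is at most 3 exactly when the bits in
   15 - 3 = 12 are clear. */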
5935 /* -A CMP -B -> B CMP A. */
5936 (for cmp (tcc_comparison)
5937 scmp (swapped_tcc_comparison)
5939 (cmp (negate @0) (negate @1))
5940 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
5941 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
5944 || TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))))
5947 (cmp (negate @0) CONSTANT_CLASS_P@1)
5948 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
5949 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
5952 || TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))))
5953 (with { tree tem = const_unop (NEGATE_EXPR, TREE_TYPE (@0), @1); }
5954 (if (tem && !TREE_OVERFLOW (tem))
5955 (scmp @0 { tem; }))))))
5957 /* Convert ABS[U]_EXPR<x> == 0 or ABS[U]_EXPR<x> != 0 to x == 0 or x != 0. */
5961 (eqne (op @0) zerop@1)
5962 (eqne @0 { build_zero_cst (TREE_TYPE (@0)); }))))
5964 /* From fold_sign_changed_comparison and fold_widened_comparison.
5965 FIXME: the lack of symmetry is disturbing. */
5966 (for cmp (simple_comparison)
5968 (cmp (convert@0 @00) (convert?@1 @10))
5969 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
5970 /* Disable this optimization if we're casting a function pointer
5971 type on targets that require function pointer canonicalization. */
5972 && !(targetm.have_canonicalize_funcptr_for_compare ()
5973 && ((POINTER_TYPE_P (TREE_TYPE (@00))
5974 && FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@00))))
5975 || (POINTER_TYPE_P (TREE_TYPE (@10))
5976 && FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@10))))))
5978 (if (TYPE_PRECISION (TREE_TYPE (@00)) == TYPE_PRECISION (TREE_TYPE (@0))
5979 && (TREE_CODE (@10) == INTEGER_CST
5981 && (TYPE_UNSIGNED (TREE_TYPE (@00)) == TYPE_UNSIGNED (TREE_TYPE (@0))
5984 && !POINTER_TYPE_P (TREE_TYPE (@00))
5985 /* (int)bool:32 != (int)uint is not the same as
5986 bool:32 != (bool:32)uint since boolean types only have two valid
5987 values independent of their precision. */
5988 && (TREE_CODE (TREE_TYPE (@00)) != BOOLEAN_TYPE
5989 || TREE_CODE (TREE_TYPE (@10)) == BOOLEAN_TYPE))
5990 /* ??? The special-casing of INTEGER_CST conversion was in the original
5991 code and here to avoid a spurious overflow flag on the resulting
5992 constant which fold_convert produces. */
5993 (if (TREE_CODE (@1) == INTEGER_CST)
5994 (cmp @00 { force_fit_type (TREE_TYPE (@00), wi::to_widest (@1), 0,
5995 TREE_OVERFLOW (@1)); })
5996 (cmp @00 (convert @1)))
5998 (if (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (TREE_TYPE (@00)))
5999 /* If possible, express the comparison in the shorter mode. */
6000 (if ((cmp == EQ_EXPR || cmp == NE_EXPR
6001 || TYPE_UNSIGNED (TREE_TYPE (@0)) == TYPE_UNSIGNED (TREE_TYPE (@00))
6002 || (!TYPE_UNSIGNED (TREE_TYPE (@0))
6003 && TYPE_UNSIGNED (TREE_TYPE (@00))))
6004 && (types_match (TREE_TYPE (@10), TREE_TYPE (@00))
6005 || ((TYPE_PRECISION (TREE_TYPE (@00))
6006 >= TYPE_PRECISION (TREE_TYPE (@10)))
6007 && (TYPE_UNSIGNED (TREE_TYPE (@00))
6008 == TYPE_UNSIGNED (TREE_TYPE (@10))))
6009 || (TREE_CODE (@10) == INTEGER_CST
6010 && INTEGRAL_TYPE_P (TREE_TYPE (@00))
6011 && int_fits_type_p (@10, TREE_TYPE (@00)))))
6012 (cmp @00 (convert @10))
6013 (if (TREE_CODE (@10) == INTEGER_CST
6014 && INTEGRAL_TYPE_P (TREE_TYPE (@00))
6015 && !int_fits_type_p (@10, TREE_TYPE (@00)))
6018 tree min = lower_bound_in_type (TREE_TYPE (@10), TREE_TYPE (@00));
6019 tree max = upper_bound_in_type (TREE_TYPE (@10), TREE_TYPE (@00));
6020 bool above = integer_nonzerop (const_binop (LT_EXPR, type, max, @10));
6021 bool below = integer_nonzerop (const_binop (LT_EXPR, type, @10, min));
6023 (if (above || below)
6024 (if (cmp == EQ_EXPR || cmp == NE_EXPR)
6025 { constant_boolean_node (cmp == EQ_EXPR ? false : true, type); }
6026 (if (cmp == LT_EXPR || cmp == LE_EXPR)
6027 { constant_boolean_node (above ? true : false, type); }
6028 (if (cmp == GT_EXPR || cmp == GE_EXPR)
6029 { constant_boolean_node (above ? false : true, type); })))))))))
6030 /* Fold (double)float1 CMP (double)float2 into float1 CMP float2. */
6031 (if (FLOAT_TYPE_P (TREE_TYPE (@00))
6032 && (DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@0))
6033 == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@00)))
6034 && (DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@0))
6035 == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@10))))
6038 tree type1 = TREE_TYPE (@10);
6039 if (TREE_CODE (@10) == REAL_CST && !DECIMAL_FLOAT_TYPE_P (type1))
6041 REAL_VALUE_TYPE orig = TREE_REAL_CST (@10);
6042 if (TYPE_PRECISION (type1) > TYPE_PRECISION (float_type_node)
6043 && exact_real_truncate (TYPE_MODE (float_type_node), &orig))
6044 type1 = float_type_node;
6045 if (TYPE_PRECISION (type1) > TYPE_PRECISION (double_type_node)
6046 && exact_real_truncate (TYPE_MODE (double_type_node), &orig))
6047 type1 = double_type_node;
6050 = (element_precision (TREE_TYPE (@00)) > element_precision (type1)
6051 ? TREE_TYPE (@00) : type1);
6053 (if (element_precision (TREE_TYPE (@0)) > element_precision (newtype))
6054 (cmp (convert:newtype @00) (convert:newtype @10))))))))
6059 /* SSA names are canonicalized to 2nd place. */
6060 (cmp addr@0 SSA_NAME@1)
6063 poly_int64 off; tree base;
6064 tree addr = (TREE_CODE (@0) == SSA_NAME
6065 ? gimple_assign_rhs1 (SSA_NAME_DEF_STMT (@0)) : @0);
6067 /* A local variable can never be pointed to by
6068 the default SSA name of an incoming parameter. */
6069 (if (SSA_NAME_IS_DEFAULT_DEF (@1)
6070 && TREE_CODE (SSA_NAME_VAR (@1)) == PARM_DECL
6071 && (base = get_base_address (TREE_OPERAND (addr, 0)))
6072 && TREE_CODE (base) == VAR_DECL
6073 && auto_var_in_fn_p (base, current_function_decl))
6074 (if (cmp == NE_EXPR)
6075 { constant_boolean_node (true, type); }
6076 { constant_boolean_node (false, type); })
6077 /* If the address is based on @1 decide using the offset. */
6078 (if ((base = get_addr_base_and_unit_offset (TREE_OPERAND (addr, 0), &off))
6079 && TREE_CODE (base) == MEM_REF
6080 && TREE_OPERAND (base, 0) == @1)
6081 (with { off += mem_ref_offset (base).force_shwi (); }
6082 (if (known_ne (off, 0))
6083 { constant_boolean_node (cmp == NE_EXPR, type); }
6084 (if (known_eq (off, 0))
6085 { constant_boolean_node (cmp == EQ_EXPR, type); }))))))))
6087 /* Equality compare simplifications from fold_binary */
6090 /* If we have (A | C) == D where C & ~D != 0, convert this into 0.
6091 Similarly for NE_EXPR. */
6093 (cmp (convert?@3 (bit_ior @0 INTEGER_CST@1)) INTEGER_CST@2)
6094 (if (tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0))
6095 && wi::bit_and_not (wi::to_wide (@1), wi::to_wide (@2)) != 0)
6096 { constant_boolean_node (cmp == NE_EXPR, type); }))
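/* E.g. (x | 4) == 3 is always false: bit 2 is set on the left-hand side
   but clear in 3. */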
6098 /* (X ^ Y) == 0 becomes X == Y, and (X ^ Y) != 0 becomes X != Y. */
6100 (cmp (bit_xor @0 @1) integer_zerop)
6103 /* (X ^ Y) == Y becomes X == 0.
6104 Likewise (X ^ Y) == X becomes Y == 0. */
6106 (cmp:c (bit_xor:c @0 @1) @0)
6107 (cmp @1 { build_zero_cst (TREE_TYPE (@1)); }))
6109 /* (X & Y) == X becomes (X & ~Y) == 0. */
6111 (cmp:c (bit_and:c @0 @1) @0)
6112 (cmp (bit_and @0 (bit_not! @1)) { build_zero_cst (TREE_TYPE (@0)); }))
6114 (cmp:c (convert@3 (bit_and (convert@2 @0) INTEGER_CST@1)) (convert @0))
6115 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
6116 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
6117 && INTEGRAL_TYPE_P (TREE_TYPE (@3))
6118 && TYPE_PRECISION (TREE_TYPE (@2)) == TYPE_PRECISION (TREE_TYPE (@0))
6119 && TYPE_PRECISION (TREE_TYPE (@3)) > TYPE_PRECISION (TREE_TYPE (@2))
6120 && !wi::neg_p (wi::to_wide (@1)))
6121 (cmp (bit_and @0 (convert (bit_not @1)))
6122 { build_zero_cst (TREE_TYPE (@0)); })))
6124 /* (X | Y) == Y becomes (X & ~Y) == 0. */
6126 (cmp:c (bit_ior:c @0 @1) @1)
6127 (cmp (bit_and @0 (bit_not! @1)) { build_zero_cst (TREE_TYPE (@0)); }))
6129 /* (X ^ C1) op C2 can be rewritten as X op (C1 ^ C2). */
6131 (cmp (convert?@3 (bit_xor @0 INTEGER_CST@1)) INTEGER_CST@2)
6132 (if (tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0)))
6133 (cmp @0 (bit_xor @1 (convert @2)))))
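/* E.g. (x ^ 5) == 3 becomes x == 6, since 5 ^ 3 == 6. */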
6136 (cmp (nop_convert? @0) integer_zerop)
6137 (if (tree_expr_nonzero_p (@0))
6138 { constant_boolean_node (cmp == NE_EXPR, type); }))
6140 /* (X & C) op (Y & C) into (X ^ Y) & C op 0. */
6142 (cmp (bit_and:cs @0 @2) (bit_and:cs @1 @2))
6143 (cmp (bit_and (bit_xor @0 @1) @2) { build_zero_cst (TREE_TYPE (@2)); })))
6145 /* (X < 0) != (Y < 0) into (X ^ Y) < 0.
6146 (X >= 0) != (Y >= 0) into (X ^ Y) < 0.
6147 (X < 0) == (Y < 0) into (X ^ Y) >= 0.
6148 (X >= 0) == (Y >= 0) into (X ^ Y) >= 0. */
6153 (cmp (sgncmp @0 integer_zerop@2) (sgncmp @1 integer_zerop))
6154 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
6155 && !TYPE_UNSIGNED (TREE_TYPE (@0))
6156 && types_match (@0, @1))
6157 (ncmp (bit_xor @0 @1) @2)))))
6158 /* (X < 0) == (Y >= 0) into (X ^ Y) < 0.
6159 (X < 0) != (Y >= 0) into (X ^ Y) >= 0. */
6163 (cmp:c (lt @0 integer_zerop@2) (ge @1 integer_zerop))
6164 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
6165 && !TYPE_UNSIGNED (TREE_TYPE (@0))
6166 && types_match (@0, @1))
6167 (ncmp (bit_xor @0 @1) @2))))
6169 /* If we have (A & C) == C where C is a power of 2, convert this into
6170 (A & C) != 0. Similarly for NE_EXPR. */
6174 (cmp (bit_and@2 @0 integer_pow2p@1) @1)
6175 (icmp @2 { build_zero_cst (TREE_TYPE (@0)); })))
6178 /* From fold_binary_op_with_conditional_arg handle the case of
6179 rewriting (a ? b : c) > d to a ? (b > d) : (c > d) when the
6180 compares simplify. */
6181 (for cmp (simple_comparison)
6183 (cmp:c (cond @0 @1 @2) @3)
6184 /* Do not move possibly trapping operations into the conditional as this
6185 pessimizes code and causes gimplification issues when applied late. */
6186 (if (!FLOAT_TYPE_P (TREE_TYPE (@3))
6187 || !operation_could_trap_p (cmp, true, false, @3))
6188 (cond @0 (cmp! @1 @3) (cmp! @2 @3)))))
6192 /* x < 0 ? ~y : y into (x >> (prec-1)) ^ y. */
6193 /* x >= 0 ? ~y : y into ~((x >> (prec-1)) ^ y). */
6195 (cond (cmp @0 integer_zerop) (bit_not @1) @1)
6196 (if (INTEGRAL_TYPE_P (type)
6197 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
6198 && !TYPE_UNSIGNED (TREE_TYPE (@0))
6199 && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (type))
6202 tree shifter = build_int_cst (integer_type_node, TYPE_PRECISION (type) - 1);
6204 (if (cmp == LT_EXPR)
6205 (bit_xor (convert (rshift @0 {shifter;})) @1)
6206 (bit_not (bit_xor (convert (rshift @0 {shifter;})) @1))))))
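/* The arithmetic shift x >> (prec-1) is 0 for x >= 0 and -1 (all ones) for
   x < 0, so XORing it with y yields y or ~y without a branch; the next
   pattern uses the same trick with the branches swapped. */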
6207 /* x < 0 ? y : ~y into ~((x >> (prec-1)) ^ y). */
6208 /* x >= 0 ? y : ~y into (x >> (prec-1)) ^ y. */
6210 (cond (cmp @0 integer_zerop) @1 (bit_not @1))
6211 (if (INTEGRAL_TYPE_P (type)
6212 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
6213 && !TYPE_UNSIGNED (TREE_TYPE (@0))
6214 && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (type))
6217 tree shifter = build_int_cst (integer_type_node, TYPE_PRECISION (type) - 1);
6219 (if (cmp == GE_EXPR)
6220 (bit_xor (convert (rshift @0 {shifter;})) @1)
6221 (bit_not (bit_xor (convert (rshift @0 {shifter;})) @1)))))))
6223 /* If we have (A & C) != 0 ? D : 0 where C and D are powers of 2,
6224 convert this into a shift followed by ANDing with D. */
6225 (simplify
6226 (cond
6227 (ne (bit_and @0 integer_pow2p@1) integer_zerop)
6228 INTEGER_CST@2 integer_zerop)
6229 (if (!POINTER_TYPE_P (type) && integer_pow2p (@2))
6231 int shift = (wi::exact_log2 (wi::to_wide (@2))
6232 - wi::exact_log2 (wi::to_wide (@1)));
6233 }
6234 (if (shift > 0)
6235 (bit_and
6236 (lshift (convert @0) { build_int_cst (integer_type_node, shift); }) @2)
6237 (bit_and
6238 (convert (rshift @0 { build_int_cst (integer_type_node, -shift); }))
6239 @2)))))
6241 /* If we have (A & C) != 0 where C is the sign bit of A, convert
6242 this into A < 0. Similarly for (A & C) == 0 into A >= 0. */
6246 (cmp (bit_and (convert?@2 @0) integer_pow2p@1) integer_zerop)
6247 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
6248 && type_has_mode_precision_p (TREE_TYPE (@0))
6249 && element_precision (@2) >= element_precision (@0)
6250 && wi::only_sign_bit_p (wi::to_wide (@1), element_precision (@0)))
6251 (with { tree stype = signed_type_for (TREE_TYPE (@0)); }
6252 (ncmp (convert:stype @0) { build_zero_cst (stype); })))))
6254 /* If we have A < 0 ? C : 0 where C is a power of 2, convert
6255 this into a right shift or sign extension followed by ANDing with C. */
6256 (simplify
6257 (cond
6258 (lt @0 integer_zerop)
6259 INTEGER_CST@1 integer_zerop)
6260 (if (integer_pow2p (@1)
6261 && !TYPE_UNSIGNED (TREE_TYPE (@0)))
6263 int shift = element_precision (@0) - wi::exact_log2 (wi::to_wide (@1)) - 1;
6264 }
6265 (if (shift >= 0)
6266 (bit_and
6267 (convert (rshift @0 { build_int_cst (integer_type_node, shift); }))
6268 @1)
6269 /* Otherwise ctype must be wider than TREE_TYPE (@0) and pure
6270 sign extension followed by AND with C will achieve the effect. */
6271 (bit_and (convert @0) @1)))))
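/* Illustrative, assuming 32-bit signed A and C == 16: A < 0 ? 16 : 0
   becomes (A >> 27) & 16, where 27 == 32 - log2(16) - 1 places a copy of
   the sign bit at bit 4. */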
6273 /* When the addresses are not directly of decls compare base and offset.
6274 This implements some remaining parts of fold_comparison address
6275 comparisons but still no complete part of it. Still it is good
6276 enough to make fold_stmt not regress when not dispatching to fold_binary. */
6277 (for cmp (simple_comparison)
6279 (cmp (convert1?@2 addr@0) (convert2? addr@1))
6282 poly_int64 off0, off1;
6283 tree base0, base1;
6284 int equal = address_compare (cmp, TREE_TYPE (@2), @0, @1, base0, base1,
6285 off0, off1, GENERIC);
6286 }
6287 (if (equal == 1)
6288 (switch
6289 (if (cmp == EQ_EXPR && (known_eq (off0, off1) || known_ne (off0, off1)))
6290 { constant_boolean_node (known_eq (off0, off1), type); })
6291 (if (cmp == NE_EXPR && (known_eq (off0, off1) || known_ne (off0, off1)))
6292 { constant_boolean_node (known_ne (off0, off1), type); })
6293 (if (cmp == LT_EXPR && (known_lt (off0, off1) || known_ge (off0, off1)))
6294 { constant_boolean_node (known_lt (off0, off1), type); })
6295 (if (cmp == LE_EXPR && (known_le (off0, off1) || known_gt (off0, off1)))
6296 { constant_boolean_node (known_le (off0, off1), type); })
6297 (if (cmp == GE_EXPR && (known_ge (off0, off1) || known_lt (off0, off1)))
6298 { constant_boolean_node (known_ge (off0, off1), type); })
6299 (if (cmp == GT_EXPR && (known_gt (off0, off1) || known_le (off0, off1)))
6300 { constant_boolean_node (known_gt (off0, off1), type); }))
6301 (if (equal == 0)
6302 (switch
6303 (if (cmp == EQ_EXPR)
6304 { constant_boolean_node (false, type); })
6305 (if (cmp == NE_EXPR)
6306 { constant_boolean_node (true, type); })))))))
6308 /* Simplify pointer equality compares using PTA. */
6309 (for neeq (ne eq)
6310 (simplify
6311 (neeq @0 @1)
6312 (if (POINTER_TYPE_P (TREE_TYPE (@0))
6313 && ptrs_compare_unequal (@0, @1))
6314 { constant_boolean_node (neeq != EQ_EXPR, type); })))
6316 /* PR70920: Transform (intptr_t)x eq/ne CST to x eq/ne (typeof x) CST.
6317 and (typeof ptr_cst) x eq/ne ptr_cst to x eq/ne (typeof x) CST.
6318 Disable the transform if either operand is pointer to function.
6319 This broke pr22051-2.c for arm where function pointer
6320 canonicalization is not wanted. */
6324 (cmp (convert @0) INTEGER_CST@1)
6325 (if (((POINTER_TYPE_P (TREE_TYPE (@0))
6326 && !FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@0)))
6327 && INTEGRAL_TYPE_P (TREE_TYPE (@1))
6328 /* Don't perform this optimization in GENERIC if @0 has reference
6329 type when sanitizing. See PR101210. */
6330 && !(GENERIC
6331 && TREE_CODE (TREE_TYPE (@0)) == REFERENCE_TYPE
6332 && (flag_sanitize & (SANITIZE_NULL | SANITIZE_ALIGNMENT))))
6333 || (INTEGRAL_TYPE_P (TREE_TYPE (@0))
6334 && POINTER_TYPE_P (TREE_TYPE (@1))
6335 && !FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@1)))))
6336 && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
6337 (cmp @0 (convert @1)))))
6339 /* Non-equality compare simplifications from fold_binary */
6340 (for cmp (lt gt le ge)
6341 /* Comparisons with the highest or lowest possible integer of
6342 the specified precision will have known values. */
6344 (cmp (convert?@2 @0) uniform_integer_cst_p@1)
6345 (if ((INTEGRAL_TYPE_P (TREE_TYPE (@1))
6346 || POINTER_TYPE_P (TREE_TYPE (@1))
6347 || VECTOR_INTEGER_TYPE_P (TREE_TYPE (@1)))
6348 && tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0)))
6351 tree cst = uniform_integer_cst_p (@1);
6352 tree arg1_type = TREE_TYPE (cst);
6353 unsigned int prec = TYPE_PRECISION (arg1_type);
6354 wide_int max = wi::max_value (arg1_type);
6355 wide_int signed_max = wi::max_value (prec, SIGNED);
6356 wide_int min = wi::min_value (arg1_type);
6359 (if (wi::to_wide (cst) == max)
6361 (if (cmp == GT_EXPR)
6362 { constant_boolean_node (false, type); })
6363 (if (cmp == GE_EXPR)
6364 (eq @2 @1))
6365 (if (cmp == LE_EXPR)
6366 { constant_boolean_node (true, type); })
6367 (if (cmp == LT_EXPR)
6368 (ne @2 @1))))
6369 (if (wi::to_wide (cst) == min)
6371 (if (cmp == LT_EXPR)
6372 { constant_boolean_node (false, type); })
6373 (if (cmp == LE_EXPR)
6374 (eq @2 @1))
6375 (if (cmp == GE_EXPR)
6376 { constant_boolean_node (true, type); })
6377 (if (cmp == GT_EXPR)
6378 (ne @2 @1))))
6379 (if (wi::to_wide (cst) == max - 1)
6381 (if (cmp == GT_EXPR)
6382 (eq @2 { build_uniform_cst (TREE_TYPE (@1),
6383 wide_int_to_tree (TREE_TYPE (cst),
6384 wi::to_wide (cst) + 1)); }))
6386 (if (cmp == LE_EXPR)
6387 (ne @2 { build_uniform_cst (TREE_TYPE (@1),
6388 wide_int_to_tree (TREE_TYPE (cst),
6389 wi::to_wide (cst) + 1)); }))))
6391 (if (wi::to_wide (cst) == min + 1)
6393 (if (cmp == GE_EXPR)
6394 (ne @2 { build_uniform_cst (TREE_TYPE (@1),
6395 wide_int_to_tree (TREE_TYPE (cst),
6396 wi::to_wide (cst) - 1)); }))
6398 (if (cmp == LT_EXPR)
6399 (eq @2 { build_uniform_cst (TREE_TYPE (@1),
6400 wide_int_to_tree (TREE_TYPE (cst),
6401 wi::to_wide (cst) - 1)); }))))
6403 (if (wi::to_wide (cst) == signed_max
6404 && TYPE_UNSIGNED (arg1_type)
6405 /* We will flip the signedness of the comparison operator
6406 associated with the mode of @1, so the sign bit is
6407 specified by this mode. Check that @1 is the signed
6408 max associated with this sign bit. */
6409 && prec == GET_MODE_PRECISION (SCALAR_INT_TYPE_MODE (arg1_type))
6410 /* signed_type does not work on pointer types. */
6411 && INTEGRAL_TYPE_P (arg1_type))
6412 /* The following case also applies to X < signed_max+1
6413 and X >= signed_max+1 because of previous transformations. */
6414 (if (cmp == LE_EXPR || cmp == GT_EXPR)
6415 (with { tree st = signed_type_for (TREE_TYPE (@1)); }
6417 (if (cst == @1 && cmp == LE_EXPR)
6418 (ge (convert:st @0) { build_zero_cst (st); }))
6419 (if (cst == @1 && cmp == GT_EXPR)
6420 (lt (convert:st @0) { build_zero_cst (st); }))
6421 (if (cmp == LE_EXPR)
6422 (ge (view_convert:st @0) { build_zero_cst (st); }))
6423 (if (cmp == GT_EXPR)
6424 (lt (view_convert:st @0) { build_zero_cst (st); })))))))))))
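/* E.g. for unsigned char x: x > 255 folds to false, x <= 254 becomes
   x != 255, and x <= 127 becomes (signed char) x >= 0 via the
   signedness flip. */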
6426 /* unsigned < (typeof unsigned)(unsigned != 0) is always false. */
6428 (lt:c @0 (convert (ne @0 integer_zerop)))
6429 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
6430 { constant_boolean_node (false, type); }))
6432 /* x != (typeof x)(x == 0) is always true. */
6434 (ne:c @0 (convert (eq @0 integer_zerop)))
6435 { constant_boolean_node (true, type); })
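/* Case analysis: in the first pattern x == 0 gives 0 < 0 and x != 0 gives
   x < 1, both false for unsigned x; in the second x == 0 gives 0 != 1 and
   x != 0 gives x != 0, both true. */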
6437 (for cmp (unordered ordered unlt unle ungt unge uneq ltgt)
6438 /* If the second operand is NaN, the result is constant. */
6441 (if (REAL_VALUE_ISNAN (TREE_REAL_CST (@1))
6442 && (cmp != LTGT_EXPR || ! flag_trapping_math))
6443 { constant_boolean_node (cmp == ORDERED_EXPR || cmp == LTGT_EXPR
6444 ? false : true, type); })))
6446 /* Fold UNORDERED if either operand must be NaN, or neither can be. */
6450 (if (tree_expr_nan_p (@0) || tree_expr_nan_p (@1))
6451 { constant_boolean_node (true, type); })
6452 (if (!tree_expr_maybe_nan_p (@0) && !tree_expr_maybe_nan_p (@1))
6453 { constant_boolean_node (false, type); })))
6455 /* Fold ORDERED if either operand must be NaN, or neither can be. */
6459 (if (tree_expr_nan_p (@0) || tree_expr_nan_p (@1))
6460 { constant_boolean_node (false, type); })
6461 (if (!tree_expr_maybe_nan_p (@0) && !tree_expr_maybe_nan_p (@1))
6462 { constant_boolean_node (true, type); })))
6464 /* bool_var != 0 becomes bool_var. */
6466 (ne @0 integer_zerop)
6467 (if (TREE_CODE (TREE_TYPE (@0)) == BOOLEAN_TYPE
6468 && types_match (type, TREE_TYPE (@0)))
6469 @0))
6470 /* bool_var == 1 becomes bool_var. */
6472 (eq @0 integer_onep)
6473 (if (TREE_CODE (TREE_TYPE (@0)) == BOOLEAN_TYPE
6474 && types_match (type, TREE_TYPE (@0)))
6475 @0))
6476 /* Do not handle
6477 bool_var == 0 becomes !bool_var or
6478 bool_var != 1 becomes !bool_var
6479 here because that only is good in assignment context as long
6480 as we require a tcc_comparison in GIMPLE_CONDs where we'd
6481 replace if (x == 0) with tem = ~x; if (tem != 0) which is
6482 clearly less optimal and which we'll transform again in forwprop. */
6484 /* Transform comparisons of the form (X & Y) CMP 0 to X CMP2 Z
6485 where ~Y + 1 == pow2 and Z = ~Y. */
6486 (for cst (VECTOR_CST INTEGER_CST)
6490 (cmp (bit_and:c@2 @0 cst@1) integer_zerop)
6491 (with { tree csts = bitmask_inv_cst_vector_p (@1); }
6492 (if (csts && (VECTOR_TYPE_P (TREE_TYPE (@1)) || single_use (@2)))
6493 (with { auto optab = VECTOR_TYPE_P (TREE_TYPE (@1))
6494 ? optab_vector : optab_default;
6495 tree utype = unsigned_type_for (TREE_TYPE (@1)); }
6496 (if (target_supports_op_p (utype, icmp, optab)
6497 || (optimize_vectors_before_lowering_p ()
6498 && (!target_supports_op_p (type, cmp, optab)
6499 || !target_supports_op_p (type, BIT_AND_EXPR, optab))))
6500 (if (TYPE_UNSIGNED (TREE_TYPE (@1)))
6502 (icmp (view_convert:utype @0) { csts; })))))))))
6504 /* When one argument is a constant, overflow detection can be simplified.
6505 Currently restricted to single use so as not to interfere too much with
6506 ADD_OVERFLOW detection in tree-ssa-math-opts.cc.
6507 CONVERT?(CONVERT?(A) + CST) CMP A -> A CMP' CST' */
6508 (for cmp (lt le ge gt)
6511 (cmp:c (convert?@3 (plus@2 (convert?@4 @0) INTEGER_CST@1)) @0)
6512 (if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@2))
6513 && types_match (TREE_TYPE (@0), TREE_TYPE (@3))
6514 && tree_nop_conversion_p (TREE_TYPE (@4), TREE_TYPE (@0))
6515 && wi::to_wide (@1) != 0
6518 unsigned int prec = TYPE_PRECISION (TREE_TYPE (@0));
6519 signop sign = TYPE_SIGN (TREE_TYPE (@0));
6521 (out @0 { wide_int_to_tree (TREE_TYPE (@0),
6522 wi::max_value (prec, sign)
6523 - wi::to_wide (@1)); })))))
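/* E.g. for unsigned x, x + 10 < x holds exactly when the addition wrapped,
   i.e. when x > UINT_MAX - 10. */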
6525 /* To detect overflow in unsigned A - B, A < B is simpler than A - B > A.
6526 However, the detection logic for SUB_OVERFLOW in tree-ssa-math-opts.cc
6527 expects the long form, so we restrict the transformation for now. */
6530 (cmp:c (minus@2 @0 @1) @0)
6531 (if (single_use (@2)
6532 && ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
6533 && TYPE_UNSIGNED (TREE_TYPE (@0)))
6534 (cmp @1 @0))))
6536 /* Optimize A - B + -1 >= A into B >= A for unsigned comparisons. */
6539 (cmp:c (plus (minus @0 @1) integer_minus_onep) @0)
6540 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
6541 && TYPE_UNSIGNED (TREE_TYPE (@0)))
6542 (cmp @1 @0))))
6544 /* Testing for overflow is unnecessary if we already know the result. */
6549 (cmp:c (realpart (IFN_SUB_OVERFLOW@2 @0 @1)) @0)
6550 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
6551 && types_match (TREE_TYPE (@0), TREE_TYPE (@1)))
6552 (out (imagpart @2) { build_zero_cst (TREE_TYPE (@0)); }))))
6557 (cmp:c (realpart (IFN_ADD_OVERFLOW:c@2 @0 @1)) @0)
6558 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
6559 && types_match (TREE_TYPE (@0), TREE_TYPE (@1)))
6560 (out (imagpart @2) { build_zero_cst (TREE_TYPE (@0)); }))))
6562 /* For unsigned operands, -1 / B < A checks whether A * B would overflow.
6563 Simplify it to __builtin_mul_overflow (A, B, <unused>). */
6567 (cmp:c (trunc_div:s integer_all_onesp @1) @0)
6568 (if (TYPE_UNSIGNED (TREE_TYPE (@0)) && !VECTOR_TYPE_P (TREE_TYPE (@0)))
6569 (with { tree t = TREE_TYPE (@0), cpx = build_complex_type (t); }
6570 (out (imagpart (IFN_MUL_OVERFLOW:cpx @0 @1)) { build_zero_cst (t); })))))
6572 /* Similarly, for unsigned operands, (((type) A * B) >> prec) != 0 where type
6573 is at least twice as wide as type of A and B, simplify to
6574 __builtin_mul_overflow (A, B, <unused>). */
6577 (cmp (rshift (mult:s (convert@3 @0) (convert @1)) INTEGER_CST@2)
6579 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
6580 && INTEGRAL_TYPE_P (TREE_TYPE (@3))
6581 && TYPE_UNSIGNED (TREE_TYPE (@0))
6582 && (TYPE_PRECISION (TREE_TYPE (@3))
6583 >= 2 * TYPE_PRECISION (TREE_TYPE (@0)))
6584 && tree_fits_uhwi_p (@2)
6585 && tree_to_uhwi (@2) == TYPE_PRECISION (TREE_TYPE (@0))
6586 && types_match (@0, @1)
6587 && type_has_mode_precision_p (TREE_TYPE (@0))
6588 && (optab_handler (umulv4_optab, TYPE_MODE (TREE_TYPE (@0)))
6589 != CODE_FOR_nothing))
6590 (with { tree t = TREE_TYPE (@0), cpx = build_complex_type (t); }
6591 (cmp (imagpart (IFN_MUL_OVERFLOW:cpx @0 @1)) { build_zero_cst (t); })))))
6593 /* Demote operands of IFN_{ADD,SUB,MUL}_OVERFLOW. */
6594 (for ovf (IFN_ADD_OVERFLOW IFN_SUB_OVERFLOW IFN_MUL_OVERFLOW)
6596 (ovf (convert@2 @0) @1)
6597 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
6598 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
6599 && TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (TREE_TYPE (@0))
6600 && (!TYPE_UNSIGNED (TREE_TYPE (@2)) || TYPE_UNSIGNED (TREE_TYPE (@0))))
6601 (ovf @0 @1)))
6602 (simplify
6603 (ovf @1 (convert@2 @0))
6604 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
6605 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
6606 && TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (TREE_TYPE (@0))
6607 && (!TYPE_UNSIGNED (TREE_TYPE (@2)) || TYPE_UNSIGNED (TREE_TYPE (@0))))
6608 (ovf @1 @0))))
6610 /* Optimize __builtin_mul_overflow_p (x, cst, (utype) 0) if all 3 types
6611 are unsigned to x > (umax / cst). Similarly for signed type, but
6612 in that case it needs to be outside of a range. */
6614 (imagpart (IFN_MUL_OVERFLOW:cs@2 @0 integer_nonzerop@1))
6615 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
6616 && TYPE_MAX_VALUE (TREE_TYPE (@0))
6617 && types_match (TREE_TYPE (@0), TREE_TYPE (TREE_TYPE (@2)))
6618 && int_fits_type_p (@1, TREE_TYPE (@0)))
6619 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
6620 (convert (gt @0 (trunc_div! { TYPE_MAX_VALUE (TREE_TYPE (@0)); } @1)))
6621 (if (TYPE_MIN_VALUE (TREE_TYPE (@0)))
6622 (if (integer_minus_onep (@1))
6623 (convert (eq @0 { TYPE_MIN_VALUE (TREE_TYPE (@0)); }))
6626 tree div = fold_convert (TREE_TYPE (@0), @1);
6627 tree lo = int_const_binop (TRUNC_DIV_EXPR,
6628 TYPE_MIN_VALUE (TREE_TYPE (@0)), div);
6629 tree hi = int_const_binop (TRUNC_DIV_EXPR,
6630 TYPE_MAX_VALUE (TREE_TYPE (@0)), div);
6631 tree etype = range_check_type (TREE_TYPE (@0));
6634 if (wi::neg_p (wi::to_wide (div)))
6635 std::swap (lo, hi);
6636 lo = fold_convert (etype, lo);
6637 hi = fold_convert (etype, hi);
6638 hi = int_const_binop (MINUS_EXPR, hi, lo);
6642 (convert (gt (minus (convert:etype @0) { lo; }) { hi; })))))))))
6644 /* Simplification of math builtins. These rules must all be optimizations
6645 as well as IL simplifications. If there is a possibility that the new
6646 form could be a pessimization, the rule should go in the canonicalization
6647 section that follows this one.
6649 Rules can generally go in this section if they satisfy one of
6652 - the rule describes an identity
6654 - the rule replaces calls with something as simple as addition or
6657 - the rule contains unary calls only and simplifies the surrounding
6658 arithmetic. (The idea here is to exclude non-unary calls in which
6659 one operand is constant and in which the call is known to be cheap
6660 when the operand has that value.) */
6662 (if (flag_unsafe_math_optimizations)
6663 /* Simplify sqrt(x) * sqrt(x) -> x. */
6665 (mult (SQRT_ALL@1 @0) @1)
6666 (if (!tree_expr_maybe_signaling_nan_p (@0))
6667 @0))
6669 (for op (plus minus)
6670 /* Simplify (A / C) +- (B / C) -> (A +- B) / C. */
6674 (rdiv (op @0 @2) @1)))
6676 (for cmp (lt le gt ge)
6677 neg_cmp (gt ge lt le)
6678 /* Simplify (x * C1) cmp C2 -> x cmp (C2 / C1), where C1 != 0. */
6680 (cmp (mult @0 REAL_CST@1) REAL_CST@2)
6682 { tree tem = const_binop (RDIV_EXPR, type, @2, @1); }
6683 (if (tem
6684 && !(REAL_VALUE_ISINF (TREE_REAL_CST (tem))
6685 || (real_zerop (tem) && !real_zerop (@1))))
6687 (if (real_less (&dconst0, TREE_REAL_CST_PTR (@1)))
6688 (cmp @0 { tem; })
6689 (if (real_less (TREE_REAL_CST_PTR (@1), &dconst0))
6690 (neg_cmp @0 { tem; })))))))
6692 /* Simplify sqrt(x) * sqrt(y) -> sqrt(x*y). */
6693 (for root (SQRT CBRT)
6695 (mult (root:s @0) (root:s @1))
6696 (root (mult @0 @1))))
6698 /* Simplify expN(x) * expN(y) -> expN(x+y). */
6699 (for exps (EXP EXP2 EXP10 POW10)
6701 (mult (exps:s @0) (exps:s @1))
6702 (exps (plus @0 @1))))
6704 /* Simplify a/root(b/c) into a*root(c/b). */
6705 (for root (SQRT CBRT)
6707 (rdiv @0 (root:s (rdiv:s @1 @2)))
6708 (mult @0 (root (rdiv @2 @1)))))
6710 /* Simplify x/expN(y) into x*expN(-y). */
6711 (for exps (EXP EXP2 EXP10 POW10)
6713 (rdiv @0 (exps:s @1))
6714 (mult @0 (exps (negate @1)))))
6716 (for logs (LOG LOG2 LOG10 LOG10)
6717 exps (EXP EXP2 EXP10 POW10)
6718 /* logN(expN(x)) -> x. */
6722 /* expN(logN(x)) -> x. */
6727 /* Optimize logN(func()) for various exponential functions. We
6728 want to determine the value "x" and the power "exponent" in
6729 order to transform logN(x**exponent) into exponent*logN(x). */
6730 (for logs (LOG LOG LOG LOG2 LOG2 LOG2 LOG10 LOG10)
6731 exps (EXP2 EXP10 POW10 EXP EXP10 POW10 EXP EXP2)
6734 (if (SCALAR_FLOAT_TYPE_P (type))
6740 /* Prepare to do logN(exp(exponent)) -> exponent*logN(e). */
6741 x = build_real_truncate (type, dconst_e ());
6744 /* Prepare to do logN(exp2(exponent)) -> exponent*logN(2). */
6745 x = build_real (type, dconst2);
6749 /* Prepare to do logN(exp10(exponent)) -> exponent*logN(10). */
6751 REAL_VALUE_TYPE dconst10;
6752 real_from_integer (&dconst10, VOIDmode, 10, SIGNED);
6753 x = build_real (type, dconst10);
6760 (mult (logs { x; }) @0)))))
6768 (if (SCALAR_FLOAT_TYPE_P (type))
6774 /* Prepare to do logN(sqrt(x)) -> 0.5*logN(x). */
6775 x = build_real (type, dconsthalf);
6778 /* Prepare to do logN(cbrt(x)) -> (1/3)*logN(x). */
6779 x = build_real_truncate (type, dconst_third ());
6785 (mult { x; } (logs @0))))))
6787 /* logN(pow(x,exponent)) -> exponent*logN(x). */
6788 (for logs (LOG LOG2 LOG10)
6792 (mult @1 (logs @0))))
6794 /* pow(C,x) -> exp(log(C)*x) if C > 0,
6795 or if C is a positive power of 2,
6796 pow(C,x) -> exp2(log2(C)*x). */
6804 (pows REAL_CST@0 @1)
6805 (if (real_compare (GT_EXPR, TREE_REAL_CST_PTR (@0), &dconst0)
6806 && real_isfinite (TREE_REAL_CST_PTR (@0))
6807 /* As libmvec doesn't have a vectorized exp2, defer optimizing
6808 the use_exp2 case until after vectorization. It seems actually
6809 beneficial for all constants to postpone this until later,
6810 because exp(log(C)*x), while faster, will have worse precision
6811 and if x folds into a constant too, that is unnecessary
6812 pessimization. */
6813 && canonicalize_math_after_vectorization_p ())
6815 const REAL_VALUE_TYPE *const value = TREE_REAL_CST_PTR (@0);
6816 bool use_exp2 = false;
6817 if (targetm.libc_has_function (function_c99_misc, TREE_TYPE (@0))
6818 && value->cl == rvc_normal)
6820 REAL_VALUE_TYPE frac_rvt = *value;
6821 SET_REAL_EXP (&frac_rvt, 1);
6822 if (real_equal (&frac_rvt, &dconst1))
6827 (if (optimize_pow_to_exp (@0, @1))
6828 (exps (mult (logs @0) @1)))
6829 (exp2s (mult (log2s @0) @1)))))))
6832 /* pow(C,x)*expN(y) -> expN(logN(C)*x+y) if C > 0. */
6834 exps (EXP EXP2 EXP10 POW10)
6835 logs (LOG LOG2 LOG10 LOG10)
6837 (mult:c (pows:s REAL_CST@0 @1) (exps:s @2))
6838 (if (real_compare (GT_EXPR, TREE_REAL_CST_PTR (@0), &dconst0)
6839 && real_isfinite (TREE_REAL_CST_PTR (@0)))
6840 (exps (plus (mult (logs @0) @1) @2)))))
6845 exps (EXP EXP2 EXP10 POW10)
6846 /* sqrt(expN(x)) -> expN(x*0.5). */
6849 (exps (mult @0 { build_real (type, dconsthalf); })))
6850 /* cbrt(expN(x)) -> expN(x/3). */
6853 (exps (mult @0 { build_real_truncate (type, dconst_third ()); })))
6854 /* pow(expN(x), y) -> expN(x*y). */
6857 (exps (mult @0 @1))))
6859 /* tan(atan(x)) -> x. */
6866 /* Simplify sin(atan(x)) -> x / sqrt(x*x + 1). */
6870 copysigns (COPYSIGN)
6875 REAL_VALUE_TYPE r_cst;
6876 build_sinatan_real (&r_cst, type);
6877 tree t_cst = build_real (type, r_cst);
6878 tree t_one = build_one_cst (type);
6880 (if (SCALAR_FLOAT_TYPE_P (type))
6881 (cond (lt (abs @0) { t_cst; })
6882 (rdiv @0 (sqrts (plus (mult @0 @0) { t_one; })))
6883 (copysigns { t_one; } @0))))))
6885 /* Simplify cos(atan(x)) -> 1 / sqrt(x*x + 1). */
6889 copysigns (COPYSIGN)
6894 REAL_VALUE_TYPE r_cst;
6895 build_sinatan_real (&r_cst, type);
6896 tree t_cst = build_real (type, r_cst);
6897 tree t_one = build_one_cst (type);
6898 tree t_zero = build_zero_cst (type);
6900 (if (SCALAR_FLOAT_TYPE_P (type))
6901 (cond (lt (abs @0) { t_cst; })
6902 (rdiv { t_one; } (sqrts (plus (mult @0 @0) { t_one; })))
6903 (copysigns { t_zero; } @0))))))
6905 (if (!flag_errno_math)
6906 /* Simplify sinh(atanh(x)) -> x / sqrt((1 - x)*(1 + x)). */
6911 (sinhs (atanhs:s @0))
6912 (with { tree t_one = build_one_cst (type); }
6913 (rdiv @0 (sqrts (mult (minus { t_one; } @0) (plus { t_one; } @0)))))))
6915 /* Simplify cosh(atanh(x)) -> 1 / sqrt((1 - x)*(1 + x)) */
6920 (coshs (atanhs:s @0))
6921 (with { tree t_one = build_one_cst (type); }
6922 (rdiv { t_one; } (sqrts (mult (minus { t_one; } @0) (plus { t_one; } @0))))))))
6924 /* cabs(x+0i) or cabs(0+xi) -> abs(x). */
6926 (CABS (complex:C @0 real_zerop@1))
6927 (abs @0))
6929 /* trunc(trunc(x)) -> trunc(x), etc. */
6930 (for fns (TRUNC_ALL FLOOR_ALL CEIL_ALL ROUND_ALL NEARBYINT_ALL RINT_ALL)
6934 /* f(x) -> x if x is integer valued and f does nothing for such values. */
6935 (for fns (TRUNC_ALL FLOOR_ALL CEIL_ALL ROUND_ALL NEARBYINT_ALL RINT_ALL)
6937 (fns integer_valued_real_p@0)
6938 @0))
6940 /* hypot(x,0) and hypot(0,x) -> abs(x). */
6942 (HYPOT:c @0 real_zerop@1)
6943 (abs @0))
6945 /* pow(1,x) -> 1. */
6947 (POW real_onep@0 @1)
6948 @0)
6951 /* copysign(x,x) -> x. */
6952 (COPYSIGN_ALL @0 @0)
6953 @0)
6956 /* copysign(x,-x) -> -x. */
6957 (COPYSIGN_ALL @0 (negate@1 @0))
6961 /* copysign(x,y) -> fabs(x) if y is nonnegative. */
6962 (COPYSIGN_ALL @0 tree_expr_nonnegative_p@1)
6963 (abs @0))
6965 (for scale (LDEXP SCALBN SCALBLN)
6966 /* ldexp(0, x) -> 0. */
6968 (scale real_zerop@0 @1)
6969 @0)
6970 /* ldexp(x, 0) -> x. */
6972 (scale @0 integer_zerop@1)
6973 @0)
6974 /* ldexp(x, y) -> x if x is +-Inf or NaN. */
6976 (scale REAL_CST@0 @1)
6977 (if (!real_isfinite (TREE_REAL_CST_PTR (@0)))
6978 @0)))
6980 /* Canonicalization of sequences of math builtins. These rules represent
6981 IL simplifications but are not necessarily optimizations.
6983 The sincos pass is responsible for picking "optimal" implementations
6984 of math builtins, which may be more complicated and can sometimes go
6985 the other way, e.g. converting pow into a sequence of sqrts.
6986 We only want to do these canonicalizations before the pass has run. */
6988 (if (flag_unsafe_math_optimizations && canonicalize_math_p ())
6989 /* Simplify tan(x) * cos(x) -> sin(x). */
6991 (mult:c (TAN:s @0) (COS:s @0))
6992 (SIN @0))
6994 /* Simplify x * pow(x,c) -> pow(x,c+1). */
6996 (mult:c @0 (POW:s @0 REAL_CST@1))
6997 (if (!TREE_OVERFLOW (@1))
6998 (POW @0 (plus @1 { build_one_cst (type); }))))
7000 /* Simplify sin(x) / cos(x) -> tan(x). */
7002 (rdiv (SIN:s @0) (COS:s @0))
7003 (TAN @0))
7005 /* Simplify sinh(x) / cosh(x) -> tanh(x). */
7007 (rdiv (SINH:s @0) (COSH:s @0))
7008 (TANH @0))
7010 /* Simplify tanh (x) / sinh (x) -> 1.0 / cosh (x). */
7012 (rdiv (TANH:s @0) (SINH:s @0))
7013 (rdiv {build_one_cst (type);} (COSH @0)))
7015 /* Simplify cos(x) / sin(x) -> 1 / tan(x). */
7017 (rdiv (COS:s @0) (SIN:s @0))
7018 (rdiv { build_one_cst (type); } (TAN @0)))
7020 /* Simplify sin(x) / tan(x) -> cos(x). */
7022 (rdiv (SIN:s @0) (TAN:s @0))
7023 (if (! HONOR_NANS (@0)
7024 && ! HONOR_INFINITIES (@0))
7025 (COS @0)))
7027 /* Simplify tan(x) / sin(x) -> 1.0 / cos(x). */
7029 (rdiv (TAN:s @0) (SIN:s @0))
7030 (if (! HONOR_NANS (@0)
7031 && ! HONOR_INFINITIES (@0))
7032 (rdiv { build_one_cst (type); } (COS @0))))
7034 /* Simplify pow(x,y) * pow(x,z) -> pow(x,y+z). */
7036 (mult (POW:s @0 @1) (POW:s @0 @2))
7037 (POW @0 (plus @1 @2)))
7039 /* Simplify pow(x,y) * pow(z,y) -> pow(x*z,y). */
7041 (mult (POW:s @0 @1) (POW:s @2 @1))
7042 (POW (mult @0 @2) @1))
7044 /* Simplify powi(x,y) * powi(z,y) -> powi(x*z,y). */
7046 (mult (POWI:s @0 @1) (POWI:s @2 @1))
7047 (POWI (mult @0 @2) @1))
7049 /* Simplify pow(x,c) / x -> pow(x,c-1). */
7051 (rdiv (POW:s @0 REAL_CST@1) @0)
7052 (if (!TREE_OVERFLOW (@1))
7053 (POW @0 (minus @1 { build_one_cst (type); }))))
7055 /* Simplify x / pow (y,z) -> x * pow(y,-z). */
7057 (rdiv @0 (POW:s @1 @2))
7058 (mult @0 (POW @1 (negate @2))))
 (for sqrts (SQRT)
      cbrts (CBRT)
      pows (POW)
  /* sqrt(sqrt(x)) -> pow(x,1/4). */
  (simplify
   (sqrts (sqrts @0))
   (pows @0 { build_real (type, dconst_quarter ()); }))
7067 /* sqrt(cbrt(x)) -> pow(x,1/6). */
  (simplify
   (sqrts (cbrts @0))
   (pows @0 { build_real_truncate (type, dconst_sixth ()); }))
7071 /* cbrt(sqrt(x)) -> pow(x,1/6). */
  (simplify
   (cbrts (sqrts @0))
   (pows @0 { build_real_truncate (type, dconst_sixth ()); }))
7075 /* cbrt(cbrt(x)) -> pow(x,1/9), iff x is nonnegative. */
  (simplify
   (cbrts (cbrts tree_expr_nonnegative_p@0))
7078 (pows @0 { build_real_truncate (type, dconst_ninth ()); }))
7079 /* sqrt(pow(x,y)) -> pow(|x|,y*0.5). */
  (simplify
   (sqrts (pows @0 @1))
7082 (pows (abs @0) (mult @1 { build_real (type, dconsthalf); })))
7083 /* cbrt(pow(x,y)) -> pow(x,y/3), iff x is nonnegative. */
  (simplify
   (cbrts (pows tree_expr_nonnegative_p@0 @1))
7086 (pows @0 (mult @1 { build_real_truncate (type, dconst_third ()); })))
7087 /* pow(sqrt(x),y) -> pow(x,y*0.5). */
  (simplify
   (pows (sqrts @0) @1)
7090 (pows @0 (mult @1 { build_real (type, dconsthalf); })))
7091 /* pow(cbrt(x),y) -> pow(x,y/3) iff x is nonnegative. */
  (simplify
   (pows (cbrts tree_expr_nonnegative_p@0) @1)
7094 (pows @0 (mult @1 { build_real_truncate (type, dconst_third ()); })))
7095 /* pow(pow(x,y),z) -> pow(x,y*z) iff x is nonnegative. */
  (simplify
   (pows (pows tree_expr_nonnegative_p@0 @1) @2)
7098 (pows @0 (mult @1 @2))))
7100 /* cabs(x+xi) -> fabs(x)*sqrt(2). */
 (simplify
  (CABS (complex @0 @0))
7103 (mult (abs @0) { build_real_truncate (type, dconst_sqrt2 ()); }))
7105 /* hypot(x,x) -> fabs(x)*sqrt(2). */
 (simplify
  (HYPOT @0 @0)
  (mult (abs @0) { build_real_truncate (type, dconst_sqrt2 ()); }))
7110 /* cexp(x+yi) -> exp(x)*cexpi(y). */
 (for cexps (CEXP)
      exps (EXP)
      cexpis (CEXPI)
  (simplify
   (cexps compositional_complex@0)
7116 (if (targetm.libc_has_function (function_c99_math_complex, TREE_TYPE (@0)))
    (complex
     (mult (exps@1 (realpart @0)) (realpart (cexpis:type@2 (imagpart @0))))
7119 (mult @1 (imagpart @2)))))))
7121 (if (canonicalize_math_p ())
7122 /* floor(x) -> trunc(x) if x is nonnegative. */
 (for floors (FLOOR_ALL)
      truncs (TRUNC_ALL)
  (simplify
   (floors tree_expr_nonnegative_p@0)
   (truncs @0))))
7129 (match double_value_p
 @0
 (if (TYPE_MAIN_VARIANT (TREE_TYPE (@0)) == double_type_node)))
(for froms (BUILT_IN_TRUNCL
	    BUILT_IN_FLOORL
	    BUILT_IN_CEILL
	    BUILT_IN_ROUNDL
	    BUILT_IN_NEARBYINTL
	    BUILT_IN_RINTL)
     tos (BUILT_IN_TRUNC
	  BUILT_IN_FLOOR
	  BUILT_IN_CEIL
	  BUILT_IN_ROUND
	  BUILT_IN_NEARBYINT
	  BUILT_IN_RINT)
7144 /* truncl(extend(x)) -> extend(trunc(x)), etc., if x is a double. */
7145 (if (optimize && canonicalize_math_p ())
 (simplify
  (froms (convert double_value_p@0))
7148 (convert (tos @0)))))
7150 (match float_value_p
 @0
 (if (TYPE_MAIN_VARIANT (TREE_TYPE (@0)) == float_type_node)))
7153 (for froms (BUILT_IN_TRUNCL BUILT_IN_TRUNC
7154 BUILT_IN_FLOORL BUILT_IN_FLOOR
7155 BUILT_IN_CEILL BUILT_IN_CEIL
7156 BUILT_IN_ROUNDL BUILT_IN_ROUND
7157 BUILT_IN_NEARBYINTL BUILT_IN_NEARBYINT
7158 BUILT_IN_RINTL BUILT_IN_RINT)
7159 tos (BUILT_IN_TRUNCF BUILT_IN_TRUNCF
7160 BUILT_IN_FLOORF BUILT_IN_FLOORF
7161 BUILT_IN_CEILF BUILT_IN_CEILF
7162 BUILT_IN_ROUNDF BUILT_IN_ROUNDF
7163 BUILT_IN_NEARBYINTF BUILT_IN_NEARBYINTF
7164 BUILT_IN_RINTF BUILT_IN_RINTF)
 /* truncl(extend(x)) and trunc(extend(x)) -> extend(truncf(x)), etc.,
    if x is a float. */
7167 (if (optimize && canonicalize_math_p ()
7168 && targetm.libc_has_function (function_c99_misc, NULL_TREE))
 (simplify
  (froms (convert float_value_p@0))
7171 (convert (tos @0)))))
7174 (match float16_value_p
 @0
 (if (TYPE_MAIN_VARIANT (TREE_TYPE (@0)) == float16_type_node)))
7177 (for froms (BUILT_IN_TRUNCL BUILT_IN_TRUNC BUILT_IN_TRUNCF
7178 BUILT_IN_FLOORL BUILT_IN_FLOOR BUILT_IN_FLOORF
7179 BUILT_IN_CEILL BUILT_IN_CEIL BUILT_IN_CEILF
7180 BUILT_IN_ROUNDEVENL BUILT_IN_ROUNDEVEN BUILT_IN_ROUNDEVENF
7181 BUILT_IN_ROUNDL BUILT_IN_ROUND BUILT_IN_ROUNDF
7182 BUILT_IN_NEARBYINTL BUILT_IN_NEARBYINT BUILT_IN_NEARBYINTF
7183 BUILT_IN_RINTL BUILT_IN_RINT BUILT_IN_RINTF
7184 BUILT_IN_SQRTL BUILT_IN_SQRT BUILT_IN_SQRTF)
7185 tos (IFN_TRUNC IFN_TRUNC IFN_TRUNC
7186 IFN_FLOOR IFN_FLOOR IFN_FLOOR
7187 IFN_CEIL IFN_CEIL IFN_CEIL
7188 IFN_ROUNDEVEN IFN_ROUNDEVEN IFN_ROUNDEVEN
7189 IFN_ROUND IFN_ROUND IFN_ROUND
7190 IFN_NEARBYINT IFN_NEARBYINT IFN_NEARBYINT
7191 IFN_RINT IFN_RINT IFN_RINT
7192 IFN_SQRT IFN_SQRT IFN_SQRT)
 /* (_Float16) round ((double) x) -> __builtin_roundf16 (x), etc.,
    if x is a _Float16. */
 (simplify
  (convert (froms (convert float16_value_p@0)))
  (if (optimize
       && types_match (type, TREE_TYPE (@0))
       && direct_internal_fn_supported_p (as_internal_fn (tos),
					  type, OPTIMIZE_FOR_BOTH))
   (tos @0))))
/* Simplify (trunc)copysign ((extend)x, (extend)y) to copysignf (x, y)
   where x and y are float values; similarly for _Float16/double. */
7205 (for copysigns (COPYSIGN_ALL)
 (simplify
  (convert (copysigns (convert@2 @0) (convert @1)))
  (if (optimize
       && !HONOR_SNANS (@2)
7210 && types_match (type, TREE_TYPE (@0))
7211 && types_match (type, TREE_TYPE (@1))
7212 && TYPE_PRECISION (type) < TYPE_PRECISION (TREE_TYPE (@2))
7213 && direct_internal_fn_supported_p (IFN_COPYSIGN,
7214 type, OPTIMIZE_FOR_BOTH))
7215 (IFN_COPYSIGN @0 @1))))
7217 (for froms (BUILT_IN_FMAF BUILT_IN_FMA BUILT_IN_FMAL)
7218 tos (IFN_FMA IFN_FMA IFN_FMA)
 (simplify
  (convert (froms (convert@3 @0) (convert @1) (convert @2)))
  (if (flag_unsafe_math_optimizations
       && optimize
7223 && FLOAT_TYPE_P (type)
7224 && FLOAT_TYPE_P (TREE_TYPE (@3))
7225 && types_match (type, TREE_TYPE (@0))
7226 && types_match (type, TREE_TYPE (@1))
7227 && types_match (type, TREE_TYPE (@2))
7228 && TYPE_PRECISION (type) < TYPE_PRECISION (TREE_TYPE (@3))
7229 && direct_internal_fn_supported_p (as_internal_fn (tos),
					  type, OPTIMIZE_FOR_BOTH))
   (tos @0 @1 @2))))
7233 (for maxmin (max min)
 (simplify
  (convert (maxmin (convert@2 @0) (convert @1)))
  (if (optimize
7237 && FLOAT_TYPE_P (type)
7238 && FLOAT_TYPE_P (TREE_TYPE (@2))
7239 && types_match (type, TREE_TYPE (@0))
7240 && types_match (type, TREE_TYPE (@1))
       && element_precision (type) < element_precision (TREE_TYPE (@2)))
   (maxmin @0 @1))))
7245 (for froms (XFLOORL XCEILL XROUNDL XRINTL)
7246 tos (XFLOOR XCEIL XROUND XRINT)
7247 /* llfloorl(extend(x)) -> llfloor(x), etc., if x is a double. */
7248 (if (optimize && canonicalize_math_p ())
  (simplify
   (froms (convert double_value_p@0))
   (tos @0))))
7253 (for froms (XFLOORL XCEILL XROUNDL XRINTL
7254 XFLOOR XCEIL XROUND XRINT)
7255 tos (XFLOORF XCEILF XROUNDF XRINTF)
 /* llfloorl(extend(x)) and llfloor(extend(x)) -> llfloorf(x), etc.,
    if x is a float. */
7258 (if (optimize && canonicalize_math_p ())
  (simplify
   (froms (convert float_value_p@0))
   (tos @0))))
7263 (if (canonicalize_math_p ())
7264 /* xfloor(x) -> fix_trunc(x) if x is nonnegative. */
7265 (for floors (IFLOOR LFLOOR LLFLOOR)
  (simplify
   (floors tree_expr_nonnegative_p@0)
   (fix_trunc @0))))
7270 (if (canonicalize_math_p ())
7271 /* xfloor(x) -> fix_trunc(x), etc., if x is integer valued. */
7272 (for fns (IFLOOR LFLOOR LLFLOOR
	   ICEIL LCEIL LLCEIL
	   IROUND LROUND LLROUND)
  (simplify
   (fns integer_valued_real_p@0)
   (fix_trunc @0))))
7278 (if (!flag_errno_math)
7279 /* xrint(x) -> fix_trunc(x), etc., if x is integer valued. */
7280 (for rints (IRINT LRINT LLRINT)
  (simplify
   (rints integer_valued_real_p@0)
   (fix_trunc @0))))
7285 (if (canonicalize_math_p ())
7286 (for ifn (IFLOOR ICEIL IROUND IRINT)
7287 lfn (LFLOOR LCEIL LROUND LRINT)
7288 llfn (LLFLOOR LLCEIL LLROUND LLRINT)
7289 /* Canonicalize iround (x) to lround (x) on ILP32 targets where
7290 sizeof (int) == sizeof (long). */
7291 (if (TYPE_PRECISION (integer_type_node)
7292 == TYPE_PRECISION (long_integer_type_node))
   (simplify
    (ifn @0)
    (lfn:long_integer_type_node @0)))
7296 /* Canonicalize llround (x) to lround (x) on LP64 targets where
7297 sizeof (long long) == sizeof (long). */
7298 (if (TYPE_PRECISION (long_long_integer_type_node)
7299 == TYPE_PRECISION (long_integer_type_node))
   (simplify
    (llfn @0)
    (lfn:long_integer_type_node @0)))))
7304 /* cproj(x) -> x if we're ignoring infinities. */
(simplify
 (CPROJ @0)
 (if (!HONOR_INFINITIES (type))
  @0))
7310 /* If the real part is inf and the imag part is known to be
7311 nonnegative, return (inf + 0i). */
(simplify
 (CPROJ (complex REAL_CST@0 tree_expr_nonnegative_p@1))
7314 (if (real_isinf (TREE_REAL_CST_PTR (@0)))
7315 { build_complex_inf (type, false); }))
7317 /* If the imag part is inf, return (inf+I*copysign(0,imag)). */
(simplify
 (CPROJ (complex @0 REAL_CST@1))
7320 (if (real_isinf (TREE_REAL_CST_PTR (@1)))
7321 { build_complex_inf (type, TREE_REAL_CST_PTR (@1)->sign); }))
(for pows (POW)
     sqrts (SQRT)
     cbrts (CBRT)
 (simplify
  (pows @0 REAL_CST@1)
  (with {
    const REAL_VALUE_TYPE *value = TREE_REAL_CST_PTR (@1);
    REAL_VALUE_TYPE tmp;
   }
   (switch
7333 /* pow(x,0) -> 1. */
7334 (if (real_equal (value, &dconst0))
7335 { build_real (type, dconst1); })
7336 /* pow(x,1) -> x. */
    (if (real_equal (value, &dconst1))
     @0)
7339 /* pow(x,-1) -> 1/x. */
7340 (if (real_equal (value, &dconstm1))
7341 (rdiv { build_real (type, dconst1); } @0))
7342 /* pow(x,0.5) -> sqrt(x). */
7343 (if (flag_unsafe_math_optimizations
7344 && canonicalize_math_p ()
	 && real_equal (value, &dconsthalf))
     (sqrts @0))
7347 /* pow(x,1/3) -> cbrt(x). */
7348 (if (flag_unsafe_math_optimizations
7349 && canonicalize_math_p ()
7350 && (tmp = real_value_truncate (TYPE_MODE (type), dconst_third ()),
	 && (tmp = real_value_truncate (TYPE_MODE (type), dconst_third ()),
	     real_equal (value, &tmp)))
     (cbrts @0)))))
7354 /* powi(1,x) -> 1. */
(simplify
 (POWI real_onep@0 @1)
 @0)
(simplify
 (POWI @0 INTEGER_CST@1)
 (switch
7362 /* powi(x,0) -> 1. */
7363 (if (wi::to_wide (@1) == 0)
7364 { build_real (type, dconst1); })
7365 /* powi(x,1) -> x. */
  (if (wi::to_wide (@1) == 1)
   @0)
7368 /* powi(x,-1) -> 1/x. */
7369 (if (wi::to_wide (@1) == -1)
7370 (rdiv { build_real (type, dconst1); } @0))))
7372 /* Narrowing of arithmetic and logical operations.
7374 These are conceptually similar to the transformations performed for
7375 the C/C++ front-ends by shorten_binary_op and shorten_compare. Long
7376 term we want to move all that code out of the front-ends into here. */
7378 /* Convert (outertype)((innertype0)a+(innertype1)b)
7379 into ((newtype)a+(newtype)b) where newtype
7380 is the widest mode from all of these. */
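/* For example, with 16-bit short and 32-bit int, the C function

     short f (short a, short b) { return a + b; }

   is gimplified roughly as (short)((int)a + (int)b); the fold below
   turns it into (short)((unsigned short)a + (unsigned short)b), doing
   the arithmetic in the narrow type without undefined overflow.  */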
7381 (for op (plus minus mult rdiv)
7383 (convert (op:s@0 (convert1?@3 @1) (convert2?@4 @2)))
 /* If we have a narrowing conversion of an arithmetic operation where
    both operands are widening conversions from the same type as the outer
    narrowing conversion, then convert the innermost operands to a
    suitable unsigned type (to avoid introducing undefined behavior),
    perform the operation and convert the result to the desired type. */
7389 (if (INTEGRAL_TYPE_P (type)
7392 /* We check for type compatibility between @0 and @1 below,
7393 so there's no need to check that @2/@4 are integral types. */
7394 && INTEGRAL_TYPE_P (TREE_TYPE (@1))
7395 && INTEGRAL_TYPE_P (TREE_TYPE (@3))
7396 /* The precision of the type of each operand must match the
7397 precision of the mode of each operand, similarly for the
7399 && type_has_mode_precision_p (TREE_TYPE (@1))
7400 && type_has_mode_precision_p (TREE_TYPE (@2))
7401 && type_has_mode_precision_p (type)
7402 /* The inner conversion must be a widening conversion. */
7403 && TYPE_PRECISION (TREE_TYPE (@3)) > TYPE_PRECISION (TREE_TYPE (@1))
7404 && types_match (@1, type)
7405 && (types_match (@1, @2)
7406 /* Or the second operand is const integer or converted const
7407 integer from valueize. */
7408 || poly_int_tree_p (@4)))
7409 (if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1)))
7410 (op @1 (convert @2))
7411 (with { tree utype = unsigned_type_for (TREE_TYPE (@1)); }
7412 (convert (op (convert:utype @1)
7413 (convert:utype @2)))))
7414 (if (FLOAT_TYPE_P (type)
7415 && DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@0))
7416 == DECIMAL_FLOAT_TYPE_P (type))
7417 (with { tree arg0 = strip_float_extensions (@1);
7418 tree arg1 = strip_float_extensions (@2);
7419 tree itype = TREE_TYPE (@0);
7420 tree ty1 = TREE_TYPE (arg0);
7421 tree ty2 = TREE_TYPE (arg1);
7422 enum tree_code code = TREE_CODE (itype); }
7423 (if (FLOAT_TYPE_P (ty1)
7424 && FLOAT_TYPE_P (ty2))
7425 (with { tree newtype = type;
7426 if (TYPE_MODE (ty1) == SDmode
7427 || TYPE_MODE (ty2) == SDmode
7428 || TYPE_MODE (type) == SDmode)
7429 newtype = dfloat32_type_node;
7430 if (TYPE_MODE (ty1) == DDmode
7431 || TYPE_MODE (ty2) == DDmode
7432 || TYPE_MODE (type) == DDmode)
7433 newtype = dfloat64_type_node;
7434 if (TYPE_MODE (ty1) == TDmode
7435 || TYPE_MODE (ty2) == TDmode
7436 || TYPE_MODE (type) == TDmode)
7437 newtype = dfloat128_type_node; }
7438 (if ((newtype == dfloat32_type_node
7439 || newtype == dfloat64_type_node
7440 || newtype == dfloat128_type_node)
7442 && types_match (newtype, type))
7443 (op (convert:newtype @1) (convert:newtype @2))
	 (with { if (element_precision (ty1) > element_precision (newtype))
		   newtype = ty1;
		 if (element_precision (ty2) > element_precision (newtype))
		   newtype = ty2; }
7448 /* Sometimes this transformation is safe (cannot
7449 change results through affecting double rounding
7450 cases) and sometimes it is not. If NEWTYPE is
7451 wider than TYPE, e.g. (float)((long double)double
7452 + (long double)double) converted to
7453 (float)(double + double), the transformation is
7454 unsafe regardless of the details of the types
7455 involved; double rounding can arise if the result
7456 of NEWTYPE arithmetic is a NEWTYPE value half way
7457 between two representable TYPE values but the
7458 exact value is sufficiently different (in the
7459 right direction) for this difference to be
7460 visible in ITYPE arithmetic. If NEWTYPE is the
7461 same as TYPE, however, the transformation may be
7462 safe depending on the types involved: it is safe
7463 if the ITYPE has strictly more than twice as many
7464 mantissa bits as TYPE, can represent infinities
7465 and NaNs if the TYPE can, and has sufficient
7466 exponent range for the product or ratio of two
7467 values representable in the TYPE to be within the
7468 range of normal values of ITYPE. */
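	    /* Concretely: float has a 24-bit significand and double a
	       53-bit one, and 53 > 2*24, so (float)((double)a + (double)b)
	       with float a and b can be shortened to a + b even without
	       -funsafe-math-optimizations.  With x87 extended long double
	       (64-bit significand), the analogous
	       (double)((long double)a + (long double)b) cannot, since
	       64 < 2*53.  */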
7469 (if (element_precision (newtype) < element_precision (itype)
7470 && (!VECTOR_MODE_P (TYPE_MODE (newtype))
7471 || target_supports_op_p (newtype, op, optab_default))
7472 && (flag_unsafe_math_optimizations
7473 || (element_precision (newtype) == element_precision (type)
7474 && real_can_shorten_arithmetic (element_mode (itype),
7475 element_mode (type))
7476 && !excess_precision_type (newtype)))
7477 && !types_match (itype, newtype))
7478 (convert:type (op (convert:newtype @1)
7479 (convert:newtype @2)))
7484 /* This is another case of narrowing, specifically when there's an outer
7485 BIT_AND_EXPR which masks off bits outside the type of the innermost
7486 operands. Like the previous case we have to convert the operands
7487 to unsigned types to avoid introducing undefined behavior for the
7488 arithmetic operation. */
7489 (for op (minus plus)
7491 (bit_and (op:s (convert@2 @0) (convert@3 @1)) INTEGER_CST@4)
7492 (if (INTEGRAL_TYPE_P (type)
7493 /* We check for type compatibility between @0 and @1 below,
7494 so there's no need to check that @1/@3 are integral types. */
7495 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
7496 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
7497 /* The precision of the type of each operand must match the
7498 precision of the mode of each operand, similarly for the
7500 && type_has_mode_precision_p (TREE_TYPE (@0))
7501 && type_has_mode_precision_p (TREE_TYPE (@1))
7502 && type_has_mode_precision_p (type)
7503 /* The inner conversion must be a widening conversion. */
7504 && TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (TREE_TYPE (@0))
7505 && types_match (@0, @1)
7506 && (tree_int_cst_min_precision (@4, TYPE_SIGN (TREE_TYPE (@0)))
7507 <= TYPE_PRECISION (TREE_TYPE (@0)))
7508 && (wi::to_wide (@4)
7509 & wi::mask (TYPE_PRECISION (TREE_TYPE (@0)),
7510 true, TYPE_PRECISION (type))) == 0)
7511 (if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
7512 (with { tree ntype = TREE_TYPE (@0); }
7513 (convert (bit_and (op @0 @1) (convert:ntype @4))))
7514 (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
7515 (convert (bit_and (op (convert:utype @0) (convert:utype @1))
7516 (convert:utype @4))))))))
7518 /* Transform (@0 < @1 and @0 < @2) to use min,
7519 (@0 > @1 and @0 > @2) to use max */
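/* E.g. x < a && x < b becomes x < MIN (a, b), and
   x > a || x > b becomes x > MIN (a, b).  */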
7520 (for logic (bit_and bit_and bit_and bit_and bit_ior bit_ior bit_ior bit_ior)
7521 op (lt le gt ge lt le gt ge )
7522 ext (min min max max max max min min )
7524 (logic (op:cs @0 @1) (op:cs @0 @2))
7525 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
7526 && TREE_CODE (@0) != INTEGER_CST)
7527 (op @0 (ext @1 @2)))))
7529 /* Max<bool0, bool1> -> bool0 | bool1
7530 Min<bool0, bool1> -> bool0 & bool1 */
(for op (max min)
     logic (bit_ior bit_and)
 (simplify
  (op zero_one_valued_p@0 zero_one_valued_p@1)
  (logic @0 @1)))
7537 /* signbit(x) != 0 ? -x : x -> abs(x)
7538 signbit(x) == 0 ? -x : x -> -abs(x) */
7542 (cond (neeq (sign @0) integer_zerop) (negate @0) @0)
7543 (if (neeq == NE_EXPR)
7545 (negate (abs @0))))))
7548 /* signbit(x) -> 0 if x is nonnegative. */
7549 (SIGNBIT tree_expr_nonnegative_p@0)
7550 { integer_zero_node; })
7553 /* signbit(x) -> x<0 if x doesn't have signed zeros. */
7555 (if (!HONOR_SIGNED_ZEROS (@0))
7556 (convert (lt @0 { build_real (TREE_TYPE (@0), dconst0); }))))
7558 /* Transform comparisons of the form X +- C1 CMP C2 to X CMP C2 -+ C1. */
7560 (for op (plus minus)
7563 (cmp (op@3 @0 INTEGER_CST@1) INTEGER_CST@2)
7564 (if (!TREE_OVERFLOW (@1) && !TREE_OVERFLOW (@2)
7565 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0))
7566 && !TYPE_OVERFLOW_TRAPS (TREE_TYPE (@0))
7567 && !TYPE_SATURATING (TREE_TYPE (@0)))
7568 (with { tree res = int_const_binop (rop, @2, @1); }
7569 (if (TREE_OVERFLOW (res)
7570 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
7571 { constant_boolean_node (cmp == NE_EXPR, type); }
7572 (if (single_use (@3))
7573 (cmp @0 { TREE_OVERFLOW (res)
7574 ? drop_tree_overflow (res) : res; }))))))))
7575 (for cmp (lt le gt ge)
7576 (for op (plus minus)
7579 (cmp (op@3 @0 INTEGER_CST@1) INTEGER_CST@2)
7580 (if (!TREE_OVERFLOW (@1) && !TREE_OVERFLOW (@2)
7581 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
7582 (with { tree res = int_const_binop (rop, @2, @1); }
7583 (if (TREE_OVERFLOW (res))
7585 fold_overflow_warning (("assuming signed overflow does not occur "
7586 "when simplifying conditional to constant"),
7587 WARN_STRICT_OVERFLOW_CONDITIONAL);
7588 bool less = cmp == LE_EXPR || cmp == LT_EXPR;
7589 /* wi::ges_p (@2, 0) should be sufficient for a signed type. */
7590 bool ovf_high = wi::lt_p (wi::to_wide (@1), 0,
7591 TYPE_SIGN (TREE_TYPE (@1)))
7592 != (op == MINUS_EXPR);
7593 constant_boolean_node (less == ovf_high, type);
7595 (if (single_use (@3))
7598 fold_overflow_warning (("assuming signed overflow does not occur "
7599 "when changing X +- C1 cmp C2 to "
7601 WARN_STRICT_OVERFLOW_COMPARISON);
7603 (cmp @0 { res; })))))))))
7605 /* Canonicalizations of BIT_FIELD_REFs. */
7608 (BIT_FIELD_REF (BIT_FIELD_REF @0 @1 @2) @3 @4)
7609 (BIT_FIELD_REF @0 @3 { const_binop (PLUS_EXPR, bitsizetype, @2, @4); }))
7612 (BIT_FIELD_REF (view_convert @0) @1 @2)
7613 (BIT_FIELD_REF @0 @1 @2))
7616 (BIT_FIELD_REF @0 @1 integer_zerop)
7617 (if (tree_int_cst_equal (@1, TYPE_SIZE (TREE_TYPE (@0))))
7621 (BIT_FIELD_REF @0 @1 @2)
7623 (if (TREE_CODE (TREE_TYPE (@0)) == COMPLEX_TYPE
7624 && tree_int_cst_equal (@1, TYPE_SIZE (TREE_TYPE (TREE_TYPE (@0)))))
7626 (if (integer_zerop (@2))
7627 (view_convert (realpart @0)))
7628 (if (tree_int_cst_equal (@2, TYPE_SIZE (TREE_TYPE (TREE_TYPE (@0)))))
7629 (view_convert (imagpart @0)))))
7630 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
7631 && INTEGRAL_TYPE_P (type)
7632 /* On GIMPLE this should only apply to register arguments. */
7633 && (! GIMPLE || is_gimple_reg (@0))
7634 /* A bit-field-ref that referenced the full argument can be stripped. */
7635 && ((compare_tree_int (@1, TYPE_PRECISION (TREE_TYPE (@0))) == 0
7636 && integer_zerop (@2))
7637 /* Low-parts can be reduced to integral conversions.
7638 ??? The following doesn't work for PDP endian. */
7639 || (BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN
7640 /* But only do this after vectorization. */
7641 && canonicalize_math_after_vectorization_p ()
7642 /* Don't even think about BITS_BIG_ENDIAN. */
7643 && TYPE_PRECISION (TREE_TYPE (@0)) % BITS_PER_UNIT == 0
7644 && TYPE_PRECISION (type) % BITS_PER_UNIT == 0
7645 && compare_tree_int (@2, (BYTES_BIG_ENDIAN
7646 ? (TYPE_PRECISION (TREE_TYPE (@0))
7647 - TYPE_PRECISION (type))
7651 /* Simplify vector extracts. */
7654 (BIT_FIELD_REF CONSTRUCTOR@0 @1 @2)
7655 (if (VECTOR_TYPE_P (TREE_TYPE (@0))
7656 && tree_fits_uhwi_p (TYPE_SIZE (type))
7657 && ((tree_to_uhwi (TYPE_SIZE (type))
7658 == tree_to_uhwi (TYPE_SIZE (TREE_TYPE (TREE_TYPE (@0)))))
7659 || (VECTOR_TYPE_P (type)
7660 && (tree_to_uhwi (TYPE_SIZE (TREE_TYPE (type)))
7661 == tree_to_uhwi (TYPE_SIZE (TREE_TYPE (TREE_TYPE (@0))))))))
7664 tree ctor = (TREE_CODE (@0) == SSA_NAME
7665 ? gimple_assign_rhs1 (SSA_NAME_DEF_STMT (@0)) : @0);
7666 tree eltype = TREE_TYPE (TREE_TYPE (ctor));
7667 unsigned HOST_WIDE_INT width = tree_to_uhwi (TYPE_SIZE (eltype));
7668 unsigned HOST_WIDE_INT n = tree_to_uhwi (@1);
7669 unsigned HOST_WIDE_INT idx = tree_to_uhwi (@2);
7672 && (idx % width) == 0
7674 && known_le ((idx + n) / width,
7675 TYPE_VECTOR_SUBPARTS (TREE_TYPE (ctor))))
7680 /* Constructor elements can be subvectors. */
7682 if (CONSTRUCTOR_NELTS (ctor) != 0)
7684 tree cons_elem = TREE_TYPE (CONSTRUCTOR_ELT (ctor, 0)->value);
7685 if (TREE_CODE (cons_elem) == VECTOR_TYPE)
7686 k = TYPE_VECTOR_SUBPARTS (cons_elem);
7688 unsigned HOST_WIDE_INT elt, count, const_k;
7691 /* We keep an exact subset of the constructor elements. */
7692 (if (multiple_p (idx, k, &elt) && multiple_p (n, k, &count))
7693 (if (CONSTRUCTOR_NELTS (ctor) == 0)
7694 { build_zero_cst (type); }
7696 (if (elt < CONSTRUCTOR_NELTS (ctor))
7697 (view_convert { CONSTRUCTOR_ELT (ctor, elt)->value; })
7698 { build_zero_cst (type); })
7699 /* We don't want to emit new CTORs unless the old one goes away.
7700 ??? Eventually allow this if the CTOR ends up constant or
7702 (if (single_use (@0))
7705 vec<constructor_elt, va_gc> *vals;
7706 vec_alloc (vals, count);
7707 bool constant_p = true;
7709 for (unsigned i = 0;
7710 i < count && elt + i < CONSTRUCTOR_NELTS (ctor); ++i)
7712 tree e = CONSTRUCTOR_ELT (ctor, elt + i)->value;
7713 CONSTRUCTOR_APPEND_ELT (vals, NULL_TREE, e);
7714 if (!CONSTANT_CLASS_P (e))
7717 tree evtype = (types_match (TREE_TYPE (type),
7718 TREE_TYPE (TREE_TYPE (ctor)))
7720 : build_vector_type (TREE_TYPE (TREE_TYPE (ctor)),
7722 /* We used to build a CTOR in the non-constant case here
7723 but that's not a GIMPLE value. We'd have to expose this
7724 operation somehow so the code generation can properly
7725 split it out to a separate stmt. */
7726 res = (constant_p ? build_vector_from_ctor (evtype, vals)
7727 : (GIMPLE ? NULL_TREE : build_constructor (evtype, vals)));
7730 (view_convert { res; })))))))
7731 /* The bitfield references a single constructor element. */
7732 (if (k.is_constant (&const_k)
7733 && idx + n <= (idx / const_k + 1) * const_k)
7735 (if (CONSTRUCTOR_NELTS (ctor) <= idx / const_k)
7736 { build_zero_cst (type); })
7738 (view_convert { CONSTRUCTOR_ELT (ctor, idx / const_k)->value; }))
7739 (BIT_FIELD_REF { CONSTRUCTOR_ELT (ctor, idx / const_k)->value; }
7740 @1 { bitsize_int ((idx % const_k) * width); })))))))))
/* Simplify a bit extraction from a bit insertion for the cases where
   the inserted element fully covers the extraction or the insertion
   does not touch the extraction. */
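/* E.g. if a uint8_t value has been inserted at bit 8 of a 32-bit word,
   extracting bits [8, 16) yields the inserted value itself, while
   extracting bits [16, 32) just reads the original word.  */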
(simplify
 (BIT_FIELD_REF (bit_insert @0 @1 @ipos) @rsize @rpos)
7749 unsigned HOST_WIDE_INT isize;
7750 if (INTEGRAL_TYPE_P (TREE_TYPE (@1)))
7751 isize = TYPE_PRECISION (TREE_TYPE (@1));
7753 isize = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (@1)));
7756 (if ((!INTEGRAL_TYPE_P (TREE_TYPE (@1))
7757 || type_has_mode_precision_p (TREE_TYPE (@1)))
7758 && wi::leu_p (wi::to_wide (@ipos), wi::to_wide (@rpos))
7759 && wi::leu_p (wi::to_wide (@rpos) + wi::to_wide (@rsize),
7760 wi::to_wide (@ipos) + isize))
7761 (BIT_FIELD_REF @1 @rsize { wide_int_to_tree (bitsizetype,
7763 - wi::to_wide (@ipos)); }))
7764 (if (wi::eq_p (wi::to_wide (@ipos), wi::to_wide (@rpos))
7765 && compare_tree_int (@rsize, isize) == 0)
7767 (if (wi::geu_p (wi::to_wide (@ipos),
7768 wi::to_wide (@rpos) + wi::to_wide (@rsize))
7769 || wi::geu_p (wi::to_wide (@rpos),
7770 wi::to_wide (@ipos) + isize))
7771 (BIT_FIELD_REF @0 @rsize @rpos)))))
7773 (if (canonicalize_math_after_vectorization_p ())
7776 (fmas:c (negate @0) @1 @2)
7777 (IFN_FNMA @0 @1 @2))
7779 (fmas @0 @1 (negate @2))
7782 (fmas:c (negate @0) @1 (negate @2))
7783 (IFN_FNMS @0 @1 @2))
7785 (negate (fmas@3 @0 @1 @2))
7786 (if (single_use (@3))
7787 (IFN_FNMS @0 @1 @2))))
7790 (IFN_FMS:c (negate @0) @1 @2)
7791 (IFN_FNMS @0 @1 @2))
7793 (IFN_FMS @0 @1 (negate @2))
7796 (IFN_FMS:c (negate @0) @1 (negate @2))
7797 (IFN_FNMA @0 @1 @2))
7799 (negate (IFN_FMS@3 @0 @1 @2))
7800 (if (single_use (@3))
7801 (IFN_FNMA @0 @1 @2)))
7804 (IFN_FNMA:c (negate @0) @1 @2)
7807 (IFN_FNMA @0 @1 (negate @2))
7808 (IFN_FNMS @0 @1 @2))
7810 (IFN_FNMA:c (negate @0) @1 (negate @2))
7813 (negate (IFN_FNMA@3 @0 @1 @2))
7814 (if (single_use (@3))
7815 (IFN_FMS @0 @1 @2)))
7818 (IFN_FNMS:c (negate @0) @1 @2)
7821 (IFN_FNMS @0 @1 (negate @2))
7822 (IFN_FNMA @0 @1 @2))
7824 (IFN_FNMS:c (negate @0) @1 (negate @2))
7827 (negate (IFN_FNMS@3 @0 @1 @2))
7828 (if (single_use (@3))
7829 (IFN_FMA @0 @1 @2))))
7831 /* CLZ simplifications. */
7836 (op (clz:s@2 @0) INTEGER_CST@1)
7837 (if (integer_zerop (@1) && single_use (@2))
7838 /* clz(X) == 0 is (int)X < 0 and clz(X) != 0 is (int)X >= 0. */
7839 (with { tree type0 = TREE_TYPE (@0);
7840 tree stype = signed_type_for (type0);
7841 HOST_WIDE_INT val = 0;
7842 /* Punt on hypothetical weird targets. */
7844 && CLZ_DEFINED_VALUE_AT_ZERO (SCALAR_TYPE_MODE (type0),
7850 (cmp (convert:stype @0) { build_zero_cst (stype); })))
7851 /* clz(X) == (prec-1) is X == 1 and clz(X) != (prec-1) is X != 1. */
7852 (with { bool ok = true;
7853 HOST_WIDE_INT val = 0;
7854 tree type0 = TREE_TYPE (@0);
7855 /* Punt on hypothetical weird targets. */
7857 && CLZ_DEFINED_VALUE_AT_ZERO (SCALAR_TYPE_MODE (type0),
7859 && val == TYPE_PRECISION (type0) - 1)
7862 (if (ok && wi::to_wide (@1) == (TYPE_PRECISION (type0) - 1))
7863 (op @0 { build_one_cst (type0); })))))))
7865 /* CTZ simplifications. */
7867 (for op (ge gt le lt)
7870 /* __builtin_ctz (x) >= C -> (x & ((1 << C) - 1)) == 0. */
7871 (op (ctz:s @0) INTEGER_CST@1)
7872 (with { bool ok = true;
7873 HOST_WIDE_INT val = 0;
7874 if (!tree_fits_shwi_p (@1))
7878 val = tree_to_shwi (@1);
7879 /* Canonicalize to >= or <. */
7880 if (op == GT_EXPR || op == LE_EXPR)
7882 if (val == HOST_WIDE_INT_MAX)
7888 bool zero_res = false;
7889 HOST_WIDE_INT zero_val = 0;
7890 tree type0 = TREE_TYPE (@0);
7891 int prec = TYPE_PRECISION (type0);
7893 && CTZ_DEFINED_VALUE_AT_ZERO (SCALAR_TYPE_MODE (type0),
7898 (if (ok && (!zero_res || zero_val >= val))
7899 { constant_boolean_node (cmp == EQ_EXPR ? true : false, type); })
7901 (if (ok && (!zero_res || zero_val < val))
7902 { constant_boolean_node (cmp == EQ_EXPR ? false : true, type); })
7903 (if (ok && (!zero_res || zero_val < 0 || zero_val >= prec))
7904 (cmp (bit_and @0 { wide_int_to_tree (type0,
7905 wi::mask (val, false, prec)); })
7906 { build_zero_cst (type0); })))))))
7909 /* __builtin_ctz (x) == C -> (x & ((1 << (C + 1)) - 1)) == (1 << C). */
7910 (op (ctz:s @0) INTEGER_CST@1)
7911 (with { bool zero_res = false;
7912 HOST_WIDE_INT zero_val = 0;
7913 tree type0 = TREE_TYPE (@0);
7914 int prec = TYPE_PRECISION (type0);
7916 && CTZ_DEFINED_VALUE_AT_ZERO (SCALAR_TYPE_MODE (type0),
7920 (if (tree_int_cst_sgn (@1) < 0 || wi::to_widest (@1) >= prec)
7921 (if (!zero_res || zero_val != wi::to_widest (@1))
7922 { constant_boolean_node (op == EQ_EXPR ? false : true, type); })
7923 (if (!zero_res || zero_val < 0 || zero_val >= prec)
7924 (op (bit_and @0 { wide_int_to_tree (type0,
7925 wi::mask (tree_to_uhwi (@1) + 1,
7927 { wide_int_to_tree (type0,
7928 wi::shifted_mask (tree_to_uhwi (@1), 1,
7929 false, prec)); })))))))
7931 /* POPCOUNT simplifications. */
7932 /* popcount(X) + popcount(Y) is popcount(X|Y) when X&Y must be zero. */
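/* E.g. popcount (x & 0xF0) + popcount (x & 0x0F) has disjoint nonzero
   bits in the two operands, so it becomes
   popcount ((x & 0xF0) | (x & 0x0F)).  */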
(simplify
 (plus (POPCOUNT:s @0) (POPCOUNT:s @1))
7935 (if (INTEGRAL_TYPE_P (type)
7936 && wi::bit_and (tree_nonzero_bits (@0), tree_nonzero_bits (@1)) == 0)
7937 (POPCOUNT (bit_ior @0 @1))))
7939 /* popcount(X) == 0 is X == 0, and related (in)equalities. */
7940 (for popcount (POPCOUNT)
7941 (for cmp (le eq ne gt)
      rep (eq eq ne ne)
  (simplify
   (cmp (popcount @0) integer_zerop)
7945 (rep @0 { build_zero_cst (TREE_TYPE (@0)); }))))
7947 /* popcount(bswap(x)) is popcount(x). */
7948 (for popcount (POPCOUNT)
7949 (for bswap (BUILT_IN_BSWAP16 BUILT_IN_BSWAP32
7950 BUILT_IN_BSWAP64 BUILT_IN_BSWAP128)
7952 (popcount (convert?@0 (bswap:s@1 @2)))
7953 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
7954 && INTEGRAL_TYPE_P (TREE_TYPE (@1)))
7955 (with { tree type0 = TREE_TYPE (@0);
7956 tree type1 = TREE_TYPE (@1);
7957 unsigned int prec0 = TYPE_PRECISION (type0);
7958 unsigned int prec1 = TYPE_PRECISION (type1); }
7959 (if (prec0 == prec1 || (prec0 > prec1 && TYPE_UNSIGNED (type1)))
7960 (popcount (convert:type0 (convert:type1 @2)))))))))
7962 /* popcount(rotate(X Y)) is popcount(X). */
7963 (for popcount (POPCOUNT)
7964 (for rot (lrotate rrotate)
7966 (popcount (convert?@0 (rot:s@1 @2 @3)))
7967 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
7968 && INTEGRAL_TYPE_P (TREE_TYPE (@1))
7969 && (GIMPLE || !TREE_SIDE_EFFECTS (@3)))
7970 (with { tree type0 = TREE_TYPE (@0);
7971 tree type1 = TREE_TYPE (@1);
7972 unsigned int prec0 = TYPE_PRECISION (type0);
7973 unsigned int prec1 = TYPE_PRECISION (type1); }
7974 (if (prec0 == prec1 || (prec0 > prec1 && TYPE_UNSIGNED (type1)))
7975 (popcount (convert:type0 @2))))))))
/* Canonicalize POPCOUNT(x)&1 as PARITY(x). */
(simplify
 (bit_and (POPCOUNT @0) integer_onep)
 (PARITY @0))
/* popcount(X&Y) + popcount(X|Y) is popcount(X) + popcount(Y). */
(simplify
 (plus:c (POPCOUNT:s (bit_and:s @0 @1)) (POPCOUNT:s (bit_ior:cs @0 @1)))
7985 (plus (POPCOUNT @0) (POPCOUNT @1)))
7987 /* popcount(X) + popcount(Y) - popcount(X&Y) is popcount(X|Y). */
7988 /* popcount(X) + popcount(Y) - popcount(X|Y) is popcount(X&Y). */
7989 (for popcount (POPCOUNT)
7990 (for log1 (bit_and bit_ior)
7991 log2 (bit_ior bit_and)
7993 (minus (plus:s (popcount:s @0) (popcount:s @1))
7994 (popcount:s (log1:cs @0 @1)))
7995 (popcount (log2 @0 @1)))
7997 (plus:c (minus:s (popcount:s @0) (popcount:s (log1:cs @0 @1)))
7999 (popcount (log2 @0 @1)))))
8001 /* PARITY simplifications. */
8002 /* parity(~X) is parity(X). */
(simplify
 (PARITY (bit_not @0))
 (PARITY @0))
8007 /* parity(bswap(x)) is parity(x). */
8008 (for parity (PARITY)
8009 (for bswap (BUILT_IN_BSWAP16 BUILT_IN_BSWAP32
8010 BUILT_IN_BSWAP64 BUILT_IN_BSWAP128)
8012 (parity (convert?@0 (bswap:s@1 @2)))
8013 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
8014 && INTEGRAL_TYPE_P (TREE_TYPE (@1))
8015 && TYPE_PRECISION (TREE_TYPE (@0))
8016 >= TYPE_PRECISION (TREE_TYPE (@1)))
8017 (with { tree type0 = TREE_TYPE (@0);
8018 tree type1 = TREE_TYPE (@1); }
8019 (parity (convert:type0 (convert:type1 @2))))))))
8021 /* parity(rotate(X Y)) is parity(X). */
8022 (for parity (PARITY)
8023 (for rot (lrotate rrotate)
8025 (parity (convert?@0 (rot:s@1 @2 @3)))
8026 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
8027 && INTEGRAL_TYPE_P (TREE_TYPE (@1))
8028 && (GIMPLE || !TREE_SIDE_EFFECTS (@3))
8029 && TYPE_PRECISION (TREE_TYPE (@0))
8030 >= TYPE_PRECISION (TREE_TYPE (@1)))
8031 (with { tree type0 = TREE_TYPE (@0); }
8032 (parity (convert:type0 @2)))))))
8034 /* parity(X)^parity(Y) is parity(X^Y). */
(simplify
 (bit_xor (PARITY:s @0) (PARITY:s @1))
8037 (PARITY (bit_xor @0 @1)))
8039 /* a != 0 ? FUN(a) : 0 -> Fun(a) for some builtin functions. */
8040 (for func (POPCOUNT BSWAP FFS PARITY)
 (simplify
  (cond (ne @0 integer_zerop@1) (func@3 (convert? @0)) integer_zerop@2)
  @3))
8045 /* a != 0 ? FUN(a) : CST -> Fun(a) for some CLRSB builtins
8046 where CST is precision-1. */
8049 (cond (ne @0 integer_zerop@1) (func@4 (convert?@3 @0)) INTEGER_CST@2)
8050 (if (wi::to_widest (@2) == TYPE_PRECISION (TREE_TYPE (@3)) - 1)
8054 /* a != 0 ? CLZ(a) : CST -> .CLZ(a) where CST is the result of the internal function for 0. */
8057 (cond (ne @0 integer_zerop@1) (func (convert?@3 @0)) INTEGER_CST@2)
8059 internal_fn ifn = IFN_LAST;
8060 if (direct_internal_fn_supported_p (IFN_CLZ, type, OPTIMIZE_FOR_BOTH)
8061 && CLZ_DEFINED_VALUE_AT_ZERO (SCALAR_INT_TYPE_MODE (type),
8065 (if (ifn == IFN_CLZ && wi::to_widest (@2) == val)
8068 /* a != 0 ? CTZ(a) : CST -> .CTZ(a) where CST is the result of the internal function for 0. */
8071 (cond (ne @0 integer_zerop@1) (func (convert?@3 @0)) INTEGER_CST@2)
8073 internal_fn ifn = IFN_LAST;
8074 if (direct_internal_fn_supported_p (IFN_CTZ, type, OPTIMIZE_FOR_BOTH)
8075 && CTZ_DEFINED_VALUE_AT_ZERO (SCALAR_INT_TYPE_MODE (type),
8079 (if (ifn == IFN_CTZ && wi::to_widest (@2) == val)
8083 /* Common POPCOUNT/PARITY simplifications. */
8084 /* popcount(X&C1) is (X>>C2)&1 when C1 == 1<<C2. Same for parity(X&C1). */
8085 (for pfun (POPCOUNT PARITY)
8088 (if (INTEGRAL_TYPE_P (type))
8089 (with { wide_int nz = tree_nonzero_bits (@0); }
8093 (if (wi::popcount (nz) == 1)
8094 (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
8095 (convert (rshift:utype (convert:utype @0)
8096 { build_int_cst (integer_type_node,
8097 wi::ctz (nz)); })))))))))
/* 64- and 32-bit branchless implementations of popcount are detected:
8102 int popcount64c (uint64_t x)
8104 x -= (x >> 1) & 0x5555555555555555ULL;
8105 x = (x & 0x3333333333333333ULL) + ((x >> 2) & 0x3333333333333333ULL);
8106 x = (x + (x >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
8107 return (x * 0x0101010101010101ULL) >> 56;
8110 int popcount32c (uint32_t x)
8112 x -= (x >> 1) & 0x55555555;
8113 x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
8114 x = (x + (x >> 4)) & 0x0f0f0f0f;
8115 return (x * 0x01010101) >> 24;
8122 (rshift @8 INTEGER_CST@5)
8124 (bit_and @6 INTEGER_CST@7)
8128 (bit_and (rshift @0 INTEGER_CST@4) INTEGER_CST@11))
8134 /* Check constants and optab. */
8135 (with { unsigned prec = TYPE_PRECISION (type);
8136 int shift = (64 - prec) & 63;
8137 unsigned HOST_WIDE_INT c1
8138 = HOST_WIDE_INT_UC (0x0101010101010101) >> shift;
8139 unsigned HOST_WIDE_INT c2
8140 = HOST_WIDE_INT_UC (0x0F0F0F0F0F0F0F0F) >> shift;
8141 unsigned HOST_WIDE_INT c3
8142 = HOST_WIDE_INT_UC (0x3333333333333333) >> shift;
8143 unsigned HOST_WIDE_INT c4
8144 = HOST_WIDE_INT_UC (0x5555555555555555) >> shift;
8149 && TYPE_UNSIGNED (type)
8150 && integer_onep (@4)
8151 && wi::to_widest (@10) == 2
8152 && wi::to_widest (@5) == 4
8153 && wi::to_widest (@1) == prec - 8
8154 && tree_to_uhwi (@2) == c1
8155 && tree_to_uhwi (@3) == c2
8156 && tree_to_uhwi (@9) == c3
8157 && tree_to_uhwi (@7) == c3
8158 && tree_to_uhwi (@11) == c4)
8159 (if (direct_internal_fn_supported_p (IFN_POPCOUNT, type,
8161 (convert (IFN_POPCOUNT:type @0))
8162 /* Try to do popcount in two halves. PREC must be at least
8163 five bits for this to work without extension before adding. */
8165 tree half_type = NULL_TREE;
8166 opt_machine_mode m = mode_for_size ((prec + 1) / 2, MODE_INT, 1);
8169 && m.require () != TYPE_MODE (type))
8171 half_prec = GET_MODE_PRECISION (as_a <scalar_int_mode> (m));
8172 half_type = build_nonstandard_integer_type (half_prec, 1);
8174 gcc_assert (half_prec > 2);
8176 (if (half_type != NULL_TREE
8177 && direct_internal_fn_supported_p (IFN_POPCOUNT, half_type,
8180 (IFN_POPCOUNT:half_type (convert @0))
8181 (IFN_POPCOUNT:half_type (convert (rshift @0
8182 { build_int_cst (integer_type_node, half_prec); } )))))))))))
8184 /* __builtin_ffs needs to deal on many targets with the possible zero
8185 argument. If we know the argument is always non-zero, __builtin_ctz + 1
8186 should lead to better code. */
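/* Illustrative sketch: in

     int f (int x)
     {
       if (x == 0)
	 __builtin_unreachable ();
       return __builtin_ffs (x);
     }

   x is known to be nonzero, so the ffs call can become
   __builtin_ctz (x) + 1 when the target implements ctz directly.  */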
(simplify
 (FFS tree_expr_nonzero_p@0)
8189 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
8190 && direct_internal_fn_supported_p (IFN_CTZ, TREE_TYPE (@0),
8191 OPTIMIZE_FOR_SPEED))
8192 (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
8193 (plus (CTZ:type (convert:utype @0)) { build_one_cst (type); }))))
8196 (for ffs (BUILT_IN_FFS BUILT_IN_FFSL BUILT_IN_FFSLL
8198 /* __builtin_ffs (X) == 0 -> X == 0.
8199 __builtin_ffs (X) == 6 -> (X & 63) == 32. */
8202 (cmp (ffs@2 @0) INTEGER_CST@1)
8203 (with { int prec = TYPE_PRECISION (TREE_TYPE (@0)); }
8205 (if (integer_zerop (@1))
8206 (cmp @0 { build_zero_cst (TREE_TYPE (@0)); }))
8207 (if (tree_int_cst_sgn (@1) < 0 || wi::to_widest (@1) > prec)
8208 { constant_boolean_node (cmp == NE_EXPR ? true : false, type); })
8209 (if (single_use (@2))
8210 (cmp (bit_and @0 { wide_int_to_tree (TREE_TYPE (@0),
8211 wi::mask (tree_to_uhwi (@1),
8213 { wide_int_to_tree (TREE_TYPE (@0),
8214 wi::shifted_mask (tree_to_uhwi (@1) - 1, 1,
8215 false, prec)); }))))))
8217 /* __builtin_ffs (X) > 6 -> X != 0 && (X & 63) == 0. */
8221 bit_op (bit_and bit_ior)
8223 (cmp (ffs@2 @0) INTEGER_CST@1)
8224 (with { int prec = TYPE_PRECISION (TREE_TYPE (@0)); }
8226 (if (integer_zerop (@1))
8227 (cmp2 @0 { build_zero_cst (TREE_TYPE (@0)); }))
8228 (if (tree_int_cst_sgn (@1) < 0)
8229 { constant_boolean_node (cmp == GT_EXPR ? true : false, type); })
8230 (if (wi::to_widest (@1) >= prec)
8231 { constant_boolean_node (cmp == GT_EXPR ? false : true, type); })
8232 (if (wi::to_widest (@1) == prec - 1)
8233 (cmp3 @0 { wide_int_to_tree (TREE_TYPE (@0),
8234 wi::shifted_mask (prec - 1, 1,
8236 (if (single_use (@2))
8237 (bit_op (cmp2 @0 { build_zero_cst (TREE_TYPE (@0)); })
8239 { wide_int_to_tree (TREE_TYPE (@0),
8240 wi::mask (tree_to_uhwi (@1),
8242 { build_zero_cst (TREE_TYPE (@0)); }))))))))
/* Simplify:
     r = cond ? op (a) : b
   --> r = .COND_FN (cond, a, b)
   and:
     r = cond ? a : op (b)
   --> r = .COND_FN (~cond, b, a). */
8255 (for uncond_op (UNCOND_UNARY)
8256 cond_op (COND_UNARY)
8258 (vec_cond @0 (view_convert? (uncond_op@3 @1)) @2)
8259 (with { tree op_type = TREE_TYPE (@3); }
8260 (if (vectorized_internal_fn_supported_p (as_internal_fn (cond_op), op_type)
8261 && is_truth_type_for (op_type, TREE_TYPE (@0)))
8262 (cond_op @0 @1 @2))))
8264 (vec_cond @0 @1 (view_convert? (uncond_op@3 @2)))
8265 (with { tree op_type = TREE_TYPE (@3); }
8266 (if (vectorized_internal_fn_supported_p (as_internal_fn (cond_op), op_type)
8267 && is_truth_type_for (op_type, TREE_TYPE (@0)))
8268 (cond_op (bit_not @0) @2 @1)))))
/* Simplify:

     a = a1 op a2
     r = c ? a : b;

   to:

     r = c ? a1 op a2 : b;

8279 if the target can do it in one go. This makes the operation conditional
8280 on c, so could drop potentially-trapping arithmetic, but that's a valid
8281 simplification if the result of the operation isn't needed.
8283 Avoid speculatively generating a stand-alone vector comparison
8284 on targets that might not support them. Any target implementing
8285 conditional internal functions must support the same comparisons
8286 inside and outside a VEC_COND_EXPR. */
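/* A hypothetical example: with fully-masked vectorization of

     for (int i = 0; i < n; ++i)
       r[i] = c[i] ? a[i] / b[i] : a[i];

   the division becomes .COND_DIV (c, a, b, a), which also avoids
   executing the possibly-trapping division in lanes where c is false.  */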
8288 (for uncond_op (UNCOND_BINARY)
8289 cond_op (COND_BINARY)
8291 (vec_cond @0 (view_convert? (uncond_op@4 @1 @2)) @3)
8292 (with { tree op_type = TREE_TYPE (@4); }
8293 (if (vectorized_internal_fn_supported_p (as_internal_fn (cond_op), op_type)
8294 && is_truth_type_for (op_type, TREE_TYPE (@0))
8296 (view_convert (cond_op @0 @1 @2 (view_convert:op_type @3))))))
8298 (vec_cond @0 @1 (view_convert? (uncond_op@4 @2 @3)))
8299 (with { tree op_type = TREE_TYPE (@4); }
8300 (if (vectorized_internal_fn_supported_p (as_internal_fn (cond_op), op_type)
8301 && is_truth_type_for (op_type, TREE_TYPE (@0))
8303 (view_convert (cond_op (bit_not @0) @2 @3 (view_convert:op_type @1)))))))
8305 /* Same for ternary operations. */
8306 (for uncond_op (UNCOND_TERNARY)
8307 cond_op (COND_TERNARY)
8309 (vec_cond @0 (view_convert? (uncond_op@5 @1 @2 @3)) @4)
8310 (with { tree op_type = TREE_TYPE (@5); }
8311 (if (vectorized_internal_fn_supported_p (as_internal_fn (cond_op), op_type)
8312 && is_truth_type_for (op_type, TREE_TYPE (@0))
8314 (view_convert (cond_op @0 @1 @2 @3 (view_convert:op_type @4))))))
8316 (vec_cond @0 @1 (view_convert? (uncond_op@5 @2 @3 @4)))
8317 (with { tree op_type = TREE_TYPE (@5); }
8318 (if (vectorized_internal_fn_supported_p (as_internal_fn (cond_op), op_type)
8319 && is_truth_type_for (op_type, TREE_TYPE (@0))
8321 (view_convert (cond_op (bit_not @0) @2 @3 @4
8322 (view_convert:op_type @1)))))))
8325 /* Detect cases in which a VEC_COND_EXPR effectively replaces the
8326 "else" value of an IFN_COND_*. */
8327 (for cond_op (COND_BINARY)
8329 (vec_cond @0 (view_convert? (cond_op @0 @1 @2 @3)) @4)
8330 (with { tree op_type = TREE_TYPE (@3); }
8331 (if (element_precision (type) == element_precision (op_type))
8332 (view_convert (cond_op @0 @1 @2 (view_convert:op_type @4))))))
8334 (vec_cond @0 @1 (view_convert? (cond_op @2 @3 @4 @5)))
8335 (with { tree op_type = TREE_TYPE (@5); }
8336 (if (inverse_conditions_p (@0, @2)
8337 && element_precision (type) == element_precision (op_type))
8338 (view_convert (cond_op @2 @3 @4 (view_convert:op_type @1)))))))
8340 /* Same for ternary operations. */
8341 (for cond_op (COND_TERNARY)
8343 (vec_cond @0 (view_convert? (cond_op @0 @1 @2 @3 @4)) @5)
8344 (with { tree op_type = TREE_TYPE (@4); }
8345 (if (element_precision (type) == element_precision (op_type))
8346 (view_convert (cond_op @0 @1 @2 @3 (view_convert:op_type @5))))))
8348 (vec_cond @0 @1 (view_convert? (cond_op @2 @3 @4 @5 @6)))
8349 (with { tree op_type = TREE_TYPE (@6); }
8350 (if (inverse_conditions_p (@0, @2)
8351 && element_precision (type) == element_precision (op_type))
8352 (view_convert (cond_op @2 @3 @4 @5 (view_convert:op_type @1)))))))
/* Detect simplification for a conditional reduction where

   a = mask1 ? b : 0
   c = mask2 ? d + a : d

   is turned into

   c = mask1 && mask2 ? d + b : d. */
8363 (IFN_COND_ADD @0 @1 (vec_cond @2 @3 integer_zerop) @1)
8364 (IFN_COND_ADD (bit_and @0 @2) @1 @3 @1))
8366 /* For pointers @0 and @2 and nonnegative constant offset @1, look for
8369 A: (@0 + @1 < @2) | (@2 + @1 < @0)
8370 B: (@0 + @1 <= @2) | (@2 + @1 <= @0)
8372 If pointers are known not to wrap, B checks whether @1 bytes starting
8373 at @0 and @2 do not overlap, while A tests the same thing for @1 + 1
8374 bytes. A is more efficiently tested as:
8376 A: (sizetype) (@0 + @1 - @2) > @1 * 2
8378 The equivalent expression for B is given by replacing @1 with @1 - 1:
8380 B: (sizetype) (@0 + (@1 - 1) - @2) > (@1 - 1) * 2
8382 @0 and @2 can be swapped in both expressions without changing the result.
8384 The folds rely on sizetype's being unsigned (which is always true)
8385 and on its being the same width as the pointer (which we have to check).
8387 The fold replaces two pointer_plus expressions, two comparisons and
8388 an IOR with a pointer_plus, a pointer_diff, and a comparison, so in
8389 the best case it's a saving of two operations. The A fold retains one
8390 of the original pointer_pluses, so is a win even if both pointer_pluses
8391 are used elsewhere. The B fold is a wash if both pointer_pluses are
8392 used elsewhere, since all we end up doing is replacing a comparison with
8393 a pointer_plus. We do still apply the fold under those circumstances
8394 though, in case applying it to other conditions eventually makes one of the
8395 pointer_pluses dead. */
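/* Worked instance of A with @1 == 16: the test
   (p + 16 < q) | (q + 16 < p) becomes (sizetype)(p + 16 - q) > 32,
   since whenever p and q lie within 16 bytes of each other the
   unsigned difference p + 16 - q falls in [0, 32].  */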
8396 (for ior (truth_orif truth_or bit_ior)
8399 (ior (cmp:cs (pointer_plus@3 @0 INTEGER_CST@1) @2)
8400 (cmp:cs (pointer_plus@4 @2 @1) @0))
8401 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
8402 && TYPE_OVERFLOW_WRAPS (sizetype)
8403 && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (sizetype))
8404 /* Calculate the rhs constant. */
8405 (with { offset_int off = wi::to_offset (@1) - (cmp == LE_EXPR ? 1 : 0);
8406 offset_int rhs = off * 2; }
8407 /* Always fails for negative values. */
8408 (if (wi::min_precision (rhs, UNSIGNED) <= TYPE_PRECISION (sizetype))
8409 /* Since the order of @0 and @2 doesn't matter, let tree_swap_operands_p
8410 pick a canonical order. This increases the chances of using the
8411 same pointer_plus in multiple checks. */
8412 (with { bool swap_p = tree_swap_operands_p (@0, @2);
8413 tree rhs_tree = wide_int_to_tree (sizetype, rhs); }
8414 (if (cmp == LT_EXPR)
8415 (gt (convert:sizetype
8416 (pointer_diff:ssizetype { swap_p ? @4 : @3; }
8417 { swap_p ? @0 : @2; }))
8419 (gt (convert:sizetype
8420 (pointer_diff:ssizetype
8421 (pointer_plus { swap_p ? @2 : @0; }
8422 { wide_int_to_tree (sizetype, off); })
8423 { swap_p ? @0 : @2; }))
8424 { rhs_tree; })))))))))
8426 /* Fold REDUC (@0 & @1) -> @0[I] & @1[I] if element I is the only nonzero
8428 (for reduc (IFN_REDUC_PLUS IFN_REDUC_IOR IFN_REDUC_XOR)
8429 (simplify (reduc (view_convert? (bit_and @0 VECTOR_CST@1)))
8430 (with { int i = single_nonzero_element (@1); }
8432 (with { tree elt = vector_cst_elt (@1, i);
8433 tree elt_type = TREE_TYPE (elt);
8434 unsigned int elt_bits = tree_to_uhwi (TYPE_SIZE (elt_type));
8435 tree size = bitsize_int (elt_bits);
8436 tree pos = bitsize_int (elt_bits * i); }
8439 (BIT_FIELD_REF:elt_type @0 { size; } { pos; })
8442 /* Fold reduction of a single nonzero element constructor. */
8443 (for reduc (IFN_REDUC_PLUS IFN_REDUC_IOR IFN_REDUC_XOR)
8444 (simplify (reduc (CONSTRUCTOR@0))
8445 (with { tree ctor = (TREE_CODE (@0) == SSA_NAME
8446 ? gimple_assign_rhs1 (SSA_NAME_DEF_STMT (@0)) : @0);
8447 tree elt = ctor_single_nonzero_element (ctor); }
8449 && !HONOR_SNANS (type)
8450 && !HONOR_SIGNED_ZEROS (type))
8453 /* Fold REDUC (@0 op VECTOR_CST) as REDUC (@0) op REDUC (VECTOR_CST). */
8454 (for reduc (IFN_REDUC_PLUS IFN_REDUC_MAX IFN_REDUC_MIN IFN_REDUC_FMAX
8455 IFN_REDUC_FMIN IFN_REDUC_AND IFN_REDUC_IOR IFN_REDUC_XOR)
8456 op (plus max min IFN_FMAX IFN_FMIN bit_and bit_ior bit_xor)
8457 (simplify (reduc (op @0 VECTOR_CST@1))
8458 (op (reduc:type @0) (reduc:type @1))))
/* Simplify vector floating point operations of alternating sub/add pairs
   into using an fneg of a wider element type followed by a normal add.
   Under IEEE 754 the fneg of the wider type will negate every even entry
   and when doing an add we get a sub of the even and an add of every odd
   element. */
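/* E.g. for V4SF, VEC_PERM <a + b, a - b, { 0, 5, 2, 7 }> computes
   { a[0]+b[0], a[1]-b[1], a[2]+b[2], a[3]-b[3] }; negating b through a
   V2DF view flips the sign of every other float lane, so the whole
   expression becomes a + VIEW_CONVERT (fneg (VIEW_CONVERT <V2DF> (b))).  */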
8465 (for plusminus (plus minus)
8466 minusplus (minus plus)
8468 (vec_perm (plusminus @0 @1) (minusplus @2 @3) VECTOR_CST@4)
8469 (if (!VECTOR_INTEGER_TYPE_P (type)
8470 && !FLOAT_WORDS_BIG_ENDIAN
8471 /* plus is commutative, while minus is not, so :c can't be used.
8472 Do equality comparisons by hand and at the end pick the operands
8474 && (operand_equal_p (@0, @2, 0)
8475 ? operand_equal_p (@1, @3, 0)
8476 : operand_equal_p (@0, @3, 0) && operand_equal_p (@1, @2, 0)))
8479 /* Build a vector of integers from the tree mask. */
8480 vec_perm_builder builder;
8482 (if (tree_to_vec_perm_builder (&builder, @4))
8485 /* Create a vec_perm_indices for the integer vector. */
8486 poly_uint64 nelts = TYPE_VECTOR_SUBPARTS (type);
8487 vec_perm_indices sel (builder, 2, nelts);
8488 machine_mode vec_mode = TYPE_MODE (type);
8489 machine_mode wide_mode;
8490 scalar_mode wide_elt_mode;
8491 poly_uint64 wide_nunits;
8492 scalar_mode inner_mode = GET_MODE_INNER (vec_mode);
8494 (if (VECTOR_MODE_P (vec_mode)
8495 && sel.series_p (0, 2, 0, 2)
8496 && sel.series_p (1, 2, nelts + 1, 2)
8497 && GET_MODE_2XWIDER_MODE (inner_mode).exists (&wide_elt_mode)
8498 && multiple_p (GET_MODE_NUNITS (vec_mode), 2, &wide_nunits)
8499 && related_vector_mode (vec_mode, wide_elt_mode,
8500 wide_nunits).exists (&wide_mode))
8504 = lang_hooks.types.type_for_mode (GET_MODE_INNER (wide_mode),
8505 TYPE_UNSIGNED (type));
8506 tree ntype = build_vector_type_for_mode (stype, wide_mode);
8508 /* The format has to be a non-extended ieee format. */
8509 const struct real_format *fmt_old = FLOAT_MODE_FORMAT (vec_mode);
8510 const struct real_format *fmt_new = FLOAT_MODE_FORMAT (wide_mode);
8512 (if (TYPE_MODE (stype) != BLKmode
8513 && VECTOR_TYPE_P (ntype)
8518 /* If the target doesn't support v1xx vectors, try using
8519 scalar mode xx instead. */
8520 if (known_eq (GET_MODE_NUNITS (wide_mode), 1)
8521 && !target_supports_op_p (ntype, NEGATE_EXPR, optab_vector))
8524 (if (fmt_new->signbit_rw
8525 == fmt_old->signbit_rw + GET_MODE_UNIT_BITSIZE (vec_mode)
8526 && fmt_new->signbit_rw == fmt_new->signbit_ro
8527 && targetm.can_change_mode_class (TYPE_MODE (ntype),
8528 TYPE_MODE (type), ALL_REGS)
8529 && ((optimize_vectors_before_lowering_p ()
8530 && VECTOR_TYPE_P (ntype))
8531 || target_supports_op_p (ntype, NEGATE_EXPR, optab_vector)))
8532 (if (plusminus == PLUS_EXPR)
8533 (plus (view_convert:type (negate (view_convert:ntype @3))) @2)
8534 (minus @0 (view_convert:type
8535 (negate (view_convert:ntype @1))))))))))))))))
8538 (vec_perm @0 @1 VECTOR_CST@2)
8541 tree op0 = @0, op1 = @1, op2 = @2;
8542 machine_mode result_mode = TYPE_MODE (type);
8543 machine_mode op_mode = TYPE_MODE (TREE_TYPE (op0));
8545 /* Build a vector of integers from the tree mask. */
8546 vec_perm_builder builder;
8548 (if (tree_to_vec_perm_builder (&builder, op2))
8551 /* Create a vec_perm_indices for the integer vector. */
8552 poly_uint64 nelts = TYPE_VECTOR_SUBPARTS (type);
8553 bool single_arg = (op0 == op1);
8554 vec_perm_indices sel (builder, single_arg ? 1 : 2, nelts);
8556 (if (sel.series_p (0, 1, 0, 1))
8558 (if (sel.series_p (0, 1, nelts, 1))
8564 if (sel.all_from_input_p (0))
8566 else if (sel.all_from_input_p (1))
8569 sel.rotate_inputs (1);
8571 else if (known_ge (poly_uint64 (sel[0]), nelts))
8573 std::swap (op0, op1);
8574 sel.rotate_inputs (1);
8578 tree cop0 = op0, cop1 = op1;
8579 if (TREE_CODE (op0) == SSA_NAME
8580 && (def = dyn_cast <gassign *> (SSA_NAME_DEF_STMT (op0)))
8581 && gimple_assign_rhs_code (def) == CONSTRUCTOR)
8582 cop0 = gimple_assign_rhs1 (def);
8583 if (TREE_CODE (op1) == SSA_NAME
8584 && (def = dyn_cast <gassign *> (SSA_NAME_DEF_STMT (op1)))
8585 && gimple_assign_rhs_code (def) == CONSTRUCTOR)
8586 cop1 = gimple_assign_rhs1 (def);
8589 (if ((TREE_CODE (cop0) == VECTOR_CST
8590 || TREE_CODE (cop0) == CONSTRUCTOR)
8591 && (TREE_CODE (cop1) == VECTOR_CST
8592 || TREE_CODE (cop1) == CONSTRUCTOR)
8593 && (t = fold_vec_perm (type, cop0, cop1, sel)))
8597 bool changed = (op0 == op1 && !single_arg);
8598 tree ins = NULL_TREE;
8601 /* See if the permutation is performing a single element
8602 insert from a CONSTRUCTOR or constant and use a BIT_INSERT_EXPR
8603 in that case. But only if the vector mode is supported,
8604 otherwise this is invalid GIMPLE. */
8605 if (op_mode != BLKmode
8606 && (TREE_CODE (cop0) == VECTOR_CST
8607 || TREE_CODE (cop0) == CONSTRUCTOR
8608 || TREE_CODE (cop1) == VECTOR_CST
8609 || TREE_CODE (cop1) == CONSTRUCTOR))
8611 bool insert_first_p = sel.series_p (1, 1, nelts + 1, 1);
8614 /* After canonicalizing the first elt to come from the
8615 first vector we only can insert the first elt from
8616 the first vector. */
8618 if ((ins = fold_read_from_vector (cop0, sel[0])))
8621 /* The above can fail for two-element vectors which always
8622 appear to insert the first element, so try inserting
8623 into the second lane as well. For more than two
8624 elements that's wasted time. */
8625 if (!insert_first_p || (!ins && maybe_eq (nelts, 2u)))
8627 unsigned int encoded_nelts = sel.encoding ().encoded_nelts ();
8628 for (at = 0; at < encoded_nelts; ++at)
8629 if (maybe_ne (sel[at], at))
8631 if (at < encoded_nelts
8632 && (known_eq (at + 1, nelts)
8633 || sel.series_p (at + 1, 1, at + 1, 1)))
8635 if (known_lt (poly_uint64 (sel[at]), nelts))
8636 ins = fold_read_from_vector (cop0, sel[at]);
8638 ins = fold_read_from_vector (cop1, sel[at] - nelts);
8643 /* Generate a canonical form of the selector. */
8644 if (!ins && sel.encoding () != builder)
8646 /* Some targets are deficient and fail to expand a single
8647 argument permutation while still allowing an equivalent
8648 2-argument version. */
8650 if (sel.ninputs () == 2
8651 || can_vec_perm_const_p (result_mode, op_mode, sel, false))
8652 op2 = vec_perm_indices_to_tree (TREE_TYPE (op2), sel);
8655 vec_perm_indices sel2 (builder, 2, nelts);
8656 if (can_vec_perm_const_p (result_mode, op_mode, sel2, false))
8657 op2 = vec_perm_indices_to_tree (TREE_TYPE (op2), sel2);
8659 /* Not directly supported with either encoding,
8660 so use the preferred form. */
8661 op2 = vec_perm_indices_to_tree (TREE_TYPE (op2), sel);
8663 if (!operand_equal_p (op2, oldop2, 0))
8668 (bit_insert { op0; } { ins; }
8669 { bitsize_int (at * vector_element_bits (type)); })
8671 (vec_perm { op0; } { op1; } { op2; }))))))))))))
8673 /* VEC_PERM_EXPR (v, v, mask) -> v where v contains same element. */
8675 (match vec_same_elem_p
8678 (match vec_same_elem_p
8680 (if (TREE_CODE (@0) == SSA_NAME
8681 && uniform_vector_p (gimple_assign_rhs1 (SSA_NAME_DEF_STMT (@0))))))
8683 (match vec_same_elem_p
8685 (if (uniform_vector_p (@0))))
8689 (vec_perm vec_same_elem_p@0 @0 @1)
8690 (if (types_match (type, TREE_TYPE (@0)))
8694 tree elem = uniform_vector_p (@0);
8697 { build_vector_from_val (type, elem); }))))
/* Push VEC_PERM earlier if that may help FMA detection (PR101895). */
8701 (plus:c (vec_perm:s (mult:c@0 @1 vec_same_elem_p@2) @0 @3) @4)
8702 (if (TREE_CODE (@0) == SSA_NAME && num_imm_uses (@0) == 2)
8703 (plus (mult (vec_perm @1 @1 @3) @2) @4)))
8705 (minus (vec_perm:s (mult:c@0 @1 vec_same_elem_p@2) @0 @3) @4)
8706 (if (TREE_CODE (@0) == SSA_NAME && num_imm_uses (@0) == 2)
8707 (minus (mult (vec_perm @1 @1 @3) @2) @4)))
8711 c = VEC_PERM_EXPR <a, b, VCST0>;
8712 d = VEC_PERM_EXPR <c, c, VCST1>;
8714 d = VEC_PERM_EXPR <a, b, NEW_VCST>; */
8717 (vec_perm (vec_perm@0 @1 @2 VECTOR_CST@3) @0 VECTOR_CST@4)
8718 (if (TYPE_VECTOR_SUBPARTS (type).is_constant ())
8721 machine_mode result_mode = TYPE_MODE (type);
8722 machine_mode op_mode = TYPE_MODE (TREE_TYPE (@1));
8723 int nelts = TYPE_VECTOR_SUBPARTS (type).to_constant ();
8724 vec_perm_builder builder0;
8725 vec_perm_builder builder1;
8726 vec_perm_builder builder2 (nelts, nelts, 1);
8728 (if (tree_to_vec_perm_builder (&builder0, @3)
8729 && tree_to_vec_perm_builder (&builder1, @4))
8732 vec_perm_indices sel0 (builder0, 2, nelts);
8733 vec_perm_indices sel1 (builder1, 1, nelts);
8735 for (int i = 0; i < nelts; i++)
8736 builder2.quick_push (sel0[sel1[i].to_constant ()]);
8738 vec_perm_indices sel2 (builder2, 2, nelts);
8740 tree op0 = NULL_TREE;
8741 /* If the new VEC_PERM_EXPR can't be handled but both
8742 original VEC_PERM_EXPRs can, punt.
8743 If one or both of the original VEC_PERM_EXPRs can't be
8744 handled and the new one can't be either, don't increase
8745 number of VEC_PERM_EXPRs that can't be handled. */
8746 if (can_vec_perm_const_p (result_mode, op_mode, sel2, false)
8748 ? (!can_vec_perm_const_p (result_mode, op_mode, sel0, false)
8749 || !can_vec_perm_const_p (result_mode, op_mode, sel1, false))
8750 : !can_vec_perm_const_p (result_mode, op_mode, sel1, false)))
8751 op0 = vec_perm_indices_to_tree (TREE_TYPE (@4), sel2);
8754 (vec_perm @1 @2 { op0; })))))))
/* Match count trailing zeroes for simplify_count_trailing_zeroes in fwprop.
   The canonical form is array[((x & -x) * C) >> SHIFT] where C is a magic
   constant which, when multiplied by a power of 2, contains a unique value
   in the top 5 or 6 bits.  That value is then used to index a table which
   maps it to the number of trailing zeroes. */
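/* An illustrative sketch of the matched idiom, using the well-known
   32-bit de Bruijn constant and its published lookup table:

     static const unsigned char tab[32] =
       { 0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
	 31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9 };

     int ctz32 (unsigned int x)
     {
       return tab[((x & -x) * 0x077CB531u) >> 27];
     }
*/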
8762 (match (ctz_table_index @1 @2 @3)
8763 (rshift (mult (bit_and:c (negate @1) @1) INTEGER_CST@2) INTEGER_CST@3))
8765 (match (cond_expr_convert_p @0 @2 @3 @6)
8766 (cond (simple_comparison@6 @0 @1) (convert@4 @2) (convert@5 @3))
8767 (if (INTEGRAL_TYPE_P (type)
8768 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
8769 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
8770 && INTEGRAL_TYPE_P (TREE_TYPE (@3))
8771 && TYPE_PRECISION (type) != TYPE_PRECISION (TREE_TYPE (@0))
8772 && TYPE_PRECISION (TREE_TYPE (@0))
8773 == TYPE_PRECISION (TREE_TYPE (@2))
8774 && TYPE_PRECISION (TREE_TYPE (@0))
8775 == TYPE_PRECISION (TREE_TYPE (@3))
     /* For vect_recog_cond_expr_convert_pattern, @2 and @3 can differ in
	signedness when convert is truncation, but not for extension since
	it's sign_extend vs zero_extend. */
8779 && (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (type)
8780 || (TYPE_UNSIGNED (TREE_TYPE (@2))
8781 == TYPE_UNSIGNED (TREE_TYPE (@3))))
8783 && single_use (@5))))
8785 (for bit_op (bit_and bit_ior bit_xor)
8786 (match (bitwise_induction_p @0 @2 @3)
8788 (nop_convert1? (bit_not2?@0 (convert3? (lshift integer_onep@1 @2))))
8791 (match (bitwise_induction_p @0 @2 @3)
8793 (nop_convert1? (bit_xor@0 (convert2? (lshift integer_onep@1 @2)) @3))))
8795 /* n - (((n > C1) ? n : C1) & -C2) -> n & C1 for unsigned case.
8796 n - (((n > C1) ? n : C1) & -C2) -> (n <= C1) ? n : (n & C1) for signed case. */
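/* E.g. with C1 == 7 and C2 == 8: for unsigned n,
   n - (MAX (n, 7) & -8) folds to n & 7, because when n <= 7 the masked
   MAX is 0, and when n > 7 it is n with its low three bits cleared.  */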
(simplify
 (minus @0 (bit_and (max @0 INTEGER_CST@1) INTEGER_CST@2))
8799 (with { auto i = wi::neg (wi::to_wide (@2)); }
8800 /* Check if -C2 is a power of 2 and C1 = -C2 - 1. */
8801 (if (wi::popcount (i) == 1
8802 && (wi::to_wide (@1)) == (i - 1))
8803 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
8805 (cond (le @0 @1) @0 (bit_and @0 @1))))))
8807 /* -x & 1 -> x & 1. */
8809 (bit_and (negate @0) integer_onep@1)
8810 (if (!TYPE_OVERFLOW_SANITIZED (type))
/* Transform:
   c1 = VEC_PERM_EXPR (a, a, mask)
   c2 = VEC_PERM_EXPR (b, b, mask)
   c3 = c1 op c2
   -->
   c = a op b
   c3 = VEC_PERM_EXPR (c, c, mask)
8820 For all integer non-div operations. */
8821 (for op (plus minus mult bit_and bit_ior bit_xor
8824 (op (vec_perm @0 @0 @2) (vec_perm @1 @1 @2))
8825 (if (VECTOR_INTEGER_TYPE_P (type))
8826 (vec_perm (op@3 @0 @1) @3 @2))))
/* Similarly for float arithmetic, when the permutation constant covers
   all vector elements. */
8830 (for op (plus minus mult)
8832 (op (vec_perm @0 @0 VECTOR_CST@2) (vec_perm @1 @1 VECTOR_CST@2))
8833 (if (VECTOR_FLOAT_TYPE_P (type)
8834 && TYPE_VECTOR_SUBPARTS (type).is_constant ())
8838 vec_perm_builder builder;
8839 bool full_perm_p = false;
8840 if (tree_to_vec_perm_builder (&builder, perm_cst))
8842 unsigned HOST_WIDE_INT nelts;
8844 nelts = TYPE_VECTOR_SUBPARTS (type).to_constant ();
8845 /* Create a vec_perm_indices for the VECTOR_CST. */
8846 vec_perm_indices sel (builder, 1, nelts);
8848 /* Check if perm indices covers all vector elements. */
8849 if (sel.encoding ().encoded_full_vector_p ())
8851 auto_sbitmap seen (nelts);
8852 bitmap_clear (seen);
8854 unsigned HOST_WIDE_INT count = 0, i;
8856 for (i = 0; i < nelts; i++)
8858 if (!bitmap_set_bit (seen, sel[i].to_constant ()))
8862 full_perm_p = count == nelts;
8867 (vec_perm (op@3 @0 @1) @3 @2))))))