1 /* Match-and-simplify patterns for shared GENERIC and GIMPLE folding.
2 This file is consumed by genmatch which produces gimple-match.cc
3 and generic-match.cc from it.
5 Copyright (C) 2014-2024 Free Software Foundation, Inc.
6 Contributed by Richard Biener <rguenther@suse.de>
7 and Prathamesh Kulkarni <bilbotheelffriend@gmail.com>
9 This file is part of GCC.
11 GCC is free software; you can redistribute it and/or modify it under
12 the terms of the GNU General Public License as published by the Free
13 Software Foundation; either version 3, or (at your option) any later
16 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
17 WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
21 You should have received a copy of the GNU General Public License
22 along with GCC; see the file COPYING3. If not see
23 <http://www.gnu.org/licenses/>. */
26 /* Generic tree predicates we inherit. */
27 (define_predicates
28 integer_onep integer_zerop integer_all_onesp integer_minus_onep
29 integer_each_onep integer_truep integer_nonzerop
30 real_zerop real_onep real_minus_onep
32 initializer_each_zero_or_onep
34 tree_expr_nonnegative_p
42 bitmask_inv_cst_vector_p)
45 (define_operator_list tcc_comparison
46 lt le eq ne ge gt unordered ordered unlt unle ungt unge uneq ltgt)
47 (define_operator_list inverted_tcc_comparison
48 ge gt ne eq lt le ordered unordered ge gt le lt ltgt uneq)
49 (define_operator_list inverted_tcc_comparison_with_nans
50 unge ungt ne eq unlt unle ordered unordered ge gt le lt ltgt uneq)
51 (define_operator_list swapped_tcc_comparison
52 gt ge eq ne le lt unordered ordered ungt unge unlt unle uneq ltgt)
53 (define_operator_list simple_comparison lt le eq ne ge gt)
54 (define_operator_list swapped_simple_comparison gt ge eq ne le lt)
55 (define_operator_list BSWAP BUILT_IN_BSWAP16 BUILT_IN_BSWAP32
56 BUILT_IN_BSWAP64 BUILT_IN_BSWAP128)
58 #include "cfn-operators.pd"
60 /* Define operand lists for math rounding functions {,i,l,ll}FN,
61 where the versions prefixed with "i" return an int, those prefixed with
62 "l" return a long and those prefixed with "ll" return a long long.
64 Also define operand lists:
66 X<FN>F for all float functions, in the order i, l, ll
67 X<FN> for all double functions, in the same order
68 X<FN>L for all long double functions, in the same order. */
69 #define DEFINE_INT_AND_FLOAT_ROUND_FN(FN) \
70 (define_operator_list X##FN##F BUILT_IN_I##FN##F \
73 (define_operator_list X##FN BUILT_IN_I##FN \
76 (define_operator_list X##FN##L BUILT_IN_I##FN##L \
80 DEFINE_INT_AND_FLOAT_ROUND_FN (FLOOR)
81 DEFINE_INT_AND_FLOAT_ROUND_FN (CEIL)
82 DEFINE_INT_AND_FLOAT_ROUND_FN (ROUND)
83 DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
85 /* Unary operations and their associated IFN_COND_* function. */
86 (define_operator_list UNCOND_UNARY
87 negate bit_not)
88 (define_operator_list COND_UNARY
89 IFN_COND_NEG IFN_COND_NOT)
90 (define_operator_list COND_LEN_UNARY
91 IFN_COND_LEN_NEG IFN_COND_LEN_NOT)
93 /* Binary operations and their associated IFN_COND_* function. */
94 (define_operator_list UNCOND_BINARY
95 plus minus
96 mult trunc_div trunc_mod rdiv
97 min max
98 IFN_FMIN IFN_FMAX IFN_COPYSIGN
99 bit_and bit_ior bit_xor
100 lshift rshift)
101 (define_operator_list COND_BINARY
102 IFN_COND_ADD IFN_COND_SUB
103 IFN_COND_MUL IFN_COND_DIV IFN_COND_MOD IFN_COND_RDIV
104 IFN_COND_MIN IFN_COND_MAX
105 IFN_COND_FMIN IFN_COND_FMAX IFN_COND_COPYSIGN
106 IFN_COND_AND IFN_COND_IOR IFN_COND_XOR
107 IFN_COND_SHL IFN_COND_SHR)
108 (define_operator_list COND_LEN_BINARY
109 IFN_COND_LEN_ADD IFN_COND_LEN_SUB
110 IFN_COND_LEN_MUL IFN_COND_LEN_DIV
111 IFN_COND_LEN_MOD IFN_COND_LEN_RDIV
112 IFN_COND_LEN_MIN IFN_COND_LEN_MAX
113 IFN_COND_LEN_FMIN IFN_COND_LEN_FMAX IFN_COND_LEN_COPYSIGN
114 IFN_COND_LEN_AND IFN_COND_LEN_IOR IFN_COND_LEN_XOR
115 IFN_COND_LEN_SHL IFN_COND_LEN_SHR)
117 /* Same for ternary operations. */
118 (define_operator_list UNCOND_TERNARY
119 IFN_FMA IFN_FMS IFN_FNMA IFN_FNMS)
120 (define_operator_list COND_TERNARY
121 IFN_COND_FMA IFN_COND_FMS IFN_COND_FNMA IFN_COND_FNMS)
122 (define_operator_list COND_LEN_TERNARY
123 IFN_COND_LEN_FMA IFN_COND_LEN_FMS IFN_COND_LEN_FNMA IFN_COND_LEN_FNMS)
125 /* __atomic_fetch_or_*, __atomic_fetch_xor_*, __atomic_xor_fetch_* */
126 (define_operator_list ATOMIC_FETCH_OR_XOR_N
127 BUILT_IN_ATOMIC_FETCH_OR_1 BUILT_IN_ATOMIC_FETCH_OR_2
128 BUILT_IN_ATOMIC_FETCH_OR_4 BUILT_IN_ATOMIC_FETCH_OR_8
129 BUILT_IN_ATOMIC_FETCH_OR_16
130 BUILT_IN_ATOMIC_FETCH_XOR_1 BUILT_IN_ATOMIC_FETCH_XOR_2
131 BUILT_IN_ATOMIC_FETCH_XOR_4 BUILT_IN_ATOMIC_FETCH_XOR_8
132 BUILT_IN_ATOMIC_FETCH_XOR_16
133 BUILT_IN_ATOMIC_XOR_FETCH_1 BUILT_IN_ATOMIC_XOR_FETCH_2
134 BUILT_IN_ATOMIC_XOR_FETCH_4 BUILT_IN_ATOMIC_XOR_FETCH_8
135 BUILT_IN_ATOMIC_XOR_FETCH_16)
136 /* __sync_fetch_and_or_*, __sync_fetch_and_xor_*, __sync_xor_and_fetch_* */
137 (define_operator_list SYNC_FETCH_OR_XOR_N
138 BUILT_IN_SYNC_FETCH_AND_OR_1 BUILT_IN_SYNC_FETCH_AND_OR_2
139 BUILT_IN_SYNC_FETCH_AND_OR_4 BUILT_IN_SYNC_FETCH_AND_OR_8
140 BUILT_IN_SYNC_FETCH_AND_OR_16
141 BUILT_IN_SYNC_FETCH_AND_XOR_1 BUILT_IN_SYNC_FETCH_AND_XOR_2
142 BUILT_IN_SYNC_FETCH_AND_XOR_4 BUILT_IN_SYNC_FETCH_AND_XOR_8
143 BUILT_IN_SYNC_FETCH_AND_XOR_16
144 BUILT_IN_SYNC_XOR_AND_FETCH_1 BUILT_IN_SYNC_XOR_AND_FETCH_2
145 BUILT_IN_SYNC_XOR_AND_FETCH_4 BUILT_IN_SYNC_XOR_AND_FETCH_8
146 BUILT_IN_SYNC_XOR_AND_FETCH_16)
147 /* __atomic_fetch_and_*. */
148 (define_operator_list ATOMIC_FETCH_AND_N
149 BUILT_IN_ATOMIC_FETCH_AND_1 BUILT_IN_ATOMIC_FETCH_AND_2
150 BUILT_IN_ATOMIC_FETCH_AND_4 BUILT_IN_ATOMIC_FETCH_AND_8
151 BUILT_IN_ATOMIC_FETCH_AND_16)
152 /* __sync_fetch_and_and_*. */
153 (define_operator_list SYNC_FETCH_AND_AND_N
154 BUILT_IN_SYNC_FETCH_AND_AND_1 BUILT_IN_SYNC_FETCH_AND_AND_2
155 BUILT_IN_SYNC_FETCH_AND_AND_4 BUILT_IN_SYNC_FETCH_AND_AND_8
156 BUILT_IN_SYNC_FETCH_AND_AND_16)
158 /* With nop_convert? combine convert? and view_convert? in one pattern
159 plus conditionalize on tree_nop_conversion_p conversions. */
160 (match (nop_convert @0)
161 (convert @0)
162 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))))
163 (match (nop_convert @0)
164 (view_convert @0)
165 (if (VECTOR_TYPE_P (type) && VECTOR_TYPE_P (TREE_TYPE (@0))
166 && known_eq (TYPE_VECTOR_SUBPARTS (type),
167 TYPE_VECTOR_SUBPARTS (TREE_TYPE (@0)))
168 && tree_nop_conversion_p (TREE_TYPE (type), TREE_TYPE (TREE_TYPE (@0))))))
171 /* These are used by gimple_bitwise_inverted_equal_p to simplify
172 detection of BIT_NOT and comparisons. */
173 (match (bit_not_with_nop @0)
174 (bit_not @0))
175 (match (bit_not_with_nop @0)
176 (convert (bit_not @0))
177 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))))
178 (for cmp (tcc_comparison)
179 (match (maybe_cmp @0)
180 (cmp@0 @1 @2))
181 (match (maybe_cmp @0)
182 (convert (cmp@0 @1 @2))
183 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))))
185 /* `a ^ b` is another form of `a != b` when the type
186 is a 1-bit precision integer. */
187 (match (maybe_cmp @0)
188 (bit_xor@0 @1 @2)
189 (if (INTEGRAL_TYPE_P (type)
190 && TYPE_PRECISION (type) == 1)))
193 /* Transform likes of (char) ABS_EXPR <(int) x> into (char) ABSU_EXPR <x>
194 ABSU_EXPR returns unsigned absolute value of the operand and the operand
195 of the ABSU_EXPR will have the corresponding signed type. */
196 (simplify (abs (convert @0))
197 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
198 && !TYPE_UNSIGNED (TREE_TYPE (@0))
199 && element_precision (type) > element_precision (TREE_TYPE (@0)))
200 (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
201 (convert (absu:utype @0)))))
204 /* Optimize (X + (X >> (prec - 1))) ^ (X >> (prec - 1)) into abs (X). */
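/* For negative X the arithmetic shift by prec - 1 yields -1, so the
expression becomes (X - 1) ^ -1 == ~(X - 1) == -X; for non-negative X the
shift yields 0 and the expression is just X. */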
206 (bit_xor:c (plus:c @0 (rshift@2 @0 INTEGER_CST@1)) @2)
207 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
208 && !TYPE_UNSIGNED (TREE_TYPE (@0))
209 && wi::to_widest (@1) == element_precision (TREE_TYPE (@0)) - 1)
213 /* Simplifications of operations with one constant operand and
214 simplifications to constants or single values. */
216 (for op (plus pointer_plus minus bit_ior bit_xor)
218 (op @0 integer_zerop)
221 /* 0 +p index -> (type)index */
223 (pointer_plus integer_zerop @1)
224 (non_lvalue (convert @1)))
226 /* ptr - 0 -> (type)ptr */
228 (pointer_diff @0 integer_zerop)
231 /* See if ARG1 is zero and X + ARG1 reduces to X.
232 Likewise if the operands are reversed. */
234 (plus:c @0 real_zerop@1)
235 (if (fold_real_zero_addition_p (type, @0, @1, 0))
238 /* See if ARG1 is zero and X - ARG1 reduces to X. */
240 (minus @0 real_zerop@1)
241 (if (fold_real_zero_addition_p (type, @0, @1, 1))
244 /* Even if the fold_real_zero_addition_p can't simplify X + 0.0
245 into X, we can optimize (X + 0.0) + 0.0 or (X + 0.0) - 0.0
246 or (X - 0.0) + 0.0 into X + 0.0 and (X - 0.0) - 0.0 into X - 0.0
247 if not -frounding-math. For sNaNs the first operation would raise
248 exceptions but turn the result into qNaN, so the second operation
249 would not raise it. */
250 (for inner_op (plus minus)
251 (for outer_op (plus minus)
253 (outer_op (inner_op@3 @0 REAL_CST@1) REAL_CST@2)
256 && !HONOR_SIGN_DEPENDENT_ROUNDING (type))
257 (with { bool inner_plus = ((inner_op == PLUS_EXPR)
258 ^ REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (@1)));
260 = ((outer_op == PLUS_EXPR)
261 ^ REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (@2))); }
262 (if (outer_plus && !inner_plus)
266 /* Simplify x - x.
267 This is unsafe for certain floats even in non-IEEE formats.
268 In IEEE, it is unsafe because it does wrong for NaNs.
269 PR middle-end/98420: x - x may be -0.0 with FE_DOWNWARD.
270 Also note that operand_equal_p is always false if an operand
271 is volatile. */
274 (if (!FLOAT_TYPE_P (type)
275 || (!tree_expr_maybe_nan_p (@0)
276 && !tree_expr_maybe_infinite_p (@0)
277 && (!HONOR_SIGN_DEPENDENT_ROUNDING (type)
278 || !HONOR_SIGNED_ZEROS (type))))
279 { build_zero_cst (type); }))
281 (pointer_diff @@0 @0)
282 { build_zero_cst (type); })
285 (mult @0 integer_zerop@1)
288 /* -x == x -> x == 0 */
291 (cmp:c @0 (negate @0))
292 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
293 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE(@0)))
294 (cmp @0 { build_zero_cst (TREE_TYPE(@0)); }))))
296 /* Maybe fold x * 0 to 0. The expressions aren't the same
297 when x is NaN, since x * 0 is also NaN. Nor are they the
298 same in modes with signed zeros, since multiplying a
299 negative value by 0 gives -0, not +0. Nor when x is +-Inf,
300 since x * 0 is NaN. */
302 (mult @0 real_zerop@1)
303 (if (!tree_expr_maybe_nan_p (@0)
304 && (!HONOR_NANS (type) || !tree_expr_maybe_infinite_p (@0))
305 && (!HONOR_SIGNED_ZEROS (type) || tree_expr_nonnegative_p (@0)))
308 /* In IEEE floating point, x*1 is not equivalent to x for snans.
309 Likewise for complex arithmetic with signed zeros. */
312 (if (!tree_expr_maybe_signaling_nan_p (@0)
313 && (!HONOR_SIGNED_ZEROS (type)
314 || !COMPLEX_FLOAT_TYPE_P (type)))
317 /* Transform x * -1.0 into -x. */
319 (mult @0 real_minus_onep)
320 (if (!tree_expr_maybe_signaling_nan_p (@0)
321 && (!HONOR_SIGNED_ZEROS (type)
322 || !COMPLEX_FLOAT_TYPE_P (type)))
325 /* Transform x * { 0 or 1, 0 or 1, ... } into x & { 0 or -1, 0 or -1, ...},
326 unless the target has native support for the former but not the latter. */
328 (mult @0 VECTOR_CST@1)
329 (if (initializer_each_zero_or_onep (@1)
330 && !HONOR_SNANS (type)
331 && !HONOR_SIGNED_ZEROS (type))
332 (with { tree itype = FLOAT_TYPE_P (type) ? unsigned_type_for (type) : type; }
334 && (!VECTOR_MODE_P (TYPE_MODE (type))
335 || (VECTOR_MODE_P (TYPE_MODE (itype))
336 && optab_handler (and_optab,
337 TYPE_MODE (itype)) != CODE_FOR_nothing)))
338 (view_convert (bit_and:itype (view_convert @0)
339 (ne @1 { build_zero_cst (type); })))))))
341 /* In SWAR (SIMD within a register) code a signed comparison of packed data
342 can be constructed with a particular combination of shift, bitwise and,
343 and multiplication by constants. If that code is vectorized we can
344 convert this pattern into a more efficient vector comparison. */
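/* For example, with 8-bit lanes packed into 32-bit elements, the SWAR form
((x >> 7) & 0x01010101) * 0xff sets each byte to 0xff exactly when that
byte's sign bit was set, i.e. a per-byte x < 0 test: cmp_bits is 7 + 1 = 8,
the multiplier must be (1 << 8) - 1 and the mask must be 0x01010101. */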
346 (mult (bit_and (rshift @0 uniform_integer_cst_p@1)
347 uniform_integer_cst_p@2)
348 uniform_integer_cst_p@3)
350 tree rshift_cst = uniform_integer_cst_p (@1);
351 tree bit_and_cst = uniform_integer_cst_p (@2);
352 tree mult_cst = uniform_integer_cst_p (@3);
354 /* Make sure we're working with vectors and uniform vector constants. */
355 (if (VECTOR_TYPE_P (type)
356 && tree_fits_uhwi_p (rshift_cst)
357 && tree_fits_uhwi_p (mult_cst)
358 && tree_fits_uhwi_p (bit_and_cst))
359 /* Compute what constants would be needed for this to represent a packed
360 comparison based on the shift amount denoted by RSHIFT_CST. */
362 HOST_WIDE_INT vec_elem_bits = vector_element_bits (type);
363 poly_int64 vec_nelts = TYPE_VECTOR_SUBPARTS (type);
364 poly_int64 vec_bits = vec_elem_bits * vec_nelts;
365 unsigned HOST_WIDE_INT cmp_bits_i, bit_and_i, mult_i;
366 unsigned HOST_WIDE_INT target_mult_i, target_bit_and_i;
367 cmp_bits_i = tree_to_uhwi (rshift_cst) + 1;
368 mult_i = tree_to_uhwi (mult_cst);
369 target_mult_i = (HOST_WIDE_INT_1U << cmp_bits_i) - 1;
370 bit_and_i = tree_to_uhwi (bit_and_cst);
371 target_bit_and_i = 0;
373 /* The bit pattern in BIT_AND_I should be a mask for the least
374 significant bit of each packed element that is CMP_BITS wide. */
375 for (unsigned i = 0; i < vec_elem_bits / cmp_bits_i; i++)
376 target_bit_and_i = (target_bit_and_i << cmp_bits_i) | 1U;
378 (if ((exact_log2 (cmp_bits_i)) >= 0
379 && cmp_bits_i < HOST_BITS_PER_WIDE_INT
380 && multiple_p (vec_bits, cmp_bits_i)
381 && vec_elem_bits <= HOST_BITS_PER_WIDE_INT
382 && target_mult_i == mult_i
383 && target_bit_and_i == bit_and_i)
384 /* Compute the vector shape for the comparison and check if the target is
385 able to expand the comparison with that type. */
387 /* We're doing a signed comparison. */
388 tree cmp_type = build_nonstandard_integer_type (cmp_bits_i, 0);
389 poly_int64 vector_type_nelts = exact_div (vec_bits, cmp_bits_i);
390 tree vec_cmp_type = build_vector_type (cmp_type, vector_type_nelts);
391 tree vec_truth_type = truth_type_for (vec_cmp_type);
392 tree zeros = build_zero_cst (vec_cmp_type);
393 tree ones = build_all_ones_cst (vec_cmp_type);
395 (if (expand_vec_cmp_expr_p (vec_cmp_type, vec_truth_type, LT_EXPR)
396 && expand_vec_cond_expr_p (vec_cmp_type, vec_truth_type, LT_EXPR))
397 (view_convert:type (vec_cond (lt:vec_truth_type
398 (view_convert:vec_cmp_type @0)
400 { ones; } { zeros; })))))))))
402 (for cmp (gt ge lt le)
403 outp (convert convert negate negate)
404 outn (negate negate convert convert)
405 /* Transform X * (X > 0.0 ? 1.0 : -1.0) into abs(X). */
406 /* Transform X * (X >= 0.0 ? 1.0 : -1.0) into abs(X). */
407 /* Transform X * (X < 0.0 ? 1.0 : -1.0) into -abs(X). */
408 /* Transform X * (X <= 0.0 ? 1.0 : -1.0) into -abs(X). */
410 (mult:c @0 (cond (cmp @0 real_zerop) real_onep@1 real_minus_onep))
411 (if (!tree_expr_maybe_nan_p (@0) && !HONOR_SIGNED_ZEROS (type))
413 /* Transform X * (X > 0.0 ? -1.0 : 1.0) into -abs(X). */
414 /* Transform X * (X >= 0.0 ? -1.0 : 1.0) into -abs(X). */
415 /* Transform X * (X < 0.0 ? -1.0 : 1.0) into abs(X). */
416 /* Transform X * (X <= 0.0 ? -1.0 : 1.0) into abs(X). */
418 (mult:c @0 (cond (cmp @0 real_zerop) real_minus_onep real_onep@1))
419 (if (!tree_expr_maybe_nan_p (@0) && !HONOR_SIGNED_ZEROS (type))
422 /* Transform X * copysign (1.0, X) into abs(X). */
424 (mult:c @0 (COPYSIGN_ALL real_onep @0))
425 (if (!tree_expr_maybe_nan_p (@0) && !HONOR_SIGNED_ZEROS (type))
428 /* Transform X * copysign (1.0, -X) into -abs(X). */
430 (mult:c @0 (COPYSIGN_ALL real_onep (negate @0)))
431 (if (!tree_expr_maybe_nan_p (@0) && !HONOR_SIGNED_ZEROS (type))
434 /* Transform copysign (CST, X) into copysign (ABS(CST), X). */
436 (COPYSIGN_ALL REAL_CST@0 @1)
437 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@0)))
438 (COPYSIGN_ALL (negate @0) @1)))
440 /* Transform c ? x * copysign (1, y) : z to c ? x ^ signs(y) : z.
441 tree-ssa-math-opts.cc does the corresponding optimization for
442 unconditional multiplications (via xorsign). */
444 (IFN_COND_MUL:c @0 @1 (IFN_COPYSIGN real_onep @2) @3)
445 (with { tree signs = sign_mask_for (type); }
447 (with { tree inttype = TREE_TYPE (signs); }
449 (IFN_COND_XOR:inttype @0
450 (view_convert:inttype @1)
451 (bit_and (view_convert:inttype @2) { signs; })
452 (view_convert:inttype @3)))))))
454 /* (x >= 0 ? x : 0) + (x <= 0 ? -x : 0) -> abs x. */
456 (plus:c (max @0 integer_zerop) (max (negate @0) integer_zerop))
459 /* X * 1, X / 1 -> X. */
460 (for op (mult trunc_div ceil_div floor_div round_div exact_div)
465 /* (A / (1 << B)) -> (A >> B).
466 Only for unsigned A. For signed A, this would not preserve rounding
467 toward zero.
468 For example: (-1 / ( 1 << B)) != -1 >> B.
469 Also handle widening conversions, like:
470 (A / (unsigned long long) (1U << B)) -> (A >> B)
472 (A / (unsigned long long) (1 << B)) -> (A >> B).
473 If the left shift is signed, it can be done only if the upper bits
474 of A starting from shift's type sign bit are zero, as
475 (unsigned long long) (1 << 31) is -2147483648ULL, not 2147483648ULL,
476 so it is valid only if A >> 31 is zero. */
478 (trunc_div (convert?@0 @3) (convert2? (lshift integer_onep@1 @2)))
479 (if ((TYPE_UNSIGNED (type) || tree_expr_nonnegative_p (@0))
480 && (!VECTOR_TYPE_P (type)
481 || target_supports_op_p (type, RSHIFT_EXPR, optab_vector)
482 || target_supports_op_p (type, RSHIFT_EXPR, optab_scalar))
483 && (useless_type_conversion_p (type, TREE_TYPE (@1))
484 || (element_precision (type) >= element_precision (TREE_TYPE (@1))
485 && (TYPE_UNSIGNED (TREE_TYPE (@1))
486 || (element_precision (type)
487 == element_precision (TREE_TYPE (@1)))
488 || (INTEGRAL_TYPE_P (type)
489 && (tree_nonzero_bits (@0)
490 & wi::mask (element_precision (TREE_TYPE (@1)) - 1,
492 element_precision (type))) == 0)))))
493 (if (!VECTOR_TYPE_P (type)
494 && useless_type_conversion_p (TREE_TYPE (@3), TREE_TYPE (@1))
495 && element_precision (TREE_TYPE (@3)) < element_precision (type))
496 (convert (rshift @3 @2))
499 /* Preserve explicit divisions by 0: the C++ front-end wants to detect
500 undefined behavior in constexpr evaluation, and assuming that the division
501 traps enables better optimizations than these anyway. */
502 (for div (trunc_div ceil_div floor_div round_div exact_div)
503 /* 0 / X is always zero. */
505 (div integer_zerop@0 @1)
506 /* But not for 0 / 0 so that we can get the proper warnings and errors. */
507 (if (!integer_zerop (@1))
511 (div @0 integer_minus_onep@1)
512 (if (!TYPE_UNSIGNED (type))
514 /* X / bool_range_Y is X. */
517 (if (INTEGRAL_TYPE_P (type)
518 && ssa_name_has_boolean_range (@1)
519 && !flag_non_call_exceptions)
524 /* But not for 0 / 0 so that we can get the proper warnings and errors.
525 And not for _Fract types where we can't build 1. */
526 (if (!ALL_FRACT_MODE_P (TYPE_MODE (type))
527 && !integer_zerop (@0)
528 && (!flag_non_call_exceptions || tree_expr_nonzero_p (@0)))
529 { build_one_cst (type); }))
530 /* X / abs (X) is X < 0 ? -1 : 1. */
533 (if (INTEGRAL_TYPE_P (type)
534 && TYPE_OVERFLOW_UNDEFINED (type)
535 && !integer_zerop (@0)
536 && (!flag_non_call_exceptions || tree_expr_nonzero_p (@0)))
537 (cond (lt @0 { build_zero_cst (type); })
538 { build_minus_one_cst (type); } { build_one_cst (type); })))
541 (div:C @0 (negate @0))
542 (if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
543 && TYPE_OVERFLOW_UNDEFINED (type)
544 && !integer_zerop (@0)
545 && (!flag_non_call_exceptions || tree_expr_nonzero_p (@0)))
546 { build_minus_one_cst (type); })))
548 /* For unsigned integral types, FLOOR_DIV_EXPR is the same as
549 TRUNC_DIV_EXPR. Rewrite into the latter in this case. Similarly
550 for MOD instead of DIV. */
551 (for floor_divmod (floor_div floor_mod)
552 trunc_divmod (trunc_div trunc_mod)
555 (if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
556 && TYPE_UNSIGNED (type))
557 (trunc_divmod @0 @1))))
559 /* 1 / X -> X == 1 for unsigned integer X.
560 1 / X -> X >= -1 && X <= 1 ? X : 0 for signed integer X.
561 But not for 1 / 0 so that we can get proper warnings and errors,
562 and not for 1-bit integers as they are edge cases better handled
563 elsewhere. Delay the conversion of the signed division until late
564 because `1 / X` is simpler to handle than the resulting COND_EXPR. */
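/* In the signed replacement below, (unsigned) X + 1 <= 2 is a branch-free
test for X being one of -1, 0 or 1; 1 / 0 is undefined anyway, so selecting
X in that case is harmless. */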
566 (trunc_div integer_onep@0 @1)
567 (if (INTEGRAL_TYPE_P (type)
568 && TYPE_PRECISION (type) > 1
569 && !integer_zerop (@1)
570 && (!flag_non_call_exceptions || tree_expr_nonzero_p (@1)))
571 (if (TYPE_UNSIGNED (type))
572 (convert (eq:boolean_type_node @1 { build_one_cst (type); }))
573 (if (!canonicalize_math_p ())
574 (with { tree utype = unsigned_type_for (type); }
575 (cond (le (plus (convert:utype @1) { build_one_cst (utype); })
576 { build_int_cst (utype, 2); })
577 @1 { build_zero_cst (type); }))))))
579 /* Combine two successive divisions. Note that combining ceil_div
580 and floor_div is trickier and combining round_div even more so. */
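/* If C1 * C2 overflows, its magnitude exceeds that of every value of the
type, so the truncated quotient is zero; the one exception is a signed
product that wraps to exactly the minimum value (magnitude 2^(prec-1)),
for which X == min would still yield -1. */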
581 (for div (trunc_div exact_div)
583 (div (div@3 @0 INTEGER_CST@1) INTEGER_CST@2)
585 wi::overflow_type overflow;
586 wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
587 TYPE_SIGN (type), &overflow);
589 (if (div == EXACT_DIV_EXPR
590 || optimize_successive_divisions_p (@2, @3))
592 (div @0 { wide_int_to_tree (type, mul); })
593 (if (TYPE_UNSIGNED (type)
594 || mul != wi::min_value (TYPE_PRECISION (type), SIGNED))
595 { build_zero_cst (type); }))))))
597 /* Combine successive multiplications. Similar to above, but handling
598 overflow is different. */
600 (mult (mult @0 INTEGER_CST@1) INTEGER_CST@2)
602 wi::overflow_type overflow;
603 wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
604 TYPE_SIGN (type), &overflow);
606 /* Skip folding on overflow: the only special case is @1 * @2 == -INT_MIN,
607 otherwise undefined overflow implies that @0 must be zero. */
608 (if (!overflow || TYPE_OVERFLOW_WRAPS (type))
609 (mult @0 { wide_int_to_tree (type, mul); }))))
611 /* Similar to above, but there could be an extra add/sub between
612 successive multiplications. */
614 (mult (plus:s (mult:s@4 @0 INTEGER_CST@1) INTEGER_CST@2) INTEGER_CST@3)
616 bool overflowed = true;
617 wi::overflow_type ovf1, ovf2;
618 wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@3),
619 TYPE_SIGN (type), &ovf1);
620 wide_int add = wi::mul (wi::to_wide (@2), wi::to_wide (@3),
621 TYPE_SIGN (type), &ovf2);
622 if (TYPE_OVERFLOW_UNDEFINED (type))
626 if (ovf1 == wi::OVF_NONE && ovf2 == wi::OVF_NONE
627 && get_global_range_query ()->range_of_expr (vr0, @4)
628 && !vr0.varying_p () && !vr0.undefined_p ())
630 wide_int wmin0 = vr0.lower_bound ();
631 wide_int wmax0 = vr0.upper_bound ();
632 wmin0 = wi::mul (wmin0, wi::to_wide (@3), TYPE_SIGN (type), &ovf1);
633 wmax0 = wi::mul (wmax0, wi::to_wide (@3), TYPE_SIGN (type), &ovf2);
634 if (ovf1 == wi::OVF_NONE && ovf2 == wi::OVF_NONE)
636 wi::add (wmin0, add, TYPE_SIGN (type), &ovf1);
637 wi::add (wmax0, add, TYPE_SIGN (type), &ovf2);
638 if (ovf1 == wi::OVF_NONE && ovf2 == wi::OVF_NONE)
647 /* Skip folding on overflow. */
649 (plus (mult @0 { wide_int_to_tree (type, mul); })
650 { wide_int_to_tree (type, add); }))))
652 /* Similar to above, but a multiplication between successive additions. */
654 (plus (mult:s (plus:s @0 INTEGER_CST@1) INTEGER_CST@2) INTEGER_CST@3)
656 bool overflowed = true;
657 wi::overflow_type ovf1;
658 wi::overflow_type ovf2;
659 wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
660 TYPE_SIGN (type), &ovf1);
661 wide_int add = wi::add (mul, wi::to_wide (@3),
662 TYPE_SIGN (type), &ovf2);
663 if (TYPE_OVERFLOW_UNDEFINED (type))
667 if (ovf1 == wi::OVF_NONE && ovf2 == wi::OVF_NONE
668 && get_global_range_query ()->range_of_expr (vr0, @0)
669 && !vr0.varying_p () && !vr0.undefined_p ())
671 wide_int wmin0 = vr0.lower_bound ();
672 wide_int wmax0 = vr0.upper_bound ();
673 wmin0 = wi::mul (wmin0, wi::to_wide (@2), TYPE_SIGN (type), &ovf1);
674 wmax0 = wi::mul (wmax0, wi::to_wide (@2), TYPE_SIGN (type), &ovf2);
675 if (ovf1 == wi::OVF_NONE && ovf2 == wi::OVF_NONE)
677 wi::add (wmin0, mul, TYPE_SIGN (type), &ovf1);
678 wi::add (wmax0, mul, TYPE_SIGN (type), &ovf2);
679 if (ovf1 == wi::OVF_NONE && ovf2 == wi::OVF_NONE)
688 /* Skip folding on overflow. */
690 (plus (mult @0 @2) { wide_int_to_tree (type, add); }))))
692 /* Optimize A / A to 1.0 if we don't care about
693 NaNs or Infinities. */
696 (if (FLOAT_TYPE_P (type)
697 && ! HONOR_NANS (type)
698 && ! HONOR_INFINITIES (type))
699 { build_one_cst (type); }))
701 /* Optimize -A / A to -1.0 if we don't care about
702 NaNs or Infinities. */
704 (rdiv:C @0 (negate @0))
705 (if (FLOAT_TYPE_P (type)
706 && ! HONOR_NANS (type)
707 && ! HONOR_INFINITIES (type))
708 { build_minus_one_cst (type); }))
710 /* PR71078: x / abs(x) -> copysign (1.0, x) */
712 (rdiv:C (convert? @0) (convert? (abs @0)))
713 (if (SCALAR_FLOAT_TYPE_P (type)
714 && ! HONOR_NANS (type)
715 && ! HONOR_INFINITIES (type))
717 (if (types_match (type, float_type_node))
718 (BUILT_IN_COPYSIGNF { build_one_cst (type); } (convert @0)))
719 (if (types_match (type, double_type_node))
720 (BUILT_IN_COPYSIGN { build_one_cst (type); } (convert @0)))
721 (if (types_match (type, long_double_type_node))
722 (BUILT_IN_COPYSIGNL { build_one_cst (type); } (convert @0))))))
724 /* In IEEE floating point, x/1 is not equivalent to x for snans. */
727 (if (!tree_expr_maybe_signaling_nan_p (@0))
730 /* In IEEE floating point, x/-1 is not equivalent to -x for snans. */
732 (rdiv @0 real_minus_onep)
733 (if (!tree_expr_maybe_signaling_nan_p (@0))
736 (if (flag_reciprocal_math)
737 /* Convert (A/B)/C to A/(B*C). */
739 (rdiv (rdiv:s @0 @1) @2)
740 (rdiv @0 (mult @1 @2)))
742 /* Canonicalize x / (C1 * y) to (x * C2) / y. */
744 (rdiv @0 (mult:s @1 REAL_CST@2))
746 { tree tem = const_binop (RDIV_EXPR, type, build_one_cst (type), @2); }
748 (rdiv (mult @0 { tem; } ) @1))))
750 /* Convert A/(B/C) to (A/B)*C */
752 (rdiv @0 (rdiv:s @1 @2))
753 (mult (rdiv @0 @1) @2)))
755 /* Simplify x / (- y) to -x / y. */
757 (rdiv @0 (negate @1))
758 (rdiv (negate @0) @1))
760 (if (flag_unsafe_math_optimizations)
761 /* Simplify (C / x op 0.0) to x op 0.0 for C != 0, C != Inf/NaN.
762 Since C / x may underflow to zero, do this only for unsafe math. */
763 (for op (lt le gt ge)
766 (op (rdiv REAL_CST@0 @1) real_zerop@2)
767 (if (!HONOR_SIGNED_ZEROS (@1) && !HONOR_INFINITIES (@1))
769 (if (real_less (&dconst0, TREE_REAL_CST_PTR (@0)))
771 /* For C < 0, use the inverted operator. */
772 (if (real_less (TREE_REAL_CST_PTR (@0), &dconst0))
775 /* Optimize (X & (-A)) / A where A is a power of 2, to X >> log2(A) */
776 (for div (trunc_div ceil_div floor_div round_div exact_div)
778 (div (convert? (bit_and @0 INTEGER_CST@1)) INTEGER_CST@2)
779 (if (integer_pow2p (@2)
780 && tree_int_cst_sgn (@2) > 0
781 && tree_nop_conversion_p (type, TREE_TYPE (@0))
782 && wi::to_wide (@2) + wi::to_wide (@1) == 0)
784 { build_int_cst (integer_type_node,
785 wi::exact_log2 (wi::to_wide (@2))); }))))
787 /* If ARG1 is a constant, we can convert this to a multiply by the
788 reciprocal. This does not have the same rounding properties,
789 so only do this if -freciprocal-math. We can actually
790 always safely do it if ARG1 is a power of two, but it's hard to
791 tell if it is or not in a portable manner. */
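/* E.g. X / 4.0 can always be rewritten as X * 0.25 since 0.25 is exactly
representable, whereas X / 3.0 -> X * (1.0 / 3.0) changes the rounding and
therefore needs -freciprocal-math. */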
792 (for cst (REAL_CST COMPLEX_CST VECTOR_CST)
796 (if (flag_reciprocal_math
799 { tree tem = const_binop (RDIV_EXPR, type, build_one_cst (type), @1); }
801 (mult @0 { tem; } )))
802 (if (cst != COMPLEX_CST)
803 (with { tree inverse = exact_inverse (type, @1); }
805 (mult @0 { inverse; } ))))))))
807 (for mod (ceil_mod floor_mod round_mod trunc_mod)
808 /* 0 % X is always zero. */
810 (mod integer_zerop@0 @1)
811 /* But not for 0 % 0 so that we can get the proper warnings and errors. */
812 (if (!integer_zerop (@1))
814 /* X % 1 is always zero. */
816 (mod @0 integer_onep)
817 { build_zero_cst (type); })
818 /* X % -1 is zero. */
820 (mod @0 integer_minus_onep@1)
821 (if (!TYPE_UNSIGNED (type))
822 { build_zero_cst (type); }))
826 /* But not for 0 % 0 so that we can get the proper warnings and errors. */
827 (if (!integer_zerop (@0))
828 { build_zero_cst (type); }))
829 /* (X % Y) % Y is just X % Y. */
831 (mod (mod@2 @0 @1) @1)
833 /* From extract_muldiv_1: (X * C1) % C2 is zero if C1 is a multiple of C2. */
835 (mod (mult @0 INTEGER_CST@1) INTEGER_CST@2)
836 (if (ANY_INTEGRAL_TYPE_P (type)
837 && TYPE_OVERFLOW_UNDEFINED (type)
838 && wi::multiple_of_p (wi::to_wide (@1), wi::to_wide (@2),
840 { build_zero_cst (type); }))
841 /* For (X % C) == 0, if X is signed and C is power of 2, use unsigned
842 modulo and comparison, since it is simpler and equivalent. */
845 (cmp (mod @0 integer_pow2p@2) integer_zerop@1)
846 (if (!TYPE_UNSIGNED (TREE_TYPE (@0)))
847 (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
848 (cmp (mod (convert:utype @0) (convert:utype @2)) (convert:utype @1)))))))
850 /* X % -C is the same as X % C. */
852 (trunc_mod @0 INTEGER_CST@1)
853 (if (TYPE_SIGN (type) == SIGNED
854 && !TREE_OVERFLOW (@1)
855 && wi::neg_p (wi::to_wide (@1))
856 && !TYPE_OVERFLOW_TRAPS (type)
857 /* Avoid this transformation if C is INT_MIN, i.e. C == -C. */
858 && !sign_bit_p (@1, @1))
859 (trunc_mod @0 (negate @1))))
861 /* X % -Y is the same as X % Y. */
863 (trunc_mod @0 (convert? (negate @1)))
864 (if (INTEGRAL_TYPE_P (type)
865 && !TYPE_UNSIGNED (type)
866 && !TYPE_OVERFLOW_TRAPS (type)
867 && tree_nop_conversion_p (type, TREE_TYPE (@1))
868 /* Avoid this transformation if X might be INT_MIN or
869 Y might be -1, because we would then change valid
870 INT_MIN % -(-1) into invalid INT_MIN % -1. */
871 && (expr_not_equal_to (@0, wi::to_wide (TYPE_MIN_VALUE (type)))
872 || expr_not_equal_to (@1, wi::minus_one (TYPE_PRECISION
874 (trunc_mod @0 (convert @1))))
876 /* X - (X / Y) * Y is the same as X % Y. */
878 (minus (convert1? @0) (convert2? (mult:c (trunc_div @@0 @@1) @1)))
879 (if (INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
880 (convert (trunc_mod @0 @1))))
882 /* x * (1 + y / x) - y -> x - y % x */
884 (minus (mult:cs @0 (plus:s (trunc_div:s @1 @0) integer_onep)) @1)
885 (if (INTEGRAL_TYPE_P (type))
886 (minus @0 (trunc_mod @1 @0))))
888 /* Optimize TRUNC_MOD_EXPR by a power of two into a BIT_AND_EXPR,
889 i.e. "X % C" into "X & (C - 1)", if X and C are positive.
890 Also optimize A % (C << N) where C is a power of 2,
891 to A & ((C << N) - 1).
892 Also optimize "A shift (B % C)", if C is a power of 2, to
893 "A shift (B & (C - 1))". SHIFT operations include "<<" and ">>",
894 and assume (B % C) is nonnegative, as shifting by negative values would
895 be UB. */
896 (match (power_of_two_cand @1)
898 (match (power_of_two_cand @1)
899 (lshift INTEGER_CST@1 @2))
900 (for mod (trunc_mod floor_mod)
901 (for shift (lshift rshift)
903 (shift @0 (mod @1 (power_of_two_cand@2 @3)))
904 (if (integer_pow2p (@3) && tree_int_cst_sgn (@3) > 0)
905 (shift @0 (bit_and @1 (minus @2 { build_int_cst (TREE_TYPE (@2),
908 (mod @0 (convert? (power_of_two_cand@1 @2)))
909 (if ((TYPE_UNSIGNED (type) || tree_expr_nonnegative_p (@0))
910 /* Allow any integral conversions of the divisor, except
911 conversion from narrower signed to wider unsigned type
912 where if @1 would be negative power of two, the divisor
913 would not be a power of two. */
914 && INTEGRAL_TYPE_P (type)
915 && INTEGRAL_TYPE_P (TREE_TYPE (@1))
916 && (TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@1))
917 || TYPE_UNSIGNED (TREE_TYPE (@1))
918 || !TYPE_UNSIGNED (type))
919 && integer_pow2p (@2) && tree_int_cst_sgn (@2) > 0)
920 (with { tree utype = TREE_TYPE (@1);
921 if (!TYPE_OVERFLOW_WRAPS (utype))
922 utype = unsigned_type_for (utype); }
923 (bit_and @0 (convert (minus (convert:utype @1)
924 { build_one_cst (utype); })))))))
926 /* Simplify (unsigned t * 2)/2 -> unsigned t & 0x7FFFFFFF. */
928 (trunc_div (mult @0 integer_pow2p@1) @1)
929 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) && TYPE_UNSIGNED (TREE_TYPE (@0)))
930 (bit_and @0 { wide_int_to_tree
931 (type, wi::mask (TYPE_PRECISION (type)
932 - wi::exact_log2 (wi::to_wide (@1)),
933 false, TYPE_PRECISION (type))); })))
935 /* Simplify (unsigned t / 2) * 2 -> unsigned t & ~1. */
937 (mult (trunc_div @0 integer_pow2p@1) @1)
938 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) && TYPE_UNSIGNED (TREE_TYPE (@0)))
939 (bit_and @0 (negate @1))))
941 (for div (trunc_div ceil_div floor_div round_div exact_div)
942 /* Simplify (t * u) / u -> t. */
944 (div (mult:c @0 @1) @1)
945 (if (ANY_INTEGRAL_TYPE_P (type))
946 (if (TYPE_OVERFLOW_UNDEFINED (type) && !TYPE_OVERFLOW_SANITIZED (type))
949 (with {value_range vr0, vr1;}
950 (if (INTEGRAL_TYPE_P (type)
951 && get_range_query (cfun)->range_of_expr (vr0, @0)
952 && get_range_query (cfun)->range_of_expr (vr1, @1)
953 && range_op_handler (MULT_EXPR).overflow_free_p (vr0, vr1))
958 /* Simplify (t * u) / v -> t * (u / v) if u is multiple of v. */
960 (div (mult @0 INTEGER_CST@1) INTEGER_CST@2)
961 (if (INTEGRAL_TYPE_P (type)
962 && wi::multiple_of_p (wi::to_widest (@1), wi::to_widest (@2), SIGNED))
963 (if (TYPE_OVERFLOW_UNDEFINED (type) && !TYPE_OVERFLOW_SANITIZED (type))
964 (mult @0 (div! @1 @2))
965 (with {value_range vr0, vr1;}
966 (if (get_range_query (cfun)->range_of_expr (vr0, @0)
967 && get_range_query (cfun)->range_of_expr (vr1, @1)
968 && range_op_handler (MULT_EXPR).overflow_free_p (vr0, vr1))
969 (mult @0 (div! @1 @2))))
972 /* Simplify (t * u) / (t * v) -> (u / v) if u is multiple of v. */
974 (div (mult @0 INTEGER_CST@1) (mult @0 INTEGER_CST@2))
975 (if (INTEGRAL_TYPE_P (type)
976 && wi::multiple_of_p (wi::to_widest (@1), wi::to_widest (@2), SIGNED))
977 (if (TYPE_OVERFLOW_UNDEFINED (type) && !TYPE_OVERFLOW_SANITIZED (type))
980 (with {value_range vr0, vr1, vr2;}
981 (if (get_range_query (cfun)->range_of_expr (vr0, @0)
982 && get_range_query (cfun)->range_of_expr (vr1, @1)
983 && get_range_query (cfun)->range_of_expr (vr2, @2)
984 && range_op_handler (MULT_EXPR).overflow_free_p (vr0, vr1)
985 && range_op_handler (MULT_EXPR).overflow_free_p (vr0, vr2))
991 (for div (trunc_div exact_div)
992 /* Simplify (X + M*N) / N -> X / N + M. */
994 (div (plus:c@4 @0 (mult:c@3 @1 @2)) @2)
995 (with {value_range vr0, vr1, vr2, vr3, vr4;}
996 (if (INTEGRAL_TYPE_P (type)
997 && get_range_query (cfun)->range_of_expr (vr1, @1)
998 && get_range_query (cfun)->range_of_expr (vr2, @2)
999 /* "N*M" doesn't overflow. */
1000 && range_op_handler (MULT_EXPR).overflow_free_p (vr1, vr2)
1001 && get_range_query (cfun)->range_of_expr (vr0, @0)
1002 && get_range_query (cfun)->range_of_expr (vr3, @3)
1003 /* "X+(N*M)" doesn't overflow. */
1004 && range_op_handler (PLUS_EXPR).overflow_free_p (vr0, vr3)
1005 && get_range_query (cfun)->range_of_expr (vr4, @4)
1006 && !vr4.undefined_p ()
1007 /* "X+N*M" does not have the opposite sign of "X". */
1008 && (TYPE_UNSIGNED (type)
1009 || (vr0.nonnegative_p () && vr4.nonnegative_p ())
1010 || (vr0.nonpositive_p () && vr4.nonpositive_p ())))
1011 (plus (div @0 @2) @1))))
1013 /* Simplify (X - M*N) / N -> X / N - M. */
1015 (div (minus@4 @0 (mult:c@3 @1 @2)) @2)
1016 (with {value_range vr0, vr1, vr2, vr3, vr4;}
1017 (if (INTEGRAL_TYPE_P (type)
1018 && get_range_query (cfun)->range_of_expr (vr1, @1)
1019 && get_range_query (cfun)->range_of_expr (vr2, @2)
1020 /* "N * M" doesn't overflow. */
1021 && range_op_handler (MULT_EXPR).overflow_free_p (vr1, vr2)
1022 && get_range_query (cfun)->range_of_expr (vr0, @0)
1023 && get_range_query (cfun)->range_of_expr (vr3, @3)
1024 /* "X - (N*M)" doesn't overflow. */
1025 && range_op_handler (MINUS_EXPR).overflow_free_p (vr0, vr3)
1026 && get_range_query (cfun)->range_of_expr (vr4, @4)
1027 && !vr4.undefined_p ()
1028 /* "X-N*M" does not have the opposite sign of "X". */
1029 && (TYPE_UNSIGNED (type)
1030 || (vr0.nonnegative_p () && vr4.nonnegative_p ())
1031 || (vr0.nonpositive_p () && vr4.nonpositive_p ())))
1032 (minus (div @0 @2) @1)))))
1034 /* Simplify
1035 (X + C) / N -> X / N + C / N where C is a multiple of N.
1036 (X + C) >> N -> X >> N + C >> N if the low N bits of C are 0. */
1037 (for op (trunc_div exact_div rshift)
1039 (op (plus@3 @0 INTEGER_CST@1) INTEGER_CST@2)
1042 wide_int c = wi::to_wide (@1);
1043 wide_int n = wi::to_wide (@2);
1044 bool shift = op == RSHIFT_EXPR;
1045 #define plus_op1(v) (shift ? wi::rshift (v, n, TYPE_SIGN (type)) \
1046 : wi::div_trunc (v, n, TYPE_SIGN (type)))
1047 #define exact_mod(v) (shift ? wi::ctz (v) >= n.to_shwi () \
1048 : wi::multiple_of_p (v, n, TYPE_SIGN (type)))
1049 value_range vr0, vr1, vr3;
1051 (if (INTEGRAL_TYPE_P (type)
1052 && get_range_query (cfun)->range_of_expr (vr0, @0))
1054 && get_range_query (cfun)->range_of_expr (vr1, @1)
1055 /* "X+C" doesn't overflow. */
1056 && range_op_handler (PLUS_EXPR).overflow_free_p (vr0, vr1)
1057 && get_range_query (cfun)->range_of_expr (vr3, @3)
1058 && !vr3.undefined_p ()
1059 /* "X+C" and "X" are not of opposite sign. */
1060 && (TYPE_UNSIGNED (type)
1061 || (vr0.nonnegative_p () && vr3.nonnegative_p ())
1062 || (vr0.nonpositive_p () && vr3.nonpositive_p ())))
1063 (plus (op @0 @2) { wide_int_to_tree (type, plus_op1 (c)); })
1064 (if (!vr0.undefined_p () && TYPE_UNSIGNED (type) && c.sign_mask () < 0
1066 /* unsigned "X-(-C)" doesn't underflow. */
1067 && wi::geu_p (vr0.lower_bound (), -c))
1068 (plus (op @0 @2) { wide_int_to_tree (type, -plus_op1 (-c)); })))))))
1073 /* (nop_outer_cast)-(inner_cast)var -> -(outer_cast)(var)
1074 if var is smaller in precision.
1075 This is always safe whether the negation is done in the signed or the
1076 unsigned type, as a value that would make the negation undefined cannot
1077 show up. Note the outer cast cannot be a boolean type, as the only valid
1078 values are 0 and -1/1 (depending on the signedness of the boolean), and
1079 the negation is needed to produce the correct value. */
1081 (convert (negate:s@1 (convert:s @0)))
1082 (if (INTEGRAL_TYPE_P (type)
1083 && tree_nop_conversion_p (type, TREE_TYPE (@1))
1084 && TYPE_PRECISION (type) > TYPE_PRECISION (TREE_TYPE (@0))
1085 && TREE_CODE (type) != BOOLEAN_TYPE)
1086 (negate (convert @0))))
1088 (for op (negate abs)
1089 /* Simplify cos(-x) and cos(|x|) -> cos(x). Similarly for cosh. */
1090 (for coss (COS COSH)
1094 /* Simplify pow(-x, y) and pow(|x|,y) -> pow(x,y) if y is an even integer. */
1097 (pows (op @0) REAL_CST@1)
1098 (with { HOST_WIDE_INT n; }
1099 (if (real_isinteger (&TREE_REAL_CST (@1), &n) && (n & 1) == 0)
1101 /* Likewise for powi. */
1104 (pows (op @0) INTEGER_CST@1)
1105 (if ((wi::to_wide (@1) & 1) == 0)
1107 /* Strip negate and abs from both operands of hypot. */
1115 /* copysign(-x, y) and copysign(abs(x), y) -> copysign(x, y). */
1116 (for copysigns (COPYSIGN_ALL)
1118 (copysigns (op @0) @1)
1119 (copysigns @0 @1))))
1121 /* abs(x)*abs(x) -> x*x. Should be valid for all types. */
1123 (mult (abs@1 @0) @1)
1126 /* Convert absu(x)*absu(x) -> x*x. */
1128 (mult (absu@1 @0) @1)
1129 (mult (convert@2 @0) @2))
1131 /* cos(copysign(x, y)) -> cos(x). Similarly for cosh. */
1132 (for coss (COS COSH)
1133 (for copysigns (COPYSIGN)
1135 (coss (copysigns @0 @1))
1138 /* pow(copysign(x, y), z) -> pow(x, z) if z is an even integer. */
1140 (for copysigns (COPYSIGN)
1142 (pows (copysigns @0 @2) REAL_CST@1)
1143 (with { HOST_WIDE_INT n; }
1144 (if (real_isinteger (&TREE_REAL_CST (@1), &n) && (n & 1) == 0)
1146 /* Likewise for powi. */
1148 (for copysigns (COPYSIGN)
1150 (pows (copysigns @0 @2) INTEGER_CST@1)
1151 (if ((wi::to_wide (@1) & 1) == 0)
1155 (for copysigns (COPYSIGN)
1156 /* hypot(copysign(x, y), z) -> hypot(x, z). */
1158 (hypots (copysigns @0 @1) @2)
1160 /* hypot(x, copysign(y, z)) -> hypot(x, y). */
1162 (hypots @0 (copysigns @1 @2))
1165 /* copysign(x, CST) -> abs (x). If the target does not
1166 support the copysign optab then canonicalize
1167 copysign(x, -CST) -> fneg (abs (x)). */
1168 (for copysigns (COPYSIGN_ALL)
1170 (copysigns @0 REAL_CST@1)
1171 (if (!REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
1174 (if (!direct_internal_fn_supported_p (IFN_COPYSIGN, type,
1181 /* Transform fneg (fabs (X)) -> copysign (X, -1) as the canonical
1182 representation if the target supports the copysign optab. */
1185 (if (direct_internal_fn_supported_p (IFN_COPYSIGN, type,
1187 (IFN_COPYSIGN @0 { build_minus_one_cst (type); })))
1189 /* copysign(copysign(x, y), z) -> copysign(x, z). */
1190 (for copysigns (COPYSIGN_ALL)
1192 (copysigns (copysigns @0 @1) @2)
1195 /* copysign(x,y)*copysign(x,y) -> x*x. */
1196 (for copysigns (COPYSIGN_ALL)
1198 (mult (copysigns@2 @0 @1) @2)
1201 /* ccos(-x) -> ccos(x). Similarly for ccosh. */
1202 (for ccoss (CCOS CCOSH)
1207 /* cabs(-x) and cabs(conj(x)) -> cabs(x). */
1208 (for ops (conj negate)
1214 /* Fold (a * (1 << b)) into (a << b) */
1216 (mult:c @0 (convert? (lshift integer_onep@1 @2)))
1217 (if (! FLOAT_TYPE_P (type)
1218 && tree_nop_conversion_p (type, TREE_TYPE (@1)))
1221 /* Shifts by precision or greater result in zero. */
1222 (for shift (lshift rshift)
1224 (shift @0 uniform_integer_cst_p@1)
1225 (if ((GIMPLE || !sanitize_flags_p (SANITIZE_SHIFT_EXPONENT))
1226 /* Leave arithmetic right shifts of possibly negative values alone. */
1227 && (TYPE_UNSIGNED (type)
1228 || shift == LSHIFT_EXPR
1229 || tree_expr_nonnegative_p (@0))
1230 /* Use a signed compare to leave negative shift counts alone. */
1231 && wi::ges_p (wi::to_wide (uniform_integer_cst_p (@1)),
1232 element_precision (type)))
1233 { build_zero_cst (type); })))
1235 /* Shifts by constants distribute over several binary operations,
1236 hence (X << C) + (Y << C) can be simplified to (X + Y) << C. */
1237 (for op (plus minus)
1239 (op (lshift:s @0 @1) (lshift:s @2 @1))
1240 (if (INTEGRAL_TYPE_P (type)
1241 && TYPE_OVERFLOW_WRAPS (type)
1242 && !TYPE_SATURATING (type))
1243 (lshift (op @0 @2) @1))))
1245 (for op (bit_and bit_ior bit_xor)
1247 (op (lshift:s @0 @1) (lshift:s @2 @1))
1248 (if (INTEGRAL_TYPE_P (type))
1249 (lshift (op @0 @2) @1)))
1251 (op (rshift:s @0 @1) (rshift:s @2 @1))
1252 (if (INTEGRAL_TYPE_P (type))
1253 (rshift (op @0 @2) @1))))
1255 /* Fold (1 << (C - x)) where C = precision(type) - 1
1256 into ((1 << C) >> x). */
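/* Rationale: for 0 <= x <= C, 1 << (C - x) equals 2^C >> x, i.e.
(1 << C) >> x. The signed case is done in the corresponding unsigned type
because 1 << (prec - 1) does not fit in the signed type. */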
1258 (lshift integer_onep@0 (minus@1 INTEGER_CST@2 @3))
1259 (if (INTEGRAL_TYPE_P (type)
1260 && wi::eq_p (wi::to_wide (@2), TYPE_PRECISION (type) - 1)
1262 (if (TYPE_UNSIGNED (type))
1263 (rshift (lshift @0 @2) @3)
1265 { tree utype = unsigned_type_for (type); }
1266 (convert (rshift (lshift (convert:utype @0) @2) @3))))))
1268 /* Fold ((type)(a<0)) << SIGNBITOFA into ((type)a) & signbit. */
1270 (lshift (convert (lt @0 integer_zerop@1)) INTEGER_CST@2)
1271 (if (TYPE_SIGN (TREE_TYPE (@0)) == SIGNED
1272 && wi::eq_p (wi::to_wide (@2), TYPE_PRECISION (TREE_TYPE (@0)) - 1))
1273 (with { wide_int wone = wi::one (TYPE_PRECISION (type)); }
1274 (bit_and (convert @0)
1275 { wide_int_to_tree (type,
1276 wi::lshift (wone, wi::to_wide (@2))); }))))
1278 /* Fold (-x >> C) into -(x > 0) where C = precision(type) - 1. */
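/* Rationale: shifting -x arithmetically by prec - 1 yields -1 exactly when
-x is negative, i.e. when x > 0 (x == INT_MIN is excluded by the
undefined-overflow requirement), and 0 otherwise, which is -(x > 0). */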
1279 (for cst (INTEGER_CST VECTOR_CST)
1281 (rshift (negate:s @0) cst@1)
1282 (if (!TYPE_UNSIGNED (type)
1283 && TYPE_OVERFLOW_UNDEFINED (type))
1284 (with { tree stype = TREE_TYPE (@1);
1285 tree bt = truth_type_for (type);
1286 tree zeros = build_zero_cst (type);
1287 tree cst = NULL_TREE; }
1289 /* Handle scalar case. */
1290 (if (INTEGRAL_TYPE_P (type)
1291 /* If we apply the rule to the scalar type before vectorization
1292 we will enforce the result of the comparison being a bool
1293 which will require an extra AND on the result that is
1294 indistinguishable from the case where the user actually wanted 0
1295 or 1 as the result, so it could not be removed. */
1296 && canonicalize_math_after_vectorization_p ()
1297 && wi::eq_p (wi::to_wide (@1), TYPE_PRECISION (type) - 1))
1298 (negate (convert (gt @0 { zeros; }))))
1299 /* Handle vector case. */
1300 (if (VECTOR_INTEGER_TYPE_P (type)
1301 /* First check whether the target has the same mode for vector
1302 comparison results as its operands do. */
1303 && TYPE_MODE (bt) == TYPE_MODE (type)
1304 /* Then check to see if the target is able to expand the comparison
1305 with the given type later on, otherwise we may ICE. */
1306 && expand_vec_cmp_expr_p (type, bt, GT_EXPR)
1307 && (cst = uniform_integer_cst_p (@1)) != NULL
1308 && wi::eq_p (wi::to_wide (cst), element_precision (type) - 1))
1309 (view_convert (gt:bt @0 { zeros; }))))))))
1311 /* Fold (C1/X)*C2 into (C1*C2)/X. */
1313 (mult (rdiv@3 REAL_CST@0 @1) REAL_CST@2)
1314 (if (flag_associative_math
1317 { tree tem = const_binop (MULT_EXPR, type, @0, @2); }
1319 (rdiv { tem; } @1)))))
1321 /* Simplify ~X & X as zero. */
1323 (bit_and (convert? @0) (convert? @1))
1324 (with { bool wascmp; }
1325 (if (types_match (TREE_TYPE (@0), TREE_TYPE (@1))
1326 && bitwise_inverted_equal_p (@0, @1, wascmp))
1327 { wascmp ? constant_boolean_node (false, type) : build_zero_cst (type); })))
1329 /* PR71636: Transform x & ((1U << b) - 1) -> x & ~(~0U << b); */
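/* For unsigned types the two forms denote the same mask of the b low bits:
(1U << b) - 1 == ~(~0U << b). */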
1331 (bit_and:c @0 (plus:s (lshift:s integer_onep @1) integer_minus_onep))
1332 (if (TYPE_UNSIGNED (type))
1333 (bit_and @0 (bit_not (lshift { build_all_ones_cst (type); } @1)))))
1335 (for bitop (bit_and bit_ior)
1337 /* PR35691: Transform
1338 (x == 0 & y == 0) -> (x | typeof(x)(y)) == 0.
1339 (x != 0 | y != 0) -> (x | typeof(x)(y)) != 0. */
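/* x and y are both zero iff (x | y) is zero; the equal-precision check
below ensures that converting y to x's type does not change which bits are
set. */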
1341 (bitop (cmp @0 integer_zerop@2) (cmp @1 integer_zerop))
1342 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
1343 && INTEGRAL_TYPE_P (TREE_TYPE (@1))
1344 && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
1345 (cmp (bit_ior @0 (convert @1)) @2)))
1346 /* Transform
1347 (x == -1 & y == -1) -> (x & typeof(x)(y)) == -1.
1348 (x != -1 | y != -1) -> (x & typeof(x)(y)) != -1. */
1350 (bitop (cmp @0 integer_all_onesp@2) (cmp @1 integer_all_onesp))
1351 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
1352 && INTEGRAL_TYPE_P (TREE_TYPE (@1))
1353 && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
1354 (cmp (bit_and @0 (convert @1)) @2))))
1356 /* Fold (A & ~B) - (A & B) into (A ^ B) - B. */
1358 (minus (bit_and:cs @0 (bit_not @1)) (bit_and:cs @0 @1))
1359 (minus (bit_xor @0 @1) @1))
1361 (minus (bit_and:s @0 INTEGER_CST@2) (bit_and:s @0 INTEGER_CST@1))
1362 (if (~wi::to_wide (@2) == wi::to_wide (@1))
1363 (minus (bit_xor @0 @1) @1)))
1365 /* Fold (A & B) - (A & ~B) into B - (A ^ B). */
1367 (minus (bit_and:cs @0 @1) (bit_and:cs @0 (bit_not @1)))
1368 (minus @1 (bit_xor @0 @1)))
1370 /* Simplify (X & ~Y) |^+ (~X & Y) -> X ^ Y. */
1371 (for op (bit_ior bit_xor plus)
1373 (op (bit_and:c @0 @2) (bit_and:c @3 @1))
1374 (with { bool wascmp0, wascmp1; }
1375 (if (bitwise_inverted_equal_p (@2, @1, wascmp0)
1376 && bitwise_inverted_equal_p (@0, @3, wascmp1)
1377 && ((!wascmp0 && !wascmp1)
1378 || element_precision (type) == 1))
1381 /* PR53979: Transform ((a ^ b) | a) -> (a | b) */
1383 (bit_ior:c (bit_xor:c @0 @1) @0)
1386 /* (a & ~b) | (a ^ b) --> a ^ b */
1388 (bit_ior:c (bit_and:c @0 (bit_not @1)) (bit_xor:c@2 @0 @1))
1391 /* (a & ~b) ^ ~a --> ~(a & b) */
1393 (bit_xor:c (bit_and:cs @0 (bit_not @1)) (bit_not @0))
1394 (bit_not (bit_and @0 @1)))
1396 /* (~a & b) ^ a --> (a | b) */
1398 (bit_xor:c (bit_and:cs (bit_not @0) @1) @0)
1401 /* (a | b) & ~(a ^ b) --> a & b */
1403 (bit_and:c (bit_ior @0 @1) (bit_not (bit_xor:c @0 @1)))
1406 /* (a | b) & (a == b) --> a & b (boolean version of the above). */
1408 (bit_and:c (bit_ior @0 @1) (nop_convert? (eq:c @0 @1)))
1409 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
1410 && TYPE_PRECISION (TREE_TYPE (@0)) == 1)
1413 /* a | ~(a ^ b) --> a | ~b */
1415 (bit_ior:c @0 (bit_not:s (bit_xor:c @0 @1)))
1416 (bit_ior @0 (bit_not @1)))
1418 /* a | (a == b) --> a | (b^1) (boolean version of the above). */
1420 (bit_ior:c @0 (nop_convert? (eq:c @0 @1)))
1421 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
1422 && TYPE_PRECISION (TREE_TYPE (@0)) == 1)
1423 (bit_ior @0 (bit_xor @1 { build_one_cst (type); }))))
1425 /* a | ((~a) ^ b) --> a | (~b) (alt version of the above 2) */
1427 (bit_ior:c @0 (bit_xor:cs @1 @2))
1428 (with { bool wascmp; }
1429 (if (bitwise_inverted_equal_p (@0, @1, wascmp)
1430 && (!wascmp || element_precision (type) == 1))
1431 (bit_ior @0 (bit_not @2)))))
1433 /* a & ~(a ^ b) --> a & b */
1435 (bit_and:c @0 (bit_not (bit_xor:c @0 @1)))
1438 /* a & (a == b) --> a & b (boolean version of the above). */
1440 (bit_and:c @0 (nop_convert? (eq:c @0 @1)))
1441 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
1442 && TYPE_PRECISION (TREE_TYPE (@0)) == 1)
1445 /* a & ((~a) ^ b) --> a & b (alt version of the above 2) */
1447 (bit_and:c @0 (bit_xor:c @1 @2))
1448 (with { bool wascmp; }
1449 (if (bitwise_inverted_equal_p (@0, @1, wascmp)
1450 && (!wascmp || element_precision (type) == 1))
1453 /* (a | b) | (a &^ b) --> a | b */
1454 (for op (bit_and bit_xor)
1456 (bit_ior:c (bit_ior@2 @0 @1) (op:c @0 @1))
1459 /* (a & b) | ~(a ^ b) --> ~(a ^ b) */
1461 (bit_ior:c (bit_and:c @0 @1) (bit_not@2 (bit_xor @0 @1)))
1464 /* (a & b) | (a == b) --> a == b */
1466 (bit_ior:c (bit_and:c @0 @1) (nop_convert?@2 (eq @0 @1)))
1467 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
1468 && TYPE_PRECISION (TREE_TYPE (@0)) == 1)
1471 /* ~(~a & b) --> a | ~b */
1473 (bit_not (bit_and:cs (bit_not @0) @1))
1474 (bit_ior @0 (bit_not @1)))
1476 /* ~(~a | b) --> a & ~b */
1478 (bit_not (bit_ior:cs (bit_not @0) @1))
1479 (bit_and @0 (bit_not @1)))
1481 /* (a ^ b) & ((b ^ c) ^ a) --> (a ^ b) & ~c */
1483 (bit_and:c (bit_xor:c@3 @0 @1) (bit_xor:cs (bit_xor:cs @1 @2) @0))
1484 (bit_and @3 (bit_not @2)))
1486 /* (a ^ b) | ((b ^ c) ^ a) --> (a ^ b) | c */
1488 (bit_ior:c (bit_xor:c@3 @0 @1) (bit_xor:c (bit_xor:c @1 @2) @0))
1491 /* (~X | C) ^ D -> (X | C) ^ (~D ^ C) if (~D ^ C) can be simplified. */
1493 (bit_xor:c (bit_ior:cs (bit_not:s @0) @1) @2)
1494 (bit_xor (bit_ior @0 @1) (bit_xor! (bit_not! @2) @1)))
1496 /* (~X & C) ^ D -> (X & C) ^ (D ^ C) if (D ^ C) can be simplified. */
1498 (bit_xor:c (bit_and:cs (bit_not:s @0) @1) @2)
1499 (bit_xor (bit_and @0 @1) (bit_xor! @2 @1)))
1501 /* Simplify (~X & Y) to X ^ Y if we know that (X & ~Y) is 0. */
1503 (bit_and (bit_not SSA_NAME@0) INTEGER_CST@1)
1504 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
1505 && wi::bit_and_not (get_nonzero_bits (@0), wi::to_wide (@1)) == 0)
1508 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
1509 ((A & N) + B) & M -> (A + B) & M
1510 Similarly if (N & M) == 0,
1511 ((A | N) + B) & M -> (A + B) & M
1512 and for - instead of + (or unary - instead of +)
1513 and/or ^ instead of |.
1514 If B is constant and (B & M) == 0, fold into A & M. */
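/* For instance, with M == N == 7, ((A & 7) + B) & 7 becomes (A + B) & 7:
the bits of A cleared by the inner mask can only influence sum bits above
the outer mask, since carries propagate upwards only. */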
1515 (for op (plus minus)
1516 (for bitop (bit_and bit_ior bit_xor)
1518 (bit_and (op:s (bitop:s@0 @3 INTEGER_CST@4) @1) INTEGER_CST@2)
1521 tree utype = fold_bit_and_mask (TREE_TYPE (@0), @2, op, @0, bitop,
1522 @3, @4, @1, ERROR_MARK, NULL_TREE,
1525 (convert (bit_and (op (convert:utype { pmop[0]; })
1526 (convert:utype { pmop[1]; }))
1527 (convert:utype @2))))))
1529 (bit_and (op:s @0 (bitop:s@1 @3 INTEGER_CST@4)) INTEGER_CST@2)
1532 tree utype = fold_bit_and_mask (TREE_TYPE (@0), @2, op, @0, ERROR_MARK,
1533 NULL_TREE, NULL_TREE, @1, bitop, @3,
1536 (convert (bit_and (op (convert:utype { pmop[0]; })
1537 (convert:utype { pmop[1]; }))
1538 (convert:utype @2)))))))
1540 (bit_and (op:s @0 @1) INTEGER_CST@2)
1543 tree utype = fold_bit_and_mask (TREE_TYPE (@0), @2, op, @0, ERROR_MARK,
1544 NULL_TREE, NULL_TREE, @1, ERROR_MARK,
1545 NULL_TREE, NULL_TREE, pmop); }
1547 (convert (bit_and (op (convert:utype { pmop[0]; })
1548 (convert:utype { pmop[1]; }))
1549 (convert:utype @2)))))))
1550 (for bitop (bit_and bit_ior bit_xor)
1552 (bit_and (negate:s (bitop:s@0 @2 INTEGER_CST@3)) INTEGER_CST@1)
1555 tree utype = fold_bit_and_mask (TREE_TYPE (@0), @1, NEGATE_EXPR, @0,
1556 bitop, @2, @3, NULL_TREE, ERROR_MARK,
1557 NULL_TREE, NULL_TREE, pmop); }
1559 (convert (bit_and (negate (convert:utype { pmop[0]; }))
1560 (convert:utype @1)))))))
1562 /* X % Y is smaller than Y. */
1565 (cmp:c (trunc_mod @0 @1) @1)
1566 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
1567 { constant_boolean_node (cmp == LT_EXPR, type); })))
1571 (bit_ior @0 integer_all_onesp@1)
1576 (bit_ior @0 integer_zerop)
1581 (bit_and @0 integer_zerop@1)
1586 (for op (bit_ior bit_xor)
1588 (op (convert? @0) (convert? @1))
1589 (with { bool wascmp; }
1590 (if (types_match (TREE_TYPE (@0), TREE_TYPE (@1))
1591 && bitwise_inverted_equal_p (@0, @1, wascmp))
1594 ? constant_boolean_node (true, type)
1595 : build_all_ones_cst (TREE_TYPE (@0)); })))))
1600 { build_zero_cst (type); })
1602 /* Canonicalize X ^ ~0 to ~X. */
1604 (bit_xor @0 integer_all_onesp@1)
1609 (bit_and @0 integer_all_onesp)
1612 /* x & x -> x, x | x -> x */
1613 (for bitop (bit_and bit_ior)
1618 /* x & C -> x if we know that x & ~C == 0. */
1621 (bit_and SSA_NAME@0 INTEGER_CST@1)
1622 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
1623 && wi::bit_and_not (get_nonzero_bits (@0), wi::to_wide (@1)) == 0)
1626 /* `a & (x | CST)` -> a if we know that (a & ~CST) == 0 */
1628 (bit_and:c SSA_NAME@0 (bit_ior @1 INTEGER_CST@2))
1629 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
1630 && wi::bit_and_not (get_nonzero_bits (@0), wi::to_wide (@2)) == 0)
1633 /* x | C -> C if we know that x & ~C == 0. */
1635 (bit_ior SSA_NAME@0 INTEGER_CST@1)
1636 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
1637 && wi::bit_and_not (get_nonzero_bits (@0), wi::to_wide (@1)) == 0)
1641 /* ~(~X - Y) -> X + Y and ~(~X + Y) -> X - Y. */
1643 (bit_not (minus (bit_not @0) @1))
1646 (bit_not (plus:c (bit_not @0) @1))
1648 /* (~X - ~Y) -> Y - X. */
1650 (minus (bit_not @0) (bit_not @1))
1651 (if (!TYPE_OVERFLOW_SANITIZED (type))
1652 (with { tree utype = unsigned_type_for (type); }
1653 (convert (minus (convert:utype @1) (convert:utype @0))))))
1655 /* ~(X - Y) -> ~X + Y. */
1657 (bit_not (minus:s @0 @1))
1658 (plus (bit_not @0) @1))
1660 (bit_not (plus:s @0 INTEGER_CST@1))
1661 (if ((INTEGRAL_TYPE_P (type)
1662 && TYPE_UNSIGNED (type))
1663 || (!TYPE_OVERFLOW_SANITIZED (type)
1664 && may_negate_without_overflow_p (@1)))
1665 (plus (bit_not @0) { const_unop (NEGATE_EXPR, type, @1); })))
1668 /* ~X + Y -> (Y - X) - 1. */
1670 (plus:c (bit_not @0) @1)
1671 (if (ANY_INTEGRAL_TYPE_P (type)
1672 && TYPE_OVERFLOW_WRAPS (type)
1673 /* -1 - X is folded to ~X, so we'd recurse endlessly. */
1674 && !integer_all_onesp (@1))
1675 (plus (minus @1 @0) { build_minus_one_cst (type); })
1676 (if (INTEGRAL_TYPE_P (type)
1677 && TREE_CODE (@1) == INTEGER_CST
1678 && wi::to_wide (@1) != wi::min_value (TYPE_PRECISION (type),
1680 (minus (plus @1 { build_minus_one_cst (type); }) @0))))
1683 /* ~(X >> Y) -> ~X >> Y if ~X can be simplified. */
1685 (bit_not (rshift:s @0 @1))
1686 (if (!TYPE_UNSIGNED (TREE_TYPE (@0)))
1687 (rshift (bit_not! @0) @1)
1688 /* For logical right shifts, this is possible only if @0 doesn't
1689 have MSB set and the logical right shift is changed into
1690 arithmetic shift. */
1691 (if (INTEGRAL_TYPE_P (type)
1692 && !wi::neg_p (tree_nonzero_bits (@0)))
1693 (with { tree stype = signed_type_for (TREE_TYPE (@0)); }
1694 (convert (rshift (bit_not! (convert:stype @0)) @1))))))
1696 /* x + (x & 1) -> (x + 1) & ~1 */
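/* Both forms round X up to the next even value: odd X becomes X + 1, even X
is unchanged. */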
1698 (plus:c @0 (bit_and:s @0 integer_onep@1))
1699 (bit_and (plus @0 @1) (bit_not @1)))
1701 /* x & ~(x & y) -> x & ~y */
1702 /* x | ~(x | y) -> x | ~y */
1703 (for bitop (bit_and bit_ior)
1705 (bitop:c @0 (bit_not (bitop:cs @0 @1)))
1706 (bitop @0 (bit_not @1))))
1708 /* (~x & y) | ~(x | y) -> ~x */
1710 (bit_ior:c (bit_and:c (bit_not@2 @0) @1) (bit_not (bit_ior:c @0 @1)))
1713 /* (x | y) ^ (x | ~y) -> ~x */
1715 (bit_xor:c (bit_ior:c @0 @1) (bit_ior:c @0 (bit_not @1)))
1718 /* (x & y) | ~(x | y) -> ~(x ^ y) */
1720 (bit_ior:c (bit_and:s @0 @1) (bit_not:s (bit_ior:s @0 @1)))
1721 (bit_not (bit_xor @0 @1)))
1723 /* (~x | y) ^ (x ^ y) -> x | ~y */
1725 (bit_xor:c (bit_ior:cs (bit_not @0) @1) (bit_xor:s @0 @1))
1726 (bit_ior @0 (bit_not @1)))
1728 /* (x ^ y) | ~(x | y) -> ~(x & y) */
1730 (bit_ior:c (bit_xor:s @0 @1) (bit_not:s (bit_ior:s @0 @1)))
1731 (bit_not (bit_and @0 @1)))
1733 /* (x & y) ^ (x | y) -> x ^ y */
1735 (bit_xor:c (bit_and @0 @1) (bit_ior @0 @1))
1738 /* (x ^ y) ^ (x | y) -> x & y */
1740 (bit_xor:c (bit_xor @0 @1) (bit_ior @0 @1))
1743 /* (x & y) + (x ^ y) -> x | y */
1744 /* (x & y) | (x ^ y) -> x | y */
1745 /* (x & y) ^ (x ^ y) -> x | y */
1746 (for op (plus bit_ior bit_xor)
1748 (op:c (bit_and @0 @1) (bit_xor @0 @1))
1751 /* (x & y) + (x | y) -> x + y */
1753 (plus:c (bit_and @0 @1) (bit_ior @0 @1))
1756 /* (x + y) - (x | y) -> x & y */
1758 (minus (plus @0 @1) (bit_ior @0 @1))
1759 (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type)
1760 && !TYPE_SATURATING (type))
1763 /* (x + y) - (x & y) -> x | y */
1765 (minus (plus @0 @1) (bit_and @0 @1))
1766 (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type)
1767 && !TYPE_SATURATING (type))
1770 /* (x | y) - y -> (x & ~y) */
1772 (minus (bit_ior:cs @0 @1) @1)
1773 (bit_and @0 (bit_not @1)))
1775 /* (x | y) - (x ^ y) -> x & y */
1777 (minus (bit_ior @0 @1) (bit_xor @0 @1))
1780 /* (x | y) - (x & y) -> x ^ y */
1782 (minus (bit_ior @0 @1) (bit_and @0 @1))
1785 /* (x | y) & ~(x & y) -> x ^ y */
1787 (bit_and:c (bit_ior @0 @1) (bit_not (bit_and @0 @1)))
1790 /* (x | y) & (~x ^ y) -> x & y */
1792 (bit_and:c (bit_ior:c @0 @1) (bit_xor:c @1 @2))
1793 (with { bool wascmp; }
1794 (if (bitwise_inverted_equal_p (@0, @2, wascmp)
1795 && (!wascmp || element_precision (type) == 1))
1798 /* (~x | y) & (x | ~y) -> ~(x ^ y) */
1800 (bit_and (bit_ior:cs (bit_not @0) @1) (bit_ior:cs @0 (bit_not @1)))
1801 (bit_not (bit_xor @0 @1)))
1803 /* (~x | y) ^ (x | ~y) -> x ^ y */
1805 (bit_xor (bit_ior:c (bit_not @0) @1) (bit_ior:c @0 (bit_not @1)))
1808 /* ((x & y) - (x | y)) - 1 -> ~(x ^ y) */
1810 (plus (nop_convert1? (minus@2 (nop_convert2? (bit_and:c @0 @1))
1811 (nop_convert2? (bit_ior @0 @1))))
1813 (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type)
1814 && !TYPE_SATURATING (type) && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2))
1815 && !TYPE_OVERFLOW_TRAPS (TREE_TYPE (@2))
1816 && !TYPE_SATURATING (TREE_TYPE (@2)))
1817 (bit_not (convert (bit_xor @0 @1)))))
1819 (minus (nop_convert1? (plus@2 (nop_convert2? (bit_and:c @0 @1))
1821 (nop_convert3? (bit_ior @0 @1)))
1822 (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type)
1823 && !TYPE_SATURATING (type) && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2))
1824 && !TYPE_OVERFLOW_TRAPS (TREE_TYPE (@2))
1825 && !TYPE_SATURATING (TREE_TYPE (@2)))
1826 (bit_not (convert (bit_xor @0 @1)))))
1828 (minus (nop_convert1? (bit_and @0 @1))
1829 (nop_convert2? (plus@2 (nop_convert3? (bit_ior:c @0 @1))
1831 (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type)
1832 && !TYPE_SATURATING (type) && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2))
1833 && !TYPE_OVERFLOW_TRAPS (TREE_TYPE (@2))
1834 && !TYPE_SATURATING (TREE_TYPE (@2)))
1835 (bit_not (convert (bit_xor @0 @1)))))
1837 /* ~x & ~y -> ~(x | y)
1838 ~x | ~y -> ~(x & y) */
1839 (for op (bit_and bit_ior)
1840 rop (bit_ior bit_and)
1842 (op (convert1? (bit_not @0)) (convert2? (bit_not @1)))
1843 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
1844 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
1845 (bit_not (rop (convert @0) (convert @1))))))
1847 /* If we are XORing or adding two BIT_AND_EXPR's, both of which are and'ing
1848 with a constant, and the two constants have no bits in common,
1849 we should treat this as a BIT_IOR_EXPR since this may produce more
1850 simplifications.  */
1851 (for op (bit_xor plus)
1853 (op (convert1? (bit_and@4 @0 INTEGER_CST@1))
1854 (convert2? (bit_and@5 @2 INTEGER_CST@3)))
1855 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
1856 && tree_nop_conversion_p (type, TREE_TYPE (@2))
1857 && (wi::to_wide (@1) & wi::to_wide (@3)) == 0)
1858 (bit_ior (convert @4) (convert @5)))))
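/* For illustration only (a hypothetical C sketch, not a pattern): since
   the two masks below share no bits, no carries can occur, so the plus
   (or xor) of the masked values behaves like an inclusive or:

     unsigned pack (unsigned hi, unsigned lo)
     { return (hi & 0xf0u) + (lo & 0x0fu); }

   is treated as (hi & 0xf0u) | (lo & 0x0fu), matching the "no bits in
   common" condition checked above.  */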
1860 /* (X | Y) ^ X -> Y & ~X */
1862 (bit_xor:c (convert1? (bit_ior:c @@0 @1)) (convert2? @0))
1863 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1864 (convert (bit_and @1 (bit_not @0)))))
1866 /* (~X | Y) ^ X -> ~(X & Y). */
1868 (bit_xor:c (nop_convert1? (bit_ior:c (nop_convert2? (bit_not @0)) @1)) @2)
1869 (if (bitwise_equal_p (@0, @2))
1870 (convert (bit_not (bit_and @0 (convert @1))))))
1872 /* Convert ~X ^ ~Y to X ^ Y. */
1874 (bit_xor (convert1? (bit_not @0)) (convert2? (bit_not @1)))
1875 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
1876 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
1877 (bit_xor (convert @0) (convert @1))))
1879 /* Convert ~X ^ C to X ^ ~C. */
1881 (bit_xor (convert? (bit_not @0)) INTEGER_CST@1)
1882 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1883 (bit_xor (convert @0) (bit_not @1))))
1885 /* Fold (X & Y) ^ Y and (X ^ Y) & Y as ~X & Y. */
1886 (for opo (bit_and bit_xor)
1887 opi (bit_xor bit_and)
1889 (opo:c (opi:cs @0 @1) @1)
1890 (bit_and (bit_not @0) @1)))
1892 /* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both
1893 operands are another bit-wise operation with a common input. If so,
1894 distribute the bit operations to save an operation and possibly two if
1895 constants are involved. For example, convert
1896 (A | B) & (A | C) into A | (B & C)
1897 Further simplification will occur if B and C are constants. */
1898 (for op (bit_and bit_ior bit_xor)
1899 rop (bit_ior bit_and bit_and)
1901 (op (convert? (rop:c @@0 @1)) (convert? (rop:c @0 @2)))
1902 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
1903 && tree_nop_conversion_p (type, TREE_TYPE (@2)))
1904 (rop (convert @0) (op (convert @1) (convert @2))))))
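/* For illustration only (a hypothetical C sketch, not a pattern): with
   constant operands the distribution above enables further folding, e.g.

     unsigned f (unsigned a) { return (a | 4u) & (a | 1u); }

   becomes a | (4u & 1u), i.e. a | 0u, i.e. simply a.  */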
1906 /* Some simple reassociation for bit operations, also handled in reassoc. */
1907 /* (X & Y) & Y -> X & Y
1908 (X | Y) | Y -> X | Y */
1909 (for op (bit_and bit_ior)
1911 (op:c (convert1?@2 (op:c @0 @@1)) (convert2? @1))
1913 /* (X ^ Y) ^ Y -> X */
1915 (bit_xor:c (convert1? (bit_xor:c @0 @@1)) (convert2? @1))
1918 /* (X & ~Y) & Y -> 0 */
1920 (bit_and:c (bit_and @0 @1) @2)
1921 (with { bool wascmp; }
1922 (if (bitwise_inverted_equal_p (@0, @2, wascmp)
1923 || bitwise_inverted_equal_p (@1, @2, wascmp))
1924 { wascmp ? constant_boolean_node (false, type) : build_zero_cst (type); })))
1925 /* (X | ~Y) | Y -> -1 */
1927 (bit_ior:c (bit_ior @0 @1) @2)
1928 (with { bool wascmp; }
1929 (if ((bitwise_inverted_equal_p (@0, @2, wascmp)
1930 || bitwise_inverted_equal_p (@1, @2, wascmp))
1931 && (!wascmp || element_precision (type) == 1))
1932 { build_all_ones_cst (TREE_TYPE (@0)); })))
1934 /* (X & Y) & (X & Z) -> (X & Y) & Z
1935 (X | Y) | (X | Z) -> (X | Y) | Z */
1936 (for op (bit_and bit_ior)
1938 (op (convert1?@3 (op:c@4 @0 @1)) (convert2?@5 (op:c@6 @0 @2)))
1939 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
1940 && tree_nop_conversion_p (type, TREE_TYPE (@2)))
1941 (if (single_use (@5) && single_use (@6))
1942 (op @3 (convert @2))
1943 (if (single_use (@3) && single_use (@4))
1944 (op (convert @1) @5))))))
1945 /* (X ^ Y) ^ (X ^ Z) -> Y ^ Z */
1947 (bit_xor (convert1? (bit_xor:c @0 @1)) (convert2? (bit_xor:c @0 @2)))
1948 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
1949 && tree_nop_conversion_p (type, TREE_TYPE (@2)))
1950 (bit_xor (convert @1) (convert @2))))
1952 /* Convert abs (abs (X)) into abs (X).
1953 Also convert absu (absu (X)) into absu (X).  */
1959 (absu (convert@2 (absu@1 @0)))
1960 (if (tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@1)))
1963 /* Convert abs[u] (-X) -> abs[u] (X). */
1972 /* Convert abs[u] (X) where X is nonnegative -> (X). */
1974 (abs tree_expr_nonnegative_p@0)
1978 (absu tree_expr_nonnegative_p@0)
1981 /* Simplify (-(X < 0) | 1) * X into abs (X) or absu(X). */
1983 (mult:c (nop_convert1?
1984 (bit_ior (nop_convert2? (negate (convert? (lt @0 integer_zerop))))
1987 (if (INTEGRAL_TYPE_P (type)
1988 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
1989 && !TYPE_UNSIGNED (TREE_TYPE (@0)))
1990 (if (TYPE_UNSIGNED (type))
1997 /* A few cases of fold-const.cc negate_expr_p predicate. */
1998 (match negate_expr_p
2000 (if ((INTEGRAL_TYPE_P (type)
2001 && TYPE_UNSIGNED (type))
2002 || (!TYPE_OVERFLOW_SANITIZED (type)
2003 && may_negate_without_overflow_p (t)))))
2004 (match negate_expr_p
2006 (match negate_expr_p
2008 (if (!TYPE_OVERFLOW_SANITIZED (type))))
2009 (match negate_expr_p
2011 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (t)))))
2012 /* VECTOR_CST handling of non-wrapping types would recurse in unsupported
2013 ways.  */
2014 (match negate_expr_p
2016 (if (FLOAT_TYPE_P (TREE_TYPE (type)) || TYPE_OVERFLOW_WRAPS (type))))
2017 (match negate_expr_p
2019 (if ((ANY_INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_WRAPS (type))
2020 || (FLOAT_TYPE_P (type)
2021 && !HONOR_SIGN_DEPENDENT_ROUNDING (type)
2022 && !HONOR_SIGNED_ZEROS (type)))))
2024 /* (-A) * (-B) -> A * B */
2026 (mult:c (convert1? (negate @0)) (convert2? negate_expr_p@1))
2027 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
2028 && tree_nop_conversion_p (type, TREE_TYPE (@1)))
2029 (mult (convert @0) (convert (negate @1)))))
2031 /* -(A + B) -> (-B) - A. */
2033 (negate (plus:c @0 negate_expr_p@1))
2034 (if (!HONOR_SIGN_DEPENDENT_ROUNDING (type)
2035 && !HONOR_SIGNED_ZEROS (type))
2036 (minus (negate @1) @0)))
2038 /* -(A - B) -> B - A. */
2040 (negate (minus @0 @1))
2041 (if ((ANY_INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_SANITIZED (type))
2042 || (FLOAT_TYPE_P (type)
2043 && !HONOR_SIGN_DEPENDENT_ROUNDING (type)
2044 && !HONOR_SIGNED_ZEROS (type)))
2047 (negate (pointer_diff @0 @1))
2048 (if (TYPE_OVERFLOW_UNDEFINED (type))
2049 (pointer_diff @1 @0)))
2051 /* A - B -> A + (-B) if B is easily negatable. */
2053 (minus @0 negate_expr_p@1)
2054 (if (!FIXED_POINT_TYPE_P (type))
2055 (plus @0 (negate @1))))
2057 /* 1 - a is a ^ 1 if a has a boolean range. */
2058 /* This is only enabled for GIMPLE because sometimes
2059 cfun is not set for the function that contains
2060 the SSA_NAME (e.g. fold might be called while
2061 IPA passes are running). */
2063 (minus integer_onep@0 SSA_NAME@1)
2064 (if (INTEGRAL_TYPE_P (type)
2065 && ssa_name_has_boolean_range (@1))
2068 /* Other simplifications of negation (cf. fold_negate_expr_1). */
2070 (negate (mult:c@0 @1 negate_expr_p@2))
2071 (if (! TYPE_UNSIGNED (type)
2072 && ! HONOR_SIGN_DEPENDENT_ROUNDING (type)
2074 (mult @1 (negate @2))))
2077 (negate (rdiv@0 @1 negate_expr_p@2))
2078 (if (! HONOR_SIGN_DEPENDENT_ROUNDING (type)
2080 (rdiv @1 (negate @2))))
2083 (negate (rdiv@0 negate_expr_p@1 @2))
2084 (if (! HONOR_SIGN_DEPENDENT_ROUNDING (type)
2086 (rdiv (negate @1) @2)))
2088 /* Fold -((int)x >> (prec - 1)) into (unsigned)x >> (prec - 1). */
2090 (negate (convert? (rshift @0 INTEGER_CST@1)))
2091 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
2092 && wi::to_wide (@1) == element_precision (type) - 1)
2093 (with { tree stype = TREE_TYPE (@0);
2094 tree ntype = TYPE_UNSIGNED (stype) ? signed_type_for (stype)
2095 : unsigned_type_for (stype); }
2096 (if (VECTOR_TYPE_P (type))
2097 (view_convert (rshift (view_convert:ntype @0) @1))
2098 (convert (rshift (convert:ntype @0) @1))))))
2100 /* Try to fold (type) X op CST -> (type) (X op ((type-x) CST))
2101 when profitable.
2102 For bitwise binary operations apply operand conversions to the
2103 binary operation result instead of to the operands. This allows
2104 combining successive conversions and bitwise binary operations.
2105 We combine the above two cases by using a conditional convert. */
2106 (for bitop (bit_and bit_ior bit_xor)
2108 (bitop (convert@2 @0) (convert?@3 @1))
2109 (if (((TREE_CODE (@1) == INTEGER_CST
2110 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
2111 && (int_fits_type_p (@1, TREE_TYPE (@0))
2112 || tree_nop_conversion_p (TREE_TYPE (@0), type)))
2113 || types_match (@0, @1))
2114 && !POINTER_TYPE_P (TREE_TYPE (@0))
2115 && !VECTOR_TYPE_P (TREE_TYPE (@0))
2116 && TREE_CODE (TREE_TYPE (@0)) != OFFSET_TYPE
2117 /* ??? This transform conflicts with fold-const.cc doing
2118 Convert (T)(x & c) into (T)x & (T)c, if c is an integer
2119 constants (if x has signed type, the sign bit cannot be set
2120 in c). This folds extension into the BIT_AND_EXPR.
2121 Restrict it to GIMPLE to avoid endless recursions. */
2122 && (bitop != BIT_AND_EXPR || GIMPLE)
2123 && (/* That's a good idea if the conversion widens the operand, thus
2124 after hoisting the conversion the operation will be narrower.
2125 It is also a good idea if the conversion is a nop, as it moves the
2126 conversion to one side, allowing the conversions to be combined. */
2127 TYPE_PRECISION (TREE_TYPE (@0)) < TYPE_PRECISION (type)
2128 /* The conversion check for being a nop can only be done at the gimple
2129 level as fold_binary has some re-association code which can conflict
2130 with this if there is a "constant" which is not a full INTEGER_CST. */
2131 || (GIMPLE && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (type))
2132 /* It's also a good idea if the conversion is to a non-integer
2133 mode.  */
2134 || GET_MODE_CLASS (TYPE_MODE (type)) != MODE_INT
2135 /* Or if the precision of TO is not the same as the precision
2136 of its mode.  */
2137 || !type_has_mode_precision_p (type)
2138 /* In GIMPLE, getting rid of 2 conversions for one new results
2139 in smaller IL.  */
2141 && TREE_CODE (@1) != INTEGER_CST
2142 && tree_nop_conversion_p (type, TREE_TYPE (@0))
2144 && single_use (@3))))
2145 (convert (bitop @0 (convert @1)))))
2146 /* In GIMPLE, getting rid of 2 conversions for one new results
2147 in smaller IL.  */
2149 (convert (bitop:cs@2 (nop_convert:s @0) @1))
2151 && TREE_CODE (@1) != INTEGER_CST
2152 && tree_nop_conversion_p (type, TREE_TYPE (@2))
2153 && types_match (type, @0)
2154 && !POINTER_TYPE_P (TREE_TYPE (@0))
2155 && TREE_CODE (TREE_TYPE (@0)) != OFFSET_TYPE)
2156 (bitop @0 (convert @1)))))
2158 (for bitop (bit_and bit_ior)
2159 rbitop (bit_ior bit_and)
2160 /* (x | y) & x -> x */
2161 /* (x & y) | x -> x */
2163 (bitop:c (rbitop:c @0 @1) @0)
2165 /* (~x | y) & x -> x & y */
2166 /* (~x & y) | x -> x | y */
2168 (bitop:c (rbitop:c @2 @1) @0)
2169 (with { bool wascmp; }
2170 (if (bitwise_inverted_equal_p (@0, @2, wascmp)
2171 && (!wascmp || element_precision (type) == 1))
2173 /* (x | y) & (x & z) -> (x & z) */
2174 /* (x & y) | (x | z) -> (x | z) */
2176 (bitop:c (rbitop:c @0 @1) (bitop:c@3 @0 @2))
2178 /* (x | c) & ~(y | c) -> x & ~(y | c) */
2179 /* (x & c) | ~(y & c) -> x | ~(y & c) */
2181 (bitop:c (rbitop:c @0 @1) (bit_not@3 (rbitop:c @1 @2)))
2183 /* x & ~(y | x) -> 0 */
2184 /* x | ~(y & x) -> -1 */
2186 (bitop:c @0 (bit_not (rbitop:c @0 @1)))
2187 (if (bitop == BIT_AND_EXPR)
2188 { build_zero_cst (type); }
2189 { build_minus_one_cst (type); })))
2191 /* ((x | y) & z) | x -> (z & y) | x
2192 ((x ^ y) & z) | x -> (z & y) | x */
2193 (for op (bit_ior bit_xor)
2195 (bit_ior:c (nop_convert1?:s
2196 (bit_and:cs (nop_convert2?:s (op:cs @0 @1)) @2)) @3)
2197 (if (bitwise_equal_p (@0, @3))
2198 (convert (bit_ior (bit_and @1 (convert @2)) (convert @0))))))
2200 /* (x | CST1) & CST2 -> (x & CST2) | (CST1 & CST2) */
2202 (bit_and (bit_ior @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
2203 (bit_ior (bit_and @0 @2) (bit_and! @1 @2)))
2205 /* Combine successive equal operations with constants. */
2206 (for bitop (bit_and bit_ior bit_xor)
2208 (bitop (bitop @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
2209 (if (!CONSTANT_CLASS_P (@0))
2210 /* This is the canonical form regardless of whether (bitop @1 @2) can be
2211 folded to a constant. */
2212 (bitop @0 (bitop! @1 @2))
2213 /* In this case we have three constants and (bitop @0 @1) doesn't fold
2214 to a constant. This can happen if @0 or @1 is a POLY_INT_CST and if
2215 the values involved are such that the operation can't be decided at
2216 compile time. Try folding one of @0 or @1 with @2 to see whether
2217 that combination can be decided at compile time.
2219 Keep the existing form if both folds fail, to avoid endless
2220 oscillation.  */
2221 (with { tree cst1 = const_binop (bitop, type, @0, @2); }
2223 (bitop @1 { cst1; })
2224 (with { tree cst2 = const_binop (bitop, type, @1, @2); }
2226 (bitop @0 { cst2; }))))))))
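/* For illustration only (a hypothetical C sketch, not a pattern): the
   canonical form above merges chained constant masks, e.g.

     unsigned low_byte (unsigned x) { return (x & 0xffffu) & 0xffu; }

   becomes x & (0xffffu & 0xffu), i.e. x & 0xffu.  */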
2228 /* Try simple folding for X op !X, and X op X with the help
2229 of the truth_valued_p and logical_inverted_value predicates. */
2230 (match truth_valued_p
2232 (if (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) == 1)))
2233 (for op (tcc_comparison truth_and truth_andif truth_or truth_orif truth_xor)
2234 (match truth_valued_p
2236 (match truth_valued_p
2239 (match (logical_inverted_value @0)
2241 (match (logical_inverted_value @0)
2242 (bit_not truth_valued_p@0))
2243 (match (logical_inverted_value @0)
2244 (eq @0 integer_zerop))
2245 (match (logical_inverted_value @0)
2246 (ne truth_valued_p@0 integer_truep))
2247 (match (logical_inverted_value @0)
2248 (bit_xor truth_valued_p@0 integer_truep))
2252 (bit_and:c @0 (logical_inverted_value @0))
2253 { build_zero_cst (type); })
2254 /* X | !X and X ^ !X -> 1, if X is truth-valued. */
2255 (for op (bit_ior bit_xor)
2257 (op:c truth_valued_p@0 (logical_inverted_value @0))
2258 { constant_boolean_node (true, type); }))
2259 /* X ==/!= !X is false/true. */
2262 (op:c truth_valued_p@0 (logical_inverted_value @0))
2263 { constant_boolean_node (op == NE_EXPR ? true : false, type); }))
2267 (bit_not (bit_not @0))
2270 /* zero_one_valued_p will match when a value is known to be either
2271 0 or 1 including constants 0 or 1.
2272 Signed 1-bit values include -1, so they cannot match here. */
2273 (match zero_one_valued_p
2275 (if (INTEGRAL_TYPE_P (type)
2276 && (TYPE_UNSIGNED (type)
2277 || TYPE_PRECISION (type) > 1)
2278 && wi::leu_p (tree_nonzero_bits (@0), 1))))
2279 (match zero_one_valued_p
2281 (if (INTEGRAL_TYPE_P (type)
2282 && (TYPE_UNSIGNED (type)
2283 || TYPE_PRECISION (type) > 1))))
2285 /* (a&1) is always [0,1] too. This is useful again when
2286 the range is not known. */
2287 /* Note this can't be recursive because of the way VN handles
2288 equivalents; recursing here would cause an infinite recursion. */
2289 (match zero_one_valued_p
2290 (bit_and:c@0 @1 integer_onep)
2291 (if (INTEGRAL_TYPE_P (type))))
2293 /* A conversion from a zero_one_valued_p is still a [0,1].
2294 This is useful when the range of a variable is not known. */
2295 /* Note this match can't be recursive because of the way VN handles
2296 nop conversions as equivalent; recursing between them would not terminate. */
2297 (match zero_one_valued_p
2299 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
2300 && (TYPE_UNSIGNED (TREE_TYPE (@1))
2301 || TYPE_PRECISION (TREE_TYPE (@1)) > 1)
2302 && INTEGRAL_TYPE_P (type)
2303 && (TYPE_UNSIGNED (type)
2304 || TYPE_PRECISION (type) > 1)
2305 && wi::leu_p (tree_nonzero_bits (@1), 1))))
2307 /* Transform { 0 or 1 } * { 0 or 1 } into { 0 or 1 } & { 0 or 1 }. */
2309 (mult zero_one_valued_p@0 zero_one_valued_p@1)
2310 (if (INTEGRAL_TYPE_P (type))
2313 (for cmp (tcc_comparison)
2314 icmp (inverted_tcc_comparison)
2315 /* Fold (((a < b) & c) | ((a >= b) & d)) into (a < b ? c : d) & 1. */
2318 (bit_and:c (convert? (cmp@0 @01 @02)) @3)
2319 (bit_and:c (convert? (icmp@4 @01 @02)) @5))
2320 (if (INTEGRAL_TYPE_P (type)
2321 && invert_tree_comparison (cmp, HONOR_NANS (@01)) == icmp
2322 /* The scalar version has to be canonicalized after vectorization
2323 because it turns unconditional loads into conditional ones, which
2324 means we lose vectorization because the loads may trap. */
2325 && canonicalize_math_after_vectorization_p ())
2326 (bit_and (cond @0 @3 @5) { build_one_cst (type); })))
2328 /* Fold ((-(a < b) & c) | (-(a >= b) & d)) into a < b ? c : d. This is
2329 canonicalized further and we recognize the conditional form:
2330 (a < b ? c : 0) | (a >= b ? d : 0) into a < b ? c : d. */
2333 (cond (cmp@0 @01 @02) @3 zerop)
2334 (cond (icmp@4 @01 @02) @5 zerop))
2335 (if (INTEGRAL_TYPE_P (type)
2336 && invert_tree_comparison (cmp, HONOR_NANS (@01)) == icmp
2337 /* The scalar version has to be canonicalized after vectorization
2338 because it turns unconditional loads into conditional ones, which
2339 means we lose vectorization because the loads may trap. */
2340 && canonicalize_math_after_vectorization_p ())
2343 /* Vector Fold (((a < b) & c) | ((a >= b) & d)) into a < b ? c : d.
2344 and ((~(a < b) & c) | (~(a >= b) & d)) into a < b ? c : d. */
2347 (bit_and:c (vec_cond:s (cmp@0 @6 @7) @4 @5) @2)
2348 (bit_and:c (vec_cond:s (icmp@1 @6 @7) @4 @5) @3))
2349 (if (integer_zerop (@5)
2350 && invert_tree_comparison (cmp, HONOR_NANS (@6)) == icmp)
2352 (if (integer_onep (@4))
2353 (bit_and (vec_cond @0 @2 @3) @4))
2354 (if (integer_minus_onep (@4))
2355 (vec_cond @0 @2 @3)))
2356 (if (integer_zerop (@4)
2357 && invert_tree_comparison (cmp, HONOR_NANS (@6)) == icmp)
2359 (if (integer_onep (@5))
2360 (bit_and (vec_cond @0 @3 @2) @5))
2361 (if (integer_minus_onep (@5))
2362 (vec_cond @0 @3 @2))))))
2364 /* Scalar Vectorized Fold ((-(a < b) & c) | (-(a >= b) & d))
2365 into a < b ? c : d. */
2368 (vec_cond:s (cmp@0 @4 @5) @2 integer_zerop)
2369 (vec_cond:s (icmp@1 @4 @5) @3 integer_zerop))
2370 (if (invert_tree_comparison (cmp, HONOR_NANS (@4)) == icmp)
2371 (vec_cond @0 @2 @3))))
2373 /* Transform X & -Y into X * Y when Y is { 0 or 1 }. */
2375 (bit_and:c (convert? (negate zero_one_valued_p@0)) @1)
2376 (if (INTEGRAL_TYPE_P (type)
2377 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
2378 && TREE_CODE (TREE_TYPE (@0)) != BOOLEAN_TYPE
2379 /* Sign extending of the neg or a truncation of the neg
2380 is needed. */
2381 && (!TYPE_UNSIGNED (TREE_TYPE (@0))
2382 || TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@0))))
2383 (mult (convert @0) @1)))
2385 /* Narrow integer multiplication by a zero_one_valued_p operand.
2386 Multiplication by [0,1] is guaranteed not to overflow. */
2388 (convert (mult@0 zero_one_valued_p@1 INTEGER_CST@2))
2389 (if (INTEGRAL_TYPE_P (type)
2390 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
2391 && TYPE_PRECISION (type) < TYPE_PRECISION (TREE_TYPE (@0)))
2392 (mult (convert @1) (convert @2))))
2394 /* (X << C) != 0 can be simplified to X, when X is zero_one_valued_p.
2395 Check that the shift is well-defined (C is less than TYPE_PRECISION)
2396 as some targets (such as x86's SSE) may return zero for larger C. */
2398 (ne (lshift zero_one_valued_p@0 INTEGER_CST@1) integer_zerop@2)
2399 (if (tree_fits_shwi_p (@1)
2400 && tree_to_shwi (@1) > 0
2401 && tree_to_shwi (@1) < TYPE_PRECISION (TREE_TYPE (@0)))
2404 /* (X << C) == 0 can be simplified to X == 0, when X is zero_one_valued_p.
2405 Check that the shift is well-defined (C is less than TYPE_PRECISION)
2406 as some targets (such as x86's SSE) may return zero for larger C. */
2408 (eq (lshift zero_one_valued_p@0 INTEGER_CST@1) integer_zerop@2)
2409 (if (tree_fits_shwi_p (@1)
2410 && tree_to_shwi (@1) > 0
2411 && tree_to_shwi (@1) < TYPE_PRECISION (TREE_TYPE (@0)))
2414 /* Convert ~ (-A) to A - 1. */
2416 (bit_not (convert? (negate @0)))
2417 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
2418 || !TYPE_UNSIGNED (TREE_TYPE (@0)))
2419 (convert (minus @0 { build_each_one_cst (TREE_TYPE (@0)); }))))
2421 /* Convert - (~A) to A + 1. */
2423 (negate (nop_convert? (bit_not @0)))
2424 (plus (view_convert @0) { build_each_one_cst (type); }))
2426 /* (a & b) ^ (a == b) -> !(a | b) */
2427 /* (a & b) == (a ^ b) -> !(a | b) */
2428 (for first_op (bit_xor eq)
2429 second_op (eq bit_xor)
2431 (first_op:c (bit_and:c truth_valued_p@0 truth_valued_p@1) (second_op:c @0 @1))
2432 (bit_not (bit_ior @0 @1))))
2434 /* Convert ~ (A - 1) or ~ (A + -1) to -A. */
2436 (bit_not (convert? (minus @0 integer_each_onep)))
2437 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
2438 || !TYPE_UNSIGNED (TREE_TYPE (@0)))
2439 (convert (negate @0))))
2441 (bit_not (convert? (plus @0 integer_all_onesp)))
2442 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
2443 || !TYPE_UNSIGNED (TREE_TYPE (@0)))
2444 (convert (negate @0))))
2446 /* Part of convert ~(X ^ Y) to ~X ^ Y or X ^ ~Y if ~X or ~Y simplify. */
2448 (bit_not (convert? (bit_xor @0 INTEGER_CST@1)))
2449 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
2450 (convert (bit_xor @0 (bit_not @1)))))
2452 (bit_not (convert? (bit_xor:c (bit_not @0) @1)))
2453 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
2454 (convert (bit_xor @0 @1))))
2456 /* Otherwise prefer ~(X ^ Y) to ~X ^ Y as more canonical. */
2458 (bit_xor:c (nop_convert?:s (bit_not:s @0)) @1)
2459 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
2460 (bit_not (bit_xor (view_convert @0) @1))))
2462 /* ~(a ^ b) is a == b for truth valued a and b. */
2464 (bit_not (bit_xor:s truth_valued_p@0 truth_valued_p@1))
2465 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
2466 && TYPE_PRECISION (TREE_TYPE (@0)) == 1)
2467 (convert (eq @0 @1))))
2469 /* (~a) == b is a ^ b for truth valued a and b. */
2471 (eq:c (bit_not:s truth_valued_p@0) truth_valued_p@1)
2472 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
2473 && TYPE_PRECISION (TREE_TYPE (@0)) == 1)
2474 (convert (bit_xor @0 @1))))
2476 /* (x & ~m) | (y & m) -> ((x ^ y) & m) ^ x */
2478 (bit_ior:c (bit_and:cs @0 (bit_not @2)) (bit_and:cs @1 @2))
2479 (bit_xor (bit_and (bit_xor @0 @1) @2) @0))
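/* For illustration only (a hypothetical C sketch, not a pattern): this is
   the classic masked-merge idiom; both forms select the bits of y under
   mask m and the bits of x elsewhere, the rewritten form needing one
   operation less:

     unsigned merge1 (unsigned x, unsigned y, unsigned m)
     { return (x & ~m) | (y & m); }

     unsigned merge2 (unsigned x, unsigned y, unsigned m)
     { return ((x ^ y) & m) ^ x; }  */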
2481 /* Fold A - (A & B) into ~B & A. */
2483 (minus (convert1? @0) (convert2?:s (bit_and:cs @@0 @1)))
2484 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
2485 && tree_nop_conversion_p (type, TREE_TYPE (@1)))
2486 (convert (bit_and (bit_not @1) @0))))
2488 /* (m1 CMP m2) * d -> (m1 CMP m2) ? d : 0 */
2489 (if (!canonicalize_math_p ())
2490 (for cmp (tcc_comparison)
2492 (mult:c (convert (cmp@0 @1 @2)) @3)
2493 (if (INTEGRAL_TYPE_P (type)
2494 && INTEGRAL_TYPE_P (TREE_TYPE (@0)))
2495 (cond @0 @3 { build_zero_cst (type); })))
2496 /* (-(m1 CMP m2)) & d -> (m1 CMP m2) ? d : 0 */
2498 (bit_and:c (negate (convert (cmp@0 @1 @2))) @3)
2499 (if (INTEGRAL_TYPE_P (type)
2500 && INTEGRAL_TYPE_P (TREE_TYPE (@0)))
2501 (cond @0 @3 { build_zero_cst (type); })))
2505 /* For integral types with undefined overflow and C != 0 fold
2506 x * C EQ/NE y * C into x EQ/NE y. */
2509 (cmp (mult:c @0 @1) (mult:c @2 @1))
2510 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
2511 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
2512 && tree_expr_nonzero_p (@1))
2515 /* For integral types with wrapping overflow and C odd fold
2516 x * C EQ/NE y * C into x EQ/NE y. */
2519 (cmp (mult @0 INTEGER_CST@1) (mult @2 @1))
2520 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
2521 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))
2522 && (TREE_INT_CST_LOW (@1) & 1) != 0)
2525 /* For integral types with undefined overflow and C != 0 fold
2526 x * C RELOP y * C into:
2528 x RELOP y for nonnegative C
2529 y RELOP x for negative C */
2530 (for cmp (lt gt le ge)
2532 (cmp (mult:c @0 @1) (mult:c @2 @1))
2533 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
2534 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
2535 (if (tree_expr_nonnegative_p (@1) && tree_expr_nonzero_p (@1))
2537 (if (TREE_CODE (@1) == INTEGER_CST
2538 && wi::neg_p (wi::to_wide (@1), TYPE_SIGN (TREE_TYPE (@1))))
2541 /* (X - 1U) <= INT_MAX-1U into (int) X > 0. */
2545 (cmp (plus @0 integer_minus_onep@1) INTEGER_CST@2)
2546 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
2547 && TYPE_UNSIGNED (TREE_TYPE (@0))
2548 && TYPE_PRECISION (TREE_TYPE (@0)) > 1
2549 && (wi::to_wide (@2)
2550 == wi::max_value (TYPE_PRECISION (TREE_TYPE (@0)), SIGNED) - 1))
2551 (with { tree stype = signed_type_for (TREE_TYPE (@0)); }
2552 (icmp (convert:stype @0) { build_int_cst (stype, 0); })))))
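/* For illustration only (a worked example, not a pattern): for 32-bit
   unsigned x the comparison  x - 1u <= 0x7ffffffeu  (i.e. INT_MAX - 1,
   with the subtraction wrapping at x == 0) holds exactly when
   1 <= x <= 0x7fffffff, which is the same as saying the signed
   reinterpretation of x is positive, i.e. (int) x > 0.  */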
2554 /* X / 4 < Y / 4 iff X < Y when the division is known to be exact. */
2555 (for cmp (simple_comparison)
2557 (cmp (convert?@3 (exact_div @0 INTEGER_CST@2)) (convert? (exact_div @1 @2)))
2558 (if (element_precision (@3) >= element_precision (@0)
2559 && types_match (@0, @1))
2560 (if (wi::lt_p (wi::to_wide (@2), 0, TYPE_SIGN (TREE_TYPE (@2))))
2561 (if (!TYPE_UNSIGNED (TREE_TYPE (@3)))
2563 (if (tree_expr_nonzero_p (@0) && tree_expr_nonzero_p (@1))
2566 tree utype = unsigned_type_for (TREE_TYPE (@0));
2568 (cmp (convert:utype @1) (convert:utype @0)))))
2569 (if (wi::gt_p (wi::to_wide (@2), 1, TYPE_SIGN (TREE_TYPE (@2))))
2570 (if (TYPE_UNSIGNED (TREE_TYPE (@0)) || !TYPE_UNSIGNED (TREE_TYPE (@3)))
2574 tree utype = unsigned_type_for (TREE_TYPE (@0));
2576 (cmp (convert:utype @0) (convert:utype @1)))))))))
2578 /* X / C1 op C2 into a simple range test. */
2579 (for cmp (simple_comparison)
2581 (cmp (trunc_div:s @0 INTEGER_CST@1) INTEGER_CST@2)
2582 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
2583 && integer_nonzerop (@1)
2584 && !TREE_OVERFLOW (@1)
2585 && !TREE_OVERFLOW (@2))
2586 (with { tree lo, hi; bool neg_overflow;
2587 enum tree_code code = fold_div_compare (cmp, @1, @2, &lo, &hi,
2590 (if (code == LT_EXPR || code == GE_EXPR)
2591 (if (TREE_OVERFLOW (lo))
2592 { build_int_cst (type, (code == LT_EXPR) ^ neg_overflow); }
2593 (if (code == LT_EXPR)
2596 (if (code == LE_EXPR || code == GT_EXPR)
2597 (if (TREE_OVERFLOW (hi))
2598 { build_int_cst (type, (code == LE_EXPR) ^ neg_overflow); }
2599 (if (code == LE_EXPR)
2603 { build_int_cst (type, code == NE_EXPR); })
2604 (if (code == EQ_EXPR && !hi)
2606 (if (code == EQ_EXPR && !lo)
2608 (if (code == NE_EXPR && !hi)
2610 (if (code == NE_EXPR && !lo)
2613 { build_range_check (UNKNOWN_LOCATION, type, @0, code == EQ_EXPR,
2617 tree etype = range_check_type (TREE_TYPE (@0));
2620 hi = fold_convert (etype, hi);
2621 lo = fold_convert (etype, lo);
2622 hi = const_binop (MINUS_EXPR, etype, hi, lo);
2625 (if (etype && hi && !TREE_OVERFLOW (hi))
2626 (if (code == EQ_EXPR)
2627 (le (minus (convert:etype @0) { lo; }) { hi; })
2628 (gt (minus (convert:etype @0) { lo; }) { hi; })))))))))
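/* For illustration only (a worked example, not a pattern): for unsigned x,
   fold_div_compare turns  x / 10 == 3  into the range 30 <= x <= 39,
   which the code above emits as the single check  x - 30 <= 9  performed
   in the unsigned range-check type.  */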
2630 /* X + Z < Y + Z is the same as X < Y when there is no overflow. */
2631 (for op (lt le ge gt)
2633 (op (plus:c @0 @2) (plus:c @1 @2))
2634 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2635 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
2638 /* As a special case, X + C < Y + C is the same as (signed) X < (signed) Y
2639 when C is an unsigned integer constant with only the MSB set, and X and
2640 Y have types of equal or lower integer conversion rank than C's. */
2641 (for op (lt le ge gt)
2643 (op (plus @1 INTEGER_CST@0) (plus @2 @0))
2644 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
2645 && TYPE_UNSIGNED (TREE_TYPE (@0))
2646 && wi::only_sign_bit_p (wi::to_wide (@0)))
2647 (with { tree stype = signed_type_for (TREE_TYPE (@0)); }
2648 (op (convert:stype @1) (convert:stype @2))))))
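/* For illustration only (a worked example, not a pattern): adding a
   constant whose only set bit is the sign bit just flips that bit, so for
   32-bit unsigned x and y

     x + 0x80000000u < y + 0x80000000u

   orders the operands exactly as the signed comparison (int) x < (int) y
   does, which is what the conversion to the signed type above produces.  */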
2650 /* For equality and subtraction, this is also true with wrapping overflow. */
2651 (for op (eq ne minus)
2653 (op (plus:c @0 @2) (plus:c @1 @2))
2654 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2655 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
2656 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
2658 /* And similar for pointers. */
2661 (op (pointer_plus @0 @1) (pointer_plus @0 @2))
2664 (pointer_diff (pointer_plus @0 @1) (pointer_plus @0 @2))
2665 (if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1)))
2666 (convert (minus @1 @2))))
2668 /* X - Z < Y - Z is the same as X < Y when there is no overflow. */
2669 (for op (lt le ge gt)
2671 (op (minus @0 @2) (minus @1 @2))
2672 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2673 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
2675 /* For equality and subtraction, this is also true with wrapping overflow. */
2676 (for op (eq ne minus)
2678 (op (minus @0 @2) (minus @1 @2))
2679 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2680 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
2681 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
2683 /* And for pointers... */
2684 (for op (simple_comparison)
2686 (op (pointer_diff@3 @0 @2) (pointer_diff @1 @2))
2687 (if (!TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
2690 (minus (pointer_diff@3 @0 @2) (pointer_diff @1 @2))
2691 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@3))
2692 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
2693 (pointer_diff @0 @1)))
2695 /* Z - X < Z - Y is the same as Y < X when there is no overflow. */
2696 (for op (lt le ge gt)
2698 (op (minus @2 @0) (minus @2 @1))
2699 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2700 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
2702 /* For equality and subtraction, this is also true with wrapping overflow. */
2703 (for op (eq ne minus)
2705 (op (minus @2 @0) (minus @2 @1))
2706 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2707 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
2708 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
2710 /* And for pointers... */
2711 (for op (simple_comparison)
2713 (op (pointer_diff@3 @2 @0) (pointer_diff @2 @1))
2714 (if (!TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
2717 (minus (pointer_diff@3 @2 @0) (pointer_diff @2 @1))
2718 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@3))
2719 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
2720 (pointer_diff @1 @0)))
2722 /* X + Y < Y is the same as X < 0 when there is no overflow. */
2723 (for op (lt le gt ge)
2725 (op:c (plus:c@2 @0 @1) @1)
2726 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2727 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
2728 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0))
2729 && (CONSTANT_CLASS_P (@0) || single_use (@2)))
2730 (op @0 { build_zero_cst (TREE_TYPE (@0)); }))))
2731 /* For equality, this is also true with wrapping overflow. */
2734 (op:c (nop_convert?@3 (plus:c@2 @0 (convert1? @1))) (convert2? @1))
2735 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2736 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
2737 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
2738 && (CONSTANT_CLASS_P (@0) || (single_use (@2) && single_use (@3)))
2739 && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@2))
2740 && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@1)))
2741 (op @0 { build_zero_cst (TREE_TYPE (@0)); })))
2743 (op:c (nop_convert?@3 (pointer_plus@2 (convert1? @0) @1)) (convert2? @0))
2744 (if (tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0))
2745 && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0))
2746 && (CONSTANT_CLASS_P (@1) || (single_use (@2) && single_use (@3))))
2747 (op @1 { build_zero_cst (TREE_TYPE (@1)); }))))
2749 /* (&a + b) !=/== (&a[1] + c) -> (&a[0] - &a[1]) + b !=/== c */
2752 (neeq:c ADDR_EXPR@0 (pointer_plus @2 @3))
2753 (with { poly_int64 diff; tree inner_type = TREE_TYPE (@3);}
2754 (if (ptr_difference_const (@0, @2, &diff))
2755 (neeq { build_int_cst_type (inner_type, diff); } @3))))
2757 (neeq (pointer_plus ADDR_EXPR@0 @1) (pointer_plus ADDR_EXPR@2 @3))
2758 (with { poly_int64 diff; tree inner_type = TREE_TYPE (@1);}
2759 (if (ptr_difference_const (@0, @2, &diff))
2760 (neeq (plus { build_int_cst_type (inner_type, diff); } @1) @3)))))
2762 /* X - Y < X is the same as Y > 0 when there is no overflow.
2763 For equality, this is also true with wrapping overflow. */
2764 (for op (simple_comparison)
2766 (op:c @0 (minus@2 @0 @1))
2767 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2768 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
2769 || ((op == EQ_EXPR || op == NE_EXPR)
2770 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
2771 && (CONSTANT_CLASS_P (@1) || single_use (@2)))
2772 (op @1 { build_zero_cst (TREE_TYPE (@1)); }))))
2775 (X / Y) == 0 -> X < Y if X, Y are unsigned.
2776 (X / Y) != 0 -> X >= Y, if X, Y are unsigned. */
2780 (cmp (trunc_div @0 @1) integer_zerop)
2781 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
2782 /* Complex ==/!= is allowed, but not </>=. */
2783 && TREE_CODE (TREE_TYPE (@0)) != COMPLEX_TYPE
2784 && (VECTOR_TYPE_P (type) || !VECTOR_TYPE_P (TREE_TYPE (@0))))
2787 /* X == C - X can never be true if C is odd. */
2790 (cmp:c (convert? @0) (convert1? (minus INTEGER_CST@1 (convert2? @0))))
2791 (if (TREE_INT_CST_LOW (@1) & 1)
2792 { constant_boolean_node (cmp == NE_EXPR, type); })))
2797 U needs to be non-negative.
2801 U and N need to be non-negative
2805 U needs to be non-negative and N needs to be a negative constant.
2807 (for cmp (lt ge le gt )
2808 bitop (bit_ior bit_ior bit_and bit_and)
2810 (cmp:c (bitop:c tree_expr_nonnegative_p@0 @1) @0)
2811 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)))
2812 (if (bitop == BIT_AND_EXPR || tree_expr_nonnegative_p (@1))
2813 { constant_boolean_node (cmp == GE_EXPR || cmp == LE_EXPR, type); }
2814 /* The sign is opposite now so the comparison is swapped around. */
2815 (if (TREE_CODE (@1) == INTEGER_CST && wi::neg_p (wi::to_wide (@1)))
2816 { constant_boolean_node (cmp == LT_EXPR, type); })))))
2818 /* Arguments on which one can call get_nonzero_bits to get the bits
2819 possibly set.  */
2820 (match with_possible_nonzero_bits
2822 (match with_possible_nonzero_bits
2824 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))))
2825 /* Slightly extended version, do not make it recursive to keep it cheap. */
2826 (match (with_possible_nonzero_bits2 @0)
2827 with_possible_nonzero_bits@0)
2828 (match (with_possible_nonzero_bits2 @0)
2829 (bit_and:c with_possible_nonzero_bits@0 @2))
2831 /* Same for bits that are known to be set, but we do not have
2832 an equivalent to get_nonzero_bits yet. */
2833 (match (with_certain_nonzero_bits2 @0)
2835 (match (with_certain_nonzero_bits2 @0)
2836 (bit_ior @1 INTEGER_CST@0))
2838 /* X == C (or X & Z == Y | C) is impossible if ~nonzero(X) & C != 0. */
2841 (cmp:c (with_possible_nonzero_bits2 @0) (with_certain_nonzero_bits2 @1))
2842 (if (wi::bit_and_not (wi::to_wide (@1), get_nonzero_bits (@0)) != 0)
2843 { constant_boolean_node (cmp == NE_EXPR, type); })))
2845 /* ((X inner_op C0) outer_op C1)
2846 With X being a tree where value_range has reasoned certain bits to always be
2847 zero throughout its computed value range,
2848 inner_op = {|,^}, outer_op = {|,^} and inner_op != outer_op,
2849 and zero_mask having 1's for all bits that are sure to be 0 in X
2850 and 0's otherwise:
2851 if (inner_op == '^') C0 &= ~C1;
2852 if ((C0 & ~zero_mask) == 0) then emit (X outer_op (C0 outer_op C1))
2853 if ((C1 & ~zero_mask) == 0) then emit (X inner_op (C0 outer_op C1))  */
2855 (for inner_op (bit_ior bit_xor)
2856 outer_op (bit_xor bit_ior)
2859 (inner_op:s @2 INTEGER_CST@0) INTEGER_CST@1)
2863 wide_int zero_mask_not;
2867 if (TREE_CODE (@2) == SSA_NAME)
2868 zero_mask_not = get_nonzero_bits (@2);
2872 if (inner_op == BIT_XOR_EXPR)
2874 C0 = wi::bit_and_not (wi::to_wide (@0), wi::to_wide (@1));
2875 cst_emit = C0 | wi::to_wide (@1);
2879 C0 = wi::to_wide (@0);
2880 cst_emit = C0 ^ wi::to_wide (@1);
2883 (if (!fail && (C0 & zero_mask_not) == 0)
2884 (outer_op @2 { wide_int_to_tree (type, cst_emit); })
2885 (if (!fail && (wi::to_wide (@1) & zero_mask_not) == 0)
2886 (inner_op @2 { wide_int_to_tree (type, cst_emit); }))))))
2888 /* Associate (p +p off1) +p off2 as (p +p (off1 + off2)). */
2890 (pointer_plus (pointer_plus:s @0 @1) @3)
2891 (pointer_plus @0 (plus @1 @3)))
2894 (pointer_plus (convert:s (pointer_plus:s @0 @1)) @3)
2895 (convert:type (pointer_plus @0 (plus @1 @3))))
2902 tem4 = (unsigned long) tem3;
2907 (pointer_plus @0 (convert?@2 (minus@3 (convert @1) (convert @0))))
2908 /* Conditionally look through a sign-changing conversion. */
2909 (if (TYPE_PRECISION (TREE_TYPE (@2)) == TYPE_PRECISION (TREE_TYPE (@3))
2910 && ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@1)))
2911 || (GENERIC && type == TREE_TYPE (@1))))
2914 (pointer_plus @0 (convert?@2 (pointer_diff@3 @1 @@0)))
2915 (if (TYPE_PRECISION (TREE_TYPE (@2)) >= TYPE_PRECISION (TREE_TYPE (@3)))
2918 /* Pattern match
2919 tem = (sizetype) ptr;
2920 tem = tem & algn;
2921 tem = -tem;
2922 ... = ptr p+ tem;
2923 and produce the simpler form, which is easier to analyze with respect to alignment:
2924 ... = ptr & ~algn;  */
2926 (pointer_plus @0 (negate (bit_and (convert @0) INTEGER_CST@1)))
2927 (with { tree algn = wide_int_to_tree (TREE_TYPE (@0), ~wi::to_wide (@1)); }
2928 (bit_and @0 { algn; })))
2930 /* Try folding difference of addresses. */
2932 (minus (convert ADDR_EXPR@0) (convert (pointer_plus @1 @2)))
2933 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
2934 (with { poly_int64 diff; }
2935 (if (ptr_difference_const (@0, @1, &diff))
2936 (minus { build_int_cst_type (type, diff); } (convert @2))))))
2938 (minus (convert (pointer_plus @0 @2)) (convert ADDR_EXPR@1))
2939 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
2940 (with { poly_int64 diff; }
2941 (if (ptr_difference_const (@0, @1, &diff))
2942 (plus (convert @2) { build_int_cst_type (type, diff); })))))
2944 (minus (convert ADDR_EXPR@0) (convert @1))
2945 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
2946 (with { poly_int64 diff; }
2947 (if (ptr_difference_const (@0, @1, &diff))
2948 { build_int_cst_type (type, diff); }))))
2950 (minus (convert @0) (convert ADDR_EXPR@1))
2951 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
2952 (with { poly_int64 diff; }
2953 (if (ptr_difference_const (@0, @1, &diff))
2954 { build_int_cst_type (type, diff); }))))
2956 (pointer_diff (convert?@2 ADDR_EXPR@0) (convert1?@3 @1))
2957 (if (tree_nop_conversion_p (TREE_TYPE(@2), TREE_TYPE (@0))
2958 && tree_nop_conversion_p (TREE_TYPE(@3), TREE_TYPE (@1)))
2959 (with { poly_int64 diff; }
2960 (if (ptr_difference_const (@0, @1, &diff))
2961 { build_int_cst_type (type, diff); }))))
2963 (pointer_diff (convert?@2 @0) (convert1?@3 ADDR_EXPR@1))
2964 (if (tree_nop_conversion_p (TREE_TYPE(@2), TREE_TYPE (@0))
2965 && tree_nop_conversion_p (TREE_TYPE(@3), TREE_TYPE (@1)))
2966 (with { poly_int64 diff; }
2967 (if (ptr_difference_const (@0, @1, &diff))
2968 { build_int_cst_type (type, diff); }))))
2970 /* (&a+b) - (&a[1] + c) -> sizeof(a[0]) + (b - c) */
2972 (pointer_diff (pointer_plus ADDR_EXPR@0 @1) (pointer_plus ADDR_EXPR@2 @3))
2973 (with { poly_int64 diff; }
2974 (if (ptr_difference_const (@0, @2, &diff))
2975 (plus { build_int_cst_type (type, diff); } (convert (minus @1 @3))))))
2976 /* (p + b) - &p->d -> offsetof (*p, d) + b */
2978 (pointer_diff (pointer_plus @0 @1) ADDR_EXPR@2)
2979 (with { poly_int64 diff; }
2980 (if (ptr_difference_const (@0, @2, &diff))
2981 (plus { build_int_cst_type (type, diff); } (convert @1)))))
2983 (pointer_diff ADDR_EXPR@0 (pointer_plus @1 @2))
2984 (with { poly_int64 diff; }
2985 (if (ptr_difference_const (@0, @1, &diff))
2986 (minus { build_int_cst_type (type, diff); } (convert @2)))))
2988 /* Canonicalize (T *)(ptr - ptr-cst) to &MEM[ptr + -ptr-cst]. */
2990 (convert (pointer_diff @0 INTEGER_CST@1))
2991 (if (POINTER_TYPE_P (type))
2992 { build_fold_addr_expr_with_type
2993 (build2 (MEM_REF, char_type_node, @0,
2994 wide_int_to_tree (ptr_type_node, wi::neg (wi::to_wide (@1)))),
2997 /* If arg0 is derived from the address of an object or function, we may
2998 be able to fold this expression using the object or function's
2999 alignment.  */
3001 (bit_and (convert? @0) INTEGER_CST@1)
3002 (if (POINTER_TYPE_P (TREE_TYPE (@0))
3003 && tree_nop_conversion_p (type, TREE_TYPE (@0)))
3007 unsigned HOST_WIDE_INT bitpos;
3008 get_pointer_alignment_1 (@0, &align, &bitpos);
3010 (if (wi::ltu_p (wi::to_wide (@1), align / BITS_PER_UNIT))
3011 { wide_int_to_tree (type, (wi::to_wide (@1)
3012 & (bitpos / BITS_PER_UNIT))); }))))
3015 uniform_integer_cst_p
3017 tree int_cst = uniform_integer_cst_p (t);
3018 tree inner_type = TREE_TYPE (int_cst);
3020 (if ((INTEGRAL_TYPE_P (inner_type)
3021 || POINTER_TYPE_P (inner_type))
3022 && wi::eq_p (wi::to_wide (int_cst), wi::min_value (inner_type))))))
3025 uniform_integer_cst_p
3027 tree int_cst = uniform_integer_cst_p (t);
3028 tree itype = TREE_TYPE (int_cst);
3030 (if ((INTEGRAL_TYPE_P (itype)
3031 || POINTER_TYPE_P (itype))
3032 && wi::eq_p (wi::to_wide (int_cst), wi::max_value (itype))))))
3034 /* x > y && x != XXX_MIN --> x > y
3035 x > y && x == XXX_MIN --> false.  */
3038 (bit_and:c (gt:c@2 @0 @1) (eqne @0 min_value))
3040 (if (eqne == EQ_EXPR)
3041 { constant_boolean_node (false, type); })
3042 (if (eqne == NE_EXPR)
3046 /* x < y && x != XXX_MAX --> x < y
3047 x < y && x == XXX_MAX --> false. */
3050 (bit_and:c (lt:c@2 @0 @1) (eqne @0 max_value))
3052 (if (eqne == EQ_EXPR)
3053 { constant_boolean_node (false, type); })
3054 (if (eqne == NE_EXPR)
3058 /* x <= y && x == XXX_MIN --> x == XXX_MIN. */
3060 (bit_and:c (le:c @0 @1) (eq@2 @0 min_value))
3063 /* x >= y && x == XXX_MAX --> x == XXX_MAX. */
3065 (bit_and:c (ge:c @0 @1) (eq@2 @0 max_value))
3068 /* x > y || x != XXX_MIN --> x != XXX_MIN. */
3070 (bit_ior:c (gt:c @0 @1) (ne@2 @0 min_value))
3073 /* x <= y || x != XXX_MIN --> true. */
3075 (bit_ior:c (le:c @0 @1) (ne @0 min_value))
3076 { constant_boolean_node (true, type); })
3078 /* x <= y || x == XXX_MIN --> x <= y. */
3080 (bit_ior:c (le:c@2 @0 @1) (eq @0 min_value))
3083 /* x < y || x != XXX_MAX --> x != XXX_MAX. */
3085 (bit_ior:c (lt:c @0 @1) (ne@2 @0 max_value))
3088 /* x >= y || x != XXX_MAX --> true
3089 x >= y || x == XXX_MAX --> x >= y. */
3092 (bit_ior:c (ge:c@2 @0 @1) (eqne @0 max_value))
3094 (if (eqne == EQ_EXPR)
3096 (if (eqne == NE_EXPR)
3097 { constant_boolean_node (true, type); }))))
3099 /* y == XXX_MIN || x < y --> x <= y - 1 */
3101 (bit_ior:c (eq:s @1 min_value) (lt:cs @0 @1))
3102 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
3103 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1)))
3104 (le @0 (minus @1 { build_int_cst (TREE_TYPE (@1), 1); }))))
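/* For illustration only (a worked example, not a pattern): the rewrite
   above relies on wrapping subtraction.  If y == 0 (the unsigned minimum)
   then y - 1 wraps to the all-ones value and x <= y - 1 is always true,
   covering the y == XXX_MIN disjunct; otherwise y - 1 does not wrap and
   x <= y - 1 is exactly x < y.  */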
3106 /* y != XXX_MIN && x >= y --> x > y - 1 */
3108 (bit_and:c (ne:s @1 min_value) (ge:cs @0 @1))
3109 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
3110 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1)))
3111 (gt @0 (minus @1 { build_int_cst (TREE_TYPE (@1), 1); }))))
3113 /* Convert (X == CST1) && ((other)X OP2 CST2) to a known value
3114 based on CST1 OP2 CST2. Similarly for (X != CST1). */
3115 /* Convert (X == Y) && (X OP2 Y) to a known value if X is an integral type.
3116 Similarly for (X != Y). */
3119 (for code2 (eq ne lt gt le ge)
3121 (bit_and:c (code1:c@3 @0 @1) (code2:c@4 (convert?@c0 @0) @2))
3122 (if ((TREE_CODE (@1) == INTEGER_CST
3123 && TREE_CODE (@2) == INTEGER_CST)
3124 || ((INTEGRAL_TYPE_P (TREE_TYPE (@1))
3125 || POINTER_TYPE_P (TREE_TYPE (@1)))
3126 && bitwise_equal_p (@1, @2)))
3129 bool one_before = false;
3130 bool one_after = false;
3132 bool allbits = true;
3133 if (TREE_CODE (@1) == INTEGER_CST
3134 && TREE_CODE (@2) == INTEGER_CST)
3136 allbits = TYPE_PRECISION (TREE_TYPE (@1)) <= TYPE_PRECISION (TREE_TYPE (@2));
3137 auto t1 = wi::to_wide (fold_convert (TREE_TYPE (@2), @1));
3138 auto t2 = wi::to_wide (@2);
3139 cmp = wi::cmp (t1, t2, TYPE_SIGN (TREE_TYPE (@2)));
3150 case EQ_EXPR: val = (cmp == 0); break;
3151 case NE_EXPR: val = (cmp != 0); break;
3152 case LT_EXPR: val = (cmp < 0); break;
3153 case GT_EXPR: val = (cmp > 0); break;
3154 case LE_EXPR: val = (cmp <= 0); break;
3155 case GE_EXPR: val = (cmp >= 0); break;
3156 default: gcc_unreachable ();
3160 (if (code1 == EQ_EXPR && val) @3)
3161 (if (code1 == EQ_EXPR && !val) { constant_boolean_node (false, type); })
3162 (if (code1 == NE_EXPR && !val && allbits) @4)
3163 (if (code1 == NE_EXPR
3167 (gt @c0 (convert @1)))
3168 (if (code1 == NE_EXPR
3172 (lt @c0 (convert @1)))
3173 /* (a != (b+1)) & (a > b) -> a > (b+1) */
3174 (if (code1 == NE_EXPR
3178 (gt @c0 (convert @1)))
3179 /* (a != (b-1)) & (a < b) -> a < (b-1) */
3180 (if (code1 == NE_EXPR
3184 (lt @c0 (convert @1)))
3192 /* Convert (X OP1 CST1) && (X OP2 CST2).
3193 Convert (X OP1 Y) && (X OP2 Y). */
3195 (for code1 (lt le gt ge)
3196 (for code2 (lt le gt ge)
3198 (bit_and (code1:c@3 @0 @1) (code2:c@4 @0 @2))
3199 (if ((TREE_CODE (@1) == INTEGER_CST
3200 && TREE_CODE (@2) == INTEGER_CST)
3201 || ((INTEGRAL_TYPE_P (TREE_TYPE (@1))
3202 || POINTER_TYPE_P (TREE_TYPE (@1)))
3203 && operand_equal_p (@1, @2)))
3207 if (TREE_CODE (@1) == INTEGER_CST
3208 && TREE_CODE (@2) == INTEGER_CST)
3209 cmp = tree_int_cst_compare (@1, @2);
3212 /* Choose the more restrictive of two < or <= comparisons. */
3213 (if ((code1 == LT_EXPR || code1 == LE_EXPR)
3214 && (code2 == LT_EXPR || code2 == LE_EXPR))
3215 (if ((cmp < 0) || (cmp == 0 && code1 == LT_EXPR))
3218 /* Likewise choose the more restrictive of two > or >= comparisons. */
3219 (if ((code1 == GT_EXPR || code1 == GE_EXPR)
3220 && (code2 == GT_EXPR || code2 == GE_EXPR))
3221 (if ((cmp > 0) || (cmp == 0 && code1 == GT_EXPR))
3224 /* Check for singleton ranges. */
3226 && ((code1 == LE_EXPR && code2 == GE_EXPR)
3227 || (code1 == GE_EXPR && code2 == LE_EXPR)))
3229 /* Check for disjoint ranges. */
3231 && (code1 == LT_EXPR || code1 == LE_EXPR)
3232 && (code2 == GT_EXPR || code2 == GE_EXPR))
3233 { constant_boolean_node (false, type); })
3235 && (code1 == GT_EXPR || code1 == GE_EXPR)
3236 && (code2 == LT_EXPR || code2 == LE_EXPR))
3237 { constant_boolean_node (false, type); })
3240 /* Convert (X == CST1) || (X OP2 CST2) to a known value
3241 based on CST1 OP2 CST2. Similarly for (X != CST1). */
3242 /* Convert (X == Y) || (X OP2 Y) to a known value if X is an integral type.
3243 Similarly for (X != Y). */
3246 (for code2 (eq ne lt gt le ge)
3248 (bit_ior:c (code1:c@3 @0 @1) (code2:c@4 (convert?@c0 @0) @2))
3249 (if ((TREE_CODE (@1) == INTEGER_CST
3250 && TREE_CODE (@2) == INTEGER_CST)
3251 || ((INTEGRAL_TYPE_P (TREE_TYPE (@1))
3252 || POINTER_TYPE_P (TREE_TYPE (@1)))
3253 && bitwise_equal_p (@1, @2)))
3256 bool one_before = false;
3257 bool one_after = false;
3259 bool allbits = true;
3260 if (TREE_CODE (@1) == INTEGER_CST
3261 && TREE_CODE (@2) == INTEGER_CST)
3263 allbits = TYPE_PRECISION (TREE_TYPE (@1)) <= TYPE_PRECISION (TREE_TYPE (@2));
3264 auto t1 = wi::to_wide (fold_convert (TREE_TYPE (@2), @1));
3265 auto t2 = wi::to_wide (@2);
3266 cmp = wi::cmp (t1, t2, TYPE_SIGN (TREE_TYPE (@2)));
3277 case EQ_EXPR: val = (cmp == 0); break;
3278 case NE_EXPR: val = (cmp != 0); break;
3279 case LT_EXPR: val = (cmp < 0); break;
3280 case GT_EXPR: val = (cmp > 0); break;
3281 case LE_EXPR: val = (cmp <= 0); break;
3282 case GE_EXPR: val = (cmp >= 0); break;
3283 default: gcc_unreachable ();
3287 (if (code1 == EQ_EXPR && val) @4)
3288 (if (code1 == NE_EXPR && val && allbits) { constant_boolean_node (true, type); })
3289 (if (code1 == NE_EXPR && !val && allbits) @3)
3290 (if (code1 == EQ_EXPR
3295 (if (code1 == EQ_EXPR
3300 /* (a == (b-1)) | (a >= b) -> a >= (b-1) */
3301 (if (code1 == EQ_EXPR
3305 (ge @c0 (convert @1)))
3306 /* (a == (b+1)) | (a <= b) -> a <= (b+1) */
3307 (if (code1 == EQ_EXPR
3311 (le @c0 (convert @1)))
3319 /* Convert (X OP1 CST1) || (X OP2 CST2).
3320 Convert (X OP1 Y) || (X OP2 Y). */
3322 (for code1 (lt le gt ge)
3323 (for code2 (lt le gt ge)
3325 (bit_ior (code1@3 @0 @1) (code2@4 @0 @2))
3326 (if ((TREE_CODE (@1) == INTEGER_CST
3327 && TREE_CODE (@2) == INTEGER_CST)
3328 || ((INTEGRAL_TYPE_P (TREE_TYPE (@1))
3329 || POINTER_TYPE_P (TREE_TYPE (@1)))
3330 && operand_equal_p (@1, @2)))
3334 if (TREE_CODE (@1) == INTEGER_CST
3335 && TREE_CODE (@2) == INTEGER_CST)
3336 cmp = tree_int_cst_compare (@1, @2);
3339 /* Choose the more restrictive of two < or <= comparisons. */
3340 (if ((code1 == LT_EXPR || code1 == LE_EXPR)
3341 && (code2 == LT_EXPR || code2 == LE_EXPR))
3342 (if ((cmp < 0) || (cmp == 0 && code1 == LT_EXPR))
3345 /* Likewise choose the more restrictive of two > or >= comparisons. */
3346 (if ((code1 == GT_EXPR || code1 == GE_EXPR)
3347 && (code2 == GT_EXPR || code2 == GE_EXPR))
3348 (if ((cmp > 0) || (cmp == 0 && code1 == GT_EXPR))
3351 /* Check for singleton ranges. */
3353 && ((code1 == LT_EXPR && code2 == GT_EXPR)
3354 || (code1 == GT_EXPR && code2 == LT_EXPR)))
3356 /* Check for disjoint ranges. */
3358 && (code1 == LT_EXPR || code1 == LE_EXPR)
3359 && (code2 == GT_EXPR || code2 == GE_EXPR))
3360 { constant_boolean_node (true, type); })
3362 && (code1 == GT_EXPR || code1 == GE_EXPR)
3363 && (code2 == LT_EXPR || code2 == LE_EXPR))
3364 { constant_boolean_node (true, type); })
3367 /* Optimize (a CMP b) ^ (a CMP b) */
3368 /* Optimize (a CMP b) != (a CMP b) */
3369 (for op (bit_xor ne)
3370 (for cmp1 (lt lt lt le le le)
3371 cmp2 (gt eq ne ge eq ne)
3372 rcmp (ne le gt ne lt ge)
3374 (op:c (cmp1:c @0 @1) (cmp2:c @0 @1))
3375 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
3378 /* Optimize (a CMP b) == (a CMP b) */
3379 (for cmp1 (lt lt lt le le le)
3380 cmp2 (gt eq ne ge eq ne)
3381 rcmp (eq gt le eq ge lt)
3383 (eq:c (cmp1:c @0 @1) (cmp2:c @0 @1))
3384 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
3387 /* (type)([0,1]@a != 0) -> (type)a
3388 (type)([0,1]@a == 1) -> (type)a
3389 (type)([0,1]@a == 0) -> a ^ 1
3390 (type)([0,1]@a != 1) -> a ^ 1. */
3393 (convert (eqne zero_one_valued_p@0 INTEGER_CST@1))
3394 (if ((integer_zerop (@1) || integer_onep (@1)))
3395 (if ((eqne == EQ_EXPR) ^ integer_zerop (@1))
3397 /* Only do this if the types match as (type)(a == 0) is
3398 canonical form normally, while `a ^ 1` is canonical when
3399 there is no type change. */
3400 (if (types_match (type, TREE_TYPE (@0)))
3401 (bit_xor @0 { build_one_cst (type); } ))))))
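/* For illustration only (a hypothetical C sketch, not a pattern): when a
   is already known to be 0 or 1, the boolean tests above degenerate:

     int f (unsigned a) { return a != 0; }        yields (int) a
     unsigned g (unsigned a) { return a == 0; }   yields a ^ 1u

   the xor form being used only when no type change is involved.  */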
3403 /* We can't reassociate at all for saturating types. */
3404 (if (!TYPE_SATURATING (type))
3406 /* Contract negates. */
3407 /* A + (-B) -> A - B */
3409 (plus:c @0 (convert? (negate @1)))
3410 /* Apply STRIP_NOPS on the negate. */
3411 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
3412 && !TYPE_OVERFLOW_SANITIZED (type))
3416 if (INTEGRAL_TYPE_P (type)
3417 && TYPE_OVERFLOW_WRAPS (type) != TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1)))
3418 t1 = TYPE_OVERFLOW_WRAPS (type) ? type : TREE_TYPE (@1);
3420 (convert (minus (convert:t1 @0) (convert:t1 @1))))))
3421 /* A - (-B) -> A + B */
3423 (minus @0 (convert? (negate @1)))
3424 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
3425 && !TYPE_OVERFLOW_SANITIZED (type))
3429 if (INTEGRAL_TYPE_P (type)
3430 && TYPE_OVERFLOW_WRAPS (type) != TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1)))
3431 t1 = TYPE_OVERFLOW_WRAPS (type) ? type : TREE_TYPE (@1);
3433 (convert (plus (convert:t1 @0) (convert:t1 @1))))))
3434 /* -(T)(-A) -> (T)A
3435 Sign-extension is ok except for INT_MIN, which thankfully cannot
3436 happen without overflow. */
3438 (negate (convert (negate @1)))
3439 (if (INTEGRAL_TYPE_P (type)
3440 && (TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@1))
3441 || (!TYPE_UNSIGNED (TREE_TYPE (@1))
3442 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))))
3443 && !TYPE_OVERFLOW_SANITIZED (type)
3444 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@1)))
3447 (negate (convert negate_expr_p@1))
3448 (if (SCALAR_FLOAT_TYPE_P (type)
3449 && ((DECIMAL_FLOAT_TYPE_P (type)
3450 == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1))
3451 && TYPE_PRECISION (type) >= TYPE_PRECISION (TREE_TYPE (@1)))
3452 || !HONOR_SIGN_DEPENDENT_ROUNDING (type)))
3453 (convert (negate @1))))
3455 (negate (nop_convert? (negate @1)))
3456 (if (!TYPE_OVERFLOW_SANITIZED (type)
3457 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@1)))
3460 /* We can't reassociate floating-point unless -fassociative-math is set,
3461 nor fixed-point plus or minus, because of saturation to +-Inf. */
3462 (if ((!FLOAT_TYPE_P (type) || flag_associative_math)
3463 && !FIXED_POINT_TYPE_P (type))
3465 /* Match patterns that allow contracting a plus-minus pair
3466 irrespective of overflow issues. */
3467 /* (A +- B) - A -> +- B */
3468 /* (A +- B) -+ B -> A */
3469 /* A - (A +- B) -> -+ B */
3470 /* A +- (B -+ A) -> +- B */
3472 (minus (nop_convert1? (plus:c (nop_convert2? @0) @1)) @0)
3475 (minus (nop_convert1? (minus (nop_convert2? @0) @1)) @0)
3476 (if (!ANY_INTEGRAL_TYPE_P (type)
3477 || TYPE_OVERFLOW_WRAPS (type))
3478 (negate (view_convert @1))
3479 (view_convert (negate @1))))
3481 (plus:c (nop_convert1? (minus @0 (nop_convert2? @1))) @1)
3484 (minus @0 (nop_convert1? (plus:c (nop_convert2? @0) @1)))
3485 (if (!ANY_INTEGRAL_TYPE_P (type)
3486 || TYPE_OVERFLOW_WRAPS (type))
3487 (negate (view_convert @1))
3488 (view_convert (negate @1))))
3490 (minus @0 (nop_convert1? (minus (nop_convert2? @0) @1)))
3492 /* (A +- B) + (C - A) -> C +- B */
3493 /* (A + B) - (A - C) -> B + C */
3494 /* More cases are handled with comparisons. */
3496 (plus:c (plus:c @0 @1) (minus @2 @0))
3499 (plus:c (minus @0 @1) (minus @2 @0))
3502 (plus:c (pointer_diff @0 @1) (pointer_diff @2 @0))
3503 (if (TYPE_OVERFLOW_UNDEFINED (type)
3504 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0)))
3505 (pointer_diff @2 @1)))
3507 (minus (plus:c @0 @1) (minus @0 @2))
3510 /* (A +- CST1) +- CST2 -> A + CST3
3511 Use view_convert because it is safe for vectors and equivalent for
3512 scalars.  */
3513 (for outer_op (plus minus)
3514 (for inner_op (plus minus)
3515 neg_inner_op (minus plus)
3517 (outer_op (nop_convert? (inner_op @0 CONSTANT_CLASS_P@1))
3519 /* If one of the types wraps, use that one. */
3520 (if (!ANY_INTEGRAL_TYPE_P (type) || TYPE_OVERFLOW_WRAPS (type))
3521 /* If all 3 captures are CONSTANT_CLASS_P, punt, as we might recurse
3522 forever if something doesn't simplify into a constant. */
3523 (if (!CONSTANT_CLASS_P (@0))
3524 (if (outer_op == PLUS_EXPR)
3525 (plus (view_convert @0) (inner_op! @2 (view_convert @1)))
3526 (minus (view_convert @0) (neg_inner_op! @2 (view_convert @1)))))
3527 (if (!ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
3528 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
3529 (if (outer_op == PLUS_EXPR)
3530 (view_convert (plus @0 (inner_op! (view_convert @2) @1)))
3531 (view_convert (minus @0 (neg_inner_op! (view_convert @2) @1))))
3532 /* If the constant operation overflows we cannot do the transform
3533 directly as we would introduce undefined overflow, for example
3534 with (a - 1) + INT_MIN. */
3535 (if (types_match (type, @0) && !TYPE_OVERFLOW_SANITIZED (type))
3536 (with { tree cst = const_binop (outer_op == inner_op
3537 ? PLUS_EXPR : MINUS_EXPR,
3540 (if (INTEGRAL_TYPE_P (type) && !TREE_OVERFLOW (cst))
3541 (inner_op @0 { cst; } )
3542 /* X+INT_MAX+1 is X-INT_MIN. */
3543 (if (INTEGRAL_TYPE_P (type)
3544 && wi::to_wide (cst) == wi::min_value (type))
3545 (neg_inner_op @0 { wide_int_to_tree (type, wi::to_wide (cst)); })
3546 /* Last resort, use some unsigned type. */
3547 (with { tree utype = unsigned_type_for (type); }
3549 (view_convert (inner_op
3550 (view_convert:utype @0)
3552 { TREE_OVERFLOW (cst)
3553 ? drop_tree_overflow (cst) : cst; })))))))))))))))
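/* For illustration only (a worked example, not a pattern): for signed int,
   (a - 1) + INT_MIN cannot be folded to a + (INT_MIN - 1) because that
   constant overflows.  The branches above handle such cases: when the
   combined constant happens to equal INT_MIN the plus is turned into a
   minus of the same constant (X+INT_MAX+1 becomes X-INT_MIN), and
   otherwise, as a last resort, the arithmetic is done in the
   corresponding unsigned type where wraparound is well defined.  */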
3555 /* (CST1 - A) +- CST2 -> CST3 - A */
3556 (for outer_op (plus minus)
3558 (outer_op (nop_convert? (minus CONSTANT_CLASS_P@1 @0)) CONSTANT_CLASS_P@2)
3559 /* If one of the types wraps, use that one. */
3560 (if (!ANY_INTEGRAL_TYPE_P (type) || TYPE_OVERFLOW_WRAPS (type))
3561 /* If all 3 captures are CONSTANT_CLASS_P, punt, as we might recurse
3562 forever if something doesn't simplify into a constant. */
3563 (if (!CONSTANT_CLASS_P (@0))
3564 (minus (outer_op! (view_convert @1) @2) (view_convert @0)))
3565 (if (!ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
3566 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
3567 (view_convert (minus (outer_op! @1 (view_convert @2)) @0))
3568 (if (types_match (type, @0) && !TYPE_OVERFLOW_SANITIZED (type))
3569 (with { tree cst = const_binop (outer_op, type, @1, @2); }
3570 (if (cst && !TREE_OVERFLOW (cst))
3571 (minus { cst; } @0))))))))
3573 /* CST1 - (CST2 - A) -> CST3 + A
3574 Use view_convert because it is safe for vectors and equivalent for
3575 scalars.  */
3577 (minus CONSTANT_CLASS_P@1 (nop_convert? (minus CONSTANT_CLASS_P@2 @0)))
3578 /* If one of the types wraps, use that one. */
3579 (if (!ANY_INTEGRAL_TYPE_P (type) || TYPE_OVERFLOW_WRAPS (type))
3580 /* If all 3 captures are CONSTANT_CLASS_P, punt, as we might recurse
3581 forever if something doesn't simplify into a constant. */
3582 (if (!CONSTANT_CLASS_P (@0))
3583 (plus (view_convert @0) (minus! @1 (view_convert @2))))
3584 (if (!ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
3585 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
3586 (view_convert (plus @0 (minus! (view_convert @1) @2)))
3587 (if (types_match (type, @0) && !TYPE_OVERFLOW_SANITIZED (type))
3588 (with { tree cst = const_binop (MINUS_EXPR, type, @1, @2); }
3589 (if (cst && !TREE_OVERFLOW (cst))
3590 (plus { cst; } @0)))))))
3592 /* ((T)(A)) + CST -> (T)(A + CST) */
3595 (plus (convert:s SSA_NAME@0) INTEGER_CST@1)
3596 (if (TREE_CODE (TREE_TYPE (@0)) == INTEGER_TYPE
3597 && TREE_CODE (type) == INTEGER_TYPE
3598 && TYPE_PRECISION (type) > TYPE_PRECISION (TREE_TYPE (@0))
3599 && int_fits_type_p (@1, TREE_TYPE (@0)))
3600 /* Perform binary operation inside the cast if the constant fits
3601 and (A + CST)'s range does not overflow. */
3604 wi::overflow_type min_ovf = wi::OVF_OVERFLOW,
3605 max_ovf = wi::OVF_OVERFLOW;
3606 tree inner_type = TREE_TYPE (@0);
3609 = wide_int::from (wi::to_wide (@1), TYPE_PRECISION (inner_type),
3610 TYPE_SIGN (inner_type));
3613 if (get_global_range_query ()->range_of_expr (vr, @0)
3614 && !vr.varying_p () && !vr.undefined_p ())
3616 wide_int wmin0 = vr.lower_bound ();
3617 wide_int wmax0 = vr.upper_bound ();
3618 wi::add (wmin0, w1, TYPE_SIGN (inner_type), &min_ovf);
3619 wi::add (wmax0, w1, TYPE_SIGN (inner_type), &max_ovf);
3622 (if (min_ovf == wi::OVF_NONE && max_ovf == wi::OVF_NONE)
3623 (convert (plus @0 { wide_int_to_tree (TREE_TYPE (@0), w1); } )))
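/* For example, if range information proves a 32-bit int a is in [0, 100],
   (long)a + 5 can be rewritten as (long)(a + 5) on targets where long is
   wider than int, because a + 5 cannot overflow the inner type.  */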
3627 /* ((T)(A + CST1)) + CST2 -> (T)(A) + (T)CST1 + CST2 */
3629 (for op (plus minus)
3631 (plus (convert:s (op:s @0 INTEGER_CST@1)) INTEGER_CST@2)
3632 (if (TREE_CODE (TREE_TYPE (@0)) == INTEGER_TYPE
3633 && TREE_CODE (type) == INTEGER_TYPE
3634 && TYPE_PRECISION (type) > TYPE_PRECISION (TREE_TYPE (@0))
3635 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
3636 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0))
3637 && TYPE_OVERFLOW_WRAPS (type))
3638 (plus (convert @0) (op @2 (convert @1))))))
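/* For example, with 32-bit int, 64-bit unsigned long and undefined signed
   overflow, (unsigned long)(a + 3) + 5 folds to (unsigned long)a + 8.  */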
3641 /* (T)(A) +- (T)(B) -> (T)(A +- B) only when (A +- B) could be simplified
3642 to a simple value. */
3643 (for op (plus minus)
3645 (op (convert @0) (convert @1))
3646 (if (INTEGRAL_TYPE_P (type)
3647 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
3648 && TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@0))
3649 && types_match (TREE_TYPE (@0), TREE_TYPE (@1))
3650 && !TYPE_OVERFLOW_TRAPS (type)
3651 && !TYPE_OVERFLOW_SANITIZED (type))
3652 (convert (op! @0 @1)))))
3656 (plus:c (convert? (bit_not @0)) (convert? @0))
3657 (if (!TYPE_OVERFLOW_TRAPS (type))
3658 (convert { build_all_ones_cst (TREE_TYPE (@0)); })))
3662 (plus (convert? (bit_not @0)) integer_each_onep)
3663 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
3664 (negate (convert @0))))
3668 (minus (convert? (negate @0)) integer_each_onep)
3669 (if (!TYPE_OVERFLOW_TRAPS (type)
3670 && TREE_CODE (type) != COMPLEX_TYPE
3671 && tree_nop_conversion_p (type, TREE_TYPE (@0)))
3672 (bit_not (convert @0))))
3676 (minus integer_all_onesp @0)
3677 (if (TREE_CODE (type) != COMPLEX_TYPE)
3680 /* (T)(P + A) - (T)P -> (T) A */
3682 (minus (convert (plus:c @@0 @1))
3684 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
3685 /* For integer types, if A has a smaller type
3686 than T the result depends on the possible overflow in P + A.
3688 E.g. T=size_t, A=(unsigned)4294967295, P>0.
3689 However, if an overflow in P + A would cause
3690 undefined behavior, we can assume that there is no overflow. */
3692 || (INTEGRAL_TYPE_P (TREE_TYPE (@1))
3693 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))))
3696 (minus (convert (pointer_plus @@0 @1))
3698 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
3699 /* For pointer types, if the conversion of A to the
3700 final type requires a sign- or zero-extension,
3701 then we have to punt - it is not defined which one is correct. */
3703 || (POINTER_TYPE_P (TREE_TYPE (@0))
3704 && TREE_CODE (@1) == INTEGER_CST
3705 && tree_int_cst_sign_bit (@1) == 0))
3708 (pointer_diff (pointer_plus @@0 @1) @0)
3709 /* The second argument of pointer_plus must be interpreted as signed, and
3710 thus sign-extended if necessary. */
3711 (with { tree stype = signed_type_for (TREE_TYPE (@1)); }
3712 /* Use view_convert instead of convert here, as POINTER_PLUS_EXPR
3713 second arg is unsigned even when we need to consider it as signed;
3714 we don't want to diagnose overflow here. */
3715 (convert (view_convert:stype @1))))
3717 /* (T)P - (T)(P + A) -> -(T) A */
3719 (minus (convert? @0)
3720 (convert (plus:c @@0 @1)))
3721 (if (INTEGRAL_TYPE_P (type)
3722 && TYPE_OVERFLOW_UNDEFINED (type)
3723 /* For integer literals, using an intermediate unsigned type to avoid
3724 an overflow at run time is counter-productive because it introduces
3725 spurious overflows at compile time, in the form of TREE_OVERFLOW on
3726 the result, which may be problematic in GENERIC for some front-ends:
3727 (T)P - (T)(P + 4) -> (T)(-(U)4) -> (T)(4294967292) -> -4(OVF)
3728 so we use the direct path for them. */
3729 && TREE_CODE (@1) != INTEGER_CST
3730 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
3731 (with { tree utype = unsigned_type_for (type); }
3732 (convert (negate (convert:utype @1))))
3733 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
3734 /* For integer types, if A has a smaller type
3735 than T the result depends on the possible overflow in P + A.
3737 E.g. T=size_t, A=(unsigned)4294967295, P>0.
3738 However, if an overflow in P + A would cause
3739 undefined behavior, we can assume that there is no overflow. */
3741 || (INTEGRAL_TYPE_P (TREE_TYPE (@1))
3742 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))))
3743 (negate (convert @1)))))
3746 (convert (pointer_plus @@0 @1)))
3747 (if (INTEGRAL_TYPE_P (type)
3748 && TYPE_OVERFLOW_UNDEFINED (type)
3749 /* See the rationale for this condition above. */
3750 && TREE_CODE (@1) != INTEGER_CST
3751 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
3752 (with { tree utype = unsigned_type_for (type); }
3753 (convert (negate (convert:utype @1))))
3754 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
3755 /* For pointer types, if the conversion of A to the
3756 final type requires a sign- or zero-extension,
3757 then we have to punt - it is not defined which one is correct. */
3759 || (POINTER_TYPE_P (TREE_TYPE (@0))
3760 && TREE_CODE (@1) == INTEGER_CST
3761 && tree_int_cst_sign_bit (@1) == 0))
3762 (negate (convert @1)))))
3764 (pointer_diff @0 (pointer_plus @@0 @1))
3765 /* The second argument of pointer_plus must be interpreted as signed, and
3766 thus sign-extended if necessary. */
3767 (with { tree stype = signed_type_for (TREE_TYPE (@1)); }
3768 /* Use view_convert instead of convert here, as POINTER_PLUS_EXPR
3769 second arg is unsigned even when we need to consider it as signed;
3770 we don't want to diagnose overflow here. */
3771 (negate (convert (view_convert:stype @1)))))
3773 /* (T)(P + A) - (T)(P + B) -> (T)A - (T)B */
3775 (minus (convert (plus:c @@0 @1))
3776 (convert (plus:c @0 @2)))
3777 (if (INTEGRAL_TYPE_P (type)
3778 && TYPE_OVERFLOW_UNDEFINED (type)
3779 && element_precision (type) <= element_precision (TREE_TYPE (@1))
3780 && element_precision (type) <= element_precision (TREE_TYPE (@2)))
3781 (with { tree utype = unsigned_type_for (type); }
3782 (convert (minus (convert:utype @1) (convert:utype @2))))
3783 (if (((element_precision (type) <= element_precision (TREE_TYPE (@1)))
3784 == (element_precision (type) <= element_precision (TREE_TYPE (@2))))
3785 && (element_precision (type) <= element_precision (TREE_TYPE (@1))
3786 /* For integer types, if A has a smaller type
3787 than T the result depends on the possible overflow in P + A.
3789 E.g. T=size_t, A=(unsigned)4294967295, P>0.
3790 However, if an overflow in P + A would cause
3791 undefined behavior, we can assume that there is no overflow. */
3793 || (INTEGRAL_TYPE_P (TREE_TYPE (@1))
3794 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
3795 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))
3796 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@2)))))
3797 (minus (convert @1) (convert @2)))))
3799 (minus (convert (pointer_plus @@0 @1))
3800 (convert (pointer_plus @0 @2)))
3801 (if (INTEGRAL_TYPE_P (type)
3802 && TYPE_OVERFLOW_UNDEFINED (type)
3803 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
3804 (with { tree utype = unsigned_type_for (type); }
3805 (convert (minus (convert:utype @1) (convert:utype @2))))
3806 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
3807 /* For pointer types, if the conversion of A to the
3808 final type requires a sign- or zero-extension,
3809 then we have to punt - it is not defined which one is correct. */
3811 || (POINTER_TYPE_P (TREE_TYPE (@0))
3812 && TREE_CODE (@1) == INTEGER_CST
3813 && tree_int_cst_sign_bit (@1) == 0
3814 && TREE_CODE (@2) == INTEGER_CST
3815 && tree_int_cst_sign_bit (@2) == 0))
3816 (minus (convert @1) (convert @2)))))
3818 (pointer_diff (pointer_plus @0 @2) (pointer_plus @1 @2))
3819 (pointer_diff @0 @1))
3821 (pointer_diff (pointer_plus @@0 @1) (pointer_plus @0 @2))
3822 /* The second argument of pointer_plus must be interpreted as signed, and
3823 thus sign-extended if necessary. */
3824 (with { tree stype = signed_type_for (TREE_TYPE (@1)); }
3825 /* Use view_convert instead of convert here, as POINTER_PLUS_EXPR
3826 second arg is unsigned even when we need to consider it as signed;
3827 we don't want to diagnose overflow here. */
3828 (minus (convert (view_convert:stype @1))
3829 (convert (view_convert:stype @2)))))))
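/* For example, for char *p the difference (p + a) - (p + b) folds to the
   difference of the sign-extended offsets a and b, without touching p.  */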
3831 /* (A * C) +- (B * C) -> (A+-B) * C and (A * C) +- A -> A * (C+-1).
3832 Modeled after fold_plusminus_mult_expr. */
3833 (if (!TYPE_SATURATING (type)
3834 && (!FLOAT_TYPE_P (type) || flag_associative_math))
3835 (for plusminus (plus minus)
3837 (plusminus (mult:cs@3 @0 @1) (mult:cs@4 @0 @2))
3838 (if (!ANY_INTEGRAL_TYPE_P (type)
3839 || TYPE_OVERFLOW_WRAPS (type)
3840 || (INTEGRAL_TYPE_P (type)
3841 && tree_expr_nonzero_p (@0)
3842 && expr_not_equal_to (@0, wi::minus_one (TYPE_PRECISION (type)))))
3843 (if (single_use (@3) || single_use (@4))
3844 /* If @1 +- @2 is constant require a hard single-use on either
3845 original operand (but not on both). */
3846 (mult (plusminus @1 @2) @0)
3847 (mult! (plusminus @1 @2) @0)
3849 /* We cannot generate constant 1 for fract. */
3850 (if (!ALL_FRACT_MODE_P (TYPE_MODE (type)))
3852 (plusminus @0 (mult:c@3 @0 @2))
3853 (if ((!ANY_INTEGRAL_TYPE_P (type)
3854 || TYPE_OVERFLOW_WRAPS (type)
3855 /* For @0 + @0*@2 this transformation would introduce UB
3856 (where there was none before) for @0 in [-1,0] and @2 max.
3857 For @0 - @0*@2 this transformation would introduce UB
3858 for @0 0 and @2 in [min,min+1] or @0 -1 and @2 min+1. */
3859 || (INTEGRAL_TYPE_P (type)
3860 && ((tree_expr_nonzero_p (@0)
3861 && expr_not_equal_to (@0,
3862 wi::minus_one (TYPE_PRECISION (type))))
3863 || (plusminus == PLUS_EXPR
3864 ? expr_not_equal_to (@2,
3865 wi::max_value (TYPE_PRECISION (type), SIGNED))
3866 /* Let's ignore the @0 -1 and @2 min case. */
3867 : (expr_not_equal_to (@2,
3868 wi::min_value (TYPE_PRECISION (type), SIGNED))
3869 && expr_not_equal_to (@2,
3870 wi::min_value (TYPE_PRECISION (type), SIGNED)
3873 (mult (plusminus { build_one_cst (type); } @2) @0)))
3875 (plusminus (mult:c@3 @0 @2) @0)
3876 (if ((!ANY_INTEGRAL_TYPE_P (type)
3877 || TYPE_OVERFLOW_WRAPS (type)
3878 /* For @0*@2 + @0 this transformation would introduce UB
3879 (where there was none before) for @0 in [-1,0] and @2 max.
3880 For @0*@2 - @0 this transformation would introduce UB
3881 for @0 0 and @2 min. */
3882 || (INTEGRAL_TYPE_P (type)
3883 && ((tree_expr_nonzero_p (@0)
3884 && (plusminus == MINUS_EXPR
3885 || expr_not_equal_to (@0,
3886 wi::minus_one (TYPE_PRECISION (type)))))
3887 || expr_not_equal_to (@2,
3888 (plusminus == PLUS_EXPR
3889 ? wi::max_value (TYPE_PRECISION (type), SIGNED)
3890 : wi::min_value (TYPE_PRECISION (type), SIGNED))))))
3892 (mult (plusminus @2 { build_one_cst (type); }) @0))))))
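/* For example, for unsigned x, x*3 + x*5 folds to x*8 and x*7 - x folds
   to x*6.  */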
3895 /* Canonicalize X + (X << C) into X * (1 + (1 << C)) and
3896 (X << C1) + (X << C2) into X * ((1 << C1) + (1 << C2)). */
3898 (plus:c @0 (lshift:s @0 INTEGER_CST@1))
3899 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
3900 && tree_fits_uhwi_p (@1)
3901 && tree_to_uhwi (@1) < element_precision (type)
3902 && (INTEGRAL_TYPE_P (TREE_TYPE (@0))
3903 || optab_handler (smul_optab,
3904 TYPE_MODE (type)) != CODE_FOR_nothing))
3905 (with { tree t = type;
3906 if (!TYPE_OVERFLOW_WRAPS (t)) t = unsigned_type_for (t);
3907 wide_int w = wi::set_bit_in_zero (tree_to_uhwi (@1),
3908 element_precision (type));
3910 tree cst = wide_int_to_tree (VECTOR_TYPE_P (t) ? TREE_TYPE (t)
3912 cst = build_uniform_cst (t, cst); }
3913 (convert (mult (convert:t @0) { cst; })))))
3915 (plus (lshift:s @0 INTEGER_CST@1) (lshift:s @0 INTEGER_CST@2))
3916 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
3917 && tree_fits_uhwi_p (@1)
3918 && tree_to_uhwi (@1) < element_precision (type)
3919 && tree_fits_uhwi_p (@2)
3920 && tree_to_uhwi (@2) < element_precision (type)
3921 && (INTEGRAL_TYPE_P (TREE_TYPE (@0))
3922 || optab_handler (smul_optab,
3923 TYPE_MODE (type)) != CODE_FOR_nothing))
3924 (with { tree t = type;
3925 if (!TYPE_OVERFLOW_WRAPS (t)) t = unsigned_type_for (t);
3926 unsigned int prec = element_precision (type);
3927 wide_int w = wi::set_bit_in_zero (tree_to_uhwi (@1), prec);
3928 w += wi::set_bit_in_zero (tree_to_uhwi (@2), prec);
3929 tree cst = wide_int_to_tree (VECTOR_TYPE_P (t) ? TREE_TYPE (t)
3931 cst = build_uniform_cst (t, cst); }
3932 (convert (mult (convert:t @0) { cst; })))))
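/* For example, x + (x << 3) becomes x * 9 and (x << 2) + (x << 1)
   becomes x * 6.  */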
3935 /* Canonicalize (X*C1)|(X*C2) and (X*C1)^(X*C2) to (C1+C2)*X when
3936 tree_nonzero_bits allows IOR and XOR to be treated like PLUS.
3937 Likewise, handle (X<<C3) and X as legitimate variants of X*C. */
3938 (for op (bit_ior bit_xor)
3940 (op (mult:s@0 @1 INTEGER_CST@2)
3941 (mult:s@3 @1 INTEGER_CST@4))
3942 (if (INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_WRAPS (type)
3943 && (tree_nonzero_bits (@0) & tree_nonzero_bits (@3)) == 0)
3945 { wide_int_to_tree (type, wi::to_wide (@2) + wi::to_wide (@4)); })))
3947 (op:c (mult:s@0 @1 INTEGER_CST@2)
3948 (lshift:s@3 @1 INTEGER_CST@4))
3949 (if (INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_WRAPS (type)
3950 && tree_int_cst_sgn (@4) > 0
3951 && (tree_nonzero_bits (@0) & tree_nonzero_bits (@3)) == 0)
3952 (with { wide_int wone = wi::one (TYPE_PRECISION (type));
3953 wide_int c = wi::add (wi::to_wide (@2),
3954 wi::lshift (wone, wi::to_wide (@4))); }
3955 (mult @1 { wide_int_to_tree (type, c); }))))
3957 (op:c (mult:s@0 @1 INTEGER_CST@2)
3959 (if (INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_WRAPS (type)
3960 && (tree_nonzero_bits (@0) & tree_nonzero_bits (@1)) == 0)
3962 { wide_int_to_tree (type,
3963 wi::add (wi::to_wide (@2), 1)); })))
3965 (op (lshift:s@0 @1 INTEGER_CST@2)
3966 (lshift:s@3 @1 INTEGER_CST@4))
3967 (if (INTEGRAL_TYPE_P (type)
3968 && tree_int_cst_sgn (@2) > 0
3969 && tree_int_cst_sgn (@4) > 0
3970 && (tree_nonzero_bits (@0) & tree_nonzero_bits (@3)) == 0)
3971 (with { tree t = type;
3972 if (!TYPE_OVERFLOW_WRAPS (t))
3973 t = unsigned_type_for (t);
3974 wide_int wone = wi::one (TYPE_PRECISION (t));
3975 wide_int c = wi::add (wi::lshift (wone, wi::to_wide (@2)),
3976 wi::lshift (wone, wi::to_wide (@4))); }
3977 (convert (mult:t (convert:t @1) { wide_int_to_tree (t,c); })))))
3979 (op:c (lshift:s@0 @1 INTEGER_CST@2)
3981 (if (INTEGRAL_TYPE_P (type)
3982 && tree_int_cst_sgn (@2) > 0
3983 && (tree_nonzero_bits (@0) & tree_nonzero_bits (@1)) == 0)
3984 (with { tree t = type;
3985 if (!TYPE_OVERFLOW_WRAPS (t))
3986 t = unsigned_type_for (t);
3987 wide_int wone = wi::one (TYPE_PRECISION (t));
3988 wide_int c = wi::add (wi::lshift (wone, wi::to_wide (@2)), wone); }
3989 (convert (mult:t (convert:t @1) { wide_int_to_tree (t, c); }))))))
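/* For example, if x is known to be in [0, 3], the nonzero bits of x and
   x << 2 do not overlap, so (x << 2) | x folds to x * 5.  */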
3991 /* Simplifications of MIN_EXPR, MAX_EXPR, fmin() and fmax(). */
3993 (for minmax (min max)
3997 /* max(max(x,y),x) -> max(x,y) */
3999 (minmax:c (minmax:c@2 @0 @1) @0)
4001 /* For fmin() and fmax(), skip folding when both are sNaN. */
4002 (for minmax (FMIN_ALL FMAX_ALL)
4005 (if (!tree_expr_maybe_signaling_nan_p (@0))
4007 /* min(max(x,y),y) -> y. */
4009 (min:c (max:c @0 @1) @1)
4011 /* max(min(x,y),y) -> y. */
4013 (max:c (min:c @0 @1) @1)
4015 /* max(a,-a) -> abs(a). */
4017 (max:c @0 (negate @0))
4018 (if (TREE_CODE (type) != COMPLEX_TYPE
4019 && (! ANY_INTEGRAL_TYPE_P (type)
4020 || TYPE_OVERFLOW_UNDEFINED (type)))
4022 /* min(a,-a) -> -abs(a). */
4024 (min:c @0 (negate @0))
4025 (if (TREE_CODE (type) != COMPLEX_TYPE
4026 && (! ANY_INTEGRAL_TYPE_P (type)
4027 || TYPE_OVERFLOW_UNDEFINED (type)))
4032 (if (INTEGRAL_TYPE_P (type)
4033 && TYPE_MIN_VALUE (type)
4034 && operand_equal_p (@1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST))
4036 (if (INTEGRAL_TYPE_P (type)
4037 && TYPE_MAX_VALUE (type)
4038 && operand_equal_p (@1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST))
4043 (if (INTEGRAL_TYPE_P (type)
4044 && TYPE_MAX_VALUE (type)
4045 && operand_equal_p (@1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST))
4047 (if (INTEGRAL_TYPE_P (type)
4048 && TYPE_MIN_VALUE (type)
4049 && operand_equal_p (@1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST))
4052 /* max (a, a + CST) -> a + CST where CST is positive. */
4053 /* max (a, a + CST) -> a where CST is negative. */
4055 (max:c @0 (plus@2 @0 INTEGER_CST@1))
4056 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
4057 (if (tree_int_cst_sgn (@1) > 0)
4061 /* min (a, a + CST) -> a where CST is positive. */
4062 /* min (a, a + CST) -> a + CST where CST is negative. */
4064 (min:c @0 (plus@2 @0 INTEGER_CST@1))
4065 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
4066 (if (tree_int_cst_sgn (@1) > 0)
4070 /* Simplify min (&var[off0], &var[off1]) etc. depending on whether
4071 the addresses are known to be less, equal or greater. */
4072 (for minmax (min max)
4075 (minmax (convert1?@2 addr@0) (convert2?@3 addr@1))
4078 poly_int64 off0, off1;
4080 int equal = address_compare (cmp, TREE_TYPE (@2), @0, @1, base0, base1,
4081 off0, off1, GENERIC);
4084 (if (minmax == MIN_EXPR)
4085 (if (known_le (off0, off1))
4087 (if (known_gt (off0, off1))
4089 (if (known_ge (off0, off1))
4091 (if (known_lt (off0, off1))
4094 /* (convert (minmax (convert x) c)) -> minmax (x, c) if x is promoted
4095 and the outer convert demotes the expression back to x's type. */
4096 (for minmax (min max)
4098 (convert (minmax@0 (convert @1) INTEGER_CST@2))
4099 (if (INTEGRAL_TYPE_P (type)
4100 && types_match (@1, type) && int_fits_type_p (@2, type)
4101 && TYPE_SIGN (TREE_TYPE (@0)) == TYPE_SIGN (type)
4102 && TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (type))
4103 (minmax @1 (convert @2)))))
4105 (for minmax (FMIN_ALL FMAX_ALL)
4106 /* If either argument is NaN and the other one is not sNaN, return the other
4107 one. Avoid the transformation if we get (and honor) a signalling NaN. */
4109 (minmax:c @0 REAL_CST@1)
4110 (if (real_isnan (TREE_REAL_CST_PTR (@1))
4111 && (!HONOR_SNANS (@1) || !TREE_REAL_CST (@1).signalling)
4112 && !tree_expr_maybe_signaling_nan_p (@0))
4114 /* Convert fmin/fmax to MIN_EXPR/MAX_EXPR. C99 requires these
4115 functions to return the numeric arg if the other one is NaN.
4116 MIN and MAX don't honor that, so only transform if -ffinite-math-only
4117 is set. C99 doesn't require -0.0 to be handled, so we don't have to
4118 worry about it either. */
4119 (if (flag_finite_math_only)
4126 /* min (-A, -B) -> -max (A, B) */
4127 (for minmax (min max FMIN_ALL FMAX_ALL)
4128 maxmin (max min FMAX_ALL FMIN_ALL)
4130 (minmax (negate:s@2 @0) (negate:s@3 @1))
4131 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
4132 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
4133 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
4134 (negate (maxmin @0 @1)))))
4135 /* MIN (~X, ~Y) -> ~MAX (X, Y)
4136 MAX (~X, ~Y) -> ~MIN (X, Y) */
4137 (for minmax (min max)
4140 (minmax (bit_not:s@2 @0) (bit_not:s@3 @1))
4141 (bit_not (maxmin @0 @1)))
4142 /* ~MAX(~X, Y) --> MIN(X, ~Y) */
4143 /* ~MIN(~X, Y) --> MAX(X, ~Y) */
4145 (bit_not (minmax:cs (bit_not @0) @1))
4146 (maxmin @0 (bit_not @1))))
4148 /* MIN (X, Y) == X -> X <= Y */
4149 /* MIN (X, Y) < X -> X > Y */
4150 /* MIN (X, Y) >= X -> X <= Y */
4151 (for minmax (min min min min max max max max)
4152 cmp (eq ne lt ge eq ne gt le )
4153 out (le gt gt le ge lt lt ge )
4155 (cmp:c (minmax:c @0 @1) @0)
4156 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)))
4158 /* MIN (X, 5) == 0 -> X == 0
4159 MIN (X, 5) == 7 -> false */
4162 (cmp (min @0 INTEGER_CST@1) INTEGER_CST@2)
4163 (if (wi::lt_p (wi::to_wide (@1), wi::to_wide (@2),
4164 TYPE_SIGN (TREE_TYPE (@0))))
4165 { constant_boolean_node (cmp == NE_EXPR, type); }
4166 (if (wi::gt_p (wi::to_wide (@1), wi::to_wide (@2),
4167 TYPE_SIGN (TREE_TYPE (@0))))
4171 (cmp (max @0 INTEGER_CST@1) INTEGER_CST@2)
4172 (if (wi::gt_p (wi::to_wide (@1), wi::to_wide (@2),
4173 TYPE_SIGN (TREE_TYPE (@0))))
4174 { constant_boolean_node (cmp == NE_EXPR, type); }
4175 (if (wi::lt_p (wi::to_wide (@1), wi::to_wide (@2),
4176 TYPE_SIGN (TREE_TYPE (@0))))
4179 /* X <= MAX(X, Y) -> true
4180 X > MAX(X, Y) -> false
4181 X >= MIN(X, Y) -> true
4182 X < MIN(X, Y) -> false */
4183 (for minmax (min min max max )
4186 (cmp:c @0 (minmax:c @0 @1))
4187 { constant_boolean_node (cmp == GE_EXPR || cmp == LE_EXPR, type); } ))
4189 /* MIN (X, C1) < C2 -> X < C2 || C1 < C2 */
4190 (for minmax (min min max max min min max max )
4191 cmp (lt le gt ge gt ge lt le )
4192 comb (bit_ior bit_ior bit_ior bit_ior bit_and bit_and bit_and bit_and)
4194 (cmp (minmax @0 INTEGER_CST@1) INTEGER_CST@2)
4195 (comb (cmp @0 @2) (cmp @1 @2))))
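/* For example, min (x, 10) < 5 becomes x < 5 || 10 < 5, which folds to
   x < 5, while max (x, 10) < 5 becomes x < 5 && 10 < 5, which folds to
   false.  */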
4197 /* Undo fancy ways of writing max/min or other ?: expressions, like
4198 a - ((a - b) & -(a < b)) and a - (a - b) * (a < b) into (a < b) ? b : a.
4199 People normally use ?: and that is what we actually try to optimize. */
4200 /* Transform A + (B-A)*cmp into cmp ? B : A. */
4202 (plus:c @0 (mult:c (minus @1 @0) zero_one_valued_p@2))
4203 (if (INTEGRAL_TYPE_P (type)
4204 && (GIMPLE || !TREE_SIDE_EFFECTS (@1)))
4205 (cond (convert:boolean_type_node @2) @1 @0)))
4206 /* Transform A - (A-B)*cmp into cmp ? B : A. */
4208 (minus @0 (mult:c (minus @0 @1) zero_one_valued_p@2))
4209 (if (INTEGRAL_TYPE_P (type)
4210 && (GIMPLE || !TREE_SIDE_EFFECTS (@1)))
4211 (cond (convert:boolean_type_node @2) @1 @0)))
4212 /* Transform A ^ (A^B)*cmp into cmp ? B : A. */
4214 (bit_xor:c @0 (mult:c (bit_xor:c @0 @1) zero_one_valued_p@2))
4215 (if (INTEGRAL_TYPE_P (type)
4216 && (GIMPLE || !TREE_SIDE_EFFECTS (@1)))
4217 (cond (convert:boolean_type_node @2) @1 @0)))
4219 /* (x <= 0 ? -x : 0) -> max(-x, 0). */
4221 (cond (le @0 integer_zerop@1) (negate@2 @0) integer_zerop@1)
4224 /* (zero_one == 0) ? y : z <op> y -> ((typeof(y))zero_one * z) <op> y */
4225 (for op (bit_xor bit_ior plus)
4227 (cond (eq zero_one_valued_p@0
4231 (if (INTEGRAL_TYPE_P (type)
4232 && TYPE_PRECISION (type) > 1
4233 && (INTEGRAL_TYPE_P (TREE_TYPE (@0))))
4234 (op (mult (convert:type @0) @2) @1))))
4236 /* (zero_one != 0) ? z <op> y : y -> ((typeof(y))zero_one * z) <op> y */
4237 (for op (bit_xor bit_ior plus)
4239 (cond (ne zero_one_valued_p@0
4243 (if (INTEGRAL_TYPE_P (type)
4244 && TYPE_PRECISION (type) > 1
4245 && (INTEGRAL_TYPE_P (TREE_TYPE (@0))))
4246 (op (mult (convert:type @0) @2) @1))))
4248 /* ?: Value replacement. */
4249 /* a == 0 ? b : b + a -> b + a */
4250 (for op (plus bit_ior bit_xor)
4252 (cond (eq @0 integer_zerop) @1 (op:c@2 @1 @0))
4254 /* a == 0 ? b : b - a -> b - a */
4255 /* a == 0 ? b : b ptr+ a -> b ptr+ a */
4256 /* a == 0 ? b : b shift/rotate a -> b shift/rotate a */
4257 (for op (lrotate rrotate lshift rshift minus pointer_plus)
4259 (cond (eq @0 integer_zerop) @1 (op@2 @1 @0))
4262 /* a == 1 ? b : b / a -> b / a */
4263 (for op (trunc_div ceil_div floor_div round_div exact_div)
4265 (cond (eq @0 integer_onep) @1 (op@2 @1 @0))
4268 /* a == 1 ? b : a * b -> a * b */
4271 (cond (eq @0 integer_onep) @1 (op:c@2 @1 @0))
4274 /* a == -1 ? b : a & b -> a & b */
4277 (cond (eq @0 integer_all_onesp) @1 (op:c@2 @1 @0))
4280 /* Simplifications of shifts and rotates. */
4282 (for rotate (lrotate rrotate)
4284 (rotate integer_all_onesp@0 @1)
4287 /* Optimize -1 >> x for arithmetic right shifts. */
4289 (rshift integer_all_onesp@0 @1)
4290 (if (!TYPE_UNSIGNED (type))
4293 /* Optimize (x >> c) << c into x & (-1<<c). */
4295 (lshift (nop_convert? (rshift @0 INTEGER_CST@1)) @1)
4296 (if (wi::ltu_p (wi::to_wide (@1), element_precision (type)))
4297 /* It doesn't matter if the right shift is arithmetic or logical. */
4298 (bit_and (view_convert @0) (lshift { build_minus_one_cst (type); } @1))))
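/* For example, for 32-bit x, (x >> 4) << 4 folds to x & 0xfffffff0,
   whether the right shift was arithmetic or logical.  */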
4301 (lshift (convert (convert@2 (rshift @0 INTEGER_CST@1))) @1)
4302 (if (wi::ltu_p (wi::to_wide (@1), element_precision (type))
4303 /* Allow intermediate conversion to integral type with whatever sign, as
4304 long as the low TYPE_PRECISION (type)
4305 - TYPE_PRECISION (TREE_TYPE (@2)) bits are preserved. */
4306 && INTEGRAL_TYPE_P (type)
4307 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
4308 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
4309 && TYPE_PRECISION (type) == TYPE_PRECISION (TREE_TYPE (@0))
4310 && (TYPE_PRECISION (TREE_TYPE (@2)) >= TYPE_PRECISION (type)
4311 || wi::geu_p (wi::to_wide (@1),
4312 TYPE_PRECISION (type)
4313 - TYPE_PRECISION (TREE_TYPE (@2)))))
4314 (bit_and (convert @0) (lshift { build_minus_one_cst (type); } @1))))
4316 /* For (x << c) >> c, optimize into x & ((unsigned)-1 >> c) for
4317 unsigned x OR truncate into the precision(type) - c lowest bits
4318 of signed x (if they have mode precision or a precision of 1). */
4320 (rshift (nop_convert? (lshift @0 INTEGER_CST@1)) @@1)
4321 (if (wi::ltu_p (wi::to_wide (@1), element_precision (type)))
4322 (if (TYPE_UNSIGNED (type))
4323 (bit_and (convert @0) (rshift { build_minus_one_cst (type); } @1))
4324 (if (INTEGRAL_TYPE_P (type))
4326 int width = element_precision (type) - tree_to_uhwi (@1);
4327 tree stype = NULL_TREE;
4328 if (width <= MAX_FIXED_MODE_SIZE)
4329 stype = build_nonstandard_integer_type (width, 0);
4331 (if (stype && (width == 1 || type_has_mode_precision_p (stype)))
4332 (convert (convert:stype @0))))))))
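/* For example, for unsigned 32-bit x, (x << 8) >> 8 folds to x & 0xffffff,
   while for signed 32-bit x, (x << 16) >> 16 becomes a sign-extension of
   the low 16 bits through a 16-bit intermediate type.  */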
4334 /* Optimize x >> x into 0 */
4337 { build_zero_cst (type); })
4339 (for shiftrotate (lrotate rrotate lshift rshift)
4341 (shiftrotate @0 integer_zerop)
4344 (shiftrotate integer_zerop@0 @1)
4346 /* Prefer vector1 << scalar to vector1 << vector2
4347 if vector2 is uniform. */
4348 (for vec (VECTOR_CST CONSTRUCTOR)
4350 (shiftrotate @0 vec@1)
4351 (with { tree tem = uniform_vector_p (@1); }
4353 (shiftrotate @0 { tem; }))))))
4355 /* Simplify X << Y to X where Y's low width bits are 0, as the only
4356 valid Y is then 0. Similarly for X >> Y. */
4358 (for shift (lshift rshift)
4360 (shift @0 SSA_NAME@1)
4361 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1)))
4363 int width = ceil_log2 (element_precision (TREE_TYPE (@0)));
4364 int prec = TYPE_PRECISION (TREE_TYPE (@1));
4366 (if ((get_nonzero_bits (@1) & wi::mask (width, false, prec)) == 0)
4370 /* Rewrite an LROTATE_EXPR by a constant into an
4371 RROTATE_EXPR by a new constant. */
4373 (lrotate @0 INTEGER_CST@1)
4374 (rrotate @0 { const_binop (MINUS_EXPR, TREE_TYPE (@1),
4375 build_int_cst (TREE_TYPE (@1),
4376 element_precision (type)), @1); }))
4378 /* Turn (a OP c1) OP c2 into a OP (c1+c2). */
4379 (for op (lrotate rrotate rshift lshift)
4381 (op (op @0 INTEGER_CST@1) INTEGER_CST@2)
4382 (with { unsigned int prec = element_precision (type); }
4383 (if (wi::ge_p (wi::to_wide (@1), 0, TYPE_SIGN (TREE_TYPE (@1)))
4384 && wi::lt_p (wi::to_wide (@1), prec, TYPE_SIGN (TREE_TYPE (@1)))
4385 && wi::ge_p (wi::to_wide (@2), 0, TYPE_SIGN (TREE_TYPE (@2)))
4386 && wi::lt_p (wi::to_wide (@2), prec, TYPE_SIGN (TREE_TYPE (@2))))
4387 (with { unsigned int low = (tree_to_uhwi (@1)
4388 + tree_to_uhwi (@2)); }
4389 /* Deal with a OP (c1 + c2) being undefined but (a OP c1) OP c2
4390 being well defined. */
4392 (if (op == LROTATE_EXPR || op == RROTATE_EXPR)
4393 (op @0 { build_int_cst (TREE_TYPE (@1), low % prec); })
4394 (if (TYPE_UNSIGNED (type) || op == LSHIFT_EXPR)
4395 { build_zero_cst (type); }
4396 (op @0 { build_int_cst (TREE_TYPE (@1), prec - 1); })))
4397 (op @0 { build_int_cst (TREE_TYPE (@1), low); })))))))
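/* For example, (x >> 3) >> 5 folds to x >> 8; once the counts sum to the
   precision or more, the result is 0 for unsigned x and x >> 31 for signed
   32-bit x, while rotate counts simply wrap modulo the precision.  */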
4400 /* Simplify (CST << x) & 1 to 0 if CST is even or to x == 0 if it is odd. */
4402 (bit_and (lshift INTEGER_CST@1 @0) integer_onep)
4403 (if ((wi::to_wide (@1) & 1) != 0)
4404 (convert (eq:boolean_type_node @0 { build_zero_cst (TREE_TYPE (@0)); }))
4405 { build_zero_cst (type); }))
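/* For example, (4 << x) & 1 folds to 0, while (5 << x) & 1 folds to x == 0.  */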
4407 /* Simplify ((C << x) & D) != 0 where C and D are power of two constants,
4408 either to false if D is smaller (unsigned comparison) than C, or to
4409 x == log2 (D) - log2 (C). Similarly for right shifts.
4410 Note that for `(1 >> x)` the `& 1` has been removed, so match that separately. */
4414 (cmp (bit_and (lshift integer_pow2p@1 @0) integer_pow2p@2) integer_zerop)
4415 (with { int c1 = wi::clz (wi::to_wide (@1));
4416 int c2 = wi::clz (wi::to_wide (@2)); }
4418 { constant_boolean_node (cmp == NE_EXPR ? false : true, type); }
4419 (icmp @0 { build_int_cst (TREE_TYPE (@0), c1 - c2); }))))
4421 (cmp (bit_and (rshift integer_pow2p@1 @0) integer_pow2p@2) integer_zerop)
4422 (if (tree_int_cst_sgn (@1) > 0)
4423 (with { int c1 = wi::clz (wi::to_wide (@1));
4424 int c2 = wi::clz (wi::to_wide (@2)); }
4426 { constant_boolean_node (cmp == NE_EXPR ? false : true, type); }
4427 (icmp @0 { build_int_cst (TREE_TYPE (@0), c2 - c1); })))))
4428 /* `(1 >> X) != 0` -> `X == 0` */
4429 /* `(1 >> X) == 0` -> `X != 0` */
4431 (cmp (rshift integer_onep@1 @0) integer_zerop)
4432 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1)))
4433 (icmp @0 { build_zero_cst (TREE_TYPE (@0)); }))))
4435 /* (CST1 << A) == CST2 -> A == ctz (CST2) - ctz (CST1)
4436 (CST1 << A) != CST2 -> A != ctz (CST2) - ctz (CST1)
4440 (cmp (lshift INTEGER_CST@0 @1) INTEGER_CST@2)
4441 (with { int cand = wi::ctz (wi::to_wide (@2)) - wi::ctz (wi::to_wide (@0)); }
4443 || (!integer_zerop (@2)
4444 && wi::lshift (wi::to_wide (@0), cand) != wi::to_wide (@2)))
4445 { constant_boolean_node (cmp == NE_EXPR, type); }
4446 (if (!integer_zerop (@2)
4447 && wi::lshift (wi::to_wide (@0), cand) == wi::to_wide (@2))
4448 (cmp @1 { build_int_cst (TREE_TYPE (@1), cand); }))))))
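/* For example, (4 << x) == 32 folds to x == 3, and (4 << x) == 6 folds to
   false.  */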
4450 /* Fold ((X << C1) & C2) cmp C3 into (X & (C2 >> C1)) cmp (C3 >> C1)
4451 ((X >> C1) & C2) cmp C3 into (X & (C2 << C1)) cmp (C3 << C1). */
4454 (cmp (bit_and:s (lshift:s @0 INTEGER_CST@1) INTEGER_CST@2) INTEGER_CST@3)
4455 (if (tree_fits_shwi_p (@1)
4456 && tree_to_shwi (@1) > 0
4457 && tree_to_shwi (@1) < TYPE_PRECISION (TREE_TYPE (@0)))
4458 (if (tree_to_shwi (@1) > wi::ctz (wi::to_wide (@3)))
4459 { constant_boolean_node (cmp == NE_EXPR, type); }
4460 (with { wide_int c1 = wi::to_wide (@1);
4461 wide_int c2 = wi::lrshift (wi::to_wide (@2), c1);
4462 wide_int c3 = wi::lrshift (wi::to_wide (@3), c1); }
4463 (cmp (bit_and @0 { wide_int_to_tree (TREE_TYPE (@0), c2); })
4464 { wide_int_to_tree (TREE_TYPE (@0), c3); })))))
4466 (cmp (bit_and:s (rshift:s @0 INTEGER_CST@1) INTEGER_CST@2) INTEGER_CST@3)
4467 (if (tree_fits_shwi_p (@1)
4468 && tree_to_shwi (@1) > 0
4469 && tree_to_shwi (@1) < TYPE_PRECISION (TREE_TYPE (@0)))
4470 (with { tree t0 = TREE_TYPE (@0);
4471 unsigned int prec = TYPE_PRECISION (t0);
4472 wide_int c1 = wi::to_wide (@1);
4473 wide_int c2 = wi::to_wide (@2);
4474 wide_int c3 = wi::to_wide (@3);
4475 wide_int sb = wi::set_bit_in_zero (prec - 1, prec); }
4476 (if ((c2 & c3) != c3)
4477 { constant_boolean_node (cmp == NE_EXPR, type); }
4478 (if (TYPE_UNSIGNED (t0))
4479 (if ((c3 & wi::arshift (sb, c1 - 1)) != 0)
4480 { constant_boolean_node (cmp == NE_EXPR, type); }
4481 (cmp (bit_and @0 { wide_int_to_tree (t0, c2 << c1); })
4482 { wide_int_to_tree (t0, c3 << c1); }))
4483 (with { wide_int smask = wi::arshift (sb, c1); }
4485 (if ((c2 & smask) == 0)
4486 (cmp (bit_and @0 { wide_int_to_tree (t0, c2 << c1); })
4487 { wide_int_to_tree (t0, c3 << c1); }))
4488 (if ((c3 & smask) == 0)
4489 (cmp (bit_and @0 { wide_int_to_tree (t0, (c2 << c1) | sb); })
4490 { wide_int_to_tree (t0, c3 << c1); }))
4491 (if ((c2 & smask) != (c3 & smask))
4492 { constant_boolean_node (cmp == NE_EXPR, type); })
4493 (cmp (bit_and @0 { wide_int_to_tree (t0, (c2 << c1) | sb); })
4494 { wide_int_to_tree (t0, (c3 << c1) | sb); })))))))))
4496 /* Fold (X << C1) & C2 into (X << C1) & (C2 | ((1 << C1) - 1))
4497 (X >> C1) & C2 into (X >> C1) & (C2 | ~((type) -1 >> C1))
4498 if the new mask might be further optimized. */
4499 (for shift (lshift rshift)
4501 (bit_and (convert?:s@4 (shift:s@5 (convert1?@3 @0) INTEGER_CST@1))
4503 (if (tree_nop_conversion_p (TREE_TYPE (@4), TREE_TYPE (@5))
4504 && TYPE_PRECISION (type) <= HOST_BITS_PER_WIDE_INT
4505 && tree_fits_uhwi_p (@1)
4506 && tree_to_uhwi (@1) > 0
4507 && tree_to_uhwi (@1) < TYPE_PRECISION (type))
4510 unsigned int shiftc = tree_to_uhwi (@1);
4511 unsigned HOST_WIDE_INT mask = TREE_INT_CST_LOW (@2);
4512 unsigned HOST_WIDE_INT newmask, zerobits = 0;
4513 tree shift_type = TREE_TYPE (@3);
4516 if (shift == LSHIFT_EXPR)
4517 zerobits = ((HOST_WIDE_INT_1U << shiftc) - 1);
4518 else if (shift == RSHIFT_EXPR
4519 && type_has_mode_precision_p (shift_type))
4521 prec = TYPE_PRECISION (TREE_TYPE (@3));
4523 /* See if more bits can be proven as zero because of
4526 && TYPE_UNSIGNED (TREE_TYPE (@0)))
4528 tree inner_type = TREE_TYPE (@0);
4529 if (type_has_mode_precision_p (inner_type)
4530 && TYPE_PRECISION (inner_type) < prec)
4532 prec = TYPE_PRECISION (inner_type);
4533 /* See if we can shorten the right shift. */
4535 shift_type = inner_type;
4536 /* Otherwise X >> C1 is all zeros, so we'll optimize
4537 it into (X, 0) later on by making sure zerobits
4541 zerobits = HOST_WIDE_INT_M1U;
4544 zerobits >>= HOST_BITS_PER_WIDE_INT - shiftc;
4545 zerobits <<= prec - shiftc;
4547 /* For arithmetic shift if sign bit could be set, zerobits
4548 can actually contain sign bits, so no transformation is
4549 possible, unless MASK masks them all away. In that
4550 case the shift needs to be converted into logical shift. */
4551 if (!TYPE_UNSIGNED (TREE_TYPE (@3))
4552 && prec == TYPE_PRECISION (TREE_TYPE (@3)))
4554 if ((mask & zerobits) == 0)
4555 shift_type = unsigned_type_for (TREE_TYPE (@3));
4561 /* ((X << 16) & 0xff00) is (X, 0). */
4562 (if ((mask & zerobits) == mask)
4563 { build_int_cst (type, 0); }
4564 (with { newmask = mask | zerobits; }
4565 (if (newmask != mask && (newmask & (newmask + 1)) == 0)
4568 /* Only do the transformation if NEWMASK is some integer mode's mask. */
4570 for (prec = BITS_PER_UNIT;
4571 prec < HOST_BITS_PER_WIDE_INT; prec <<= 1)
4572 if (newmask == (HOST_WIDE_INT_1U << prec) - 1)
4575 (if (prec < HOST_BITS_PER_WIDE_INT
4576 || newmask == HOST_WIDE_INT_M1U)
4578 { tree newmaskt = build_int_cst_type (TREE_TYPE (@2), newmask); }
4579 (if (!tree_int_cst_equal (newmaskt, @2))
4580 (if (shift_type != TREE_TYPE (@3))
4581 (bit_and (convert (shift:shift_type (convert @3) @1)) { newmaskt; })
4582 (bit_and @4 { newmaskt; })))))))))))))
4584 /* ((1 << n) & M) != 0 -> n == log2 (M) */
4590 (nop_convert? (lshift integer_onep @0)) integer_pow2p@1) integer_zerop)
4591 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)))
4592 (icmp @0 { wide_int_to_tree (TREE_TYPE (@0),
4593 wi::exact_log2 (wi::to_wide (@1))); }))))
4595 /* Fold (X {&,^,|} C2) << C1 into (X << C1) {&,^,|} (C2 << C1)
4596 (X {&,^,|} C2) >> C1 into (X >> C1) & (C2 >> C1). */
4597 (for shift (lshift rshift)
4598 (for bit_op (bit_and bit_xor bit_ior)
4600 (shift (convert?:s (bit_op:s @0 INTEGER_CST@2)) INTEGER_CST@1)
4601 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
4602 (with { tree mask = int_const_binop (shift, fold_convert (type, @2), @1); }
4604 (bit_op (shift (convert @0) @1) { mask; })))))))
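/* For example, (x & 0xf0) >> 4 folds to (x >> 4) & 0xf, and (x | 3) << 2
   folds to (x << 2) | 12.  */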
4606 /* ~(~X >> Y) -> X >> Y (for arithmetic shift). */
4608 (bit_not (convert1?:s (rshift:s (convert2?@0 (bit_not @1)) @2)))
4609 (if (!TYPE_UNSIGNED (TREE_TYPE (@0))
4610 && (element_precision (TREE_TYPE (@0))
4611 <= element_precision (TREE_TYPE (@1))
4612 || !TYPE_UNSIGNED (TREE_TYPE (@1))))
4614 { tree shift_type = TREE_TYPE (@0); }
4615 (convert (rshift (convert:shift_type @1) @2)))))
4617 /* ~(~X >>r Y) -> X >>r Y
4618 ~(~X <<r Y) -> X <<r Y */
4619 (for rotate (lrotate rrotate)
4621 (bit_not (convert1?:s (rotate:s (convert2?@0 (bit_not @1)) @2)))
4622 (if ((element_precision (TREE_TYPE (@0))
4623 <= element_precision (TREE_TYPE (@1))
4624 || !TYPE_UNSIGNED (TREE_TYPE (@1)))
4625 && (element_precision (type) <= element_precision (TREE_TYPE (@0))
4626 || !TYPE_UNSIGNED (TREE_TYPE (@0))))
4628 { tree rotate_type = TREE_TYPE (@0); }
4629 (convert (rotate (convert:rotate_type @1) @2))))))
4632 (for rotate (lrotate rrotate)
4633 invrot (rrotate lrotate)
4634 /* (X >>r Y) cmp (Z >>r Y) may simplify to X cmp Y. */
4636 (cmp (rotate @1 @0) (rotate @2 @0))
4638 /* (X >>r C1) cmp C2 may simplify to X cmp C3. */
4640 (cmp (rotate @0 INTEGER_CST@1) INTEGER_CST@2)
4641 (cmp @0 { const_binop (invrot, TREE_TYPE (@0), @2, @1); }))
4642 /* (X >>r Y) cmp C where C is 0 or ~0, may simplify to X cmp C. */
4644 (cmp (rotate @0 @1) INTEGER_CST@2)
4645 (if (integer_zerop (@2) || integer_all_onesp (@2))
4648 /* Narrow a lshift by constant. */
4650 (convert (lshift:s@0 @1 INTEGER_CST@2))
4651 (if (INTEGRAL_TYPE_P (type)
4652 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
4653 && !integer_zerop (@2)
4654 && TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@0)))
4655 (if (TYPE_PRECISION (type) == TYPE_PRECISION (TREE_TYPE (@0))
4656 || wi::ltu_p (wi::to_wide (@2), TYPE_PRECISION (type)))
4657 (lshift (convert @1) @2)
4658 (if (wi::ltu_p (wi::to_wide (@2), TYPE_PRECISION (TREE_TYPE (@0))))
4659 { build_zero_cst (type); }))))
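/* For example, with 32-bit int and 16-bit short, (short) (x << 3) folds to
   (short) x << 3, while (short) (x << 20) folds to 0.  */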
4661 /* Simplifications of conversions. */
4663 /* Basic strip-useless-type-conversions / strip_nops. */
4664 (for cvt (convert view_convert float fix_trunc)
4667 (if ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@0)))
4668 || (GENERIC && type == TREE_TYPE (@0)))
4671 /* Contract view-conversions. */
4673 (view_convert (view_convert @0))
4676 /* For integral conversions with the same precision or pointer
4677 conversions use a NOP_EXPR instead. */
4680 (if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
4681 && (INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
4682 && TYPE_PRECISION (type) == TYPE_PRECISION (TREE_TYPE (@0)))
4685 /* Strip inner integral conversions that do not change precision or size, or
4686 zero-extend while keeping the same size (for bool-to-char). */
4688 (view_convert (convert@0 @1))
4689 (if ((INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
4690 && (INTEGRAL_TYPE_P (TREE_TYPE (@1)) || POINTER_TYPE_P (TREE_TYPE (@1)))
4691 && TYPE_SIZE (TREE_TYPE (@0)) == TYPE_SIZE (TREE_TYPE (@1))
4692 && (TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1))
4693 || (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (TREE_TYPE (@1))
4694 && TYPE_UNSIGNED (TREE_TYPE (@1)))))
4697 /* Simplify a view-converted empty or single-element constructor. */
4699 (view_convert CONSTRUCTOR@0)
4701 { tree ctor = (TREE_CODE (@0) == SSA_NAME
4702 ? gimple_assign_rhs1 (SSA_NAME_DEF_STMT (@0)) : @0); }
4704 (if (CONSTRUCTOR_NELTS (ctor) == 0)
4705 { build_zero_cst (type); })
4706 (if (CONSTRUCTOR_NELTS (ctor) == 1
4707 && VECTOR_TYPE_P (TREE_TYPE (ctor))
4708 && operand_equal_p (TYPE_SIZE (type),
4709 TYPE_SIZE (TREE_TYPE
4710 (CONSTRUCTOR_ELT (ctor, 0)->value))))
4711 (view_convert { CONSTRUCTOR_ELT (ctor, 0)->value; })))))
4713 /* Re-association barriers around constants and other re-association
4714 barriers can be removed. */
4716 (paren CONSTANT_CLASS_P@0)
4719 (paren (paren@1 @0))
4722 /* Handle cases of two conversions in a row. */
4723 (for ocvt (convert float fix_trunc)
4724 (for icvt (convert float)
4729 tree inside_type = TREE_TYPE (@0);
4730 tree inter_type = TREE_TYPE (@1);
4731 int inside_int = INTEGRAL_TYPE_P (inside_type);
4732 int inside_ptr = POINTER_TYPE_P (inside_type);
4733 int inside_float = FLOAT_TYPE_P (inside_type);
4734 int inside_vec = VECTOR_TYPE_P (inside_type);
4735 unsigned int inside_prec = element_precision (inside_type);
4736 int inside_unsignedp = TYPE_UNSIGNED (inside_type);
4737 int inter_int = INTEGRAL_TYPE_P (inter_type);
4738 int inter_ptr = POINTER_TYPE_P (inter_type);
4739 int inter_float = FLOAT_TYPE_P (inter_type);
4740 int inter_vec = VECTOR_TYPE_P (inter_type);
4741 unsigned int inter_prec = element_precision (inter_type);
4742 int inter_unsignedp = TYPE_UNSIGNED (inter_type);
4743 int final_int = INTEGRAL_TYPE_P (type);
4744 int final_ptr = POINTER_TYPE_P (type);
4745 int final_float = FLOAT_TYPE_P (type);
4746 int final_vec = VECTOR_TYPE_P (type);
4747 unsigned int final_prec = element_precision (type);
4748 int final_unsignedp = TYPE_UNSIGNED (type);
4751 /* In addition to the cases of two conversions in a row
4752 handled below, if we are converting something to its own
4753 type via an object of identical or wider precision, neither
4754 conversion is needed. */
4755 (if (((GIMPLE && useless_type_conversion_p (type, inside_type))
4757 && TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (inside_type)))
4758 && (((inter_int || inter_ptr) && final_int)
4759 || (inter_float && final_float))
4760 && inter_prec >= final_prec)
4763 /* Likewise, if the intermediate and initial types are either both
4764 float or both integer, we don't need the middle conversion if the
4765 former is wider than the latter and doesn't change the signedness
4766 (for integers). Avoid this if the final type is a pointer since
4767 then we sometimes need the middle conversion. */
4768 (if (((inter_int && inside_int) || (inter_float && inside_float))
4769 && (final_int || final_float)
4770 && inter_prec >= inside_prec
4771 && (inter_float || inter_unsignedp == inside_unsignedp))
4774 /* If we have a sign-extension of a zero-extended value, we can
4775 replace that by a single zero-extension. Likewise if the
4776 final conversion does not change precision we can drop the
4777 intermediate conversion. Similarly truncation of a sign-extension
4778 can be replaced by a single sign-extension. */
4779 (if (inside_int && inter_int && final_int
4780 && ((inside_prec < inter_prec && inter_prec < final_prec
4781 && inside_unsignedp && !inter_unsignedp)
4782 || final_prec == inter_prec
4783 || (inside_prec < inter_prec && inter_prec > final_prec
4784 && !inside_unsignedp && inter_unsignedp)))
4787 /* Two conversions in a row are not needed unless:
4788 - some conversion is floating-point (overstrict for now), or
4789 - some conversion is a vector (overstrict for now), or
4790 - the intermediate type is narrower than both initial and final, or
4792 - the intermediate type and innermost type differ in signedness,
4793 and the outermost type is wider than the intermediate, or
4794 - the initial type is a pointer type and the precisions of the
4795 intermediate and final types differ, or
4796 - the final type is a pointer type and the precisions of the
4797 initial and intermediate types differ. */
4798 (if (! inside_float && ! inter_float && ! final_float
4799 && ! inside_vec && ! inter_vec && ! final_vec
4800 && (inter_prec >= inside_prec || inter_prec >= final_prec)
4801 && ! (inside_int && inter_int
4802 && inter_unsignedp != inside_unsignedp
4803 && inter_prec < final_prec)
4804 && ((inter_unsignedp && inter_prec > inside_prec)
4805 == (final_unsignedp && final_prec > inter_prec))
4806 && ! (inside_ptr && inter_prec != final_prec)
4807 && ! (final_ptr && inside_prec != inter_prec))
4810 /* `(outer:M)(inter:N) a:O`
4811 can be converted to `(outer:M) a`
4812 if M <= O && N >= O, no matter the signedness of the casts,
4813 as the final conversion is either a truncation of the original or just
4814 a sign change of the type. */
4815 (if (inside_int && inter_int && final_int
4816 && final_prec <= inside_prec
4817 && inter_prec >= inside_prec)
4820 /* A truncation to an unsigned type (a zero-extension) should be
4821 canonicalized as bitwise and of a mask. */
4822 (if (GIMPLE /* PR70366: doing this in GENERIC breaks -Wconversion. */
4823 && final_int && inter_int && inside_int
4824 && final_prec == inside_prec
4825 && final_prec > inter_prec
4827 (convert (bit_and @0 { wide_int_to_tree
4829 wi::mask (inter_prec, false,
4830 TYPE_PRECISION (inside_type))); })))
4832 /* If we are converting an integer to a floating-point that can
4833 represent it exactly and back to an integer, we can skip the
4834 floating-point conversion. */
4835 (if (GIMPLE /* PR66211 */
4836 && inside_int && inter_float && final_int &&
4837 (unsigned) significand_size (TYPE_MODE (inter_type))
4838 >= inside_prec - !inside_unsignedp)
4841 /* (float_type)(integer_type) x -> trunc (x) if the type of x matches
4842 float_type. Only do the transformation if we do not need to preserve
4843 trapping behaviour, so require !flag_trapping_math. */
4846 (float (fix_trunc @0))
4847 (if (!flag_trapping_math
4848 && types_match (type, TREE_TYPE (@0))
4849 && direct_internal_fn_supported_p (IFN_TRUNC, type,
4854 /* If we have a narrowing conversion to an integral type that is fed by a
4855 BIT_AND_EXPR, we might be able to remove the BIT_AND_EXPR if it merely
4856 masks off bits outside the final type (and nothing else). */
4858 (convert (bit_and @0 INTEGER_CST@1))
4859 (if (INTEGRAL_TYPE_P (type)
4860 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
4861 && TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@0))
4862 && operand_equal_p (@1, build_low_bits_mask (TREE_TYPE (@1),
4863 TYPE_PRECISION (type)), 0))
4867 /* (X /[ex] A) * A -> X. */
4869 (mult (convert1? (exact_div @0 @@1)) (convert2? @1))
4872 /* Simplify (A / B) * B + (A % B) -> A. */
4873 (for div (trunc_div ceil_div floor_div round_div)
4874 mod (trunc_mod ceil_mod floor_mod round_mod)
4876 (plus:c (mult:c (div @0 @1) @1) (mod @0 @1))
4879 /* x / y * y == x -> x % y == 0. */
4881 (eq:c (mult:c (trunc_div:s @0 @1) @1) @0)
4882 (if (TREE_CODE (TREE_TYPE (@0)) != COMPLEX_TYPE)
4883 (eq (trunc_mod @0 @1) { build_zero_cst (TREE_TYPE (@0)); })))
4885 /* ((X /[ex] A) +- B) * A --> X +- A * B. */
4886 (for op (plus minus)
4888 (mult (convert1? (op (convert2? (exact_div @0 INTEGER_CST@@1)) INTEGER_CST@2)) @1)
4889 (if (tree_nop_conversion_p (type, TREE_TYPE (@2))
4890 && tree_nop_conversion_p (TREE_TYPE (@0), TREE_TYPE (@2)))
4893 wi::overflow_type overflow;
4894 wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
4895 TYPE_SIGN (type), &overflow);
4897 (if (types_match (type, TREE_TYPE (@2))
4898 && types_match (TREE_TYPE (@0), TREE_TYPE (@2)) && !overflow)
4899 (op @0 { wide_int_to_tree (type, mul); })
4900 (with { tree utype = unsigned_type_for (type); }
4901 (convert (op (convert:utype @0)
4902 (mult (convert:utype @1) (convert:utype @2))))))))))
4904 /* Canonicalization of binary operations. */
4906 /* Convert X + -C into X - C. */
4908 (plus @0 REAL_CST@1)
4909 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
4910 (with { tree tem = const_unop (NEGATE_EXPR, type, @1); }
4911 (if (!TREE_OVERFLOW (tem) || !flag_trapping_math)
4912 (minus @0 { tem; })))))
4914 /* Convert x+x into x*2. */
4917 (if (SCALAR_FLOAT_TYPE_P (type))
4918 (mult @0 { build_real (type, dconst2); })
4919 (if (INTEGRAL_TYPE_P (type))
4920 (mult @0 { build_int_cst (type, 2); }))))
4924 (minus integer_zerop @1)
4927 (pointer_diff integer_zerop @1)
4928 (negate (convert @1)))
4930 /* (ARG0 - ARG1) is the same as (-ARG1 + ARG0). So check whether
4931 ARG0 is zero and X + ARG0 reduces to X, since that would mean
4932 (-ARG1 + ARG0) reduces to -ARG1. */
4934 (minus real_zerop@0 @1)
4935 (if (fold_real_zero_addition_p (type, @1, @0, 0))
4938 /* Transform x * -1 into -x. */
4940 (mult @0 integer_minus_onep)
4943 /* Reassociate (X * CST) * Y to (X * Y) * CST. This does not introduce
4944 signed overflow for CST != 0 && CST != -1. */
4946 (mult:c (mult:s@3 @0 INTEGER_CST@1) @2)
4947 (if (TREE_CODE (@2) != INTEGER_CST
4949 && !integer_zerop (@1) && !integer_minus_onep (@1))
4950 (mult (mult @0 @2) @1)))
4952 /* True if we can easily extract the real and imaginary parts of a complex number. */
4954 (match compositional_complex
4955 (convert? (complex @0 @1)))
4957 /* COMPLEX_EXPR and REALPART/IMAGPART_EXPR cancellations. */
4959 (complex (realpart @0) (imagpart @0))
4962 (realpart (complex @0 @1))
4965 (imagpart (complex @0 @1))
4968 /* Sometimes we only care about half of a complex expression. */
4970 (realpart (convert?:s (conj:s @0)))
4971 (convert (realpart @0)))
4973 (imagpart (convert?:s (conj:s @0)))
4974 (convert (negate (imagpart @0))))
4975 (for part (realpart imagpart)
4976 (for op (plus minus)
4978 (part (convert?:s@2 (op:s @0 @1)))
4979 (convert (op (part @0) (part @1))))))
4981 (realpart (convert?:s (CEXPI:s @0)))
4984 (imagpart (convert?:s (CEXPI:s @0)))
4987 /* conj(conj(x)) -> x */
4989 (conj (convert? (conj @0)))
4990 (if (tree_nop_conversion_p (TREE_TYPE (@0), type))
4993 /* conj({x,y}) -> {x,-y} */
4995 (conj (convert?:s (complex:s @0 @1)))
4996 (with { tree itype = TREE_TYPE (type); }
4997 (complex (convert:itype @0) (negate (convert:itype @1)))))
4999 /* BSWAP simplifications, transforms checked by gcc.dg/builtin-bswap-8.c. */
5005 (bswap (bit_not (bswap @0)))
5007 (for bitop (bit_xor bit_ior bit_and)
5009 (bswap (bitop:c (bswap @0) @1))
5010 (bitop @0 (bswap @1))))
5013 (cmp (bswap@2 @0) (bswap @1))
5014 (with { tree ctype = TREE_TYPE (@2); }
5015 (cmp (convert:ctype @0) (convert:ctype @1))))
5017 (cmp (bswap @0) INTEGER_CST@1)
5018 (with { tree ctype = TREE_TYPE (@1); }
5019 (cmp (convert:ctype @0) (bswap! @1)))))
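/* For example, __builtin_bswap32 (x) == __builtin_bswap32 (y) folds to
   x == y, and __builtin_bswap16 (x) == 0x1234 folds to x == 0x3412.  */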
5020 /* (bswap(x) >> C1) & C2 can sometimes be simplified to (x >> C3) & C2. */
5022 (bit_and (convert1? (rshift@0 (convert2? (bswap@4 @1)) INTEGER_CST@2))
5024 (if (BITS_PER_UNIT == 8
5025 && tree_fits_uhwi_p (@2)
5026 && tree_fits_uhwi_p (@3))
5029 unsigned HOST_WIDE_INT prec = TYPE_PRECISION (TREE_TYPE (@4));
5030 unsigned HOST_WIDE_INT bits = tree_to_uhwi (@2);
5031 unsigned HOST_WIDE_INT mask = tree_to_uhwi (@3);
5032 unsigned HOST_WIDE_INT lo = bits & 7;
5033 unsigned HOST_WIDE_INT hi = bits - lo;
5036 && mask < (256u>>lo)
5037 && bits < TYPE_PRECISION (TREE_TYPE(@0)))
5038 (with { unsigned HOST_WIDE_INT ns = (prec - (hi + 8)) + lo; }
5040 (bit_and (convert @1) @3)
5043 tree utype = unsigned_type_for (TREE_TYPE (@1));
5044 tree nst = build_int_cst (integer_type_node, ns);
5046 (bit_and (convert (rshift:utype (convert:utype @1) {nst;})) @3))))))))
5047 /* bswap(x) >> C1 can sometimes be simplified to (T)x >> C2. */
5049 (rshift (convert? (bswap@2 @0)) INTEGER_CST@1)
5050 (if (BITS_PER_UNIT == 8
5051 && CHAR_TYPE_SIZE == 8
5052 && tree_fits_uhwi_p (@1))
5055 unsigned HOST_WIDE_INT prec = TYPE_PRECISION (TREE_TYPE (@2));
5056 unsigned HOST_WIDE_INT bits = tree_to_uhwi (@1);
5057 /* If the bswap was extended before the original shift, this
5058 byte (shift) has the sign of the extension, not the sign of
5059 the original shift. */
5060 tree st = TYPE_PRECISION (type) > prec ? TREE_TYPE (@2) : type;
5062 /* Special case: logical right shift of sign-extended bswap.
5063 (unsigned)(short)bswap16(x)>>12 is (unsigned)((short)x<<8)>>12. */
5064 (if (TYPE_PRECISION (type) > prec
5065 && !TYPE_UNSIGNED (TREE_TYPE (@2))
5066 && TYPE_UNSIGNED (type)
5067 && bits < prec && bits + 8 >= prec)
5068 (with { tree nst = build_int_cst (integer_type_node, prec - 8); }
5069 (rshift (convert (lshift:st (convert:st @0) {nst;})) @1))
5070 (if (bits + 8 == prec)
5071 (if (TYPE_UNSIGNED (st))
5072 (convert (convert:unsigned_char_type_node @0))
5073 (convert (convert:signed_char_type_node @0)))
5074 (if (bits < prec && bits + 8 > prec)
5077 tree nst = build_int_cst (integer_type_node, bits & 7);
5078 tree bt = TYPE_UNSIGNED (st) ? unsigned_char_type_node
5079 : signed_char_type_node;
5081 (convert (rshift:bt (convert:bt @0) {nst;})))))))))
5082 /* bswap(x) & C1 can sometimes be simplified to (x >> C2) & C1. */
5084 (bit_and (convert? (bswap@2 @0)) INTEGER_CST@1)
5085 (if (BITS_PER_UNIT == 8
5086 && tree_fits_uhwi_p (@1)
5087 && tree_to_uhwi (@1) < 256)
5090 unsigned HOST_WIDE_INT prec = TYPE_PRECISION (TREE_TYPE (@2));
5091 tree utype = unsigned_type_for (TREE_TYPE (@0));
5092 tree nst = build_int_cst (integer_type_node, prec - 8);
5094 (bit_and (convert (rshift:utype (convert:utype @0) {nst;})) @1)))))
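/* For example, __builtin_bswap32 (x) & 0xff folds to (x >> 24) & 0xff,
   avoiding the byte swap entirely.  */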
5097 /* Combine COND_EXPRs and VEC_COND_EXPRs. */
5099 /* Simplify constant conditions.
5100 Only optimize constant conditions when the selected branch
5101 has the same type as the COND_EXPR. This avoids optimizing
5102 away "c ? x : throw", where the throw has a void type.
5103 Note that we cannot throw away the fold-const.cc variant nor
5104 this one as we depend on doing this transform before possibly
5105 A ? B : B -> B triggers and the fold-const.cc one can optimize
5106 0 ? A : B to B even if A has side-effects, something
5107 genmatch cannot handle. */
5109 (cond INTEGER_CST@0 @1 @2)
5110 (if (integer_zerop (@0))
5111 (if (!VOID_TYPE_P (TREE_TYPE (@2)) || VOID_TYPE_P (type))
5113 (if (!VOID_TYPE_P (TREE_TYPE (@1)) || VOID_TYPE_P (type))
5116 (vec_cond VECTOR_CST@0 @1 @2)
5117 (if (integer_all_onesp (@0))
5119 (if (integer_zerop (@0))
5122 /* Sink unary operations to branches, but only if we do fold both. */
5123 (for op (negate bit_not abs absu)
5125 (op (vec_cond:s @0 @1 @2))
5126 (vec_cond @0 (op! @1) (op! @2))))
5128 /* Sink unary conversions to branches, but only if we do fold both
5129 and the target's truth type is the same as we already have. */
5131 (convert (vec_cond:s @0 @1 @2))
5132 (if (VECTOR_TYPE_P (type)
5133 && types_match (TREE_TYPE (@0), truth_type_for (type)))
5134 (vec_cond @0 (convert! @1) (convert! @2))))
5136 /* Likewise for view_convert of nop_conversions. */
5138 (view_convert (vec_cond:s @0 @1 @2))
5139 (if (VECTOR_TYPE_P (type) && VECTOR_TYPE_P (TREE_TYPE (@1))
5140 && known_eq (TYPE_VECTOR_SUBPARTS (type),
5141 TYPE_VECTOR_SUBPARTS (TREE_TYPE (@1)))
5142 && tree_nop_conversion_p (TREE_TYPE (type), TREE_TYPE (TREE_TYPE (@1))))
5143 (vec_cond @0 (view_convert! @1) (view_convert! @2))))
5145 /* Sink binary operation to branches, but only if we can fold it. */
5146 (for op (tcc_comparison plus minus mult bit_and bit_ior bit_xor
5147 lshift rshift rdiv trunc_div ceil_div floor_div round_div
5148 trunc_mod ceil_mod floor_mod round_mod min max)
5149 /* (c ? a : b) op (c ? d : e) --> c ? (a op d) : (b op e) */
5151 (op (vec_cond:s @0 @1 @2) (vec_cond:s @0 @3 @4))
5152 (vec_cond @0 (op! @1 @3) (op! @2 @4)))
5154 /* (c ? a : b) op d --> c ? (a op d) : (b op d) */
5156 (op (vec_cond:s @0 @1 @2) @3)
5157 (vec_cond @0 (op! @1 @3) (op! @2 @3)))
5159 (op @3 (vec_cond:s @0 @1 @2))
5160 (vec_cond @0 (op! @3 @1) (op! @3 @2))))
5163 (match (nop_atomic_bit_test_and_p @0 @1 @4)
5164 (bit_and (convert?@4 (ATOMIC_FETCH_OR_XOR_N @2 INTEGER_CST@0 @3))
5167 int ibit = tree_log2 (@0);
5168 int ibit2 = tree_log2 (@1);
5172 && TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@0))))))
5174 (match (nop_atomic_bit_test_and_p @0 @1 @3)
5175 (bit_and (convert?@3 (SYNC_FETCH_OR_XOR_N @2 INTEGER_CST@0))
5178 int ibit = tree_log2 (@0);
5179 int ibit2 = tree_log2 (@1);
5183 && TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@0))))))
5185 (match (nop_atomic_bit_test_and_p @0 @0 @4)
5188 (ATOMIC_FETCH_OR_XOR_N @2 (nop_convert? (lshift@0 integer_onep@5 @6)) @3))
5190 (if (TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@0)))))
5192 (match (nop_atomic_bit_test_and_p @0 @0 @4)
5195 (SYNC_FETCH_OR_XOR_N @2 (nop_convert? (lshift@0 integer_onep@3 @5))))
5197 (if (TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@0)))))
5199 (match (nop_atomic_bit_test_and_p @0 @1 @3)
5200 (bit_and@4 (convert?@3 (ATOMIC_FETCH_AND_N @2 INTEGER_CST@0 @5))
5203 int ibit = wi::exact_log2 (wi::zext (wi::bit_not (wi::to_wide (@0)),
5204 TYPE_PRECISION(type)));
5205 int ibit2 = tree_log2 (@1);
5209 && TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@0))))))
5211 (match (nop_atomic_bit_test_and_p @0 @1 @3)
5213 (convert?@3 (SYNC_FETCH_AND_AND_N @2 INTEGER_CST@0))
5216 int ibit = wi::exact_log2 (wi::zext (wi::bit_not (wi::to_wide (@0)),
5217 TYPE_PRECISION(type)));
5218 int ibit2 = tree_log2 (@1);
5222 && TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@0))))))
5224 (match (nop_atomic_bit_test_and_p @4 @0 @3)
5227 (ATOMIC_FETCH_AND_N @2 (nop_convert?@4 (bit_not (lshift@0 integer_onep@6 @7))) @5))
5229 (if (TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@4)))))
5231 (match (nop_atomic_bit_test_and_p @4 @0 @3)
5234 (SYNC_FETCH_AND_AND_N @2 (nop_convert?@4 (bit_not (lshift@0 integer_onep@6 @7)))))
5236 (if (TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@4)))))
5240 /* (v ? w : 0) ? a : b is just (v & w) ? a : b
5241 Currently disabled after pass lvec because ARM understands
5242 VEC_COND_EXPR<v==w,-1,0> but not a plain v==w fed to BIT_IOR_EXPR. */
5244 /* These can only be done in gimple as fold likes to convert:
5245 (CMP) & N into (CMP) ? N : 0
5246 and we try to match the same pattern again and again. */
5248 (vec_cond (vec_cond:s @0 @3 integer_zerop) @1 @2)
5249 (if (optimize_vectors_before_lowering_p () && types_match (@0, @3))
5250 (vec_cond (bit_and @0 @3) @1 @2)))
5252 (vec_cond (vec_cond:s @0 integer_all_onesp @3) @1 @2)
5253 (if (optimize_vectors_before_lowering_p () && types_match (@0, @3))
5254 (vec_cond (bit_ior @0 @3) @1 @2)))
5256 (vec_cond (vec_cond:s @0 integer_zerop @3) @1 @2)
5257 (if (optimize_vectors_before_lowering_p () && types_match (@0, @3))
5258 (vec_cond (bit_ior @0 (bit_not @3)) @2 @1)))
5260 (vec_cond (vec_cond:s @0 @3 integer_all_onesp) @1 @2)
5261 (if (optimize_vectors_before_lowering_p () && types_match (@0, @3))
5262 (vec_cond (bit_and @0 (bit_not @3)) @2 @1)))
5264 /* ((VCE (a cmp b ? -1 : 0)) < 0) ? c : d is just
5265 (VCE ((a cmp b) ? (VCE c) : (VCE d))) when TYPE_PRECISION of the
5266 component type of the outer vec_cond is greater than or equal to that of the inner one. */
5267 (for cmp (simple_comparison)
5270 (lt (view_convert@5 (vec_cond@6 (cmp@4 @0 @1)
5273 integer_zerop) @2 @3)
5274 (if (VECTOR_INTEGER_TYPE_P (TREE_TYPE (@0))
5275 && VECTOR_INTEGER_TYPE_P (TREE_TYPE (@5))
5276 && !TYPE_UNSIGNED (TREE_TYPE (@5))
5277 && VECTOR_TYPE_P (TREE_TYPE (@6))
5278 && VECTOR_TYPE_P (type)
5279 && tree_int_cst_le (TYPE_SIZE (TREE_TYPE (type)),
5280 TYPE_SIZE (TREE_TYPE (TREE_TYPE (@6))))
5281 && TYPE_SIZE (type) == TYPE_SIZE (TREE_TYPE (@6)))
5282 (with { tree vtype = TREE_TYPE (@6);}
5284 (vec_cond @4 (view_convert:vtype @2) (view_convert:vtype @3)))))))
5286 /* c1 ? c2 ? a : b : b --> (c1 & c2) ? a : b */
5288 (vec_cond @0 (vec_cond:s @1 @2 @3) @3)
5289 (if (optimize_vectors_before_lowering_p () && types_match (@0, @1))
5290 (vec_cond (bit_and @0 @1) @2 @3)))
5292 (vec_cond @0 @2 (vec_cond:s @1 @2 @3))
5293 (if (optimize_vectors_before_lowering_p () && types_match (@0, @1))
5294 (vec_cond (bit_ior @0 @1) @2 @3)))
5296 (vec_cond @0 (vec_cond:s @1 @2 @3) @2)
5297 (if (optimize_vectors_before_lowering_p () && types_match (@0, @1))
5298 (vec_cond (bit_ior (bit_not @0) @1) @2 @3)))
5300 (vec_cond @0 @3 (vec_cond:s @1 @2 @3))
5301 (if (optimize_vectors_before_lowering_p () && types_match (@0, @1))
5302 (vec_cond (bit_and (bit_not @0) @1) @2 @3)))
5305 /* Canonicalize mask ? { 0, ... } : { -1, ...} to ~mask if the mask
5306 types are compatible. */
5308 (vec_cond @0 VECTOR_CST@1 VECTOR_CST@2)
5309 (if (VECTOR_BOOLEAN_TYPE_P (type)
5310 && types_match (type, TREE_TYPE (@0)))
5311 (if (integer_zerop (@1) && integer_all_onesp (@2))
5313 (if (integer_all_onesp (@1) && integer_zerop (@2))
5316 /* A few simplifications of "a ? CST1 : CST2". */
5317 /* NOTE: Only do this on gimple, as the if-chain-to-switch
5318 optimization depends on the gimple having if statements in it. */
5321 (cond @0 INTEGER_CST@1 INTEGER_CST@2)
5323 (if (integer_zerop (@2))
5325 /* a ? 1 : 0 -> a if 0 and 1 are of integral type. */
5326 (if (integer_onep (@1))
5327 (convert (convert:boolean_type_node @0)))
5328 /* a ? -1 : 0 -> -a. */
5329 (if (INTEGRAL_TYPE_P (type) && integer_all_onesp (@1))
5330 (if (TYPE_PRECISION (type) == 1)
5331 /* For signed 1-bit precision just cast bool to the type. */
5332 (convert (convert:boolean_type_node @0))
5333 (if (TREE_CODE (type) == BOOLEAN_TYPE)
5335 tree intt = build_nonstandard_integer_type (TYPE_PRECISION (type),
5336 TYPE_UNSIGNED (type));
5338 (convert (negate (convert:intt (convert:boolean_type_node @0)))))
5339 (negate (convert:type (convert:boolean_type_node @0))))))
5340 /* a ? powerof2cst : 0 -> a << (log2(powerof2cst)) */
5341 (if (INTEGRAL_TYPE_P (type) && integer_pow2p (@1))
5343 tree shift = build_int_cst (integer_type_node, tree_log2 (@1));
5345 (lshift (convert (convert:boolean_type_node @0)) { shift; })))))
5346 (if (integer_zerop (@1))
5348 /* a ? 0 : 1 -> !a. */
5349 (if (integer_onep (@2))
5350 (convert (bit_xor (convert:boolean_type_node @0) { boolean_true_node; })))
5351 /* a ? 0 : -1 -> -(!a). */
5352 (if (INTEGRAL_TYPE_P (type) && integer_all_onesp (@2))
5353 (if (TYPE_PRECISION (type) == 1)
5354 /* For signed 1-bit precision just cast bool to the type. */
5355 (convert (bit_xor (convert:boolean_type_node @0) { boolean_true_node; }))
5356 (if (TREE_CODE (type) == BOOLEAN_TYPE)
5358 tree intt = build_nonstandard_integer_type (TYPE_PRECISION (type),
5359 TYPE_UNSIGNED (type));
5361 (convert (negate (convert:intt (bit_xor (convert:boolean_type_node @0)
5362 { boolean_true_node; })))))
5363 (negate (convert:type (bit_xor (convert:boolean_type_node @0)
5364 { boolean_true_node; }))))))
5365 /* a ? 0 : powerof2cst -> (!a) << (log2(powerof2cst)) */
5366 (if (INTEGRAL_TYPE_P (type) && integer_pow2p (@2))
5368 tree shift = build_int_cst (integer_type_node, tree_log2 (@2));
5370 (lshift (convert (bit_xor (convert:boolean_type_node @0)
5371 { boolean_true_node; })) { shift; })))))))
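/* Worked examples (illustration only, GIMPLE only as noted above): for int a,
     a ? 1 : 0    is expected to fold to   (int) (a != 0)
     a ? -1 : 0   is expected to fold to  -(int) (a != 0)
     a ? 4 : 0    is expected to fold to   (int) (a != 0) << 2
     a ? 0 : 1    is expected to fold to   (int) (a == 0)
     a ? 0 : 4    is expected to fold to   (int) (a == 0) << 2
   where a != 0 / a == 0 stand for the boolean_type_node conversions used
   in the result expressions.  */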
5373 /* (a > 1) ? 0 : (cast)a is the same as (cast)(a == 1)
5374 for unsigned types. */
5376 (cond (gt @0 integer_onep@1) integer_zerop (convert? @2))
5377 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
5378 && bitwise_equal_p (@0, @2))
5379 (convert (eq @0 @1))
5383 /* (a <= 1) & (cast)a is the same as (cast)(a == 1)
5384 for unsigned types. */
5386 (bit_and:c (convert1? (le @0 integer_onep@1)) (convert2? @2))
5387 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
5388 && bitwise_equal_p (@0, @2))
5389 (convert (eq @0 @1))
5393 /* `(a == CST) & a` can be simplified to `0` or `(a == CST)` depending
5394 on the first bit of the CST. */
5396 (bit_and:c (convert@2 (eq @0 INTEGER_CST@1)) (convert? @0))
5397 (if ((wi::to_wide (@1) & 1) != 0)
5399 { build_zero_cst (type); }))
/* Optimize
5402 # x_5 in range [cst1, cst2] where cst2 = cst1 + 1
5403 x_5 == cstN ? cst4 : cst3
5404 # op is == or != and N is 1 or 2
5405 to r_6 = x_5 + (min (cst3, cst4) - cst1) or
5406 r_6 = (min (cst3, cst4) + cst1) - x_5 depending on op, N and which
5407 of cst3 and cst4 is smaller.
5408 This was originally done by two_value_replacement in phiopt (PR 88676). */
5411 (cond (eqne SSA_NAME@0 INTEGER_CST@1) INTEGER_CST@2 INTEGER_CST@3)
5412 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
5413 && INTEGRAL_TYPE_P (type)
5414 && (wi::to_widest (@2) + 1 == wi::to_widest (@3)
5415 || wi::to_widest (@2) == wi::to_widest (@3) + 1))
5418 get_range_query (cfun)->range_of_expr (r, @0);
5419 if (r.undefined_p ())
5420 r.set_varying (TREE_TYPE (@0));
5422 wide_int min = r.lower_bound ();
5423 wide_int max = r.upper_bound ();
5426 && (wi::to_wide (@1) == min
5427 || wi::to_wide (@1) == max))
5429 tree arg0 = @2, arg1 = @3;
5431 if ((eqne == EQ_EXPR) ^ (wi::to_wide (@1) == min))
5432 std::swap (arg0, arg1);
5433 if (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (type))
5434 type1 = TREE_TYPE (@0);
5437 auto prec = TYPE_PRECISION (type1);
5438 auto unsign = TYPE_UNSIGNED (type1);
5439 if (TREE_CODE (type1) == BOOLEAN_TYPE)
5440 type1 = build_nonstandard_integer_type (prec, unsign);
5441 min = wide_int::from (min, prec,
5442 TYPE_SIGN (TREE_TYPE (@0)));
5443 wide_int a = wide_int::from (wi::to_wide (arg0), prec,
5445 enum tree_code code;
5446 wi::overflow_type ovf;
5447 if (tree_int_cst_lt (arg0, arg1))
5453 /* lhs is known to be in range [min, min+1] and we want to add a
5454 to it. Check if that operation can overflow for those 2 values
5455 and if so, force an unsigned type. */
5456 wi::add (min + (wi::neg_p (a) ? 0 : 1), a, SIGNED, &ovf);
5458 type1 = unsigned_type_for (type1);
5467 /* lhs is known to be in range [min, min+1] and we want to subtract
5468 it from a. Check if that operation can overflow for those 2
5469 values and if so, force an unsigned type. */
5470 wi::sub (a, min + (wi::neg_p (min) ? 0 : 1), SIGNED, &ovf);
5472 type1 = unsigned_type_for (type1);
5475 tree arg = wide_int_to_tree (type1, a);
5477 (if (code == PLUS_EXPR)
5478 (convert (plus (convert:type1 @0) { arg; }))
5479 (convert (minus { arg; } (convert:type1 @0))))))))))
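/* Worked example (illustration only): if range analysis shows x to be in
   [0, 1], then
     x == 0 ? 5 : 6   is expected to fold to  x + 5
     x == 1 ? 5 : 6   is expected to fold to  6 - x
   matching the r_6 forms described in the comment above.  */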
5483 (convert (cond@0 @1 INTEGER_CST@2 INTEGER_CST@3))
5484 (if (INTEGRAL_TYPE_P (type)
5485 && INTEGRAL_TYPE_P (TREE_TYPE (@0)))
5486 (cond @1 (convert @2) (convert @3))))
5488 /* Simplification moved from fold_cond_expr_with_comparison. It may also
5490 /* This pattern implements two kinds of simplification:
5493 (cond (cmp (convert1? x) c1) (convert2? x) c2) -> (minmax (x c)) if:
5494 1) Conversions are type widening from smaller type.
5495 2) Const c1 equals c2 after canonicalizing the comparison.
5496 3) Comparison has tree code LT, LE, GT or GE.
5497 This specific pattern is needed when (cmp (convert x) c) may not
5498 be simplified by comparison patterns because of multiple uses of
5499 x. It also makes sense here because simplifying across multiple
5500 referenced variables is always beneficial for complicated cases.
5503 (cond (eq (convert1? x) c1) (convert2? x) c2) -> (cond (eq x c1) c1 c2). */
5504 (for cmp (lt le gt ge eq ne)
5506 (cond (cmp (convert1? @1) INTEGER_CST@3) (convert2? @1) INTEGER_CST@2)
5509 tree from_type = TREE_TYPE (@1);
5510 tree c1_type = TREE_TYPE (@3), c2_type = TREE_TYPE (@2);
5511 enum tree_code code = ERROR_MARK;
5513 if (INTEGRAL_TYPE_P (from_type)
5514 && int_fits_type_p (@2, from_type)
5515 && (types_match (c1_type, from_type)
5516 || (TYPE_PRECISION (c1_type) > TYPE_PRECISION (from_type)
5517 && (TYPE_UNSIGNED (from_type)
5518 || TYPE_SIGN (c1_type) == TYPE_SIGN (from_type))))
5519 && (types_match (c2_type, from_type)
5520 || (TYPE_PRECISION (c2_type) > TYPE_PRECISION (from_type)
5521 && (TYPE_UNSIGNED (from_type)
5522 || TYPE_SIGN (c2_type) == TYPE_SIGN (from_type)))))
5525 code = minmax_from_comparison (cmp, @1, @3, @1, @2);
5526 /* Can do A == C1 ? A : C2 -> A == C1 ? C1 : C2? */
5527 else if (int_fits_type_p (@3, from_type))
5531 (if (code == MAX_EXPR)
5532 (convert (max @1 (convert @2)))
5533 (if (code == MIN_EXPR)
5534 (convert (min @1 (convert @2)))
5535 (if (code == EQ_EXPR)
5536 (convert (cond (eq @1 (convert @3))
5537 (convert:from_type @3) (convert:from_type @2)))))))))
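/* Worked examples (illustration only): for int x,
     x < 100 ? x : 100   is expected to fold to  MIN_EXPR <x, 100>
     x > 100 ? x : 100   is expected to fold to  MAX_EXPR <x, 100>
   and for the eq form,
     x == 5 ? x : 7      is expected to fold to  x == 5 ? 5 : 7.  */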
5539 /* (cond (cmp (convert? x) c1) (op x c2) c3) -> (op (minmax x c1) c2) if:
5541 1) OP is PLUS or MINUS.
5542 2) CMP is LT, LE, GT or GE.
5543 3) C3 == (C1 op C2), and computation doesn't have undefined behavior.
5545 This pattern also handles special cases like:
5547 A) Operand x is an unsigned to signed type conversion and c1 is
5548 integer zero. In this case,
5549 (signed type)x < 0 <=> x > MAX_VAL(signed type)
5550 (signed type)x >= 0 <=> x <= MAX_VAL(signed type)
5551 B) Const c1 may not be equal to (C3 op' C2). In this case we also
5552 check equality for (c1+1) and (c1-1) by adjusting the comparison code.
5555 TODO: Though signed type is handled by this pattern, it cannot be
5556 simplified at the moment because C standard requires additional
5557 type promotion. In order to match&simplify it here, the IR needs
5558 to be cleaned up by other optimizers, e.g. VRP. */
5559 (for op (plus minus)
5560 (for cmp (lt le gt ge)
5562 (cond (cmp (convert? @X) INTEGER_CST@1) (op @X INTEGER_CST@2) INTEGER_CST@3)
5563 (with { tree from_type = TREE_TYPE (@X), to_type = TREE_TYPE (@1); }
5564 (if (types_match (from_type, to_type)
5565 /* Check if it is special case A). */
5566 || (TYPE_UNSIGNED (from_type)
5567 && !TYPE_UNSIGNED (to_type)
5568 && TYPE_PRECISION (from_type) == TYPE_PRECISION (to_type)
5569 && integer_zerop (@1)
5570 && (cmp == LT_EXPR || cmp == GE_EXPR)))
5573 wi::overflow_type overflow = wi::OVF_NONE;
5574 enum tree_code code, cmp_code = cmp;
5576 wide_int c1 = wi::to_wide (@1);
5577 wide_int c2 = wi::to_wide (@2);
5578 wide_int c3 = wi::to_wide (@3);
5579 signop sgn = TYPE_SIGN (from_type);
5581 /* Handle special case A), given x of unsigned type:
5582 ((signed type)x < 0) <=> (x > MAX_VAL(signed type))
5583 ((signed type)x >= 0) <=> (x <= MAX_VAL(signed type)) */
5584 if (!types_match (from_type, to_type))
5586 if (cmp_code == LT_EXPR)
5588 if (cmp_code == GE_EXPR)
5590 c1 = wi::max_value (to_type);
5592 /* To simplify this pattern, we require c3 = (c1 op c2). Here we
5593 compute (c3 op' c2) and check if it equals c1, with op' being
5594 the inverted operator of op. Make sure overflow doesn't happen
5595 if it is undefined. */
5596 if (op == PLUS_EXPR)
5597 real_c1 = wi::sub (c3, c2, sgn, &overflow);
5599 real_c1 = wi::add (c3, c2, sgn, &overflow);
5602 if (!overflow || !TYPE_OVERFLOW_UNDEFINED (from_type))
5604 /* Check if c1 equals real_c1. The boundary condition is handled
5605 by adjusting comparison operation if necessary. */
5606 if (!wi::cmp (wi::sub (real_c1, 1, sgn, &overflow), c1, sgn)
5609 /* X <= Y - 1 equals to X < Y. */
5610 if (cmp_code == LE_EXPR)
5612 /* X > Y - 1 equals to X >= Y. */
5613 if (cmp_code == GT_EXPR)
5616 if (!wi::cmp (wi::add (real_c1, 1, sgn, &overflow), c1, sgn)
5619 /* X < Y + 1 equals to X <= Y. */
5620 if (cmp_code == LT_EXPR)
5622 /* X >= Y + 1 equals to X > Y. */
5623 if (cmp_code == GE_EXPR)
5626 if (code != cmp_code || !wi::cmp (real_c1, c1, sgn))
5628 if (cmp_code == LT_EXPR || cmp_code == LE_EXPR)
5630 if (cmp_code == GT_EXPR || cmp_code == GE_EXPR)
5635 (if (code == MAX_EXPR)
5636 (op (max @X { wide_int_to_tree (from_type, real_c1); })
5637 { wide_int_to_tree (from_type, c2); })
5638 (if (code == MIN_EXPR)
5639 (op (min @X { wide_int_to_tree (from_type, real_c1); })
5640 { wide_int_to_tree (from_type, c2); })))))))))
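/* Worked example (illustration only): with op = plus, cmp = lt, C1 = 10,
   C2 = 5 and C3 = 15 = C1 + C2,
     x < 10 ? x + 5 : 15
   is expected to fold to  MIN_EXPR <x, 10> + 5.  */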
5643 /* A >= B ? A : B -> max (A, B) and friends. The code is still
5644 in fold_cond_expr_with_comparison for GENERIC folding with
5645 some extra constraints. */
5646 (for cmp (eq ne le lt unle unlt ge gt unge ungt uneq ltgt)
5648 (cond (cmp:c (nop_convert1?@c0 @0) (nop_convert2?@c1 @1))
5649 (convert3? @0) (convert4? @1))
5650 (if (!HONOR_SIGNED_ZEROS (type)
5651 && (/* Allow widening conversions of the compare operands as data. */
5652 (INTEGRAL_TYPE_P (type)
5653 && types_match (TREE_TYPE (@c0), TREE_TYPE (@0))
5654 && types_match (TREE_TYPE (@c1), TREE_TYPE (@1))
5655 && TYPE_PRECISION (TREE_TYPE (@0)) <= TYPE_PRECISION (type)
5656 && TYPE_PRECISION (TREE_TYPE (@1)) <= TYPE_PRECISION (type))
5657 /* Or sign conversions for the comparison. */
5658 || (types_match (type, TREE_TYPE (@0))
5659 && types_match (type, TREE_TYPE (@1)))))
5661 (if (cmp == EQ_EXPR)
5662 (if (VECTOR_TYPE_P (type))
5665 (if (cmp == NE_EXPR)
5666 (if (VECTOR_TYPE_P (type))
5669 (if (cmp == LE_EXPR || cmp == UNLE_EXPR || cmp == LT_EXPR || cmp == UNLT_EXPR)
5670 (if (!HONOR_NANS (type))
5671 (if (VECTOR_TYPE_P (type))
5672 (view_convert (min @c0 @c1))
5673 (convert (min @c0 @c1)))))
5674 (if (cmp == GE_EXPR || cmp == UNGE_EXPR || cmp == GT_EXPR || cmp == UNGT_EXPR)
5675 (if (!HONOR_NANS (type))
5676 (if (VECTOR_TYPE_P (type))
5677 (view_convert (max @c0 @c1))
5678 (convert (max @c0 @c1)))))
5679 (if (cmp == UNEQ_EXPR)
5680 (if (!HONOR_NANS (type))
5681 (if (VECTOR_TYPE_P (type))
5684 (if (cmp == LTGT_EXPR)
5685 (if (!HONOR_NANS (type))
5686 (if (VECTOR_TYPE_P (type))
5688 (convert @c0))))))))
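/* Worked examples (illustration only): for int a and b,
     a >= b ? a : b   is expected to fold to  MAX_EXPR <a, b>
     a <  b ? a : b   is expected to fold to  MIN_EXPR <a, b>
     a == b ? a : b   is expected to fold to  b
     a != b ? a : b   is expected to fold to  a.  */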
5690 /* This is for VEC_COND_EXPR
5691 Optimize A < B ? A : B to MIN (A, B)
5692 A > B ? A : B to MAX (A, B). */
5693 (for cmp (lt le ungt unge gt ge unlt unle)
5694 minmax (min min min min max max max max)
5695 MINMAX (MIN_EXPR MIN_EXPR MIN_EXPR MIN_EXPR MAX_EXPR MAX_EXPR MAX_EXPR MAX_EXPR)
5697 (vec_cond (cmp @0 @1) @0 @1)
5698 (if (VECTOR_INTEGER_TYPE_P (type)
5699 && target_supports_op_p (type, MINMAX, optab_vector))
5702 (for cmp (lt le ungt unge gt ge unlt unle)
5703 minmax (max max max max min min min min)
5704 MINMAX (MAX_EXPR MAX_EXPR MAX_EXPR MAX_EXPR MIN_EXPR MIN_EXPR MIN_EXPR MIN_EXPR)
5706 (vec_cond (cmp @0 @1) @1 @0)
5707 (if (VECTOR_INTEGER_TYPE_P (type)
5708 && target_supports_op_p (type, MINMAX, optab_vector))
5712 (for cnd (cond vec_cond)
5713 /* (a != b) ? (a - b) : 0 -> (a - b) */
5715 (cnd (ne:c @0 @1) (minus@2 @0 @1) integer_zerop)
5717 /* (a != b) ? (a ^ b) : 0 -> (a ^ b) */
5719 (cnd (ne:c @0 @1) (bit_xor:c@2 @0 @1) integer_zerop)
5721 /* (a != b) ? (a & b) : a -> (a & b) */
5722 /* (a != b) ? (a | b) : a -> (a | b) */
5723 /* (a != b) ? min(a,b) : a -> min(a,b) */
5724 /* (a != b) ? max(a,b) : a -> max(a,b) */
5725 (for op (bit_and bit_ior min max)
5727 (cnd (ne:c @0 @1) (op:c@2 @0 @1) @0)
5729 /* (a != b) ? (a * b) : (a * a) -> (a * b) */
5730 /* (a != b) ? (a + b) : (a + a) -> (a + b) */
5733 (cnd (ne:c @0 @1) (op@2 @0 @1) (op @0 @0))
5734 (if (ANY_INTEGRAL_TYPE_P (type))
5736 /* (a != b) ? (a + b) : (2 * a) -> (a + b) */
5738 (cnd (ne:c @0 @1) (plus:c@2 @0 @1) (mult @0 uniform_integer_cst_p@3))
5739 (if (wi::to_wide (uniform_integer_cst_p (@3)) == 2)
5743 /* These were part of the minmax phiopt pass. */
5744 /* Optimize (a CMP b) ? minmax<a, c> : minmax<b, c>
5745 to minmax<min/max<a, b>, c> */
5746 (for minmax (min max)
5747 (for cmp (lt le gt ge ne)
5749 (cond (cmp:c @1 @3) (minmax:c @1 @4) (minmax:c @2 @4))
5752 tree_code code = minmax_from_comparison (cmp, @1, @2, @1, @3);
5754 (if (code == MIN_EXPR)
5755 (minmax (min @1 @2) @4)
5756 (if (code == MAX_EXPR)
5757 (minmax (max @1 @2) @4)))))))
5759 /* Optimize (a CMP CST1) ? max<a,CST2> : a */
5760 (for cmp (gt ge lt le)
5761 minmax (min min max max)
5763 (cond (cmp:c @0 @1) (minmax:c@2 @0 @3) @4)
5766 tree_code code = minmax_from_comparison (cmp, @0, @1, @0, @4);
5768 (if ((cmp == LT_EXPR || cmp == LE_EXPR)
5770 && integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node, @3, @4)))
5772 (if ((cmp == GT_EXPR || cmp == GE_EXPR)
5774 && integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node, @3, @4)))
5778 /* These patterns should be after min/max detection as simplifications
5779 of `(type)(zero_one ==/!= 0)` to `(type)(zero_one)`
5780 and `(type)(zero_one^1)` are not done yet. See PR 110637.
5781 Even without those, reaching min/max/and/ior faster is better. */
5783 (cond @0 zero_one_valued_p@1 zero_one_valued_p@2)
5785 /* bool0 ? bool1 : 0 -> bool0 & bool1 */
5786 (if (integer_zerop (@2))
5787 (bit_and (convert @0) @1))
5788 /* bool0 ? 0 : bool2 -> (bool0^1) & bool2 */
5789 (if (integer_zerop (@1))
5790 (bit_and (bit_xor (convert @0) { build_one_cst (type); } ) @2))
5791 /* bool0 ? 1 : bool2 -> bool0 | bool2 */
5792 (if (integer_onep (@1))
5793 (bit_ior (convert @0) @2))
5794 /* bool0 ? bool1 : 1 -> (bool0^1) | bool1 */
5795 (if (integer_onep (@2))
5796 (bit_ior (bit_xor (convert @0) @2) @1))
5801 /* X != C1 ? -X : C2 simplifies to -X when -C1 == C2. */
5803 (cond (ne @0 INTEGER_CST@1) (negate@3 @0) INTEGER_CST@2)
5804 (if (!TYPE_SATURATING (type)
5805 && (TYPE_OVERFLOW_WRAPS (type)
5806 || !wi::only_sign_bit_p (wi::to_wide (@1)))
5807 && wi::eq_p (wi::neg (wi::to_wide (@1)), wi::to_wide (@2)))
5810 /* X != C1 ? ~X : C2 simplifies to ~X when ~C1 == C2. */
5812 (cond (ne @0 INTEGER_CST@1) (bit_not@3 @0) INTEGER_CST@2)
5813 (if (wi::eq_p (wi::bit_not (wi::to_wide (@1)), wi::to_wide (@2)))
5816 /* X != C1 ? abs(X) : C2 simplifies to abs(X) when abs(C1) == C2. */
5819 (cond (ne @0 INTEGER_CST@1) (op@3 @0) INTEGER_CST@2)
5820 (if (wi::abs (wi::to_wide (@1)) == wi::to_wide (@2))
5821 (if (op != ABSU_EXPR && wi::only_sign_bit_p (wi::to_wide (@1)))
5822 (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
5823 (convert (absu:utype @0)))
5826 /* (X + 1) > Y ? -X : 1 simplifies to X >= Y ? -X : 1 when
5827 X is unsigned, as when X + 1 overflows, X is -1, so -X == 1. */
5829 (cond (gt (plus @0 integer_onep) @1) (negate @0) integer_onep@2)
5830 (if (TYPE_UNSIGNED (type))
5831 (cond (ge @0 @1) (negate @0) @2)))
5833 (for cnd (cond vec_cond)
5834 /* A ? B : (A ? X : C) -> A ? B : C. */
5836 (cnd @0 (cnd @0 @1 @2) @3)
5839 (cnd @0 @1 (cnd @0 @2 @3))
5841 /* A ? B : (!A ? C : X) -> A ? B : C. */
5842 /* ??? This matches embedded conditions open-coded because genmatch
5843 would only generate matching code for conditions in separate stmts.
5844 The following is still important to merge the then and else arm cases
5845 from if-conversion. */
5847 (cnd @0 @1 (cnd @2 @3 @4))
5848 (if (inverse_conditions_p (@0, @2))
5851 (cnd @0 (cnd @1 @2 @3) @4)
5852 (if (inverse_conditions_p (@0, @1))
5855 /* A ? B : B -> B. */
5860 /* !A ? B : C -> A ? C : B. */
5862 (cnd (logical_inverted_value truth_valued_p@0) @1 @2)
5865 /* abs/negative simplifications moved from fold_cond_expr_with_comparison.
5867 None of these transformations work for modes with signed
5868 zeros. If A is +/-0, the first two transformations will
5869 change the sign of the result (from +0 to -0, or vice
5870 versa). The last four will fix the sign of the result,
5871 even though the original expressions could be positive or
5872 negative, depending on the sign of A.
5874 Note that all these transformations are correct if A is
5875 NaN, since the two alternatives (A and -A) are also NaNs. */
5877 (for cnd (cond vec_cond)
5878 /* A == 0 ? A : -A same as -A */
5881 (cnd (cmp @0 zerop) @2 (negate@1 @2))
5882 (if (!HONOR_SIGNED_ZEROS (type)
5883 && bitwise_equal_p (@0, @2))
5886 (cnd (cmp @0 zerop) zerop (negate@1 @2))
5887 (if (!HONOR_SIGNED_ZEROS (type)
5888 && bitwise_equal_p (@0, @2))
5891 /* A != 0 ? A : -A same as A */
5894 (cnd (cmp @0 zerop) @1 (negate @1))
5895 (if (!HONOR_SIGNED_ZEROS (type)
5896 && bitwise_equal_p (@0, @1))
5899 (cnd (cmp @0 zerop) @1 integer_zerop)
5900 (if (!HONOR_SIGNED_ZEROS (type)
5901 && bitwise_equal_p (@0, @1))
5904 /* A >=/> 0 ? A : -A same as abs (A) */
5907 (cnd (cmp @0 zerop) @1 (negate @1))
5908 (if (!HONOR_SIGNED_ZEROS (TREE_TYPE(@0))
5909 && !TYPE_UNSIGNED (TREE_TYPE(@0))
5910 && bitwise_equal_p (@0, @1))
5911 (if (TYPE_UNSIGNED (type))
5914 /* A <=/< 0 ? A : -A same as -abs (A) */
5917 (cnd (cmp @0 zerop) @1 (negate @1))
5918 (if (!HONOR_SIGNED_ZEROS (TREE_TYPE(@0))
5919 && !TYPE_UNSIGNED (TREE_TYPE(@0))
5920 && bitwise_equal_p (@0, @1))
5921 (if ((ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
5922 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
5923 || TYPE_UNSIGNED (type))
5925 tree utype = unsigned_type_for (TREE_TYPE(@0));
5927 (convert (negate (absu:utype @0))))
5928 (negate (abs @0)))))
5931 /* (A - B) == 0 ? (A - B) : (B - A) same as (B - A) */
5934 (cnd (cmp (minus@0 @1 @2) zerop) @0 (minus@3 @2 @1))
5935 (if (!HONOR_SIGNED_ZEROS (type))
5938 (cnd (cmp (minus@0 @1 @2) integer_zerop) integer_zerop (minus@3 @2 @1))
5941 /* (A - B) != 0 ? (A - B) : (B - A) same as (A - B) */
5944 (cnd (cmp (minus@0 @1 @2) zerop) @0 (minus @2 @1))
5945 (if (!HONOR_SIGNED_ZEROS (type))
5948 (cnd (cmp (minus@0 @1 @2) integer_zerop) @0 integer_zerop)
5951 /* (A - B) >=/> 0 ? (A - B) : (B - A) same as abs (A - B) */
5954 (cnd (cmp (minus@0 @1 @2) zerop) @0 (minus @2 @1))
5955 (if (!HONOR_SIGNED_ZEROS (type)
5956 && !TYPE_UNSIGNED (type))
5958 /* (A - B) <=/< 0 ? (A - B) : (B - A) same as -abs (A - B) */
5961 (cnd (cmp (minus@0 @1 @2) zerop) @0 (minus @2 @1))
5962 (if (!HONOR_SIGNED_ZEROS (type)
5963 && !TYPE_UNSIGNED (type))
5964 (if (ANY_INTEGRAL_TYPE_P (type)
5965 && !TYPE_OVERFLOW_WRAPS (type))
5967 tree utype = unsigned_type_for (type);
5969 (convert (negate (absu:utype @0))))
5970 (negate (abs @0)))))
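/* Worked examples (illustration only): for signed int x with undefined
   overflow,
     x >= 0 ? x : -x   is expected to fold to  ABS_EXPR <x>
     x <= 0 ? x : -x   is expected to fold to  (int) -ABSU_EXPR <x>
   (the negation being carried out in the corresponding unsigned type), and
     a - b == 0 ? a - b : b - a   is expected to fold to  b - a.  */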
5974 /* -(type)!A -> (type)A - 1. */
5976 (negate (convert?:s (logical_inverted_value:s @0)))
5977 (if (INTEGRAL_TYPE_P (type)
5978 && TREE_CODE (type) != BOOLEAN_TYPE
5979 && TYPE_PRECISION (type) > 1
5980 && TREE_CODE (@0) == SSA_NAME
5981 && ssa_name_has_boolean_range (@0))
5982 (plus (convert:type @0) { build_all_ones_cst (type); })))
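/* Worked example (illustration only): for an int t known, via
   ssa_name_has_boolean_range, to be 0 or 1,
     -(int) !t   i.e.  - (t == 0 ? 1 : 0)
   is expected to fold to  t - 1  (emitted as  t + -1).  */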
5984 /* A + (B vcmp C ? 1 : 0) -> A - (B vcmp C ? -1 : 0), since vector comparisons
5985 return all -1 or all 0 results. */
5986 /* ??? We could instead convert all instances of the vec_cond to negate,
5987 but that isn't necessarily a win on its own. */
5989 (plus:c @3 (view_convert? (vec_cond:s @0 integer_each_onep@1 integer_zerop@2)))
5990 (if (VECTOR_TYPE_P (type)
5991 && known_eq (TYPE_VECTOR_SUBPARTS (type),
5992 TYPE_VECTOR_SUBPARTS (TREE_TYPE (@1)))
5993 && (TYPE_MODE (TREE_TYPE (type))
5994 == TYPE_MODE (TREE_TYPE (TREE_TYPE (@1)))))
5995 (minus @3 (view_convert (vec_cond @0 (negate @1) @2)))))
5997 /* ... likewise A - (B vcmp C ? 1 : 0) -> A + (B vcmp C ? -1 : 0). */
5999 (minus @3 (view_convert? (vec_cond:s @0 integer_each_onep@1 integer_zerop@2)))
6000 (if (VECTOR_TYPE_P (type)
6001 && known_eq (TYPE_VECTOR_SUBPARTS (type),
6002 TYPE_VECTOR_SUBPARTS (TREE_TYPE (@1)))
6003 && (TYPE_MODE (TREE_TYPE (type))
6004 == TYPE_MODE (TREE_TYPE (TREE_TYPE (@1)))))
6005 (plus @3 (view_convert (vec_cond @0 (negate @1) @2)))))
6008 /* Simplifications of comparisons. */
6010 /* See if we can reduce the magnitude of a constant involved in a
6011 comparison by changing the comparison code. This is a canonicalization
6012 formerly done by maybe_canonicalize_comparison_1. */
6016 (cmp @0 uniform_integer_cst_p@1)
6017 (with { tree cst = uniform_integer_cst_p (@1); }
6018 (if (tree_int_cst_sgn (cst) == -1)
6019 (acmp @0 { build_uniform_cst (TREE_TYPE (@1),
6020 wide_int_to_tree (TREE_TYPE (cst),
6026 (cmp @0 uniform_integer_cst_p@1)
6027 (with { tree cst = uniform_integer_cst_p (@1); }
6028 (if (tree_int_cst_sgn (cst) == 1)
6029 (acmp @0 { build_uniform_cst (TREE_TYPE (@1),
6030 wide_int_to_tree (TREE_TYPE (cst),
6031 wi::to_wide (cst) - 1)); })))))
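/* Worked examples (illustration only): for int x,
     x >= 5    is expected to be canonicalized to  x > 4
     x <= -5   is expected to be canonicalized to  x < -4
   reducing the magnitude of the constant.  */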
6033 /* We can simplify a logical negation of a comparison to the
6034 inverted comparison. As we cannot compute an expression
6035 operator using invert_tree_comparison we have to simulate
6036 that with expression code iteration. */
6037 (for cmp (tcc_comparison)
6038 icmp (inverted_tcc_comparison)
6039 ncmp (inverted_tcc_comparison_with_nans)
6040 /* Ideally we'd like to combine the following two patterns
6041 and handle some more cases by using
6042 (logical_inverted_value (cmp @0 @1))
6043 here but for that genmatch would need to "inline" that.
6044 For now implement what forward_propagate_comparison did. */
6046 (bit_not (cmp @0 @1))
6047 (if (VECTOR_TYPE_P (type)
6048 || (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) == 1))
6049 /* Comparison inversion may be impossible for trapping math,
6050 invert_tree_comparison will tell us. But we can't use
6051 a computed operator in the replacement tree thus we have
6052 to play the trick below. */
6053 (with { enum tree_code ic = invert_tree_comparison
6054 (cmp, HONOR_NANS (@0)); }
6060 (bit_xor (cmp @0 @1) integer_truep)
6061 (with { enum tree_code ic = invert_tree_comparison
6062 (cmp, HONOR_NANS (@0)); }
6067 /* The following bits are handled by fold_binary_op_with_conditional_arg. */
6069 (ne (cmp@2 @0 @1) integer_zerop)
6070 (if (types_match (type, TREE_TYPE (@2)))
6073 (eq (cmp@2 @0 @1) integer_truep)
6074 (if (types_match (type, TREE_TYPE (@2)))
6077 (ne (cmp@2 @0 @1) integer_truep)
6078 (if (types_match (type, TREE_TYPE (@2)))
6079 (with { enum tree_code ic = invert_tree_comparison
6080 (cmp, HONOR_NANS (@0)); }
6086 (eq (cmp@2 @0 @1) integer_zerop)
6087 (if (types_match (type, TREE_TYPE (@2)))
6088 (with { enum tree_code ic = invert_tree_comparison
6089 (cmp, HONOR_NANS (@0)); }
6095 /* Transform comparisons of the form X - Y CMP 0 to X CMP Y.
6096 ??? The transformation is valid for the other operators if overflow
6097 is undefined for the type, but performing it here badly interacts
6098 with the transformation in fold_cond_expr_with_comparison which
6099 attempts to synthesize ABS_EXPR. */
6101 (for sub (minus pointer_diff)
6103 (cmp (sub@2 @0 @1) integer_zerop)
6104 (if (single_use (@2))
6107 /* Simplify (x < 0) ^ (y < 0) to (x ^ y) < 0 and
6108 (x >= 0) ^ (y >= 0) to (x ^ y) < 0. */
6111 (bit_xor (cmp:s @0 integer_zerop) (cmp:s @1 integer_zerop))
6112 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
6113 && !TYPE_UNSIGNED (TREE_TYPE (@0))
6114 && types_match (TREE_TYPE (@0), TREE_TYPE (@1)))
6115 (lt (bit_xor @0 @1) { build_zero_cst (TREE_TYPE (@0)); }))))
6116 /* Simplify (x < 0) ^ (y >= 0) to (x ^ y) >= 0 and
6117 (x >= 0) ^ (y < 0) to (x ^ y) >= 0. */
6119 (bit_xor:c (lt:s @0 integer_zerop) (ge:s @1 integer_zerop))
6120 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
6121 && !TYPE_UNSIGNED (TREE_TYPE (@0))
6122 && types_match (TREE_TYPE (@0), TREE_TYPE (@1)))
6123 (ge (bit_xor @0 @1) { build_zero_cst (TREE_TYPE (@0)); })))
6125 /* Transform comparisons of the form X * C1 CMP 0 to X CMP 0 in the
6126 signed arithmetic case. That form is created by the compiler
6127 often enough for folding it to be of value. One example is in
6128 computing loop trip counts after Operator Strength Reduction. */
6129 (for cmp (simple_comparison)
6130 scmp (swapped_simple_comparison)
6132 (cmp (mult@3 @0 INTEGER_CST@1) integer_zerop@2)
6133 /* Handle unfolded multiplication by zero. */
6134 (if (integer_zerop (@1))
6136 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
6137 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
6139 /* If @1 is negative we swap the sense of the comparison. */
6140 (if (tree_int_cst_sgn (@1) < 0)
6144 /* For integral types with undefined overflow fold
6145 x * C1 == C2 into x == C2 / C1 or false.
6146 If overflow wraps and C1 is odd, simplify to x == C2 / C1 in the ring
6150 (cmp (mult @0 INTEGER_CST@1) INTEGER_CST@2)
6151 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
6152 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
6153 && wi::to_wide (@1) != 0)
6154 (with { widest_int quot; }
6155 (if (wi::multiple_of_p (wi::to_widest (@2), wi::to_widest (@1),
6156 TYPE_SIGN (TREE_TYPE (@0)), "))
6157 (cmp @0 { wide_int_to_tree (TREE_TYPE (@0), quot); })
6158 { constant_boolean_node (cmp == NE_EXPR, type); }))
6159 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
6160 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))
6161 && (wi::bit_and (wi::to_wide (@1), 1) == 1))
6164 tree itype = TREE_TYPE (@0);
6165 int p = TYPE_PRECISION (itype);
6166 wide_int m = wi::one (p + 1) << p;
6167 wide_int a = wide_int::from (wi::to_wide (@1), p + 1, UNSIGNED);
6168 wide_int i = wide_int::from (wi::mod_inv (a, m),
6169 p, TYPE_SIGN (itype));
6170 wide_int_to_tree (itype, wi::mul (i, wi::to_wide (@2)));
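/* Worked examples (illustration only): with undefined signed overflow,
     x * 5 == 35   is expected to fold to  x == 7
     x * 5 == 36   is expected to fold to  false
   and for a wrapping unsigned type with an odd multiplier,
     x * 3u == 9u  is expected to fold to  x == 3u
   since 9 times the modular inverse of 3 is 3 in the ring modulo
   2^precision.  */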
6173 /* Simplify comparison of something with itself. For IEEE
6174 floating-point, we can only do some of these simplifications. */
6178 (if (! FLOAT_TYPE_P (TREE_TYPE (@0))
6179 || ! tree_expr_maybe_nan_p (@0))
6180 { constant_boolean_node (true, type); }
6182 /* With -ftrapping-math conversion to EQ loses an exception. */
6183 && (! FLOAT_TYPE_P (TREE_TYPE (@0))
6184 || ! flag_trapping_math))
6190 || ! FLOAT_TYPE_P (TREE_TYPE (@0))
6191 || ! tree_expr_maybe_nan_p (@0))
6192 { constant_boolean_node (false, type); })))
6193 (for cmp (unle unge uneq)
6196 { constant_boolean_node (true, type); }))
6197 (for cmp (unlt ungt)
6203 (if (!flag_trapping_math || !tree_expr_maybe_nan_p (@0))
6204 { constant_boolean_node (false, type); }))
6206 /* x == ~x -> false */
6207 /* x != ~x -> true */
6210 (cmp:c @0 (bit_not @0))
6211 { constant_boolean_node (cmp == NE_EXPR, type); }))
6213 /* Fold ~X op ~Y as Y op X. */
6214 (for cmp (simple_comparison)
6216 (cmp (nop_convert1?@4 (bit_not@2 @0)) (nop_convert2? (bit_not@3 @1)))
6217 (if (single_use (@2) && single_use (@3))
6218 (with { tree otype = TREE_TYPE (@4); }
6219 (cmp (convert:otype @1) (convert:otype @0))))))
6221 /* Fold ~X op C as X op' ~C, where op' is the swapped comparison. */
6222 (for cmp (simple_comparison)
6223 scmp (swapped_simple_comparison)
6225 (cmp (nop_convert? (bit_not@2 @0)) CONSTANT_CLASS_P@1)
6226 (if (single_use (@2)
6227 && (TREE_CODE (@1) == INTEGER_CST || TREE_CODE (@1) == VECTOR_CST))
6228 (with { tree otype = TREE_TYPE (@1); }
6229 (scmp (convert:otype @0) (bit_not @1))))))
6231 (for cmp (simple_comparison)
6234 /* IEEE doesn't distinguish +0 and -0 in comparisons. */
6236 /* a CMP (-0) -> a CMP 0 */
6237 (if (REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (@1)))
6238 (cmp @0 { build_real (TREE_TYPE (@1), dconst0); }))
6239 /* (-0) CMP b -> 0 CMP b. */
6240 (if (TREE_CODE (@0) == REAL_CST
6241 && REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (@0)))
6242 (cmp { build_real (TREE_TYPE (@0), dconst0); } @1))
6243 /* x != NaN is always true, other ops are always false. */
6244 (if (REAL_VALUE_ISNAN (TREE_REAL_CST (@1))
6245 && (cmp == EQ_EXPR || cmp == NE_EXPR || !flag_trapping_math)
6246 && !tree_expr_signaling_nan_p (@1)
6247 && !tree_expr_maybe_signaling_nan_p (@0))
6248 { constant_boolean_node (cmp == NE_EXPR, type); })
6249 /* NaN != y is always true, other ops are always false. */
6250 (if (TREE_CODE (@0) == REAL_CST
6251 && REAL_VALUE_ISNAN (TREE_REAL_CST (@0))
6252 && (cmp == EQ_EXPR || cmp == NE_EXPR || !flag_trapping_math)
6253 && !tree_expr_signaling_nan_p (@0)
6254 && !tree_expr_signaling_nan_p (@1))
6255 { constant_boolean_node (cmp == NE_EXPR, type); })
6256 /* Fold comparisons against infinity. */
6257 (if (REAL_VALUE_ISINF (TREE_REAL_CST (@1))
6258 && MODE_HAS_INFINITIES (TYPE_MODE (TREE_TYPE (@1))))
6261 REAL_VALUE_TYPE max;
6262 enum tree_code code = cmp;
6263 bool neg = REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1));
6265 code = swap_tree_comparison (code);
6268 /* x > +Inf is always false, if we ignore NaNs or exceptions. */
6269 (if (code == GT_EXPR
6270 && !(HONOR_NANS (@0) && flag_trapping_math))
6271 { constant_boolean_node (false, type); })
6272 (if (code == LE_EXPR)
6273 /* x <= +Inf is always true, if we don't care about NaNs. */
6274 (if (! HONOR_NANS (@0))
6275 { constant_boolean_node (true, type); }
6276 /* x <= +Inf is the same as x == x, i.e. !isnan(x), but this loses
6277 an "invalid" exception. */
6278 (if (!flag_trapping_math)
6280 /* x == +Inf and x >= +Inf are always equal to x > DBL_MAX, but
6281 for == this introduces an exception for x a NaN. */
6282 (if ((code == EQ_EXPR && !(HONOR_NANS (@0) && flag_trapping_math))
6284 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
6286 (lt @0 { build_real (TREE_TYPE (@0), max); })
6287 (gt @0 { build_real (TREE_TYPE (@0), max); }))))
6288 /* x < +Inf is always equal to x <= DBL_MAX. */
6289 (if (code == LT_EXPR)
6290 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
6292 (ge @0 { build_real (TREE_TYPE (@0), max); })
6293 (le @0 { build_real (TREE_TYPE (@0), max); }))))
6294 /* x != +Inf is always equal to !(x > DBL_MAX), but this introduces
6295 an exception for x a NaN so use an unordered comparison. */
6296 (if (code == NE_EXPR)
6297 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
6298 (if (! HONOR_NANS (@0))
6300 (ge @0 { build_real (TREE_TYPE (@0), max); })
6301 (le @0 { build_real (TREE_TYPE (@0), max); }))
6303 (unge @0 { build_real (TREE_TYPE (@0), max); })
6304 (unle @0 { build_real (TREE_TYPE (@0), max); }))))))))))
6306 /* If this is a comparison of a real constant with a PLUS_EXPR
6307 or a MINUS_EXPR of a real constant, we can convert it into a
6308 comparison with a revised real constant as long as no overflow
6309 occurs when unsafe_math_optimizations are enabled. */
6310 (if (flag_unsafe_math_optimizations)
6311 (for op (plus minus)
6313 (cmp (op @0 REAL_CST@1) REAL_CST@2)
6316 tree tem = const_binop (op == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR,
6317 TREE_TYPE (@1), @2, @1);
6319 (if (tem && !TREE_OVERFLOW (tem))
6320 (cmp @0 { tem; }))))))
6322 /* Likewise, we can simplify a comparison of a real constant with
6323 a MINUS_EXPR whose first operand is also a real constant, i.e.
6324 (c1 - x) < c2 becomes x > c1-c2. Reordering is allowed on
6325 floating-point types only if -fassociative-math is set. */
6326 (if (flag_associative_math)
6328 (cmp (minus REAL_CST@0 @1) REAL_CST@2)
6329 (with { tree tem = const_binop (MINUS_EXPR, TREE_TYPE (@1), @0, @2); }
6330 (if (tem && !TREE_OVERFLOW (tem))
6331 (cmp { tem; } @1)))))
6333 /* Fold comparisons against built-in math functions. */
6334 (if (flag_unsafe_math_optimizations && ! flag_errno_math)
6337 (cmp (sq @0) REAL_CST@1)
6339 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
6341 /* sqrt(x) < y is always false, if y is negative. */
6342 (if (cmp == EQ_EXPR || cmp == LT_EXPR || cmp == LE_EXPR)
6343 { constant_boolean_node (false, type); })
6344 /* sqrt(x) > y is always true, if y is negative and we
6345 don't care about NaNs, i.e. negative values of x. */
6346 (if (cmp == NE_EXPR || !HONOR_NANS (@0))
6347 { constant_boolean_node (true, type); })
6348 /* sqrt(x) > y is the same as x >= 0, if y is negative. */
6349 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })))
6350 (if (real_equal (TREE_REAL_CST_PTR (@1), &dconst0))
6352 /* sqrt(x) < 0 is always false. */
6353 (if (cmp == LT_EXPR)
6354 { constant_boolean_node (false, type); })
6355 /* sqrt(x) >= 0 is always true if we don't care about NaNs. */
6356 (if (cmp == GE_EXPR && !HONOR_NANS (@0))
6357 { constant_boolean_node (true, type); })
6358 /* sqrt(x) <= 0 -> x == 0. */
6359 (if (cmp == LE_EXPR)
6361 /* Otherwise sqrt(x) cmp 0 -> x cmp 0. Here cmp can be >=, >,
6362 == or !=. In the last case:
6364 (sqrt(x) != 0) == (NaN != 0) == true == (x != 0)
6366 if x is negative or NaN. Due to -funsafe-math-optimizations,
6367 the results for other x follow from natural arithmetic. */
6369 (if ((cmp == LT_EXPR
6373 && !REAL_VALUE_ISNAN (TREE_REAL_CST (@1))
6374 /* Give up for -frounding-math. */
6375 && !HONOR_SIGN_DEPENDENT_ROUNDING (TREE_TYPE (@0)))
6379 enum tree_code ncmp = cmp;
6380 const real_format *fmt
6381 = REAL_MODE_FORMAT (TYPE_MODE (TREE_TYPE (@0)));
6382 real_arithmetic (&c2, MULT_EXPR,
6383 &TREE_REAL_CST (@1), &TREE_REAL_CST (@1));
6384 real_convert (&c2, fmt, &c2);
6385 /* See PR91734: if c2 is inexact and sqrt(c2) < c (or sqrt(c2) >= c),
6386 then change LT_EXPR into LE_EXPR or GE_EXPR into GT_EXPR. */
6387 if (!REAL_VALUE_ISINF (c2))
6389 tree c3 = fold_const_call (CFN_SQRT, TREE_TYPE (@0),
6390 build_real (TREE_TYPE (@0), c2));
6391 if (c3 == NULL_TREE || TREE_CODE (c3) != REAL_CST)
6393 else if ((cmp == LT_EXPR || cmp == GE_EXPR)
6394 && real_less (&TREE_REAL_CST (c3), &TREE_REAL_CST (@1)))
6395 ncmp = cmp == LT_EXPR ? LE_EXPR : GT_EXPR;
6396 else if ((cmp == LE_EXPR || cmp == GT_EXPR)
6397 && real_less (&TREE_REAL_CST (@1), &TREE_REAL_CST (c3)))
6398 ncmp = cmp == LE_EXPR ? LT_EXPR : GE_EXPR;
6401 /* With rounding to even, sqrt of up to 3 different values
6402 gives the same normal result, so in some cases c2 needs to be adjusted. */
6404 REAL_VALUE_TYPE c2alt, tow;
6405 if (cmp == LT_EXPR || cmp == GE_EXPR)
6409 real_nextafter (&c2alt, fmt, &c2, &tow);
6410 real_convert (&c2alt, fmt, &c2alt);
6411 if (REAL_VALUE_ISINF (c2alt))
6415 c3 = fold_const_call (CFN_SQRT, TREE_TYPE (@0),
6416 build_real (TREE_TYPE (@0), c2alt));
6417 if (c3 == NULL_TREE || TREE_CODE (c3) != REAL_CST)
6419 else if (real_equal (&TREE_REAL_CST (c3),
6420 &TREE_REAL_CST (@1)))
6426 (if (cmp == GT_EXPR || cmp == GE_EXPR)
6427 (if (REAL_VALUE_ISINF (c2))
6428 /* sqrt(x) > y is x == +Inf, when y is very large. */
6429 (if (HONOR_INFINITIES (@0))
6430 (eq @0 { build_real (TREE_TYPE (@0), c2); })
6431 { constant_boolean_node (false, type); })
6432 /* sqrt(x) > c is the same as x > c*c. */
6433 (if (ncmp != ERROR_MARK)
6434 (if (ncmp == GE_EXPR)
6435 (ge @0 { build_real (TREE_TYPE (@0), c2); })
6436 (gt @0 { build_real (TREE_TYPE (@0), c2); }))))
6437 /* else if (cmp == LT_EXPR || cmp == LE_EXPR) */
6438 (if (REAL_VALUE_ISINF (c2))
6440 /* sqrt(x) < y is always true, when y is a very large
6441 value and we don't care about NaNs or Infinities. */
6442 (if (! HONOR_NANS (@0) && ! HONOR_INFINITIES (@0))
6443 { constant_boolean_node (true, type); })
6444 /* sqrt(x) < y is x != +Inf when y is very large and we
6445 don't care about NaNs. */
6446 (if (! HONOR_NANS (@0))
6447 (ne @0 { build_real (TREE_TYPE (@0), c2); }))
6448 /* sqrt(x) < y is x >= 0 when y is very large and we
6449 don't care about Infinities. */
6450 (if (! HONOR_INFINITIES (@0))
6451 (ge @0 { build_real (TREE_TYPE (@0), dconst0); }))
6452 /* sqrt(x) < y is x >= 0 && x != +Inf, when y is large. */
6455 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
6456 (ne @0 { build_real (TREE_TYPE (@0), c2); }))))
6457 /* sqrt(x) < c is the same as x < c*c, if we ignore NaNs. */
6458 (if (ncmp != ERROR_MARK && ! HONOR_NANS (@0))
6459 (if (ncmp == LT_EXPR)
6460 (lt @0 { build_real (TREE_TYPE (@0), c2); })
6461 (le @0 { build_real (TREE_TYPE (@0), c2); }))
6462 /* sqrt(x) < c is the same as x >= 0 && x < c*c. */
6463 (if (ncmp != ERROR_MARK && GENERIC)
6464 (if (ncmp == LT_EXPR)
6466 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
6467 (lt @0 { build_real (TREE_TYPE (@0), c2); }))
6469 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
6470 (le @0 { build_real (TREE_TYPE (@0), c2); })))))))))))
6471 /* Transform sqrt(x) cmp sqrt(y) -> x cmp y. */
6473 (cmp (sq @0) (sq @1))
6474 (if (! HONOR_NANS (@0))
6477 /* Optimize various special cases of (FTYPE) N CMP (FTYPE) M. */
6478 (for cmp (lt le eq ne ge gt unordered ordered unlt unle ungt unge uneq ltgt)
6479 icmp (lt le eq ne ge gt unordered ordered lt le gt ge eq ne)
6481 (cmp (float@0 @1) (float @2))
6482 (if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (@0))
6483 && ! DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@0)))
6486 format_helper fmt (REAL_MODE_FORMAT (TYPE_MODE (TREE_TYPE (@0))));
6487 tree type1 = TREE_TYPE (@1);
6488 bool type1_signed_p = TYPE_SIGN (type1) == SIGNED;
6489 tree type2 = TREE_TYPE (@2);
6490 bool type2_signed_p = TYPE_SIGN (type2) == SIGNED;
6492 (if (fmt.can_represent_integral_type_p (type1)
6493 && fmt.can_represent_integral_type_p (type2))
6494 (if (cmp == ORDERED_EXPR || cmp == UNORDERED_EXPR)
6495 { constant_boolean_node (cmp == ORDERED_EXPR, type); }
6496 (if (TYPE_PRECISION (type1) > TYPE_PRECISION (type2)
6497 && type1_signed_p >= type2_signed_p)
6498 (icmp @1 (convert @2))
6499 (if (TYPE_PRECISION (type1) < TYPE_PRECISION (type2)
6500 && type1_signed_p <= type2_signed_p)
6501 (icmp (convert:type2 @1) @2)
6502 (if (TYPE_PRECISION (type1) == TYPE_PRECISION (type2)
6503 && type1_signed_p == type2_signed_p)
6504 (icmp @1 @2))))))))))
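/* Worked example (illustration only): for 32-bit int i and j compared as
   double,
     (double) i < (double) j
   is expected to fold to  i < j,  since the double format can represent
   every 32-bit integer exactly.  */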
6506 /* Optimize various special cases of (FTYPE) N CMP CST. */
6507 (for cmp (lt le eq ne ge gt)
6508 icmp (le le eq ne ge ge)
6510 (cmp (float @0) REAL_CST@1)
6511 (if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (@1))
6512 && ! DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1)))
6515 tree itype = TREE_TYPE (@0);
6516 format_helper fmt (REAL_MODE_FORMAT (TYPE_MODE (TREE_TYPE (@1))));
6517 const REAL_VALUE_TYPE *cst = TREE_REAL_CST_PTR (@1);
6518 /* Be careful to preserve any potential exceptions due to
6519 NaNs. qNaNs are ok in == or != context.
6520 TODO: relax under -fno-trapping-math or
6521 -fno-signaling-nans. */
6523 = real_isnan (cst) && (cst->signalling
6524 || (cmp != EQ_EXPR && cmp != NE_EXPR));
6526 /* TODO: allow non-fitting itype and SNaNs when
6527 -fno-trapping-math. */
6528 (if (fmt.can_represent_integral_type_p (itype) && ! exception_p)
6531 signop isign = TYPE_SIGN (itype);
6532 REAL_VALUE_TYPE imin, imax;
6533 real_from_integer (&imin, fmt, wi::min_value (itype), isign);
6534 real_from_integer (&imax, fmt, wi::max_value (itype), isign);
6536 REAL_VALUE_TYPE icst;
6537 if (cmp == GT_EXPR || cmp == GE_EXPR)
6538 real_ceil (&icst, fmt, cst);
6539 else if (cmp == LT_EXPR || cmp == LE_EXPR)
6540 real_floor (&icst, fmt, cst);
6542 real_trunc (&icst, fmt, cst);
6544 bool cst_int_p = !real_isnan (cst) && real_identical (&icst, cst);
6546 bool overflow_p = false;
6548 = real_to_integer (&icst, &overflow_p, TYPE_PRECISION (itype));
6551 /* Optimize cases when CST is outside of ITYPE's range. */
6552 (if (real_compare (LT_EXPR, cst, &imin))
6553 { constant_boolean_node (cmp == GT_EXPR || cmp == GE_EXPR || cmp == NE_EXPR,
6555 (if (real_compare (GT_EXPR, cst, &imax))
6556 { constant_boolean_node (cmp == LT_EXPR || cmp == LE_EXPR || cmp == NE_EXPR,
6558 /* Remove cast if CST is an integer representable by ITYPE. */
6560 (cmp @0 { gcc_assert (!overflow_p);
6561 wide_int_to_tree (itype, icst_val); })
6563 /* When CST is fractional, optimize
6564 (FTYPE) N == CST -> 0
6565 (FTYPE) N != CST -> 1. */
6566 (if (cmp == EQ_EXPR || cmp == NE_EXPR)
6567 { constant_boolean_node (cmp == NE_EXPR, type); })
6568 /* Otherwise replace with sensible integer constant. */
6571 gcc_checking_assert (!overflow_p);
6573 (icmp @0 { wide_int_to_tree (itype, icst_val); })))))))))
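/* Worked examples (illustration only): for 32-bit int i,
     (double) i >  2.5       is expected to fold to  i >= 3
     (double) i == 2.5       is expected to fold to  false
     (double) i <  1.0e100   is expected to fold to  true
   the last because the constant lies above the maximum value of the
   integer type.  */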
6575 /* Fold A /[ex] B CMP C to A CMP B * C. */
6578 (cmp (exact_div @0 @1) INTEGER_CST@2)
6579 (if (!integer_zerop (@1))
6580 (if (wi::to_wide (@2) == 0)
6582 (if (TREE_CODE (@1) == INTEGER_CST)
6585 wi::overflow_type ovf;
6586 wide_int prod = wi::mul (wi::to_wide (@2), wi::to_wide (@1),
6587 TYPE_SIGN (TREE_TYPE (@1)), &ovf);
6590 { constant_boolean_node (cmp == NE_EXPR, type); }
6591 (cmp @0 { wide_int_to_tree (TREE_TYPE (@0), prod); }))))))))
6592 (for cmp (lt le gt ge)
6594 (cmp (exact_div @0 INTEGER_CST@1) INTEGER_CST@2)
6595 (if (wi::gt_p (wi::to_wide (@1), 0, TYPE_SIGN (TREE_TYPE (@1))))
6598 wi::overflow_type ovf;
6599 wide_int prod = wi::mul (wi::to_wide (@2), wi::to_wide (@1),
6600 TYPE_SIGN (TREE_TYPE (@1)), &ovf);
6603 { constant_boolean_node (wi::lt_p (wi::to_wide (@2), 0,
6604 TYPE_SIGN (TREE_TYPE (@2)))
6605 != (cmp == LT_EXPR || cmp == LE_EXPR), type); }
6606 (cmp @0 { wide_int_to_tree (TREE_TYPE (@0), prod); }))))))
6608 /* Fold (size_t)(A /[ex] B) CMP C to (size_t)A CMP (size_t)B * C or A CMP' 0.
6610 For small C (less than max/B), this is (size_t)A CMP (size_t)B * C.
6611 For large C (more than min/B+2^size), this is also true, with the
6612 multiplication computed modulo 2^size.
6613 For intermediate C, this just tests the sign of A. */
6614 (for cmp (lt le gt ge)
6617 (cmp (convert (exact_div @0 INTEGER_CST@1)) INTEGER_CST@2)
6618 (if (tree_nop_conversion_p (TREE_TYPE (@0), TREE_TYPE (@2))
6619 && TYPE_UNSIGNED (TREE_TYPE (@2)) && !TYPE_UNSIGNED (TREE_TYPE (@0))
6620 && wi::gt_p (wi::to_wide (@1), 0, TYPE_SIGN (TREE_TYPE (@1))))
6623 tree utype = TREE_TYPE (@2);
6624 wide_int denom = wi::to_wide (@1);
6625 wide_int right = wi::to_wide (@2);
6626 wide_int smax = wi::sdiv_trunc (wi::max_value (TREE_TYPE (@0)), denom);
6627 wide_int smin = wi::sdiv_trunc (wi::min_value (TREE_TYPE (@0)), denom);
6628 bool small = wi::leu_p (right, smax);
6629 bool large = wi::geu_p (right, smin);
6631 (if (small || large)
6632 (cmp (convert:utype @0) (mult @2 (convert @1)))
6633 (cmp2 @0 { build_zero_cst (TREE_TYPE (@0)); }))))))
6635 /* Unordered tests if either argument is a NaN. */
6637 (bit_ior (unordered @0 @0) (unordered @1 @1))
6638 (if (types_match (@0, @1))
6641 (bit_and (ordered @0 @0) (ordered @1 @1))
6642 (if (types_match (@0, @1))
6645 (bit_ior:c (unordered @0 @0) (unordered:c@2 @0 @1))
6648 (bit_and:c (ordered @0 @0) (ordered:c@2 @0 @1))
6651 /* A & (2**N - 1) <= 2**K - 1 -> A & (2**N - 2**K) == 0
6652 A & (2**N - 1) > 2**K - 1 -> A & (2**N - 2**K) != 0
6654 Note that comparisons
6655 A & (2**N - 1) < 2**K -> A & (2**N - 2**K) == 0
6656 A & (2**N - 1) >= 2**K -> A & (2**N - 2**K) != 0
6657 will be canonicalized to above so there's no need to
6664 (cmp (bit_and@0 @1 INTEGER_CST@2) INTEGER_CST@3)
6665 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)))
6668 tree ty = TREE_TYPE (@0);
6669 unsigned prec = TYPE_PRECISION (ty);
6670 wide_int mask = wi::to_wide (@2, prec);
6671 wide_int rhs = wi::to_wide (@3, prec);
6672 signop sgn = TYPE_SIGN (ty);
6674 (if ((mask & (mask + 1)) == 0 && wi::gt_p (rhs, 0, sgn)
6675 && (rhs & (rhs + 1)) == 0 && wi::ge_p (mask, rhs, sgn))
6676 (eqcmp (bit_and @1 { wide_int_to_tree (ty, mask - rhs); })
6677 { build_zero_cst (ty); }))))))
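/* Worked examples (illustration only): for unsigned x,
     (x & 15) <= 3   is expected to fold to  (x & 12) == 0
     (x & 15) >  3   is expected to fold to  (x & 12) != 0
   with 12 == 15 - 3, i.e. 2**4 - 2**2.  */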
6679 /* -A CMP -B -> B CMP A. */
6680 (for cmp (tcc_comparison)
6681 scmp (swapped_tcc_comparison)
6683 (cmp (negate @0) (negate @1))
6684 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
6685 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
6688 || TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))))
6691 (cmp (negate @0) CONSTANT_CLASS_P@1)
6692 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
6693 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
6696 || TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))))
6697 (with { tree tem = const_unop (NEGATE_EXPR, TREE_TYPE (@0), @1); }
6698 (if (tem && !TREE_OVERFLOW (tem))
6699 (scmp @0 { tem; }))))))
6701 /* Convert ABS[U]_EXPR<x> == 0 or ABS[U]_EXPR<x> != 0 to x == 0 or x != 0. */
6705 (eqne (op @0) zerop@1)
6706 (eqne @0 { build_zero_cst (TREE_TYPE (@0)); }))))
6708 /* From fold_sign_changed_comparison and fold_widened_comparison.
6709 FIXME: the lack of symmetry is disturbing. */
6710 (for cmp (simple_comparison)
6712 (cmp (convert@0 @00) (convert?@1 @10))
6713 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
6714 /* Disable this optimization if we're casting a function pointer
6715 type on targets that require function pointer canonicalization. */
6716 && !(targetm.have_canonicalize_funcptr_for_compare ()
6717 && ((POINTER_TYPE_P (TREE_TYPE (@00))
6718 && FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@00))))
6719 || (POINTER_TYPE_P (TREE_TYPE (@10))
6720 && FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@10))))))
6722 (if (TYPE_PRECISION (TREE_TYPE (@00)) == TYPE_PRECISION (TREE_TYPE (@0))
6723 && (TREE_CODE (@10) == INTEGER_CST
6725 && (TYPE_UNSIGNED (TREE_TYPE (@00)) == TYPE_UNSIGNED (TREE_TYPE (@0))
6728 && !POINTER_TYPE_P (TREE_TYPE (@00))
6729 /* (int)bool:32 != (int)uint is not the same as
6730 bool:32 != (bool:32)uint since boolean types only have two valid
6731 values independent of their precision. */
6732 && (TREE_CODE (TREE_TYPE (@00)) != BOOLEAN_TYPE
6733 || TREE_CODE (TREE_TYPE (@10)) == BOOLEAN_TYPE))
6734 /* ??? The special-casing of INTEGER_CST conversion was in the original
6735 code and is kept here to avoid a spurious overflow flag on the resulting
6736 constant which fold_convert produces. */
6737 (if (TREE_CODE (@1) == INTEGER_CST)
6738 (cmp @00 { force_fit_type (TREE_TYPE (@00),
6739 wide_int::from (wi::to_wide (@1),
6740 MAX (TYPE_PRECISION (TREE_TYPE (@1)),
6741 TYPE_PRECISION (TREE_TYPE (@00))),
6742 TYPE_SIGN (TREE_TYPE (@1))),
6743 0, TREE_OVERFLOW (@1)); })
6744 (cmp @00 (convert @1)))
6746 (if (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (TREE_TYPE (@00)))
6747 /* If possible, express the comparison in the shorter mode. */
6748 (if ((cmp == EQ_EXPR || cmp == NE_EXPR
6749 || TYPE_UNSIGNED (TREE_TYPE (@0)) == TYPE_UNSIGNED (TREE_TYPE (@00))
6750 || (!TYPE_UNSIGNED (TREE_TYPE (@0))
6751 && TYPE_UNSIGNED (TREE_TYPE (@00))))
6752 && (types_match (TREE_TYPE (@10), TREE_TYPE (@00))
6753 || ((TYPE_PRECISION (TREE_TYPE (@00))
6754 >= TYPE_PRECISION (TREE_TYPE (@10)))
6755 && (TYPE_UNSIGNED (TREE_TYPE (@00))
6756 == TYPE_UNSIGNED (TREE_TYPE (@10))))
6757 || (TREE_CODE (@10) == INTEGER_CST
6758 && INTEGRAL_TYPE_P (TREE_TYPE (@00))
6759 && int_fits_type_p (@10, TREE_TYPE (@00)))))
6760 (cmp @00 (convert @10))
6761 (if (TREE_CODE (@10) == INTEGER_CST
6762 && INTEGRAL_TYPE_P (TREE_TYPE (@00))
6763 && !int_fits_type_p (@10, TREE_TYPE (@00)))
6766 tree min = lower_bound_in_type (TREE_TYPE (@10), TREE_TYPE (@00));
6767 tree max = upper_bound_in_type (TREE_TYPE (@10), TREE_TYPE (@00));
6768 bool above = integer_nonzerop (const_binop (LT_EXPR, type, max, @10));
6769 bool below = integer_nonzerop (const_binop (LT_EXPR, type, @10, min));
6771 (if (above || below)
6772 (if (cmp == EQ_EXPR || cmp == NE_EXPR)
6773 { constant_boolean_node (cmp == EQ_EXPR ? false : true, type); }
6774 (if (cmp == LT_EXPR || cmp == LE_EXPR)
6775 { constant_boolean_node (above ? true : false, type); }
6776 (if (cmp == GT_EXPR || cmp == GE_EXPR)
6777 { constant_boolean_node (above ? false : true, type); })))))))))
6778 /* Fold (double)float1 CMP (double)float2 into float1 CMP float2. */
6779 (if (FLOAT_TYPE_P (TREE_TYPE (@00))
6780 && (DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@0))
6781 == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@00)))
6782 && (DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@0))
6783 == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@10))))
6786 tree type1 = TREE_TYPE (@10);
6787 if (TREE_CODE (@10) == REAL_CST && !DECIMAL_FLOAT_TYPE_P (type1))
6789 REAL_VALUE_TYPE orig = TREE_REAL_CST (@10);
6790 if (TYPE_PRECISION (type1) > TYPE_PRECISION (float_type_node)
6791 && exact_real_truncate (TYPE_MODE (float_type_node), &orig))
6792 type1 = float_type_node;
6793 if (TYPE_PRECISION (type1) > TYPE_PRECISION (double_type_node)
6794 && exact_real_truncate (TYPE_MODE (double_type_node), &orig))
6795 type1 = double_type_node;
6798 = (element_precision (TREE_TYPE (@00)) > element_precision (type1)
6799 ? TREE_TYPE (@00) : type1);
6801 (if (element_precision (TREE_TYPE (@0)) > element_precision (newtype)
6802 && (!VECTOR_TYPE_P (type) || is_truth_type_for (newtype, type)))
6803 (cmp (convert:newtype @00) (convert:newtype @10))))))))
6808 /* SSA names are canonicalized to 2nd place. */
6809 (cmp addr@0 SSA_NAME@1)
6812 poly_int64 off; tree base;
6813 tree addr = (TREE_CODE (@0) == SSA_NAME
6814 ? gimple_assign_rhs1 (SSA_NAME_DEF_STMT (@0)) : @0);
6816 /* A local variable can never be pointed to by
6817 the default SSA name of an incoming parameter. */
6818 (if (SSA_NAME_IS_DEFAULT_DEF (@1)
6819 && TREE_CODE (SSA_NAME_VAR (@1)) == PARM_DECL
6820 && (base = get_base_address (TREE_OPERAND (addr, 0)))
6821 && TREE_CODE (base) == VAR_DECL
6822 && auto_var_in_fn_p (base, current_function_decl))
6823 (if (cmp == NE_EXPR)
6824 { constant_boolean_node (true, type); }
6825 { constant_boolean_node (false, type); })
6826 /* If the address is based on @1 decide using the offset. */
6827 (if ((base = get_addr_base_and_unit_offset (TREE_OPERAND (addr, 0), &off))
6828 && TREE_CODE (base) == MEM_REF
6829 && TREE_OPERAND (base, 0) == @1)
6830 (with { off += mem_ref_offset (base).force_shwi (); }
6831 (if (known_ne (off, 0))
6832 { constant_boolean_node (cmp == NE_EXPR, type); }
6833 (if (known_eq (off, 0))
6834 { constant_boolean_node (cmp == EQ_EXPR, type); }))))))))
6836 /* Equality compare simplifications from fold_binary */
6839 /* If we have (A | C) == D where C & ~D != 0, convert this into 0.
6840 Similarly for NE_EXPR. */
6842 (cmp (convert?@3 (bit_ior @0 INTEGER_CST@1)) INTEGER_CST@2)
6843 (if (tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0))
6844 && wi::bit_and_not (wi::to_wide (@1), wi::to_wide (@2)) != 0)
6845 { constant_boolean_node (cmp == NE_EXPR, type); }))
6847 /* (X ^ Y) == 0 becomes X == Y, and (X ^ Y) != 0 becomes X != Y. */
6849 (cmp (bit_xor @0 @1) integer_zerop)
6852 /* (X ^ Y) == Y becomes X == 0.
6853 Likewise (X ^ Y) == X becomes Y == 0. */
6855 (cmp:c (bit_xor:c @0 @1) @0)
6856 (cmp @1 { build_zero_cst (TREE_TYPE (@1)); }))
6858 /* (X & Y) == X becomes (X & ~Y) == 0. */
6860 (cmp:c (bit_and:c @0 @1) @0)
6861 (cmp (bit_and @0 (bit_not! @1)) { build_zero_cst (TREE_TYPE (@0)); }))
6863 (cmp:c (convert@3 (bit_and (convert@2 @0) INTEGER_CST@1)) (convert @0))
6864 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
6865 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
6866 && INTEGRAL_TYPE_P (TREE_TYPE (@3))
6867 && TYPE_PRECISION (TREE_TYPE (@2)) == TYPE_PRECISION (TREE_TYPE (@0))
6868 && TYPE_PRECISION (TREE_TYPE (@3)) > TYPE_PRECISION (TREE_TYPE (@2))
6869 && !wi::neg_p (wi::to_wide (@1)))
6870 (cmp (bit_and @0 (convert (bit_not @1)))
6871 { build_zero_cst (TREE_TYPE (@0)); })))
6873 /* (X | Y) == Y becomes (X & ~Y) == 0. */
6875 (cmp:c (bit_ior:c @0 @1) @1)
6876 (cmp (bit_and @0 (bit_not! @1)) { build_zero_cst (TREE_TYPE (@0)); }))
6878 /* (X ^ C1) op C2 can be rewritten as X op (C1 ^ C2). */
6880 (cmp (convert?@3 (bit_xor @0 INTEGER_CST@1)) INTEGER_CST@2)
6881 (if (tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0)))
6882 (cmp @0 (bit_xor @1 (convert @2)))))
6885 (cmp (nop_convert? @0) integer_zerop)
6886 (if (tree_expr_nonzero_p (@0))
6887 { constant_boolean_node (cmp == NE_EXPR, type); }))
6889 /* (X & C) op (Y & C) into (X ^ Y) & C op 0. */
6891 (cmp (bit_and:cs @0 @2) (bit_and:cs @1 @2))
6892 (cmp (bit_and (bit_xor @0 @1) @2) { build_zero_cst (TREE_TYPE (@2)); })))
6894 /* (X < 0) != (Y < 0) into (X ^ Y) < 0.
6895 (X >= 0) != (Y >= 0) into (X ^ Y) < 0.
6896 (X < 0) == (Y < 0) into (X ^ Y) >= 0.
6897 (X >= 0) == (Y >= 0) into (X ^ Y) >= 0. */
6902 (cmp (sgncmp @0 integer_zerop@2) (sgncmp @1 integer_zerop))
6903 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
6904 && !TYPE_UNSIGNED (TREE_TYPE (@0))
6905 && types_match (@0, @1))
6906 (ncmp (bit_xor @0 @1) @2)))))
6907 /* (X < 0) == (Y >= 0) into (X ^ Y) < 0.
6908 (X < 0) != (Y >= 0) into (X ^ Y) >= 0. */
6912 (cmp:c (lt @0 integer_zerop@2) (ge @1 integer_zerop))
6913 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
6914 && !TYPE_UNSIGNED (TREE_TYPE (@0))
6915 && types_match (@0, @1))
6916 (ncmp (bit_xor @0 @1) @2))))
6918 /* If we have (A & C) == C where C is a power of 2, convert this into
6919 (A & C) != 0. Similarly for NE_EXPR. */
6923 (cmp (bit_and@2 @0 integer_pow2p@1) @1)
6924 (icmp @2 { build_zero_cst (TREE_TYPE (@0)); })))
6927 /* From fold_binary_op_with_conditional_arg handle the case of
6928 rewriting (a ? b : c) > d to a ? (b > d) : (c > d) when the
6929 compares simplify. */
6930 (for cmp (simple_comparison)
6932 (cmp:c (cond @0 @1 @2) @3)
6933 /* Do not move possibly trapping operations into the conditional as this
6934 pessimizes code and causes gimplification issues when applied late. */
6935 (if (!FLOAT_TYPE_P (TREE_TYPE (@3))
6936 || !operation_could_trap_p (cmp, true, false, @3))
6937 (cond @0 (cmp! @1 @3) (cmp! @2 @3)))))
6941 /* x < 0 ? ~y : y into (x >> (prec-1)) ^ y. */
6942 /* x >= 0 ? ~y : y into ~((x >> (prec-1)) ^ y). */
6944 (cond (cmp @0 integer_zerop) (bit_not @1) @1)
6945 (if (INTEGRAL_TYPE_P (type)
6946 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
6947 && !TYPE_UNSIGNED (TREE_TYPE (@0))
6948 && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (type))
6951 tree shifter = build_int_cst (integer_type_node, TYPE_PRECISION (type) - 1);
6953 (if (cmp == LT_EXPR)
6954 (bit_xor (convert (rshift @0 {shifter;})) @1)
6955 (bit_not (bit_xor (convert (rshift @0 {shifter;})) @1))))))
6956 /* x < 0 ? y : ~y into ~((x >> (prec-1)) ^ y). */
6957 /* x >= 0 ? y : ~y into (x >> (prec-1)) ^ y. */
6959 (cond (cmp @0 integer_zerop) @1 (bit_not @1))
6960 (if (INTEGRAL_TYPE_P (type)
6961 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
6962 && !TYPE_UNSIGNED (TREE_TYPE (@0))
6963 && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (type))
6966 tree shifter = build_int_cst (integer_type_node, TYPE_PRECISION (type) - 1);
6968 (if (cmp == GE_EXPR)
6969 (bit_xor (convert (rshift @0 {shifter;})) @1)
6970 (bit_not (bit_xor (convert (rshift @0 {shifter;})) @1)))))))
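/* Illustrative sketch (plain C, assuming 32-bit int and the arithmetic right
   shift of negative values that GIMPLE guarantees here):
     int sel (int x, int y) { return x < 0 ? ~y : y; }
   becomes the branchless
     int sel (int x, int y) { return (x >> 31) ^ y; }
   and the x >= 0 variant is the same expression wrapped in a ~.  */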
6972 /* If we have (A & C) != 0 ? D : 0 where C and D are powers of 2,
6973 convert this into a shift followed by ANDing with D. */
6976 (ne (bit_and @0 integer_pow2p@1) integer_zerop)
6977 INTEGER_CST@2 integer_zerop)
6978 (if (!POINTER_TYPE_P (type) && integer_pow2p (@2))
6980 int shift = (wi::exact_log2 (wi::to_wide (@2))
6981 - wi::exact_log2 (wi::to_wide (@1)));
6985 (lshift (convert @0) { build_int_cst (integer_type_node, shift); }) @2)
6987 (convert (rshift @0 { build_int_cst (integer_type_node, -shift); }))
6990 /* If we have (A & C) != 0 where C is the sign bit of A, convert
6991 this into A < 0. Similarly for (A & C) == 0 into A >= 0. */
6995 (cmp (bit_and (convert?@2 @0) integer_pow2p@1) integer_zerop)
6996 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
6997 && type_has_mode_precision_p (TREE_TYPE (@0))
6998 && element_precision (@2) >= element_precision (@0)
6999 && wi::only_sign_bit_p (wi::to_wide (@1), element_precision (@0)))
7000 (with { tree stype = signed_type_for (TREE_TYPE (@0)); }
7001 (ncmp (convert:stype @0) { build_zero_cst (stype); })))))
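/* Illustrative sketch (plain C, 32-bit int assumed): masking with the sign
   bit is just a sign test, so
     int negp (int a) { return (a & 0x80000000) != 0; }
   folds to
     int negp (int a) { return a < 0; }
   and the == 0 form correspondingly folds to a >= 0.  */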
7003 /* If we have A < 0 ? C : 0 where C is a power of 2, convert
7004 this into a right shift or sign extension followed by ANDing with C. */
7007 (lt @0 integer_zerop)
7008 INTEGER_CST@1 integer_zerop)
7009 (if (integer_pow2p (@1)
7010 && !TYPE_UNSIGNED (TREE_TYPE (@0)))
7012 int shift = element_precision (@0) - wi::exact_log2 (wi::to_wide (@1)) - 1;
7016 (convert (rshift @0 { build_int_cst (integer_type_node, shift); }))
7018 /* Otherwise ctype must be wider than TREE_TYPE (@0) and pure
7019 sign extension followed by AND with C will achieve the effect. */
7020 (bit_and (convert @0) @1)))))
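/* Illustrative sketch (plain C, 32-bit int assumed): with C == 16 the shift
   count is prec - log2(C) - 1 == 27, so
     int f (int a) { return a < 0 ? 16 : 0; }
   folds to
     int f (int a) { return (a >> 27) & 16; }  */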
7022 /* When the addresses are not directly of decls compare base and offset.
7023 This implements some remaining parts of fold_comparison address
7024 comparisons, but is still not a complete replacement for it. Still it is good
7025 enough to make fold_stmt not regress when not dispatching to fold_binary. */
7026 (for cmp (simple_comparison)
7028 (cmp (convert1?@2 addr@0) (convert2? addr@1))
7031 poly_int64 off0, off1;
7033 int equal = address_compare (cmp, TREE_TYPE (@2), @0, @1, base0, base1,
7034 off0, off1, GENERIC);
7038 (if (cmp == EQ_EXPR && (known_eq (off0, off1) || known_ne (off0, off1)))
7039 { constant_boolean_node (known_eq (off0, off1), type); })
7040 (if (cmp == NE_EXPR && (known_eq (off0, off1) || known_ne (off0, off1)))
7041 { constant_boolean_node (known_ne (off0, off1), type); })
7042 (if (cmp == LT_EXPR && (known_lt (off0, off1) || known_ge (off0, off1)))
7043 { constant_boolean_node (known_lt (off0, off1), type); })
7044 (if (cmp == LE_EXPR && (known_le (off0, off1) || known_gt (off0, off1)))
7045 { constant_boolean_node (known_le (off0, off1), type); })
7046 (if (cmp == GE_EXPR && (known_ge (off0, off1) || known_lt (off0, off1)))
7047 { constant_boolean_node (known_ge (off0, off1), type); })
7048 (if (cmp == GT_EXPR && (known_gt (off0, off1) || known_le (off0, off1)))
7049 { constant_boolean_node (known_gt (off0, off1), type); }))
7052 (if (cmp == EQ_EXPR)
7053 { constant_boolean_node (false, type); })
7054 (if (cmp == NE_EXPR)
7055 { constant_boolean_node (true, type); })))))))
7058 /* a?~t:t -> (-(a))^t */
7061 (with { bool wascmp; }
7062 (if (INTEGRAL_TYPE_P (type)
7063 && bitwise_inverted_equal_p (@1, @2, wascmp)
7064 && (!wascmp || TYPE_PRECISION (type) == 1))
7065 (if ((!TYPE_UNSIGNED (type) && TREE_CODE (type) == BOOLEAN_TYPE)
7066 || TYPE_PRECISION (type) == 1)
7067 (bit_xor (convert:type @0) @2)
7068 (bit_xor (negate (convert:type @0)) @2)))))
7071 /* Simplify pointer equality compares using PTA. */
7075 (if (POINTER_TYPE_P (TREE_TYPE (@0))
7076 && ptrs_compare_unequal (@0, @1))
7077 { constant_boolean_node (neeq != EQ_EXPR, type); })))
7079 /* PR70920: Transform (intptr_t)x eq/ne CST to x eq/ne (typeof x) CST.
7080 and (typeof ptr_cst) x eq/ne ptr_cst to x eq/ne (typeof x) CST.
7081 Disable the transform if either operand is pointer to function.
7082 This broke pr22051-2.c for arm where function pointer
7083 canonicalization is not wanted. */
7087 (cmp (convert @0) INTEGER_CST@1)
7088 (if (((POINTER_TYPE_P (TREE_TYPE (@0))
7089 && !FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@0)))
7090 && INTEGRAL_TYPE_P (TREE_TYPE (@1))
7091 /* Don't perform this optimization in GENERIC if @0 has reference
7092 type when sanitizing. See PR101210. */
7094 && TREE_CODE (TREE_TYPE (@0)) == REFERENCE_TYPE
7095 && (flag_sanitize & (SANITIZE_NULL | SANITIZE_ALIGNMENT))))
7096 || (INTEGRAL_TYPE_P (TREE_TYPE (@0))
7097 && POINTER_TYPE_P (TREE_TYPE (@1))
7098 && !FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@1)))))
7099 && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
7100 (cmp @0 (convert @1)))))
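/* Illustrative sketch (plain C, needs <stdint.h>; the pointer must not point
   to a function and the precisions must match):
     int is_magic (int *p) { return (intptr_t) p == 64; }
   folds to
     int is_magic (int *p) { return p == (int *) 64; }  */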
7102 /* Non-equality compare simplifications from fold_binary */
7103 (for cmp (lt gt le ge)
7104 /* Comparisons with the highest or lowest possible integer of
7105 the specified precision will have known values. */
7107 (cmp (convert?@2 @0) uniform_integer_cst_p@1)
7108 (if ((INTEGRAL_TYPE_P (TREE_TYPE (@1))
7109 || POINTER_TYPE_P (TREE_TYPE (@1))
7110 || VECTOR_INTEGER_TYPE_P (TREE_TYPE (@1)))
7111 && tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0)))
7114 tree cst = uniform_integer_cst_p (@1);
7115 tree arg1_type = TREE_TYPE (cst);
7116 unsigned int prec = TYPE_PRECISION (arg1_type);
7117 wide_int max = wi::max_value (arg1_type);
7118 wide_int signed_max = wi::max_value (prec, SIGNED);
7119 wide_int min = wi::min_value (arg1_type);
7122 (if (wi::to_wide (cst) == max)
7124 (if (cmp == GT_EXPR)
7125 { constant_boolean_node (false, type); })
7126 (if (cmp == GE_EXPR)
7128 (if (cmp == LE_EXPR)
7129 { constant_boolean_node (true, type); })
7130 (if (cmp == LT_EXPR)
7132 (if (wi::to_wide (cst) == min)
7134 (if (cmp == LT_EXPR)
7135 { constant_boolean_node (false, type); })
7136 (if (cmp == LE_EXPR)
7138 (if (cmp == GE_EXPR)
7139 { constant_boolean_node (true, type); })
7140 (if (cmp == GT_EXPR)
7142 (if (wi::to_wide (cst) == max - 1)
7144 (if (cmp == GT_EXPR)
7145 (eq @2 { build_uniform_cst (TREE_TYPE (@1),
7146 wide_int_to_tree (TREE_TYPE (cst),
7149 (if (cmp == LE_EXPR)
7150 (ne @2 { build_uniform_cst (TREE_TYPE (@1),
7151 wide_int_to_tree (TREE_TYPE (cst),
7154 (if (wi::to_wide (cst) == min + 1)
7156 (if (cmp == GE_EXPR)
7157 (ne @2 { build_uniform_cst (TREE_TYPE (@1),
7158 wide_int_to_tree (TREE_TYPE (cst),
7161 (if (cmp == LT_EXPR)
7162 (eq @2 { build_uniform_cst (TREE_TYPE (@1),
7163 wide_int_to_tree (TREE_TYPE (cst),
7166 (if (wi::to_wide (cst) == signed_max
7167 && TYPE_UNSIGNED (arg1_type)
7168 && TYPE_MODE (arg1_type) != BLKmode
7169 /* We will flip the signedness of the comparison operator
7170 associated with the mode of @1, so the sign bit is
7171 specified by this mode. Check that @1 is the signed
7172 max associated with this sign bit. */
7173 && prec == GET_MODE_PRECISION (SCALAR_INT_TYPE_MODE (arg1_type))
7174 /* signed_type does not work on pointer types. */
7175 && INTEGRAL_TYPE_P (arg1_type))
7176 /* The following case also applies to X < signed_max+1
7177 and X >= signed_max+1 because of previous transformations. */
7178 (if (cmp == LE_EXPR || cmp == GT_EXPR)
7179 (with { tree st = signed_type_for (TREE_TYPE (@1)); }
7181 (if (cst == @1 && cmp == LE_EXPR)
7182 (ge (convert:st @0) { build_zero_cst (st); }))
7183 (if (cst == @1 && cmp == GT_EXPR)
7184 (lt (convert:st @0) { build_zero_cst (st); }))
7185 (if (cmp == LE_EXPR)
7186 (ge (view_convert:st @0) { build_zero_cst (st); }))
7187 (if (cmp == GT_EXPR)
7188 (lt (view_convert:st @0) { build_zero_cst (st); })))))))))))
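/* Illustrative sketches (plain C, 32-bit types assumed) of the limit
   comparisons handled above:
     unsigned char c;  c > 254          becomes  c == 255
     unsigned char c;  c <= 254         becomes  c != 255
     unsigned int  x;  x > 0x7fffffffu  becomes  (int) x < 0  */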
7190 /* unsigned < (typeof unsigned)(unsigned != 0) is always false. */
7192 (lt:c @0 (convert (ne @0 integer_zerop)))
7193 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
7194 { constant_boolean_node (false, type); }))
7196 /* x != (typeof x)(x == CST) -> CST == 0 ? 1 : (CST == 1 ? (x!=0&&x!=1) : x != 0) */
7197 /* x != (typeof x)(x != CST) -> CST == 1 ? 1 : (CST == 0 ? (x!=0&&x!=1) : x != 1) */
7198 /* x == (typeof x)(x == CST) -> CST == 0 ? 0 : (CST == 1 ? (x==0||x==1) : x == 0) */
7199 /* x == (typeof x)(x != CST) -> CST == 1 ? 0 : (CST == 0 ? (x==0||x==1) : x == 1) */
7203 (outer:c @0 (convert (inner @0 INTEGER_CST@1)))
7205 bool cst1 = integer_onep (@1);
7206 bool cst0 = integer_zerop (@1);
7207 bool innereq = inner == EQ_EXPR;
7208 bool outereq = outer == EQ_EXPR;
7211 (if (innereq ? cst0 : cst1)
7212 { constant_boolean_node (!outereq, type); })
7213 (if (innereq ? cst1 : cst0)
7215 tree utype = unsigned_type_for (TREE_TYPE (@0));
7216 tree ucst1 = build_one_cst (utype);
7219 (gt (convert:utype @0) { ucst1; })
7220 (le (convert:utype @0) { ucst1; })
7225 tree value = build_int_cst (TREE_TYPE (@0), !innereq);
7238 (for cmp (unordered ordered unlt unle ungt unge uneq ltgt)
7239 /* If the second operand is NaN, the result is constant. */
7242 (if (REAL_VALUE_ISNAN (TREE_REAL_CST (@1))
7243 && (cmp != LTGT_EXPR || ! flag_trapping_math))
7244 { constant_boolean_node (cmp == ORDERED_EXPR || cmp == LTGT_EXPR
7245 ? false : true, type); })))
7247 /* Fold UNORDERED if either operand must be NaN, or neither can be. */
7251 (if (tree_expr_nan_p (@0) || tree_expr_nan_p (@1))
7252 { constant_boolean_node (true, type); })
7253 (if (!tree_expr_maybe_nan_p (@0) && !tree_expr_maybe_nan_p (@1))
7254 { constant_boolean_node (false, type); })))
7256 /* Fold ORDERED if either operand must be NaN, or neither can be. */
7260 (if (tree_expr_nan_p (@0) || tree_expr_nan_p (@1))
7261 { constant_boolean_node (false, type); })
7262 (if (!tree_expr_maybe_nan_p (@0) && !tree_expr_maybe_nan_p (@1))
7263 { constant_boolean_node (true, type); })))
7265 /* bool_var != 0 becomes bool_var. */
7267 (ne @0 integer_zerop)
7268 (if (TREE_CODE (TREE_TYPE (@0)) == BOOLEAN_TYPE
7269 && types_match (type, TREE_TYPE (@0)))
7271 /* bool_var == 1 becomes bool_var. */
7273 (eq @0 integer_onep)
7274 (if (TREE_CODE (TREE_TYPE (@0)) == BOOLEAN_TYPE
7275 && types_match (type, TREE_TYPE (@0)))
7278 /* Do not handle bool_var == 0 becomes !bool_var or
7279 bool_var != 1 becomes !bool_var
7280 here because that only is good in assignment context as long
7281 as we require a tcc_comparison in GIMPLE_CONDs where we'd
7282 replace if (x == 0) with tem = ~x; if (tem != 0) which is
7283 clearly less optimal and which we'll transform again in forwprop. */
7285 /* Transform comparisons of the form (X & Y) CMP 0 to X CMP2 Z
7286 where ~Y + 1 == pow2 and Z = ~Y. */
7287 (for cst (VECTOR_CST INTEGER_CST)
7291 (cmp (bit_and:c@2 @0 cst@1) integer_zerop)
7292 (with { tree csts = bitmask_inv_cst_vector_p (@1); }
7293 (if (csts && (VECTOR_TYPE_P (TREE_TYPE (@1)) || single_use (@2)))
7294 (with { auto optab = VECTOR_TYPE_P (TREE_TYPE (@1))
7295 ? optab_vector : optab_default;
7296 tree utype = unsigned_type_for (TREE_TYPE (@1)); }
7297 (if (target_supports_op_p (utype, icmp, optab)
7298 || (optimize_vectors_before_lowering_p ()
7299 && (!target_supports_op_p (type, cmp, optab)
7300 || !target_supports_op_p (type, BIT_AND_EXPR, optab))))
7301 (if (TYPE_UNSIGNED (TREE_TYPE (@1)))
7303 (icmp (view_convert:utype @0) { csts; })))))))))
7305 /* When one argument is a constant, overflow detection can be simplified.
7306 Currently restricted to single use so as not to interfere too much with
7307 ADD_OVERFLOW detection in tree-ssa-math-opts.cc.
7308 CONVERT?(CONVERT?(A) + CST) CMP A -> A CMP' CST' */
7309 (for cmp (lt le ge gt)
7312 (cmp:c (convert?@3 (plus@2 (convert?@4 @0) INTEGER_CST@1)) @0)
7313 (if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@2))
7314 && types_match (TREE_TYPE (@0), TREE_TYPE (@3))
7315 && tree_nop_conversion_p (TREE_TYPE (@4), TREE_TYPE (@0))
7316 && wi::to_wide (@1) != 0
7319 unsigned int prec = TYPE_PRECISION (TREE_TYPE (@0));
7320 signop sign = TYPE_SIGN (TREE_TYPE (@0));
7322 (out @0 { wide_int_to_tree (TREE_TYPE (@0),
7323 wi::max_value (prec, sign)
7324 - wi::to_wide (@1)); })))))
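/* Illustrative sketch (plain C, UINT_MAX from <limits.h>): for wrapping
   unsigned arithmetic the overflow test
     int ovf (unsigned a) { return a + 5 < a; }
   folds to a compare against a constant:
     int ovf (unsigned a) { return a > UINT_MAX - 5; }  */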
7326 /* To detect overflow in unsigned A - B, A < B is simpler than A - B > A.
7327 However, the detection logic for SUB_OVERFLOW in tree-ssa-math-opts.cc
7328 expects the long form, so we restrict the transformation for now. */
7331 (cmp:c (minus@2 @0 @1) @0)
7332 (if (single_use (@2)
7333 && ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
7334 && TYPE_UNSIGNED (TREE_TYPE (@0)))
7337 /* Optimize A - B + -1 >= A into B >= A for unsigned comparisons. */
7340 (cmp:c (plus (minus @0 @1) integer_minus_onep) @0)
7341 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
7342 && TYPE_UNSIGNED (TREE_TYPE (@0)))
7345 /* Testing for overflow is unnecessary if we already know the result. */
7350 (cmp:c (realpart (IFN_SUB_OVERFLOW@2 @0 @1)) @0)
7351 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
7352 && types_match (TREE_TYPE (@0), TREE_TYPE (@1)))
7353 (out (imagpart @2) { build_zero_cst (TREE_TYPE (@0)); }))))
7358 (cmp:c (realpart (IFN_ADD_OVERFLOW:c@2 @0 @1)) @0)
7359 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
7360 && types_match (TREE_TYPE (@0), TREE_TYPE (@1)))
7361 (out (imagpart @2) { build_zero_cst (TREE_TYPE (@0)); }))))
7363 /* For unsigned operands, -1 / B < A checks whether A * B would overflow.
7364 Simplify it to __builtin_mul_overflow (A, B, <unused>). */
7368 (cmp:c (trunc_div:s integer_all_onesp @1) @0)
7369 (if (TYPE_UNSIGNED (TREE_TYPE (@0)) && !VECTOR_TYPE_P (TREE_TYPE (@0)))
7370 (with { tree t = TREE_TYPE (@0), cpx = build_complex_type (t); }
7371 (out (imagpart (IFN_MUL_OVERFLOW:cpx @0 @1)) { build_zero_cst (t); })))))
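/* Illustrative sketch (plain C, b assumed nonzero): the classic idiom
     int mul_ovf (unsigned a, unsigned b) { return 0xffffffffu / b < a; }
   is rewritten to use the overflow builtin, roughly
     int mul_ovf (unsigned a, unsigned b)
     { unsigned tmp; return __builtin_mul_overflow (a, b, &tmp); }  */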
7373 /* Similarly, for unsigned operands, (((type) A * B) >> prec) != 0 where type
7374 is at least twice as wide as type of A and B, simplify to
7375 __builtin_mul_overflow (A, B, <unused>). */
7378 (cmp (rshift (mult:s (convert@3 @0) (convert @1)) INTEGER_CST@2)
7380 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
7381 && INTEGRAL_TYPE_P (TREE_TYPE (@3))
7382 && TYPE_UNSIGNED (TREE_TYPE (@0))
7383 && (TYPE_PRECISION (TREE_TYPE (@3))
7384 >= 2 * TYPE_PRECISION (TREE_TYPE (@0)))
7385 && tree_fits_uhwi_p (@2)
7386 && tree_to_uhwi (@2) == TYPE_PRECISION (TREE_TYPE (@0))
7387 && types_match (@0, @1)
7388 && type_has_mode_precision_p (TREE_TYPE (@0))
7389 && (optab_handler (umulv4_optab, TYPE_MODE (TREE_TYPE (@0)))
7390 != CODE_FOR_nothing))
7391 (with { tree t = TREE_TYPE (@0), cpx = build_complex_type (t); }
7392 (cmp (imagpart (IFN_MUL_OVERFLOW:cpx @0 @1)) { build_zero_cst (t); })))))
7394 /* Demote operands of IFN_{ADD,SUB,MUL}_OVERFLOW. */
7395 (for ovf (IFN_ADD_OVERFLOW IFN_SUB_OVERFLOW IFN_MUL_OVERFLOW)
7397 (ovf (convert@2 @0) @1)
7398 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
7399 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
7400 && TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (TREE_TYPE (@0))
7401 && (!TYPE_UNSIGNED (TREE_TYPE (@2)) || TYPE_UNSIGNED (TREE_TYPE (@0))))
7404 (ovf @1 (convert@2 @0))
7405 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
7406 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
7407 && TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (TREE_TYPE (@0))
7408 && (!TYPE_UNSIGNED (TREE_TYPE (@2)) || TYPE_UNSIGNED (TREE_TYPE (@0))))
7411 /* Optimize __builtin_mul_overflow_p (x, cst, (utype) 0) if all 3 types
7412 are unsigned to x > (umax / cst). Similarly for signed type, but
7413 in that case it needs to be outside of a range. */
7415 (imagpart (IFN_MUL_OVERFLOW:cs@2 @0 integer_nonzerop@1))
7416 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
7417 && TYPE_MAX_VALUE (TREE_TYPE (@0))
7418 && types_match (TREE_TYPE (@0), TREE_TYPE (TREE_TYPE (@2)))
7419 && int_fits_type_p (@1, TREE_TYPE (@0)))
7420 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
7421 (convert (gt @0 (trunc_div! { TYPE_MAX_VALUE (TREE_TYPE (@0)); } @1)))
7422 (if (TYPE_MIN_VALUE (TREE_TYPE (@0)))
7423 (if (integer_minus_onep (@1))
7424 (convert (eq @0 { TYPE_MIN_VALUE (TREE_TYPE (@0)); }))
7427 tree div = fold_convert (TREE_TYPE (@0), @1);
7428 tree lo = int_const_binop (TRUNC_DIV_EXPR,
7429 TYPE_MIN_VALUE (TREE_TYPE (@0)), div);
7430 tree hi = int_const_binop (TRUNC_DIV_EXPR,
7431 TYPE_MAX_VALUE (TREE_TYPE (@0)), div);
7432 tree etype = range_check_type (TREE_TYPE (@0));
7435 if (wi::neg_p (wi::to_wide (div)))
7437 lo = fold_convert (etype, lo);
7438 hi = fold_convert (etype, hi);
7439 hi = int_const_binop (MINUS_EXPR, hi, lo);
7443 (convert (gt (minus (convert:etype @0) { lo; }) { hi; })))))))))
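/* Illustrative sketch (plain C, UINT_MAX from <limits.h>): for unsigned x
   and the constant 10,
     int ovf10 (unsigned x) { return __builtin_mul_overflow_p (x, 10u, 0u); }
   folds to
     int ovf10 (unsigned x) { return x > UINT_MAX / 10; }
   while for signed types a range check derived from MIN/MAX divided by the
   constant is produced instead.  */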
7445 /* Simplification of math builtins. These rules must all be optimizations
7446 as well as IL simplifications. If there is a possibility that the new
7447 form could be a pessimization, the rule should go in the canonicalization
7448 section that follows this one.
7450 Rules can generally go in this section if they satisfy one of
7453 - the rule describes an identity
7455 - the rule replaces calls with something as simple as addition or
7458 - the rule contains unary calls only and simplifies the surrounding
7459 arithmetic. (The idea here is to exclude non-unary calls in which
7460 one operand is constant and in which the call is known to be cheap
7461 when the operand has that value.) */
7463 (if (flag_unsafe_math_optimizations)
7464 /* Simplify sqrt(x) * sqrt(x) -> x. */
7466 (mult (SQRT_ALL@1 @0) @1)
7467 (if (!tree_expr_maybe_signaling_nan_p (@0))
7470 (for op (plus minus)
7471 /* Simplify (A / C) +- (B / C) -> (A +- B) / C. */
7475 (rdiv (op @0 @2) @1)))
7477 (for cmp (lt le gt ge)
7478 neg_cmp (gt ge lt le)
7479 /* Simplify (x * C1) cmp C2 -> x cmp (C2 / C1), where C1 != 0. */
7481 (cmp (mult @0 REAL_CST@1) REAL_CST@2)
7483 { tree tem = const_binop (RDIV_EXPR, type, @2, @1); }
7485 && !(REAL_VALUE_ISINF (TREE_REAL_CST (tem))
7486 || (real_zerop (tem) && !real_zerop (@1))))
7488 (if (real_less (&dconst0, TREE_REAL_CST_PTR (@1)))
7490 (if (real_less (TREE_REAL_CST_PTR (@1), &dconst0))
7491 (neg_cmp @0 { tem; })))))))
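/* Illustrative sketch (plain C, only under -funsafe-math-optimizations):
     int f (double x) { return x * 2.0 < 10.0; }
   folds to
     int f (double x) { return x < 5.0; }
   and with a negative multiplier the comparison direction is flipped,
   e.g. x * -2.0 < 10.0 becomes x > -5.0.  */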
7493 /* Simplify sqrt(x) * sqrt(y) -> sqrt(x*y). */
7494 (for root (SQRT CBRT)
7496 (mult (root:s @0) (root:s @1))
7497 (root (mult @0 @1))))
7499 /* Simplify expN(x) * expN(y) -> expN(x+y). */
7500 (for exps (EXP EXP2 EXP10 POW10)
7502 (mult (exps:s @0) (exps:s @1))
7503 (exps (plus @0 @1))))
7505 /* Simplify a/root(b/c) into a*root(c/b). */
7506 (for root (SQRT CBRT)
7508 (rdiv @0 (root:s (rdiv:s @1 @2)))
7509 (mult @0 (root (rdiv @2 @1)))))
7511 /* Simplify x/expN(y) into x*expN(-y). */
7512 (for exps (EXP EXP2 EXP10 POW10)
7514 (rdiv @0 (exps:s @1))
7515 (mult @0 (exps (negate @1)))))
7517 (for logs (LOG LOG2 LOG10 LOG10)
7518 exps (EXP EXP2 EXP10 POW10)
7519 /* logN(expN(x)) -> x. */
7523 /* expN(logN(x)) -> x. */
7528 /* Optimize logN(func()) for various exponential functions. We
7529 want to determine the value "x" and the power "exponent" in
7530 order to transform logN(x**exponent) into exponent*logN(x). */
7531 (for logs (LOG LOG LOG LOG2 LOG2 LOG2 LOG10 LOG10)
7532 exps (EXP2 EXP10 POW10 EXP EXP10 POW10 EXP EXP2)
7535 (if (SCALAR_FLOAT_TYPE_P (type))
7541 /* Prepare to do logN(exp(exponent)) -> exponent*logN(e). */
7542 x = build_real_truncate (type, dconst_e ());
7545 /* Prepare to do logN(exp2(exponent)) -> exponent*logN(2). */
7546 x = build_real (type, dconst2);
7550 /* Prepare to do logN(exp10(exponent)) -> exponent*logN(10). */
7552 REAL_VALUE_TYPE dconst10;
7553 real_from_integer (&dconst10, VOIDmode, 10, SIGNED);
7554 x = build_real (type, dconst10);
7561 (mult (logs { x; }) @0)))))
7569 (if (SCALAR_FLOAT_TYPE_P (type))
7575 /* Prepare to do logN(sqrt(x)) -> 0.5*logN(x). */
7576 x = build_real (type, dconsthalf);
7579 /* Prepare to do logN(cbrt(x)) -> (1/3)*logN(x). */
7580 x = build_real_truncate (type, dconst_third ());
7586 (mult { x; } (logs @0))))))
7588 /* logN(pow(x,exponent)) -> exponent*logN(x). */
7589 (for logs (LOG LOG2 LOG10)
7593 (mult @1 (logs @0))))
7595 /* pow(C,x) -> exp(log(C)*x) if C > 0,
7596 or if C is a positive power of 2,
7597 pow(C,x) -> exp2(log2(C)*x). */
7605 (pows REAL_CST@0 @1)
7606 (if (real_compare (GT_EXPR, TREE_REAL_CST_PTR (@0), &dconst0)
7607 && real_isfinite (TREE_REAL_CST_PTR (@0))
7608 /* As libmvec doesn't have a vectorized exp2, defer optimizing
7609 the use_exp2 case until after vectorization. It seems actually
7610 beneficial for all constants to postpone this until later,
7611 because exp(log(C)*x), while faster, will have worse precision
7612 and if x folds into a constant too, that is unnecessary. */
7614 && canonicalize_math_after_vectorization_p ())
7616 const REAL_VALUE_TYPE *const value = TREE_REAL_CST_PTR (@0);
7617 bool use_exp2 = false;
7618 if (targetm.libc_has_function (function_c99_misc, TREE_TYPE (@0))
7619 && value->cl == rvc_normal)
7621 REAL_VALUE_TYPE frac_rvt = *value;
7622 SET_REAL_EXP (&frac_rvt, 1);
7623 if (real_equal (&frac_rvt, &dconst1))
7628 (if (optimize_pow_to_exp (@0, @1))
7629 (exps (mult (logs @0) @1)))
7630 (exp2s (mult (log2s @0) @1)))))))
7633 /* pow(C,x)*expN(y) -> expN(logN(C)*x+y) if C > 0. */
7635 exps (EXP EXP2 EXP10 POW10)
7636 logs (LOG LOG2 LOG10 LOG10)
7638 (mult:c (pows:s REAL_CST@0 @1) (exps:s @2))
7639 (if (real_compare (GT_EXPR, TREE_REAL_CST_PTR (@0), &dconst0)
7640 && real_isfinite (TREE_REAL_CST_PTR (@0)))
7641 (exps (plus (mult (logs @0) @1) @2)))))
7646 exps (EXP EXP2 EXP10 POW10)
7647 /* sqrt(expN(x)) -> expN(x*0.5). */
7650 (exps (mult @0 { build_real (type, dconsthalf); })))
7651 /* cbrt(expN(x)) -> expN(x/3). */
7654 (exps (mult @0 { build_real_truncate (type, dconst_third ()); })))
7655 /* pow(expN(x), y) -> expN(x*y). */
7658 (exps (mult @0 @1))))
7660 /* tan(atan(x)) -> x. */
7667 /* Simplify sin(atan(x)) -> x / sqrt(x*x + 1). */
7671 copysigns (COPYSIGN)
7676 REAL_VALUE_TYPE r_cst;
7677 build_sinatan_real (&r_cst, type);
7678 tree t_cst = build_real (type, r_cst);
7679 tree t_one = build_one_cst (type);
7681 (if (SCALAR_FLOAT_TYPE_P (type))
7682 (cond (lt (abs @0) { t_cst; })
7683 (rdiv @0 (sqrts (plus (mult @0 @0) { t_one; })))
7684 (copysigns { t_one; } @0))))))
7686 /* Simplify cos(atan(x)) -> 1 / sqrt(x*x + 1). */
7690 copysigns (COPYSIGN)
7695 REAL_VALUE_TYPE r_cst;
7696 build_sinatan_real (&r_cst, type);
7697 tree t_cst = build_real (type, r_cst);
7698 tree t_one = build_one_cst (type);
7699 tree t_zero = build_zero_cst (type);
7701 (if (SCALAR_FLOAT_TYPE_P (type))
7702 (cond (lt (abs @0) { t_cst; })
7703 (rdiv { t_one; } (sqrts (plus (mult @0 @0) { t_one; })))
7704 (copysigns { t_zero; } @0))))))
7706 (if (!flag_errno_math)
7707 /* Simplify sinh(atanh(x)) -> x / sqrt((1 - x)*(1 + x)). */
7712 (sinhs (atanhs:s @0))
7713 (with { tree t_one = build_one_cst (type); }
7714 (rdiv @0 (sqrts (mult (minus { t_one; } @0) (plus { t_one; } @0)))))))
7716 /* Simplify cosh(atanh(x)) -> 1 / sqrt((1 - x)*(1 + x)) */
7721 (coshs (atanhs:s @0))
7722 (with { tree t_one = build_one_cst (type); }
7723 (rdiv { t_one; } (sqrts (mult (minus { t_one; } @0) (plus { t_one; } @0))))))))
7725 /* cabs(x+0i) or cabs(0+xi) -> abs(x). */
7727 (CABS (complex:C @0 real_zerop@1))
7730 /* trunc(trunc(x)) -> trunc(x), etc. */
7731 (for fns (TRUNC_ALL FLOOR_ALL CEIL_ALL ROUND_ALL NEARBYINT_ALL RINT_ALL)
7735 /* f(x) -> x if x is integer valued and f does nothing for such values. */
7736 (for fns (TRUNC_ALL FLOOR_ALL CEIL_ALL ROUND_ALL NEARBYINT_ALL RINT_ALL)
7738 (fns integer_valued_real_p@0)
7741 /* hypot(x,0) and hypot(0,x) -> abs(x). */
7743 (HYPOT:c @0 real_zerop@1)
7746 /* pow(1,x) -> 1. */
7748 (POW real_onep@0 @1)
7752 /* copysign(x,x) -> x. */
7753 (COPYSIGN_ALL @0 @0)
7757 /* copysign(x,-x) -> -x. */
7758 (COPYSIGN_ALL @0 (negate@1 @0))
7762 /* copysign(x,y) -> fabs(x) if y is nonnegative. */
7763 (COPYSIGN_ALL @0 tree_expr_nonnegative_p@1)
7767 /* fabs (copysign(x, y)) -> fabs (x). */
7768 (abs (COPYSIGN_ALL @0 @1))
7771 (for scale (LDEXP SCALBN SCALBLN)
7772 /* ldexp(0, x) -> 0. */
7774 (scale real_zerop@0 @1)
7776 /* ldexp(x, 0) -> x. */
7778 (scale @0 integer_zerop@1)
7780 /* ldexp(x, y) -> x if x is +-Inf or NaN. */
7782 (scale REAL_CST@0 @1)
7783 (if (!real_isfinite (TREE_REAL_CST_PTR (@0)))
7786 /* Canonicalization of sequences of math builtins. These rules represent
7787 IL simplifications but are not necessarily optimizations.
7789 The sincos pass is responsible for picking "optimal" implementations
7790 of math builtins, which may be more complicated and can sometimes go
7791 the other way, e.g. converting pow into a sequence of sqrts.
7792 We only want to do these canonicalizations before the pass has run. */
7794 (if (flag_unsafe_math_optimizations && canonicalize_math_p ())
7795 /* Simplify tan(x) * cos(x) -> sin(x). */
7797 (mult:c (TAN:s @0) (COS:s @0))
7800 /* Simplify x * pow(x,c) -> pow(x,c+1). */
7802 (mult:c @0 (POW:s @0 REAL_CST@1))
7803 (if (!TREE_OVERFLOW (@1))
7804 (POW @0 (plus @1 { build_one_cst (type); }))))
7806 /* Simplify sin(x) / cos(x) -> tan(x). */
7808 (rdiv (SIN:s @0) (COS:s @0))
7811 /* Simplify sinh(x) / cosh(x) -> tanh(x). */
7813 (rdiv (SINH:s @0) (COSH:s @0))
7816 /* Simplify tanh (x) / sinh (x) -> 1.0 / cosh (x). */
7818 (rdiv (TANH:s @0) (SINH:s @0))
7819 (rdiv {build_one_cst (type);} (COSH @0)))
7821 /* Simplify cos(x) / sin(x) -> 1 / tan(x). */
7823 (rdiv (COS:s @0) (SIN:s @0))
7824 (rdiv { build_one_cst (type); } (TAN @0)))
7826 /* Simplify sin(x) / tan(x) -> cos(x). */
7828 (rdiv (SIN:s @0) (TAN:s @0))
7829 (if (! HONOR_NANS (@0)
7830 && ! HONOR_INFINITIES (@0))
7833 /* Simplify tan(x) / sin(x) -> 1.0 / cos(x). */
7835 (rdiv (TAN:s @0) (SIN:s @0))
7836 (if (! HONOR_NANS (@0)
7837 && ! HONOR_INFINITIES (@0))
7838 (rdiv { build_one_cst (type); } (COS @0))))
7840 /* Simplify pow(x,y) * pow(x,z) -> pow(x,y+z). */
7842 (mult (POW:s @0 @1) (POW:s @0 @2))
7843 (POW @0 (plus @1 @2)))
7845 /* Simplify pow(x,y) * pow(z,y) -> pow(x*z,y). */
7847 (mult (POW:s @0 @1) (POW:s @2 @1))
7848 (POW (mult @0 @2) @1))
7850 /* Simplify powi(x,y) * powi(z,y) -> powi(x*z,y). */
7852 (mult (POWI:s @0 @1) (POWI:s @2 @1))
7853 (POWI (mult @0 @2) @1))
7855 /* Simplify pow(x,c) / x -> pow(x,c-1). */
7857 (rdiv (POW:s @0 REAL_CST@1) @0)
7858 (if (!TREE_OVERFLOW (@1))
7859 (POW @0 (minus @1 { build_one_cst (type); }))))
7861 /* Simplify x / pow (y,z) -> x * pow(y,-z). */
7863 (rdiv @0 (POW:s @1 @2))
7864 (mult @0 (POW @1 (negate @2))))
7869 /* sqrt(sqrt(x)) -> pow(x,1/4). */
7872 (pows @0 { build_real (type, dconst_quarter ()); }))
7873 /* sqrt(cbrt(x)) -> pow(x,1/6). */
7876 (pows @0 { build_real_truncate (type, dconst_sixth ()); }))
7877 /* cbrt(sqrt(x)) -> pow(x,1/6). */
7880 (pows @0 { build_real_truncate (type, dconst_sixth ()); }))
7881 /* cbrt(cbrt(x)) -> pow(x,1/9), iff x is nonnegative. */
7883 (cbrts (cbrts tree_expr_nonnegative_p@0))
7884 (pows @0 { build_real_truncate (type, dconst_ninth ()); }))
7885 /* sqrt(pow(x,y)) -> pow(|x|,y*0.5). */
7887 (sqrts (pows @0 @1))
7888 (pows (abs @0) (mult @1 { build_real (type, dconsthalf); })))
7889 /* cbrt(pow(x,y)) -> pow(x,y/3), iff x is nonnegative. */
7891 (cbrts (pows tree_expr_nonnegative_p@0 @1))
7892 (pows @0 (mult @1 { build_real_truncate (type, dconst_third ()); })))
7893 /* pow(sqrt(x),y) -> pow(x,y*0.5). */
7895 (pows (sqrts @0) @1)
7896 (pows @0 (mult @1 { build_real (type, dconsthalf); })))
7897 /* pow(cbrt(x),y) -> pow(x,y/3) iff x is nonnegative. */
7899 (pows (cbrts tree_expr_nonnegative_p@0) @1)
7900 (pows @0 (mult @1 { build_real_truncate (type, dconst_third ()); })))
7901 /* pow(pow(x,y),z) -> pow(x,y*z) iff x is nonnegative. */
7903 (pows (pows tree_expr_nonnegative_p@0 @1) @2)
7904 (pows @0 (mult @1 @2))))
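/* Illustrative sketches (plain C with <math.h>, only with unsafe math
   optimizations during canonicalization) of the root/pow rules above:
     sqrt (sqrt (x))       becomes  pow (x, 0.25)
     pow (sqrt (x), y)     becomes  pow (x, y * 0.5)
     pow (pow (x, y), z)   becomes  pow (x, y * z)   (x nonnegative)  */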
7906 /* cabs(x+xi) -> fabs(x)*sqrt(2). */
7908 (CABS (complex @0 @0))
7909 (mult (abs @0) { build_real_truncate (type, dconst_sqrt2 ()); }))
7911 /* hypot(x,x) -> fabs(x)*sqrt(2). */
7914 (mult (abs @0) { build_real_truncate (type, dconst_sqrt2 ()); }))
7916 /* cexp(x+yi) -> exp(x)*cexpi(y). */
7921 (cexps compositional_complex@0)
7922 (if (targetm.libc_has_function (function_c99_math_complex, TREE_TYPE (@0)))
7924 (mult (exps@1 (realpart @0)) (realpart (cexpis:type@2 (imagpart @0))))
7925 (mult @1 (imagpart @2)))))))
7927 (if (canonicalize_math_p ())
7928 /* floor(x) -> trunc(x) if x is nonnegative. */
7929 (for floors (FLOOR_ALL)
7932 (floors tree_expr_nonnegative_p@0)
7935 (match double_value_p
7937 (if (TYPE_MAIN_VARIANT (TREE_TYPE (@0)) == double_type_node)))
7938 (for froms (BUILT_IN_TRUNCL
7950 /* truncl(extend(x)) -> extend(trunc(x)), etc., if x is a double. */
7951 (if (optimize && canonicalize_math_p ())
7953 (froms (convert double_value_p@0))
7954 (convert (tos @0)))))
7956 (match float_value_p
7958 (if (TYPE_MAIN_VARIANT (TREE_TYPE (@0)) == float_type_node)))
7959 (for froms (BUILT_IN_TRUNCL BUILT_IN_TRUNC
7960 BUILT_IN_FLOORL BUILT_IN_FLOOR
7961 BUILT_IN_CEILL BUILT_IN_CEIL
7962 BUILT_IN_ROUNDL BUILT_IN_ROUND
7963 BUILT_IN_NEARBYINTL BUILT_IN_NEARBYINT
7964 BUILT_IN_RINTL BUILT_IN_RINT)
7965 tos (BUILT_IN_TRUNCF BUILT_IN_TRUNCF
7966 BUILT_IN_FLOORF BUILT_IN_FLOORF
7967 BUILT_IN_CEILF BUILT_IN_CEILF
7968 BUILT_IN_ROUNDF BUILT_IN_ROUNDF
7969 BUILT_IN_NEARBYINTF BUILT_IN_NEARBYINTF
7970 BUILT_IN_RINTF BUILT_IN_RINTF)
7971 /* truncl(extend(x)) and trunc(extend(x)) -> extend(truncf(x)), etc., if x is a float. */
7973 (if (optimize && canonicalize_math_p ()
7974 && targetm.libc_has_function (function_c99_misc, NULL_TREE))
7976 (froms (convert float_value_p@0))
7977 (convert (tos @0)))))
7980 (match float16_value_p
7982 (if (TYPE_MAIN_VARIANT (TREE_TYPE (@0)) == float16_type_node)))
7983 (for froms (BUILT_IN_TRUNCL BUILT_IN_TRUNC BUILT_IN_TRUNCF
7984 BUILT_IN_FLOORL BUILT_IN_FLOOR BUILT_IN_FLOORF
7985 BUILT_IN_CEILL BUILT_IN_CEIL BUILT_IN_CEILF
7986 BUILT_IN_ROUNDEVENL BUILT_IN_ROUNDEVEN BUILT_IN_ROUNDEVENF
7987 BUILT_IN_ROUNDL BUILT_IN_ROUND BUILT_IN_ROUNDF
7988 BUILT_IN_NEARBYINTL BUILT_IN_NEARBYINT BUILT_IN_NEARBYINTF
7989 BUILT_IN_RINTL BUILT_IN_RINT BUILT_IN_RINTF
7990 BUILT_IN_SQRTL BUILT_IN_SQRT BUILT_IN_SQRTF)
7991 tos (IFN_TRUNC IFN_TRUNC IFN_TRUNC
7992 IFN_FLOOR IFN_FLOOR IFN_FLOOR
7993 IFN_CEIL IFN_CEIL IFN_CEIL
7994 IFN_ROUNDEVEN IFN_ROUNDEVEN IFN_ROUNDEVEN
7995 IFN_ROUND IFN_ROUND IFN_ROUND
7996 IFN_NEARBYINT IFN_NEARBYINT IFN_NEARBYINT
7997 IFN_RINT IFN_RINT IFN_RINT
7998 IFN_SQRT IFN_SQRT IFN_SQRT)
7999 /* (_Float16) round ((double) x) -> __builtin_roundf16 (x), etc.,
8000 if x is a _Float16. */
8002 (convert (froms (convert float16_value_p@0)))
8004 && types_match (type, TREE_TYPE (@0))
8005 && direct_internal_fn_supported_p (as_internal_fn (tos),
8006 type, OPTIMIZE_FOR_BOTH))
8009 /* Simplify (trunc)copysign ((extend)x, (extend)y) to copysignf (x, y),
8010 where x and y are float values; similarly for _Float16/double. */
8011 (for copysigns (COPYSIGN_ALL)
8013 (convert (copysigns (convert@2 @0) (convert @1)))
8015 && !HONOR_SNANS (@2)
8016 && types_match (type, TREE_TYPE (@0))
8017 && types_match (type, TREE_TYPE (@1))
8018 && TYPE_PRECISION (type) < TYPE_PRECISION (TREE_TYPE (@2))
8019 && direct_internal_fn_supported_p (IFN_COPYSIGN,
8020 type, OPTIMIZE_FOR_BOTH))
8021 (IFN_COPYSIGN @0 @1))))
8023 (for froms (BUILT_IN_FMAF BUILT_IN_FMA BUILT_IN_FMAL)
8024 tos (IFN_FMA IFN_FMA IFN_FMA)
8026 (convert (froms (convert@3 @0) (convert @1) (convert @2)))
8027 (if (flag_unsafe_math_optimizations
8029 && FLOAT_TYPE_P (type)
8030 && FLOAT_TYPE_P (TREE_TYPE (@3))
8031 && types_match (type, TREE_TYPE (@0))
8032 && types_match (type, TREE_TYPE (@1))
8033 && types_match (type, TREE_TYPE (@2))
8034 && TYPE_PRECISION (type) < TYPE_PRECISION (TREE_TYPE (@3))
8035 && direct_internal_fn_supported_p (as_internal_fn (tos),
8036 type, OPTIMIZE_FOR_BOTH))
8039 (for maxmin (max min)
8041 (convert (maxmin (convert@2 @0) (convert @1)))
8043 && FLOAT_TYPE_P (type)
8044 && FLOAT_TYPE_P (TREE_TYPE (@2))
8045 && types_match (type, TREE_TYPE (@0))
8046 && types_match (type, TREE_TYPE (@1))
8047 && element_precision (type) < element_precision (TREE_TYPE (@2)))
8051 (for froms (XFLOORL XCEILL XROUNDL XRINTL)
8052 tos (XFLOOR XCEIL XROUND XRINT)
8053 /* llfloorl(extend(x)) -> llfloor(x), etc., if x is a double. */
8054 (if (optimize && canonicalize_math_p ())
8056 (froms (convert double_value_p@0))
8059 (for froms (XFLOORL XCEILL XROUNDL XRINTL
8060 XFLOOR XCEIL XROUND XRINT)
8061 tos (XFLOORF XCEILF XROUNDF XRINTF)
8062 /* llfloorl(extend(x)) and llfloor(extend(x)) -> llfloorf(x), etc., if x is a float. */
8064 (if (optimize && canonicalize_math_p ())
8066 (froms (convert float_value_p@0))
8069 (if (canonicalize_math_p ())
8070 /* xfloor(x) -> fix_trunc(x) if x is nonnegative. */
8071 (for floors (IFLOOR LFLOOR LLFLOOR)
8073 (floors tree_expr_nonnegative_p@0)
8076 (if (canonicalize_math_p ())
8077 /* xfloor(x) -> fix_trunc(x), etc., if x is integer valued. */
8078 (for fns (IFLOOR LFLOOR LLFLOOR
8080 IROUND LROUND LLROUND)
8082 (fns integer_valued_real_p@0)
8084 (if (!flag_errno_math)
8085 /* xrint(x) -> fix_trunc(x), etc., if x is integer valued. */
8086 (for rints (IRINT LRINT LLRINT)
8088 (rints integer_valued_real_p@0)
8091 (if (canonicalize_math_p ())
8092 (for ifn (IFLOOR ICEIL IROUND IRINT)
8093 lfn (LFLOOR LCEIL LROUND LRINT)
8094 llfn (LLFLOOR LLCEIL LLROUND LLRINT)
8095 /* Canonicalize iround (x) to lround (x) on ILP32 targets where
8096 sizeof (int) == sizeof (long). */
8097 (if (TYPE_PRECISION (integer_type_node)
8098 == TYPE_PRECISION (long_integer_type_node))
8101 (lfn:long_integer_type_node @0)))
8102 /* Canonicalize llround (x) to lround (x) on LP64 targets where
8103 sizeof (long long) == sizeof (long). */
8104 (if (TYPE_PRECISION (long_long_integer_type_node)
8105 == TYPE_PRECISION (long_integer_type_node))
8108 (lfn:long_integer_type_node @0)))))
8110 /* cproj(x) -> x if we're ignoring infinities. */
8113 (if (!HONOR_INFINITIES (type))
8116 /* If the real part is inf and the imag part is known to be
8117 nonnegative, return (inf + 0i). */
8119 (CPROJ (complex REAL_CST@0 tree_expr_nonnegative_p@1))
8120 (if (real_isinf (TREE_REAL_CST_PTR (@0)))
8121 { build_complex_inf (type, false); }))
8123 /* If the imag part is inf, return (inf+I*copysign(0,imag)). */
8125 (CPROJ (complex @0 REAL_CST@1))
8126 (if (real_isinf (TREE_REAL_CST_PTR (@1)))
8127 { build_complex_inf (type, TREE_REAL_CST_PTR (@1)->sign); }))
8133 (pows @0 REAL_CST@1)
8135 const REAL_VALUE_TYPE *value = TREE_REAL_CST_PTR (@1);
8136 REAL_VALUE_TYPE tmp;
8139 /* pow(x,0) -> 1. */
8140 (if (real_equal (value, &dconst0))
8141 { build_real (type, dconst1); })
8142 /* pow(x,1) -> x. */
8143 (if (real_equal (value, &dconst1))
8145 /* pow(x,-1) -> 1/x. */
8146 (if (real_equal (value, &dconstm1))
8147 (rdiv { build_real (type, dconst1); } @0))
8148 /* pow(x,0.5) -> sqrt(x). */
8149 (if (flag_unsafe_math_optimizations
8150 && canonicalize_math_p ()
8151 && real_equal (value, &dconsthalf))
8153 /* pow(x,1/3) -> cbrt(x). */
8154 (if (flag_unsafe_math_optimizations
8155 && canonicalize_math_p ()
8156 && (tmp = real_value_truncate (TYPE_MODE (type), dconst_third ()),
8157 real_equal (value, &tmp)))
8160 /* powi(1,x) -> 1. */
8162 (POWI real_onep@0 @1)
8166 (POWI @0 INTEGER_CST@1)
8168 /* powi(x,0) -> 1. */
8169 (if (wi::to_wide (@1) == 0)
8170 { build_real (type, dconst1); })
8171 /* powi(x,1) -> x. */
8172 (if (wi::to_wide (@1) == 1)
8174 /* powi(x,-1) -> 1/x. */
8175 (if (wi::to_wide (@1) == -1)
8176 (rdiv { build_real (type, dconst1); } @0))))
8178 /* Narrowing of arithmetic and logical operations.
8180 These are conceptually similar to the transformations performed for
8181 the C/C++ front-ends by shorten_binary_op and shorten_compare. Long
8182 term we want to move all that code out of the front-ends into here. */
8184 /* Convert (outertype)((innertype0)a+(innertype1)b)
8185 into ((newtype)a+(newtype)b) where newtype
8186 is the widest mode from all of these. */
8187 (for op (plus minus mult rdiv)
8189 (convert (op:s@0 (convert1?@3 @1) (convert2?@4 @2)))
8190 /* If we have a narrowing conversion of an arithmetic operation where
8191 both operands are widening conversions from the same type as the outer
8192 narrowing conversion, then convert the innermost operands to a
8193 suitable unsigned type (to avoid introducing undefined behavior),
8194 perform the operation and convert the result to the desired type. */
8195 (if (INTEGRAL_TYPE_P (type)
8198 /* We check for type compatibility between @0 and @1 below,
8199 so there's no need to check that @2/@4 are integral types. */
8200 && INTEGRAL_TYPE_P (TREE_TYPE (@1))
8201 && INTEGRAL_TYPE_P (TREE_TYPE (@3))
8202 /* The precision of the type of each operand must match the
8203 precision of the mode of each operand, similarly for the
8205 && type_has_mode_precision_p (TREE_TYPE (@1))
8206 && type_has_mode_precision_p (TREE_TYPE (@2))
8207 && type_has_mode_precision_p (type)
8208 /* The inner conversion must be a widening conversion. */
8209 && TYPE_PRECISION (TREE_TYPE (@3)) > TYPE_PRECISION (TREE_TYPE (@1))
8210 && types_match (@1, type)
8211 && (types_match (@1, @2)
8212 /* Or the second operand is const integer or converted const
8213 integer from valueize. */
8214 || poly_int_tree_p (@4)))
8215 (if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1)))
8216 (op @1 (convert @2))
8217 (with { tree utype = unsigned_type_for (TREE_TYPE (@1)); }
8218 (convert (op (convert:utype @1)
8219 (convert:utype @2)))))
8220 (if (FLOAT_TYPE_P (type)
8221 && DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@0))
8222 == DECIMAL_FLOAT_TYPE_P (type))
8223 (with { tree arg0 = strip_float_extensions (@1);
8224 tree arg1 = strip_float_extensions (@2);
8225 tree itype = TREE_TYPE (@0);
8226 tree ty1 = TREE_TYPE (arg0);
8227 tree ty2 = TREE_TYPE (arg1);
8228 enum tree_code code = TREE_CODE (itype); }
8229 (if (FLOAT_TYPE_P (ty1)
8230 && FLOAT_TYPE_P (ty2))
8231 (with { tree newtype = type;
8232 if (TYPE_MODE (ty1) == SDmode
8233 || TYPE_MODE (ty2) == SDmode
8234 || TYPE_MODE (type) == SDmode)
8235 newtype = dfloat32_type_node;
8236 if (TYPE_MODE (ty1) == DDmode
8237 || TYPE_MODE (ty2) == DDmode
8238 || TYPE_MODE (type) == DDmode)
8239 newtype = dfloat64_type_node;
8240 if (TYPE_MODE (ty1) == TDmode
8241 || TYPE_MODE (ty2) == TDmode
8242 || TYPE_MODE (type) == TDmode)
8243 newtype = dfloat128_type_node; }
8244 (if ((newtype == dfloat32_type_node
8245 || newtype == dfloat64_type_node
8246 || newtype == dfloat128_type_node)
8248 && types_match (newtype, type))
8249 (op (convert:newtype @1) (convert:newtype @2))
8250 (with { if (element_precision (ty1) > element_precision (newtype))
8252 if (element_precision (ty2) > element_precision (newtype))
8254 /* Sometimes this transformation is safe (cannot
8255 change results through affecting double rounding
8256 cases) and sometimes it is not. If NEWTYPE is
8257 wider than TYPE, e.g. (float)((long double)double
8258 + (long double)double) converted to
8259 (float)(double + double), the transformation is
8260 unsafe regardless of the details of the types
8261 involved; double rounding can arise if the result
8262 of NEWTYPE arithmetic is a NEWTYPE value half way
8263 between two representable TYPE values but the
8264 exact value is sufficiently different (in the
8265 right direction) for this difference to be
8266 visible in ITYPE arithmetic. If NEWTYPE is the
8267 same as TYPE, however, the transformation may be
8268 safe depending on the types involved: it is safe
8269 if the ITYPE has strictly more than twice as many
8270 mantissa bits as TYPE, can represent infinities
8271 and NaNs if the TYPE can, and has sufficient
8272 exponent range for the product or ratio of two
8273 values representable in the TYPE to be within the
8274 range of normal values of ITYPE. */
8275 (if (element_precision (newtype) < element_precision (itype)
8276 && (!VECTOR_MODE_P (TYPE_MODE (newtype))
8277 || target_supports_op_p (newtype, op, optab_default))
8278 && (flag_unsafe_math_optimizations
8279 || (element_precision (newtype) == element_precision (type)
8280 && real_can_shorten_arithmetic (element_mode (itype),
8281 element_mode (type))
8282 && !excess_precision_type (newtype)))
8283 && !types_match (itype, newtype))
8284 (convert:type (op (convert:newtype @1)
8285 (convert:newtype @2)))
8290 /* This is another case of narrowing, specifically when there's an outer
8291 BIT_AND_EXPR which masks off bits outside the type of the innermost
8292 operands. Like the previous case we have to convert the operands
8293 to unsigned types to avoid introducing undefined behavior for the
8294 arithmetic operation. */
8295 (for op (minus plus)
8297 (bit_and (op:s (convert@2 @0) (convert@3 @1)) INTEGER_CST@4)
8298 (if (INTEGRAL_TYPE_P (type)
8299 /* We check for type compatibility between @0 and @1 below,
8300 so there's no need to check that @1/@3 are integral types. */
8301 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
8302 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
8303 /* The precision of the type of each operand must match the
8304 precision of the mode of each operand, similarly for the
8306 && type_has_mode_precision_p (TREE_TYPE (@0))
8307 && type_has_mode_precision_p (TREE_TYPE (@1))
8308 && type_has_mode_precision_p (type)
8309 /* The inner conversion must be a widening conversion. */
8310 && TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (TREE_TYPE (@0))
8311 && types_match (@0, @1)
8312 && (tree_int_cst_min_precision (@4, TYPE_SIGN (TREE_TYPE (@0)))
8313 <= TYPE_PRECISION (TREE_TYPE (@0)))
8314 && (wi::to_wide (@4)
8315 & wi::mask (TYPE_PRECISION (TREE_TYPE (@0)),
8316 true, TYPE_PRECISION (type))) == 0)
8317 (if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
8318 (with { tree ntype = TREE_TYPE (@0); }
8319 (convert (bit_and (op @0 @1) (convert:ntype @4))))
8320 (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
8321 (convert (bit_and (op (convert:utype @0) (convert:utype @1))
8322 (convert:utype @4))))))))
8324 /* Transform (@0 < @1 and @0 < @2) to use min,
8325 (@0 > @1 and @0 > @2) to use max */
8326 (for logic (bit_and bit_and bit_and bit_and bit_ior bit_ior bit_ior bit_ior)
8327 op (lt le gt ge lt le gt ge )
8328 ext (min min max max max max min min )
8330 (logic (op:cs @0 @1) (op:cs @0 @2))
8331 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
8332 && TREE_CODE (@0) != INTEGER_CST)
8333 (op @0 (ext @1 @2)))))
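/* Illustrative sketch (plain C, assuming the && has already been lowered to
   an unconditional AND of the two compares):
     int below_both (int a, int b, int c) { return a < b && a < c; }
   folds to a single comparison against a MIN_EXPR, roughly
     int below_both (int a, int b, int c) { return a < (b < c ? b : c); }
   and the || / > variants use MAX_EXPR analogously.  */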
8335 /* Max<bool0, bool1> -> bool0 | bool1
8336 Min<bool0, bool1> -> bool0 & bool1 */
8338 logic (bit_ior bit_and)
8340 (op zero_one_valued_p@0 zero_one_valued_p@1)
8343 /* signbit(x) != 0 ? -x : x -> abs(x)
8344 signbit(x) == 0 ? -x : x -> -abs(x) */
8348 (cond (neeq (sign @0) integer_zerop) (negate @0) @0)
8349 (if (neeq == NE_EXPR)
8351 (negate (abs @0))))))
8354 /* signbit(x) -> 0 if x is nonnegative. */
8355 (SIGNBIT tree_expr_nonnegative_p@0)
8356 { integer_zero_node; })
8359 /* signbit(x) -> x<0 if x doesn't have signed zeros. */
8361 (if (!HONOR_SIGNED_ZEROS (@0))
8362 (convert (lt @0 { build_real (TREE_TYPE (@0), dconst0); }))))
8364 /* Transform comparisons of the form X +- C1 CMP C2 to X CMP C2 -+ C1. */
8366 (for op (plus minus)
8369 (cmp (op@3 @0 INTEGER_CST@1) INTEGER_CST@2)
8370 (if (!TREE_OVERFLOW (@1) && !TREE_OVERFLOW (@2)
8371 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0))
8372 && !TYPE_OVERFLOW_TRAPS (TREE_TYPE (@0))
8373 && !TYPE_SATURATING (TREE_TYPE (@0)))
8374 (with { tree res = int_const_binop (rop, @2, @1); }
8375 (if (TREE_OVERFLOW (res)
8376 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
8377 { constant_boolean_node (cmp == NE_EXPR, type); }
8378 (if (single_use (@3))
8379 (cmp @0 { TREE_OVERFLOW (res)
8380 ? drop_tree_overflow (res) : res; }))))))))
8381 (for cmp (lt le gt ge)
8382 (for op (plus minus)
8385 (cmp (op@3 @0 INTEGER_CST@1) INTEGER_CST@2)
8386 (if (!TREE_OVERFLOW (@1) && !TREE_OVERFLOW (@2)
8387 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
8388 (with { tree res = int_const_binop (rop, @2, @1); }
8389 (if (TREE_OVERFLOW (res))
8391 fold_overflow_warning (("assuming signed overflow does not occur "
8392 "when simplifying conditional to constant"),
8393 WARN_STRICT_OVERFLOW_CONDITIONAL);
8394 bool less = cmp == LE_EXPR || cmp == LT_EXPR;
8395 /* wi::ges_p (@2, 0) should be sufficient for a signed type. */
8396 bool ovf_high = wi::lt_p (wi::to_wide (@1), 0,
8397 TYPE_SIGN (TREE_TYPE (@1)))
8398 != (op == MINUS_EXPR);
8399 constant_boolean_node (less == ovf_high, type);
8401 (if (single_use (@3))
8404 fold_overflow_warning (("assuming signed overflow does not occur "
8405 "when changing X +- C1 cmp C2 to "
8407 WARN_STRICT_OVERFLOW_COMPARISON);
8409 (cmp @0 { res; })))))))))
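/* Illustrative sketch (plain C): when the adjusted constant does not
   overflow,
     int f (int x) { return x + 10 == 20; }
   folds to
     int f (int x) { return x == 10; }
   and the ordered comparisons are moved the same way only when signed
   overflow is undefined for the type.  */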
8411 /* Canonicalizations of BIT_FIELD_REFs. */
8414 (BIT_FIELD_REF (BIT_FIELD_REF @0 @1 @2) @3 @4)
8415 (BIT_FIELD_REF @0 @3 { const_binop (PLUS_EXPR, bitsizetype, @2, @4); }))
8418 (BIT_FIELD_REF (view_convert @0) @1 @2)
8419 (if (! INTEGRAL_TYPE_P (TREE_TYPE (@0))
8420 || type_has_mode_precision_p (TREE_TYPE (@0)))
8421 (BIT_FIELD_REF @0 @1 @2)))
8424 (BIT_FIELD_REF @0 @1 integer_zerop)
8425 (if (tree_int_cst_equal (@1, TYPE_SIZE (TREE_TYPE (@0))))
8429 (BIT_FIELD_REF @0 @1 @2)
8431 (if (TREE_CODE (TREE_TYPE (@0)) == COMPLEX_TYPE
8432 && tree_int_cst_equal (@1, TYPE_SIZE (TREE_TYPE (TREE_TYPE (@0)))))
8434 (if (integer_zerop (@2))
8435 (view_convert (realpart @0)))
8436 (if (tree_int_cst_equal (@2, TYPE_SIZE (TREE_TYPE (TREE_TYPE (@0)))))
8437 (view_convert (imagpart @0)))))
8438 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
8439 && INTEGRAL_TYPE_P (type)
8440 /* On GIMPLE this should only apply to register arguments. */
8441 && (! GIMPLE || is_gimple_reg (@0))
8442 /* A bit-field-ref that referenced the full argument can be stripped. */
8443 && ((compare_tree_int (@1, TYPE_PRECISION (TREE_TYPE (@0))) == 0
8444 && integer_zerop (@2))
8445 /* Low-parts can be reduced to integral conversions.
8446 ??? The following doesn't work for PDP endian. */
8447 || (BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN
8448 /* But only do this after vectorization. */
8449 && canonicalize_math_after_vectorization_p ()
8450 /* Don't even think about BITS_BIG_ENDIAN. */
8451 && TYPE_PRECISION (TREE_TYPE (@0)) % BITS_PER_UNIT == 0
8452 && TYPE_PRECISION (type) % BITS_PER_UNIT == 0
8453 && compare_tree_int (@2, (BYTES_BIG_ENDIAN
8454 ? (TYPE_PRECISION (TREE_TYPE (@0))
8455 - TYPE_PRECISION (type))
8459 /* Simplify vector extracts. */
8462 (BIT_FIELD_REF CONSTRUCTOR@0 @1 @2)
8463 (if (VECTOR_TYPE_P (TREE_TYPE (@0))
8464 && tree_fits_uhwi_p (TYPE_SIZE (type))
8465 && ((tree_to_uhwi (TYPE_SIZE (type))
8466 == tree_to_uhwi (TYPE_SIZE (TREE_TYPE (TREE_TYPE (@0)))))
8467 || (VECTOR_TYPE_P (type)
8468 && (tree_to_uhwi (TYPE_SIZE (TREE_TYPE (type)))
8469 == tree_to_uhwi (TYPE_SIZE (TREE_TYPE (TREE_TYPE (@0))))))))
8472 tree ctor = (TREE_CODE (@0) == SSA_NAME
8473 ? gimple_assign_rhs1 (SSA_NAME_DEF_STMT (@0)) : @0);
8474 tree eltype = TREE_TYPE (TREE_TYPE (ctor));
8475 unsigned HOST_WIDE_INT width = tree_to_uhwi (TYPE_SIZE (eltype));
8476 unsigned HOST_WIDE_INT n = tree_to_uhwi (@1);
8477 unsigned HOST_WIDE_INT idx = tree_to_uhwi (@2);
8480 && (idx % width) == 0
8482 && known_le ((idx + n) / width,
8483 TYPE_VECTOR_SUBPARTS (TREE_TYPE (ctor))))
8488 /* Constructor elements can be subvectors. */
8490 if (CONSTRUCTOR_NELTS (ctor) != 0)
8492 tree cons_elem = TREE_TYPE (CONSTRUCTOR_ELT (ctor, 0)->value);
8493 if (TREE_CODE (cons_elem) == VECTOR_TYPE)
8494 k = TYPE_VECTOR_SUBPARTS (cons_elem);
8496 unsigned HOST_WIDE_INT elt, count, const_k;
8499 /* We keep an exact subset of the constructor elements. */
8500 (if (multiple_p (idx, k, &elt) && multiple_p (n, k, &count))
8501 (if (CONSTRUCTOR_NELTS (ctor) == 0)
8502 { build_zero_cst (type); }
8504 (if (elt < CONSTRUCTOR_NELTS (ctor))
8505 (view_convert { CONSTRUCTOR_ELT (ctor, elt)->value; })
8506 { build_zero_cst (type); })
8507 /* We don't want to emit new CTORs unless the old one goes away.
8508 ??? Eventually allow this if the CTOR ends up constant or
8510 (if (single_use (@0))
8513 vec<constructor_elt, va_gc> *vals;
8514 vec_alloc (vals, count);
8515 bool constant_p = true;
8517 for (unsigned i = 0;
8518 i < count && elt + i < CONSTRUCTOR_NELTS (ctor); ++i)
8520 tree e = CONSTRUCTOR_ELT (ctor, elt + i)->value;
8521 CONSTRUCTOR_APPEND_ELT (vals, NULL_TREE, e);
8522 if (!CONSTANT_CLASS_P (e))
8525 tree evtype = (types_match (TREE_TYPE (type),
8526 TREE_TYPE (TREE_TYPE (ctor)))
8528 : build_vector_type (TREE_TYPE (TREE_TYPE (ctor)),
8530 /* We used to build a CTOR in the non-constant case here
8531 but that's not a GIMPLE value. We'd have to expose this
8532 operation somehow so the code generation can properly
8533 split it out to a separate stmt. */
8534 res = (constant_p ? build_vector_from_ctor (evtype, vals)
8535 : (GIMPLE ? NULL_TREE : build_constructor (evtype, vals)));
8538 (view_convert { res; })))))))
8539 /* The bitfield references a single constructor element. */
8540 (if (k.is_constant (&const_k)
8541 && idx + n <= (idx / const_k + 1) * const_k)
8543 (if (CONSTRUCTOR_NELTS (ctor) <= idx / const_k)
8544 { build_zero_cst (type); })
8546 (view_convert { CONSTRUCTOR_ELT (ctor, idx / const_k)->value; }))
8547 (BIT_FIELD_REF { CONSTRUCTOR_ELT (ctor, idx / const_k)->value; }
8548 @1 { bitsize_int ((idx % const_k) * width); })))))))))
8550 /* Simplify a bit extraction from a bit insertion for the cases with
8551 the inserted element fully covering the extraction or the insertion
8552 not touching the extraction. */
8554 (BIT_FIELD_REF (bit_insert @0 @1 @ipos) @rsize @rpos)
8557 unsigned HOST_WIDE_INT isize;
8558 if (INTEGRAL_TYPE_P (TREE_TYPE (@1)))
8559 isize = TYPE_PRECISION (TREE_TYPE (@1));
8561 isize = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (@1)));
8564 (if ((!INTEGRAL_TYPE_P (TREE_TYPE (@1))
8565 || type_has_mode_precision_p (TREE_TYPE (@1)))
8566 && wi::leu_p (wi::to_wide (@ipos), wi::to_wide (@rpos))
8567 && wi::leu_p (wi::to_wide (@rpos) + wi::to_wide (@rsize),
8568 wi::to_wide (@ipos) + isize))
8569 (BIT_FIELD_REF @1 @rsize { wide_int_to_tree (bitsizetype,
8571 - wi::to_wide (@ipos)); }))
8572 (if (wi::eq_p (wi::to_wide (@ipos), wi::to_wide (@rpos))
8573 && compare_tree_int (@rsize, isize) == 0)
8575 (if (wi::geu_p (wi::to_wide (@ipos),
8576 wi::to_wide (@rpos) + wi::to_wide (@rsize))
8577 || wi::geu_p (wi::to_wide (@rpos),
8578 wi::to_wide (@ipos) + isize))
8579 (BIT_FIELD_REF @0 @rsize @rpos)))))
8581 /* Simplify vector inserts of other vector extracts to a permute. */
8583 (bit_insert @0 (BIT_FIELD_REF@2 @1 @rsize @rpos) @ipos)
8584 (if (VECTOR_TYPE_P (type)
8585 && (VECTOR_MODE_P (TYPE_MODE (type))
8586 || optimize_vectors_before_lowering_p ())
8587 && types_match (@0, @1)
8588 && types_match (TREE_TYPE (TREE_TYPE (@0)), TREE_TYPE (@2))
8589 && TYPE_VECTOR_SUBPARTS (type).is_constant ())
8592 unsigned HOST_WIDE_INT elsz
8593 = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (TREE_TYPE (@1))));
8594 poly_uint64 relt = exact_div (tree_to_poly_uint64 (@rpos), elsz);
8595 poly_uint64 ielt = exact_div (tree_to_poly_uint64 (@ipos), elsz);
8596 unsigned nunits = TYPE_VECTOR_SUBPARTS (type).to_constant ();
8597 vec_perm_builder builder;
8598 builder.new_vector (nunits, nunits, 1);
8599 for (unsigned i = 0; i < nunits; ++i)
8600 builder.quick_push (known_eq (ielt, i) ? nunits + relt : i);
8601 vec_perm_indices sel (builder, 2, nunits);
8603 (if (!VECTOR_MODE_P (TYPE_MODE (type))
8604 || can_vec_perm_const_p (TYPE_MODE (type), TYPE_MODE (type), sel, false))
8605 (vec_perm @0 @1 { vec_perm_indices_to_tree
8606 (build_vector_type (ssizetype, nunits), sel); })))))
8608 (if (canonicalize_math_after_vectorization_p ())
8611 (fmas:c (negate @0) @1 @2)
8612 (IFN_FNMA @0 @1 @2))
8614 (fmas @0 @1 (negate @2))
8617 (fmas:c (negate @0) @1 (negate @2))
8618 (IFN_FNMS @0 @1 @2))
8620 (negate (fmas@3 @0 @1 @2))
8621 (if (single_use (@3))
8622 (IFN_FNMS @0 @1 @2))))
8625 (IFN_FMS:c (negate @0) @1 @2)
8626 (IFN_FNMS @0 @1 @2))
8628 (IFN_FMS @0 @1 (negate @2))
8631 (IFN_FMS:c (negate @0) @1 (negate @2))
8632 (IFN_FNMA @0 @1 @2))
8634 (negate (IFN_FMS@3 @0 @1 @2))
8635 (if (single_use (@3))
8636 (IFN_FNMA @0 @1 @2)))
8639 (IFN_FNMA:c (negate @0) @1 @2)
8642 (IFN_FNMA @0 @1 (negate @2))
8643 (IFN_FNMS @0 @1 @2))
8645 (IFN_FNMA:c (negate @0) @1 (negate @2))
8648 (negate (IFN_FNMA@3 @0 @1 @2))
8649 (if (single_use (@3))
8650 (IFN_FMS @0 @1 @2)))
8653 (IFN_FNMS:c (negate @0) @1 @2)
8656 (IFN_FNMS @0 @1 (negate @2))
8657 (IFN_FNMA @0 @1 @2))
8659 (IFN_FNMS:c (negate @0) @1 (negate @2))
8662 (negate (IFN_FNMS@3 @0 @1 @2))
8663 (if (single_use (@3))
8664 (IFN_FMA @0 @1 @2))))
8666 /* CLZ simplifications. */
8671 (op (clz:s@2 @0) INTEGER_CST@1)
8672 (if (integer_zerop (@1) && single_use (@2))
8673 /* clz(X) == 0 is (int)X < 0 and clz(X) != 0 is (int)X >= 0. */
8674 (with { tree stype = signed_type_for (TREE_TYPE (@0)); }
8675 (cmp (convert:stype @0) { build_zero_cst (stype); }))
8676 /* clz(X) == (prec-1) is X == 1 and clz(X) != (prec-1) is X != 1. */
8677 (if (wi::to_wide (@1) == TYPE_PRECISION (TREE_TYPE (@0)) - 1)
8678 (op @0 { build_one_cst (TREE_TYPE (@0)); }))))))
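/* Illustrative sketch (plain C, 32-bit unsigned x assumed nonzero since
   __builtin_clz (0) is undefined):
     __builtin_clz (x) == 0    becomes  (int) x < 0
     __builtin_clz (x) == 31   becomes  x == 1  */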
8682 (op (IFN_CLZ:s@2 @0 @3) INTEGER_CST@1)
8683 (if (integer_zerop (@1) && single_use (@2))
8684 /* clz(X) == 0 is (int)X < 0 and clz(X) != 0 is (int)X >= 0. */
8685 (with { tree type0 = TREE_TYPE (@0);
8686 tree stype = signed_type_for (TREE_TYPE (@0));
8687 /* Punt if clz(0) == 0. */
8688 if (integer_zerop (@3))
8692 (cmp (convert:stype @0) { build_zero_cst (stype); })))
8693 /* clz(X) == (prec-1) is X == 1 and clz(X) != (prec-1) is X != 1. */
8694 (with { bool ok = true;
8695 tree type0 = TREE_TYPE (@0);
8696 /* Punt if clz(0) == prec - 1. */
8697 if (wi::to_widest (@3) == TYPE_PRECISION (type0) - 1)
8700 (if (ok && wi::to_wide (@1) == (TYPE_PRECISION (type0) - 1))
8701 (op @0 { build_one_cst (type0); }))))))
8703 /* CTZ simplifications. */
8705 (for op (ge gt le lt)
8708 /* __builtin_ctz (x) >= C -> (x & ((1 << C) - 1)) == 0. */
8709 (op (ctz:s @0) INTEGER_CST@1)
8710 (with { bool ok = true;
8711 HOST_WIDE_INT val = 0;
8712 if (!tree_fits_shwi_p (@1))
8716 val = tree_to_shwi (@1);
8717 /* Canonicalize to >= or <. */
8718 if (op == GT_EXPR || op == LE_EXPR)
8720 if (val == HOST_WIDE_INT_MAX)
8726 tree type0 = TREE_TYPE (@0);
8727 int prec = TYPE_PRECISION (type0);
8729 (if (ok && prec <= MAX_FIXED_MODE_SIZE)
8731 { constant_boolean_node (cmp == EQ_EXPR ? true : false, type); }
8733 { constant_boolean_node (cmp == EQ_EXPR ? false : true, type); }
8734 (cmp (bit_and @0 { wide_int_to_tree (type0,
8735 wi::mask (val, false, prec)); })
8736 { build_zero_cst (type0); })))))))
8739 /* __builtin_ctz (x) == C -> (x & ((1 << (C + 1)) - 1)) == (1 << C). */
8740 (op (ctz:s @0) INTEGER_CST@1)
8741 (with { tree type0 = TREE_TYPE (@0);
8742 int prec = TYPE_PRECISION (type0);
8744 (if (prec <= MAX_FIXED_MODE_SIZE)
8745 (if (tree_int_cst_sgn (@1) < 0 || wi::to_widest (@1) >= prec)
8746 { constant_boolean_node (op == EQ_EXPR ? false : true, type); }
8747 (op (bit_and @0 { wide_int_to_tree (type0,
8748 wi::mask (tree_to_uhwi (@1) + 1,
8750 { wide_int_to_tree (type0,
8751 wi::shifted_mask (tree_to_uhwi (@1), 1,
8752 false, prec)); })))))))
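/* Illustrative sketch (plain C, x assumed nonzero since __builtin_ctz (0)
   is undefined):
     __builtin_ctz (x) >= 3   becomes  (x & 7) == 0
     __builtin_ctz (x) == 2   becomes  (x & 7) == 4  */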
8753 (for op (ge gt le lt)
8756 /* __builtin_ctz (x) >= C -> (x & ((1 << C) - 1)) == 0. */
8757 (op (IFN_CTZ:s @0 @2) INTEGER_CST@1)
8758 (with { bool ok = true;
8759 HOST_WIDE_INT val = 0;
8760 if (!tree_fits_shwi_p (@1))
8764 val = tree_to_shwi (@1);
8765 /* Canonicalize to >= or <. */
8766 if (op == GT_EXPR || op == LE_EXPR)
8768 if (val == HOST_WIDE_INT_MAX)
8774 HOST_WIDE_INT zero_val = tree_to_shwi (@2);
8775 tree type0 = TREE_TYPE (@0);
8776 int prec = TYPE_PRECISION (type0);
8777 if (prec > MAX_FIXED_MODE_SIZE)
8781 (if (ok && zero_val >= val)
8782 { constant_boolean_node (cmp == EQ_EXPR ? true : false, type); })
8784 (if (ok && zero_val < val)
8785 { constant_boolean_node (cmp == EQ_EXPR ? false : true, type); })
8786 (if (ok && (zero_val < 0 || zero_val >= prec))
8787 (cmp (bit_and @0 { wide_int_to_tree (type0,
8788 wi::mask (val, false, prec)); })
8789 { build_zero_cst (type0); })))))))
8792 /* __builtin_ctz (x) == C -> (x & ((1 << (C + 1)) - 1)) == (1 << C). */
8793 (op (IFN_CTZ:s @0 @2) INTEGER_CST@1)
8794 (with { HOST_WIDE_INT zero_val = tree_to_shwi (@2);
8795 tree type0 = TREE_TYPE (@0);
8796 int prec = TYPE_PRECISION (type0);
8798 (if (prec <= MAX_FIXED_MODE_SIZE)
8799 (if (tree_int_cst_sgn (@1) < 0 || wi::to_widest (@1) >= prec)
8800 (if (zero_val != wi::to_widest (@1))
8801 { constant_boolean_node (op == EQ_EXPR ? false : true, type); })
8802 (if (zero_val < 0 || zero_val >= prec)
8803 (op (bit_and @0 { wide_int_to_tree (type0,
8804 wi::mask (tree_to_uhwi (@1) + 1,
8806 { wide_int_to_tree (type0,
8807 wi::shifted_mask (tree_to_uhwi (@1), 1,
8808 false, prec)); })))))))
8811 /* ctz(ext(X)) == ctz(X). Valid just for the UB at zero cases though. */
8813 (CTZ (convert@1 @0))
8814 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
8815 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
8816 && TYPE_PRECISION (TREE_TYPE (@1)) > TYPE_PRECISION (TREE_TYPE (@0)))
8817 (with { combined_fn cfn = CFN_LAST;
8818 tree type0 = TREE_TYPE (@0);
8819 if (TREE_CODE (type0) == BITINT_TYPE)
8821 if (TYPE_PRECISION (type0) > MAX_FIXED_MODE_SIZE)
8825 = build_nonstandard_integer_type (TYPE_PRECISION (type0),
8828 type0 = unsigned_type_for (type0);
8830 && direct_internal_fn_supported_p (IFN_CTZ, type0,
8834 && TYPE_PRECISION (TREE_TYPE (@1)) > BITS_PER_WORD
8835 && !direct_internal_fn_supported_p (IFN_CTZ,
8839 if (TYPE_PRECISION (type0)
8840 == TYPE_PRECISION (unsigned_type_node))
8841 cfn = CFN_BUILT_IN_CTZ;
8842 else if (TYPE_PRECISION (type0)
8843 == TYPE_PRECISION (long_long_unsigned_type_node))
8844 cfn = CFN_BUILT_IN_CTZLL;
8846 (if (cfn == CFN_CTZ)
8847 (IFN_CTZ (convert:type0 @0))
8848 (if (cfn == CFN_BUILT_IN_CTZ)
8849 (BUILT_IN_CTZ (convert:type0 @0))
8850 (if (cfn == CFN_BUILT_IN_CTZLL)
8851 (BUILT_IN_CTZLL (convert:type0 @0))))))))
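/* Editorial note: the underlying identity, sketched in C: widening a
   nonzero value does not change its trailing-zero count (zero is excluded
   because the builtins are undefined there).

     #include <assert.h>
     #include <stdint.h>

     void
     ctz_of_widened (uint32_t x)
     {
       if (x != 0)
         assert (__builtin_ctzll ((uint64_t) x) == __builtin_ctz (x));
     }
*/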
8854 /* POPCOUNT simplifications. */
8855 /* popcount(X) + popcount(Y) is popcount(X|Y) when X&Y is known to be zero. */
8857 (plus (POPCOUNT:s @0) (POPCOUNT:s @1))
8858 (if (INTEGRAL_TYPE_P (type)
8859 && (wi::bit_and (widest_int::from (tree_nonzero_bits (@0), UNSIGNED),
8860 widest_int::from (tree_nonzero_bits (@1), UNSIGNED))
8862 (with { tree utype = TREE_TYPE (@0);
8863 if (TYPE_PRECISION (utype) < TYPE_PRECISION (TREE_TYPE (@1)))
8864 utype = TREE_TYPE (@1); }
8865 (POPCOUNT (bit_ior (convert:utype @0) (convert:utype @1))))))
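/* Editorial note: a C sketch of the identity used above; when no bit is
   set in both operands the bit counts simply add up.

     #include <assert.h>
     #include <stdint.h>

     void
     popcount_of_disjoint (uint32_t x, uint32_t y)
     {
       if ((x & y) == 0)
         assert (__builtin_popcount (x) + __builtin_popcount (y)
                 == __builtin_popcount (x | y));
     }
*/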
8867 /* popcount(X) == 0 is X == 0, and related (in)equalities. */
8868 (for popcount (POPCOUNT)
8869 (for cmp (le eq ne gt)
8872 (cmp (popcount @0) integer_zerop)
8873 (rep @0 { build_zero_cst (TREE_TYPE (@0)); }))))
8875 /* popcount(bswap(x)) is popcount(x). */
8876 (for popcount (POPCOUNT)
8877 (for bswap (BUILT_IN_BSWAP16 BUILT_IN_BSWAP32
8878 BUILT_IN_BSWAP64 BUILT_IN_BSWAP128)
8880 (popcount (convert?@0 (bswap:s@1 @2)))
8881 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
8882 && INTEGRAL_TYPE_P (TREE_TYPE (@1)))
8883 (with { tree type0 = TREE_TYPE (@0);
8884 tree type1 = TREE_TYPE (@1);
8885 unsigned int prec0 = TYPE_PRECISION (type0);
8886 unsigned int prec1 = TYPE_PRECISION (type1); }
8887 (if (prec0 == prec1 || (prec0 > prec1 && TYPE_UNSIGNED (type1)))
8888 (popcount (convert:type0 (convert:type1 @2)))))))))
8890 /* popcount(rotate(X, Y)) is popcount(X). */
8891 (for popcount (POPCOUNT)
8892 (for rot (lrotate rrotate)
8894 (popcount (convert?@0 (rot:s@1 @2 @3)))
8895 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
8896 && INTEGRAL_TYPE_P (TREE_TYPE (@1))
8897 && (GIMPLE || !TREE_SIDE_EFFECTS (@3)))
8898 (with { tree type0 = TREE_TYPE (@0);
8899 tree type1 = TREE_TYPE (@1);
8900 unsigned int prec0 = TYPE_PRECISION (type0);
8901 unsigned int prec1 = TYPE_PRECISION (type1); }
8902 (if (prec0 == prec1 || (prec0 > prec1 && TYPE_UNSIGNED (type1)))
8903 (popcount (convert:type0 @2))))))))
8905 /* Canonicalize POPCOUNT(x)&1 as PARITY(X). */
8907 (bit_and (POPCOUNT @0) integer_onep)
8910 /* popcount(X&Y) + popcount(X|Y) is popcount(X) + popcount(Y). */
8912 (plus:c (POPCOUNT:s (bit_and:s @0 @1)) (POPCOUNT:s (bit_ior:cs @0 @1)))
8913 (plus (POPCOUNT:type @0) (POPCOUNT:type @1)))
8915 /* popcount(X) + popcount(Y) - popcount(X&Y) is popcount(X|Y). */
8916 /* popcount(X) + popcount(Y) - popcount(X|Y) is popcount(X&Y). */
8917 (for popcount (POPCOUNT)
8918 (for log1 (bit_and bit_ior)
8919 log2 (bit_ior bit_and)
8921 (minus (plus:s (popcount:s @0) (popcount:s @1))
8922 (popcount:s (log1:cs @0 @1)))
8923 (popcount (log2 @0 @1)))
8925 (plus:c (minus:s (popcount:s @0) (popcount:s (log1:cs @0 @1)))
8927 (popcount (log2 @0 @1)))))
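/* Editorial note: both folds above are instances of the inclusion-exclusion
   identity for bit counts, which holds for all inputs; a C sketch:

     #include <assert.h>
     #include <stdint.h>

     void
     popcount_incl_excl (uint32_t x, uint32_t y)
     {
       assert (__builtin_popcount (x & y) + __builtin_popcount (x | y)
               == __builtin_popcount (x) + __builtin_popcount (y));
     }
*/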
8930 /* popcount(zext(X)) == popcount(X). */
8932 (POPCOUNT (convert@1 @0))
8933 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
8934 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
8935 && TYPE_UNSIGNED (TREE_TYPE (@0))
8936 && TYPE_PRECISION (TREE_TYPE (@1)) > TYPE_PRECISION (TREE_TYPE (@0)))
8937 (with { combined_fn cfn = CFN_LAST;
8938 tree type0 = TREE_TYPE (@0);
8939 if (TREE_CODE (type0) == BITINT_TYPE)
8941 if (TYPE_PRECISION (type0) > MAX_FIXED_MODE_SIZE)
8945 = build_nonstandard_integer_type (TYPE_PRECISION (type0),
8949 && direct_internal_fn_supported_p (IFN_POPCOUNT, type0,
8953 && TYPE_PRECISION (TREE_TYPE (@1)) > BITS_PER_WORD
8954 && !direct_internal_fn_supported_p (IFN_POPCOUNT,
8958 if (TYPE_PRECISION (type0)
8959 == TYPE_PRECISION (unsigned_type_node))
8960 cfn = CFN_BUILT_IN_POPCOUNT;
8961 else if (TYPE_PRECISION (type0)
8962 == TYPE_PRECISION (long_long_unsigned_type_node))
8963 cfn = CFN_BUILT_IN_POPCOUNTLL;
8965 (if (cfn == CFN_POPCOUNT)
8966 (IFN_POPCOUNT (convert:type0 @0))
8967 (if (cfn == CFN_BUILT_IN_POPCOUNT)
8968 (BUILT_IN_POPCOUNT (convert:type0 @0))
8969 (if (cfn == CFN_BUILT_IN_POPCOUNTLL)
8970 (BUILT_IN_POPCOUNTLL (convert:type0 @0))))))))
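/* Editorial note: the identity behind the pattern above, sketched in C;
   zero extension adds only zero bits, so the population count of the wide
   value equals that of the narrow one.

     #include <assert.h>
     #include <stdint.h>

     void
     popcount_of_zext (uint32_t x)
     {
       assert (__builtin_popcountll ((uint64_t) x) == __builtin_popcount (x));
     }
*/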
8973 /* PARITY simplifications. */
8974 /* parity(~X) is parity(X). */
8976 (PARITY (bit_not @0))
8979 /* parity(bswap(x)) is parity(x). */
8980 (for parity (PARITY)
8981 (for bswap (BUILT_IN_BSWAP16 BUILT_IN_BSWAP32
8982 BUILT_IN_BSWAP64 BUILT_IN_BSWAP128)
8984 (parity (convert?@0 (bswap:s@1 @2)))
8985 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
8986 && INTEGRAL_TYPE_P (TREE_TYPE (@1))
8987 && TYPE_PRECISION (TREE_TYPE (@0))
8988 >= TYPE_PRECISION (TREE_TYPE (@1)))
8989 (with { tree type0 = TREE_TYPE (@0);
8990 tree type1 = TREE_TYPE (@1); }
8991 (parity (convert:type0 (convert:type1 @2))))))))
8993 /* parity(rotate(X, Y)) is parity(X). */
8994 (for parity (PARITY)
8995 (for rot (lrotate rrotate)
8997 (parity (convert?@0 (rot:s@1 @2 @3)))
8998 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
8999 && INTEGRAL_TYPE_P (TREE_TYPE (@1))
9000 && (GIMPLE || !TREE_SIDE_EFFECTS (@3))
9001 && TYPE_PRECISION (TREE_TYPE (@0))
9002 >= TYPE_PRECISION (TREE_TYPE (@1)))
9003 (with { tree type0 = TREE_TYPE (@0); }
9004 (parity (convert:type0 @2)))))))
9006 /* parity(X)^parity(Y) is parity(X^Y). */
9008 (bit_xor (PARITY:s @0) (PARITY:s @1))
9009 (if (types_match (TREE_TYPE (@0), TREE_TYPE (@1)))
9010 (PARITY (bit_xor @0 @1))
9011 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
9012 && INTEGRAL_TYPE_P (TREE_TYPE (@1)))
9013 (with { tree utype = TREE_TYPE (@0);
9014 if (TYPE_PRECISION (utype) < TYPE_PRECISION (TREE_TYPE (@1)))
9015 utype = TREE_TYPE (@1); }
9016 (PARITY (bit_xor (convert:utype @0) (convert:utype @1)))))))
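/* Editorial note: parity is the sum of the bits modulo 2, so it distributes
   over exclusive or; a C sketch of the identity used above:

     #include <assert.h>
     #include <stdint.h>

     void
     parity_of_xor (uint32_t x, uint32_t y)
     {
       assert ((__builtin_parity (x) ^ __builtin_parity (y))
               == __builtin_parity (x ^ y));
     }
*/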
9019 /* parity(zext(X)) == parity(X). */
9020 /* parity(sext(X)) == parity(X) if the difference in precision is even. */
9022 (PARITY (convert@1 @0))
9023 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
9024 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
9025 && TYPE_PRECISION (TREE_TYPE (@1)) > TYPE_PRECISION (TREE_TYPE (@0))
9026 && (TYPE_UNSIGNED (TREE_TYPE (@0))
9027 || ((TYPE_PRECISION (TREE_TYPE (@1))
9028 - TYPE_PRECISION (TREE_TYPE (@0))) & 1) == 0))
9029 (with { combined_fn cfn = CFN_LAST;
9030 tree type0 = TREE_TYPE (@0);
9031 if (TREE_CODE (type0) == BITINT_TYPE)
9033 if (TYPE_PRECISION (type0) > MAX_FIXED_MODE_SIZE)
9037 = build_nonstandard_integer_type (TYPE_PRECISION (type0),
9040 type0 = unsigned_type_for (type0);
9042 && direct_internal_fn_supported_p (IFN_PARITY, type0,
9046 && TYPE_PRECISION (TREE_TYPE (@1)) > BITS_PER_WORD
9047 && !direct_internal_fn_supported_p (IFN_PARITY,
9051 if (TYPE_PRECISION (type0)
9052 == TYPE_PRECISION (unsigned_type_node))
9053 cfn = CFN_BUILT_IN_PARITY;
9054 else if (TYPE_PRECISION (type0)
9055 == TYPE_PRECISION (long_long_unsigned_type_node))
9056 cfn = CFN_BUILT_IN_PARITYLL;
9058 (if (cfn == CFN_PARITY)
9059 (IFN_PARITY (convert:type0 @0))
9060 (if (cfn == CFN_BUILT_IN_PARITY)
9061 (BUILT_IN_PARITY (convert:type0 @0))
9062 (if (cfn == CFN_BUILT_IN_PARITYLL)
9063 (BUILT_IN_PARITYLL (convert:type0 @0))))))))
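/* Editorial note: a C sketch of the sign-extension case above.  Sign
   extension replicates the sign bit, so when the precision difference is
   even (here 32 - 8 == 24) an even number of extra set bits is added and
   the parity is unchanged.

     #include <assert.h>
     #include <stdint.h>

     void
     parity_of_sext (int8_t x)
     {
       assert (__builtin_parity ((uint32_t) (int32_t) x)
               == __builtin_parity ((uint8_t) x));
     }
*/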
9066 /* a != 0 ? FUN(a) : 0 -> FUN(a) for some builtin functions. */
9067 (for func (POPCOUNT BSWAP FFS PARITY)
9069 (cond (ne @0 integer_zerop@1) (func@3 (convert? @0)) integer_zerop@2)
9072 /* a != 0 ? FUN(a) : CST -> FUN(a) for some CLRSB builtins
9073 where CST is precision-1. */
9076 (cond (ne @0 integer_zerop@1) (func@4 (convert?@3 @0)) INTEGER_CST@2)
9077 (if (wi::to_widest (@2) == TYPE_PRECISION (TREE_TYPE (@3)) - 1)
9081 /* a != 0 ? CLZ(a) : CST -> .CLZ(a) where CST is the result of the internal function for 0. */
9084 (cond (ne @0 integer_zerop@1) (func (convert?@3 @0)) INTEGER_CST@2)
9086 internal_fn ifn = IFN_LAST;
9087 if (TREE_CODE (TREE_TYPE (@3)) == BITINT_TYPE)
9089 if (tree_fits_shwi_p (@2))
9091 HOST_WIDE_INT valw = tree_to_shwi (@2);
9092 if ((int) valw == valw)
9099 else if (direct_internal_fn_supported_p (IFN_CLZ, TREE_TYPE (@3),
9101 && CLZ_DEFINED_VALUE_AT_ZERO
9102 (SCALAR_INT_TYPE_MODE (TREE_TYPE (@3)), val) == 2)
9105 (if (ifn == IFN_CLZ && wi::to_widest (@2) == val)
9108 (cond (ne @0 integer_zerop@1) (IFN_CLZ (convert?@3 @0) INTEGER_CST@2) @2)
9110 internal_fn ifn = IFN_LAST;
9111 if (TREE_CODE (TREE_TYPE (@3)) == BITINT_TYPE)
9113 else if (direct_internal_fn_supported_p (IFN_CLZ, TREE_TYPE (@3),
9117 (if (ifn == IFN_CLZ)
9120 /* a != 0 ? CTZ(a) : CST -> .CTZ(a) where CST is the result of the internal function for 0. */
9123 (cond (ne @0 integer_zerop@1) (func (convert?@3 @0)) INTEGER_CST@2)
9125 internal_fn ifn = IFN_LAST;
9126 if (TREE_CODE (TREE_TYPE (@3)) == BITINT_TYPE)
9128 if (tree_fits_shwi_p (@2))
9130 HOST_WIDE_INT valw = tree_to_shwi (@2);
9131 if ((int) valw == valw)
9138 else if (direct_internal_fn_supported_p (IFN_CTZ, TREE_TYPE (@3),
9140 && CTZ_DEFINED_VALUE_AT_ZERO
9141 (SCALAR_INT_TYPE_MODE (TREE_TYPE (@3)), val) == 2)
9144 (if (ifn == IFN_CTZ && wi::to_widest (@2) == val)
9147 (cond (ne @0 integer_zerop@1) (IFN_CTZ (convert?@3 @0) INTEGER_CST@2) @2)
9149 internal_fn ifn = IFN_LAST;
9150 if (TREE_CODE (TREE_TYPE (@3)) == BITINT_TYPE)
9152 else if (direct_internal_fn_supported_p (IFN_CTZ, TREE_TYPE (@3),
9156 (if (ifn == IFN_CTZ)
9160 /* Common POPCOUNT/PARITY simplifications. */
9161 /* popcount(X&C1) is (X>>C2)&1 when C1 == 1<<C2. Same for parity(X&C1). */
9162 (for pfun (POPCOUNT PARITY)
9165 (if (INTEGRAL_TYPE_P (type))
9166 (with { wide_int nz = tree_nonzero_bits (@0); }
9170 (if (wi::popcount (nz) == 1)
9171 (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
9172 (convert (rshift:utype (convert:utype @0)
9173 { build_int_cst (integer_type_node,
9174 wi::ctz (nz)); })))))))))
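/* Editorial note: a C sketch of the fold above with the concrete constants
   C1 == 0x10 and C2 == 4 (chosen only for the example); masking with a
   single-bit constant makes the count a plain bit extraction.

     #include <assert.h>
     #include <stdint.h>

     void
     popcount_of_single_bit (uint32_t x)
     {
       assert (__builtin_popcount (x & 0x10) == ((x >> 4) & 1));
       assert (__builtin_parity (x & 0x10) == ((x >> 4) & 1));
     }
*/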
9177 /* 64- and 32-bit branchless implementations of popcount are detected:
9179 int popcount64c (uint64_t x)
9181 x -= (x >> 1) & 0x5555555555555555ULL;
9182 x = (x & 0x3333333333333333ULL) + ((x >> 2) & 0x3333333333333333ULL);
9183 x = (x + (x >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
9184 return (x * 0x0101010101010101ULL) >> 56;
9187 int popcount32c (uint32_t x)
9189 x -= (x >> 1) & 0x55555555;
9190 x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
9191 x = (x + (x >> 4)) & 0x0f0f0f0f;
9192 return (x * 0x01010101) >> 24;
9199 (rshift @8 INTEGER_CST@5)
9201 (bit_and @6 INTEGER_CST@7)
9205 (bit_and (rshift @0 INTEGER_CST@4) INTEGER_CST@11))
9211 /* Check constants and optab. */
9212 (with { unsigned prec = TYPE_PRECISION (type);
9213 int shift = (64 - prec) & 63;
9214 unsigned HOST_WIDE_INT c1
9215 = HOST_WIDE_INT_UC (0x0101010101010101) >> shift;
9216 unsigned HOST_WIDE_INT c2
9217 = HOST_WIDE_INT_UC (0x0F0F0F0F0F0F0F0F) >> shift;
9218 unsigned HOST_WIDE_INT c3
9219 = HOST_WIDE_INT_UC (0x3333333333333333) >> shift;
9220 unsigned HOST_WIDE_INT c4
9221 = HOST_WIDE_INT_UC (0x5555555555555555) >> shift;
9226 && TYPE_UNSIGNED (type)
9227 && integer_onep (@4)
9228 && wi::to_widest (@10) == 2
9229 && wi::to_widest (@5) == 4
9230 && wi::to_widest (@1) == prec - 8
9231 && tree_to_uhwi (@2) == c1
9232 && tree_to_uhwi (@3) == c2
9233 && tree_to_uhwi (@9) == c3
9234 && tree_to_uhwi (@7) == c3
9235 && tree_to_uhwi (@11) == c4)
9236 (if (direct_internal_fn_supported_p (IFN_POPCOUNT, type,
9238 (convert (IFN_POPCOUNT:type @0))
9239 /* Try to do popcount in two halves. PREC must be at least
9240 five bits for this to work without extension before adding. */
9242 tree half_type = NULL_TREE;
9243 opt_machine_mode m = mode_for_size ((prec + 1) / 2, MODE_INT, 1);
9246 && m.require () != TYPE_MODE (type))
9248 half_prec = GET_MODE_PRECISION (as_a <scalar_int_mode> (m));
9249 half_type = build_nonstandard_integer_type (half_prec, 1);
9251 gcc_assert (half_prec > 2);
9253 (if (half_type != NULL_TREE
9254 && direct_internal_fn_supported_p (IFN_POPCOUNT, half_type,
9257 (IFN_POPCOUNT:half_type (convert @0))
9258 (IFN_POPCOUNT:half_type (convert (rshift @0
9259 { build_int_cst (integer_type_node, half_prec); } )))))))))))
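/* Editorial note: the "two halves" fallback above relies on splitting the
   input and summing two narrower population counts; a C sketch for the
   64-bit case:

     #include <assert.h>
     #include <stdint.h>

     void
     popcount_by_halves (uint64_t x)
     {
       assert (__builtin_popcountll (x)
               == __builtin_popcount ((uint32_t) x)
                  + __builtin_popcount ((uint32_t) (x >> 32)));
     }
*/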
9261 /* On many targets __builtin_ffs has to handle a possibly zero
9262 argument.  If we know the argument is always non-zero, __builtin_ctz + 1
9263 should lead to better code.  */
9265 (FFS tree_expr_nonzero_p@0)
9266 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
9267 && direct_internal_fn_supported_p (IFN_CTZ, TREE_TYPE (@0),
9268 OPTIMIZE_FOR_SPEED))
9269 (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
9270 (plus (CTZ:type (convert:utype @0)) { build_one_cst (type); }))))
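/* Editorial note: a C sketch of the ffs/ctz relation used above; for a
   nonzero argument the two differ only in 1-based vs 0-based indexing.

     #include <assert.h>

     void
     ffs_vs_ctz (int x)
     {
       if (x != 0)
         assert (__builtin_ffs (x) == __builtin_ctz ((unsigned) x) + 1);
     }
*/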
9274 /* __builtin_ffs (X) == 0 -> X == 0.
9275 __builtin_ffs (X) == 6 -> (X & 63) == 32. */
9278 (cmp (ffs@2 @0) INTEGER_CST@1)
9279 (with { int prec = TYPE_PRECISION (TREE_TYPE (@0)); }
9281 (if (integer_zerop (@1))
9282 (cmp @0 { build_zero_cst (TREE_TYPE (@0)); }))
9283 (if (tree_int_cst_sgn (@1) < 0 || wi::to_widest (@1) > prec)
9284 { constant_boolean_node (cmp == NE_EXPR ? true : false, type); })
9285 (if (single_use (@2))
9286 (cmp (bit_and @0 { wide_int_to_tree (TREE_TYPE (@0),
9287 wi::mask (tree_to_uhwi (@1),
9289 { wide_int_to_tree (TREE_TYPE (@0),
9290 wi::shifted_mask (tree_to_uhwi (@1) - 1, 1,
9291 false, prec)); }))))))
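/* Editorial note: the "__builtin_ffs (X) == 6" case from the comment above,
   written out in C; bit 5 must be the lowest set bit, i.e. the low six bits
   must be exactly 32.

     #include <assert.h>

     void
     ffs_eq_6 (int x)
     {
       assert ((__builtin_ffs (x) == 6) == (((unsigned) x & 63) == 32));
     }
*/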
9293 /* __builtin_ffs (X) > 6 -> X != 0 && (X & 63) == 0. */
9297 bit_op (bit_and bit_ior)
9299 (cmp (ffs@2 @0) INTEGER_CST@1)
9300 (with { int prec = TYPE_PRECISION (TREE_TYPE (@0)); }
9302 (if (integer_zerop (@1))
9303 (cmp2 @0 { build_zero_cst (TREE_TYPE (@0)); }))
9304 (if (tree_int_cst_sgn (@1) < 0)
9305 { constant_boolean_node (cmp == GT_EXPR ? true : false, type); })
9306 (if (wi::to_widest (@1) >= prec)
9307 { constant_boolean_node (cmp == GT_EXPR ? false : true, type); })
9308 (if (wi::to_widest (@1) == prec - 1)
9309 (cmp3 @0 { wide_int_to_tree (TREE_TYPE (@0),
9310 wi::shifted_mask (prec - 1, 1,
9312 (if (single_use (@2))
9313 (bit_op (cmp2 @0 { build_zero_cst (TREE_TYPE (@0)); })
9315 { wide_int_to_tree (TREE_TYPE (@0),
9316 wi::mask (tree_to_uhwi (@1),
9318 { build_zero_cst (TREE_TYPE (@0)); }))))))))
9321 /* ffs(ext(X)) == ffs(X). */
9323 (FFS (convert@1 @0))
9324 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
9325 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
9326 && TYPE_PRECISION (TREE_TYPE (@1)) > TYPE_PRECISION (TREE_TYPE (@0)))
9327 (with { combined_fn cfn = CFN_LAST;
9328 tree type0 = TREE_TYPE (@0);
9329 if (TREE_CODE (type0) == BITINT_TYPE)
9331 if (TYPE_PRECISION (type0) > MAX_FIXED_MODE_SIZE)
9335 = build_nonstandard_integer_type (TYPE_PRECISION (type0),
9338 type0 = signed_type_for (type0);
9340 && direct_internal_fn_supported_p (IFN_FFS, type0,
9344 && TYPE_PRECISION (TREE_TYPE (@1)) > BITS_PER_WORD
9345 && !direct_internal_fn_supported_p (IFN_FFS,
9349 if (TYPE_PRECISION (type0)
9350 == TYPE_PRECISION (integer_type_node))
9351 cfn = CFN_BUILT_IN_FFS;
9352 else if (TYPE_PRECISION (type0)
9353 == TYPE_PRECISION (long_long_integer_type_node))
9354 cfn = CFN_BUILT_IN_FFSLL;
9356 (if (cfn == CFN_FFS)
9357 (IFN_FFS (convert:type0 @0))
9358 (if (cfn == CFN_BUILT_IN_FFS)
9359 (BUILT_IN_FFS (convert:type0 @0))
9360 (if (cfn == CFN_BUILT_IN_FFSLL)
9361 (BUILT_IN_FFSLL (convert:type0 @0))))))))
9369 --> r = .COND_FN (cond, a, b)
9373 --> r = .COND_FN (~cond, b, a). */
9375 (for uncond_op (UNCOND_UNARY)
9376 cond_op (COND_UNARY)
9378 (vec_cond @0 (view_convert? (uncond_op@3 @1)) @2)
9379 (with { tree op_type = TREE_TYPE (@3); }
9380 (if (vectorized_internal_fn_supported_p (as_internal_fn (cond_op), op_type)
9381 && is_truth_type_for (op_type, TREE_TYPE (@0)))
9382 (cond_op @0 (view_convert @1) @2))))
9384 (vec_cond @0 @1 (view_convert? (uncond_op@3 @2)))
9385 (with { tree op_type = TREE_TYPE (@3); }
9386 (if (vectorized_internal_fn_supported_p (as_internal_fn (cond_op), op_type)
9387 && is_truth_type_for (op_type, TREE_TYPE (@0)))
9388 (cond_op (bit_not @0) (view_convert @2) @1)))))
9390 (for uncond_op (UNCOND_UNARY)
9391 cond_op (COND_LEN_UNARY)
9393 (IFN_VCOND_MASK_LEN @0 (view_convert? (uncond_op@3 @1)) @2 @4 @5)
9394 (with { tree op_type = TREE_TYPE (@3); }
9395 (if (vectorized_internal_fn_supported_p (as_internal_fn (cond_op), op_type)
9396 && is_truth_type_for (op_type, TREE_TYPE (@0)))
9397 (cond_op @0 (view_convert @1) @2 @4 @5))))
9399 (IFN_VCOND_MASK_LEN @0 @1 (view_convert? (uncond_op@3 @2)) @4 @5)
9400 (with { tree op_type = TREE_TYPE (@3); }
9401 (if (vectorized_internal_fn_supported_p (as_internal_fn (cond_op), op_type)
9402 && is_truth_type_for (op_type, TREE_TYPE (@0)))
9403 (cond_op (bit_not @0) (view_convert @2) @1 @4 @5)))))
9405 /* `(a ? -1 : 0) ^ b` can be converted into a conditional not. */
9407 (bit_xor:c (vec_cond @0 uniform_integer_cst_p@1 uniform_integer_cst_p@2) @3)
9408 (if (canonicalize_math_after_vectorization_p ()
9409 && vectorized_internal_fn_supported_p (IFN_COND_NOT, type)
9410 && is_truth_type_for (type, TREE_TYPE (@0)))
9411 (if (integer_all_onesp (@1) && integer_zerop (@2))
9412 (IFN_COND_NOT @0 @3 @3))
9413 (if (integer_all_onesp (@2) && integer_zerop (@1))
9414 (IFN_COND_NOT (bit_not @0) @3 @3))))
9423 r = c ? a1 op a2 : b;
9425 if the target can do it in one go. This makes the operation conditional
9426 on c, so could drop potentially-trapping arithmetic, but that's a valid
9427 simplification if the result of the operation isn't needed.
9429 Avoid speculatively generating a stand-alone vector comparison
9430 on targets that might not support them. Any target implementing
9431 conditional internal functions must support the same comparisons
9432 inside and outside a VEC_COND_EXPR. */
9434 (for uncond_op (UNCOND_BINARY)
9435 cond_op (COND_BINARY)
9437 (vec_cond @0 (view_convert? (uncond_op@4 @1 @2)) @3)
9438 (with { tree op_type = TREE_TYPE (@4); }
9439 (if (vectorized_internal_fn_supported_p (as_internal_fn (cond_op), op_type)
9440 && is_truth_type_for (op_type, TREE_TYPE (@0))
9442 (view_convert (cond_op @0 @1 @2 (view_convert:op_type @3))))))
9444 (vec_cond @0 @1 (view_convert? (uncond_op@4 @2 @3)))
9445 (with { tree op_type = TREE_TYPE (@4); }
9446 (if (vectorized_internal_fn_supported_p (as_internal_fn (cond_op), op_type)
9447 && is_truth_type_for (op_type, TREE_TYPE (@0))
9449 (view_convert (cond_op (bit_not @0) @2 @3 (view_convert:op_type @1)))))))
9451 (for uncond_op (UNCOND_BINARY)
9452 cond_op (COND_LEN_BINARY)
9454 (IFN_VCOND_MASK_LEN @0 (view_convert? (uncond_op@4 @1 @2)) @3 @5 @6)
9455 (with { tree op_type = TREE_TYPE (@4); }
9456 (if (vectorized_internal_fn_supported_p (as_internal_fn (cond_op), op_type)
9457 && is_truth_type_for (op_type, TREE_TYPE (@0))
9459 (view_convert (cond_op @0 @1 @2 (view_convert:op_type @3) @5 @6)))))
9461 (IFN_VCOND_MASK_LEN @0 @1 (view_convert? (uncond_op@4 @2 @3)) @5 @6)
9462 (with { tree op_type = TREE_TYPE (@4); }
9463 (if (vectorized_internal_fn_supported_p (as_internal_fn (cond_op), op_type)
9464 && is_truth_type_for (op_type, TREE_TYPE (@0))
9466 (view_convert (cond_op (bit_not @0) @2 @3 (view_convert:op_type @1) @5 @6))))))
9468 /* Same for ternary operations. */
9469 (for uncond_op (UNCOND_TERNARY)
9470 cond_op (COND_TERNARY)
9472 (vec_cond @0 (view_convert? (uncond_op@5 @1 @2 @3)) @4)
9473 (with { tree op_type = TREE_TYPE (@5); }
9474 (if (vectorized_internal_fn_supported_p (as_internal_fn (cond_op), op_type)
9475 && is_truth_type_for (op_type, TREE_TYPE (@0))
9477 (view_convert (cond_op @0 @1 @2 @3 (view_convert:op_type @4))))))
9479 (vec_cond @0 @1 (view_convert? (uncond_op@5 @2 @3 @4)))
9480 (with { tree op_type = TREE_TYPE (@5); }
9481 (if (vectorized_internal_fn_supported_p (as_internal_fn (cond_op), op_type)
9482 && is_truth_type_for (op_type, TREE_TYPE (@0))
9484 (view_convert (cond_op (bit_not @0) @2 @3 @4
9485 (view_convert:op_type @1)))))))
9487 (for uncond_op (UNCOND_TERNARY)
9488 cond_op (COND_LEN_TERNARY)
9490 (IFN_VCOND_MASK_LEN @0 (view_convert? (uncond_op@5 @1 @2 @3)) @4 @6 @7)
9491 (with { tree op_type = TREE_TYPE (@5); }
9492 (if (vectorized_internal_fn_supported_p (as_internal_fn (cond_op), op_type)
9493 && is_truth_type_for (op_type, TREE_TYPE (@0))
9495 (view_convert (cond_op @0 @1 @2 @3 (view_convert:op_type @4) @6 @7)))))
9497 (IFN_VCOND_MASK_LEN @0 @1 (view_convert? (uncond_op@5 @2 @3 @4 @6 @7)))
9498 (with { tree op_type = TREE_TYPE (@5); }
9499 (if (vectorized_internal_fn_supported_p (as_internal_fn (cond_op), op_type)
9500 && is_truth_type_for (op_type, TREE_TYPE (@0))
9502 (view_convert (cond_op (bit_not @0) @2 @3 @4 (view_convert:op_type @1) @6 @7))))))
9505 /* Detect cases in which a VEC_COND_EXPR effectively replaces the
9506 "else" value of an IFN_COND_*. */
9507 (for cond_op (COND_BINARY)
9509 (vec_cond @0 (view_convert? (cond_op @0 @1 @2 @3)) @4)
9510 (with { tree op_type = TREE_TYPE (@3); }
9511 (if (element_precision (type) == element_precision (op_type))
9512 (view_convert (cond_op @0 @1 @2 (view_convert:op_type @4))))))
9514 (vec_cond @0 @1 (view_convert? (cond_op @2 @3 @4 @5)))
9515 (with { tree op_type = TREE_TYPE (@5); }
9516 (if (inverse_conditions_p (@0, @2)
9517 && element_precision (type) == element_precision (op_type))
9518 (view_convert (cond_op @2 @3 @4 (view_convert:op_type @1)))))))
9520 /* Same for ternary operations. */
9521 (for cond_op (COND_TERNARY)
9523 (vec_cond @0 (view_convert? (cond_op @0 @1 @2 @3 @4)) @5)
9524 (with { tree op_type = TREE_TYPE (@4); }
9525 (if (element_precision (type) == element_precision (op_type))
9526 (view_convert (cond_op @0 @1 @2 @3 (view_convert:op_type @5))))))
9528 (vec_cond @0 @1 (view_convert? (cond_op @2 @3 @4 @5 @6)))
9529 (with { tree op_type = TREE_TYPE (@6); }
9530 (if (inverse_conditions_p (@0, @2)
9531 && element_precision (type) == element_precision (op_type))
9532 (view_convert (cond_op @2 @3 @4 @5 (view_convert:op_type @1)))))))
9534 /* Detect cases in which a VEC_COND_EXPR effectively replaces the
9535 "else" value of an IFN_COND_LEN_*. */
9536 (for cond_len_op (COND_LEN_BINARY)
9538 (vec_cond @0 (view_convert? (cond_len_op @0 @1 @2 @3 @4 @5)) @6)
9539 (with { tree op_type = TREE_TYPE (@3); }
9540 (if (element_precision (type) == element_precision (op_type))
9541 (view_convert (cond_len_op @0 @1 @2 (view_convert:op_type @6) @4 @5)))))
9543 (vec_cond @0 @1 (view_convert? (cond_len_op @2 @3 @4 @5 @6 @7)))
9544 (with { tree op_type = TREE_TYPE (@5); }
9545 (if (inverse_conditions_p (@0, @2)
9546 && element_precision (type) == element_precision (op_type))
9547 (view_convert (cond_len_op @2 @3 @4 (view_convert:op_type @1) @6 @7))))))
9549 /* Same for ternary operations. */
9550 (for cond_len_op (COND_LEN_TERNARY)
9552 (vec_cond @0 (view_convert? (cond_len_op @0 @1 @2 @3 @4 @5 @6)) @7)
9553 (with { tree op_type = TREE_TYPE (@4); }
9554 (if (element_precision (type) == element_precision (op_type))
9555 (view_convert (cond_len_op @0 @1 @2 @3 (view_convert:op_type @7) @5 @6)))))
9557 (vec_cond @0 @1 (view_convert? (cond_len_op @2 @3 @4 @5 @6 @7 @8)))
9558 (with { tree op_type = TREE_TYPE (@6); }
9559 (if (inverse_conditions_p (@0, @2)
9560 && element_precision (type) == element_precision (op_type))
9561 (view_convert (cond_len_op @2 @3 @4 @5 (view_convert:op_type @1) @7 @8))))))
9563 /* Detect simplification for a conditional reduction where
9566 c = mask2 ? d + a : d
9570 c = mask1 && mask2 ? d + b : d. */
9572 (IFN_COND_ADD @0 @1 (vec_cond @2 @3 zerop@4) @1)
9573 (if (ANY_INTEGRAL_TYPE_P (type)
9574 || (FLOAT_TYPE_P (type)
9575 && fold_real_zero_addition_p (type, NULL_TREE, @4, 0)))
9576 (IFN_COND_ADD (bit_and @0 @2) @1 @3 @1)))
9578 /* Detect simplification for a conditional length reduction where
9581 c = i < len + bias ? d + a : d
9585 c = mask && i < len + bias ? d + b : d. */
9587 (IFN_COND_LEN_ADD integer_truep @0 (vec_cond @1 @2 zerop@5) @0 @3 @4)
9588 (if (ANY_INTEGRAL_TYPE_P (type)
9589 || (FLOAT_TYPE_P (type)
9590 && fold_real_zero_addition_p (type, NULL_TREE, @5, 0)))
9591 (IFN_COND_LEN_ADD @1 @0 @2 @0 @3 @4)))
9593 /* Detect simplification for vector condition folding where
9595 c = mask1 ? (masked_op mask2 a b) : b
9599 c = masked_op (mask1 & mask2) a b
9601 where the operation can be partially applied to one operand. */
9603 (for cond_op (COND_BINARY)
9606 (cond_op:s @1 @2 @3 @4) @3)
9607 (cond_op (bit_and @1 @0) @2 @3 @4)))
9609 /* And same for ternary expressions. */
9611 (for cond_op (COND_TERNARY)
9614 (cond_op:s @1 @2 @3 @4 @5) @4)
9615 (cond_op (bit_and @1 @0) @2 @3 @4 @5)))
9617 /* For pointers @0 and @2 and nonnegative constant offset @1, look for expressions of the form:
9620 A: (@0 + @1 < @2) | (@2 + @1 < @0)
9621 B: (@0 + @1 <= @2) | (@2 + @1 <= @0)
9623 If pointers are known not to wrap, B checks whether @1 bytes starting
9624 at @0 and @2 do not overlap, while A tests the same thing for @1 + 1
9625 bytes. A is more efficiently tested as:
9627 A: (sizetype) (@0 + @1 - @2) > @1 * 2
9629 The equivalent expression for B is given by replacing @1 with @1 - 1:
9631 B: (sizetype) (@0 + (@1 - 1) - @2) > (@1 - 1) * 2
9633 @0 and @2 can be swapped in both expressions without changing the result.
9635 The folds rely on sizetype's being unsigned (which is always true)
9636 and on its being the same width as the pointer (which we have to check).
9638 The fold replaces two pointer_plus expressions, two comparisons and
9639 an IOR with a pointer_plus, a pointer_diff, and a comparison, so in
9640 the best case it's a saving of two operations. The A fold retains one
9641 of the original pointer_pluses, so is a win even if both pointer_pluses
9642 are used elsewhere. The B fold is a wash if both pointer_pluses are
9643 used elsewhere, since all we end up doing is replacing a comparison with
9644 a pointer_plus. We do still apply the fold under those circumstances
9645 though, in case applying it to other conditions eventually makes one of the
9646 pointer_pluses dead. */
9647 (for ior (truth_orif truth_or bit_ior)
9650 (ior (cmp:cs (pointer_plus@3 @0 INTEGER_CST@1) @2)
9651 (cmp:cs (pointer_plus@4 @2 @1) @0))
9652 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
9653 && TYPE_OVERFLOW_WRAPS (sizetype)
9654 && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (sizetype))
9655 /* Calculate the rhs constant. */
9656 (with { offset_int off = wi::to_offset (@1) - (cmp == LE_EXPR ? 1 : 0);
9657 offset_int rhs = off * 2; }
9658 /* Always fails for negative values. */
9659 (if (wi::min_precision (rhs, UNSIGNED) <= TYPE_PRECISION (sizetype))
9660 /* Since the order of @0 and @2 doesn't matter, let tree_swap_operands_p
9661 pick a canonical order. This increases the chances of using the
9662 same pointer_plus in multiple checks. */
9663 (with { bool swap_p = tree_swap_operands_p (@0, @2);
9664 tree rhs_tree = wide_int_to_tree (sizetype, rhs); }
9665 (if (cmp == LT_EXPR)
9666 (gt (convert:sizetype
9667 (pointer_diff:ssizetype { swap_p ? @4 : @3; }
9668 { swap_p ? @0 : @2; }))
9670 (gt (convert:sizetype
9671 (pointer_diff:ssizetype
9672 (pointer_plus { swap_p ? @2 : @0; }
9673 { wide_int_to_tree (sizetype, off); })
9674 { swap_p ? @0 : @2; }))
9675 { rhs_tree; })))))))))
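/* Editorial note: a C-level sketch of the A form above for a fixed offset
   N == 16 (chosen only for the example).  The two-comparison test is what
   user code typically writes; the single unsigned comparison is the folded
   form, assuming the pointer arithmetic does not wrap.

     #include <stddef.h>

     // Original: the N+1-byte ranges starting at p and q do not overlap.
     int
     disjoint_a (const char *p, const char *q)
     {
       enum { N = 16 };
       return p + N < q || q + N < p;
     }

     // Folded: one pointer_plus, one pointer_diff and one comparison.
     int
     disjoint_a_folded (const char *p, const char *q)
     {
       enum { N = 16 };
       return (size_t) (p + N - q) > 2 * (size_t) N;
     }
*/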
9677 /* Fold REDUC (@0 & @1) -> @0[I] & @1[I] if element I is the only nonzero
9679 (for reduc (IFN_REDUC_PLUS IFN_REDUC_IOR IFN_REDUC_XOR)
9680 (simplify (reduc (view_convert? (bit_and @0 VECTOR_CST@1)))
9681 (with { int i = single_nonzero_element (@1); }
9683 (with { tree elt = vector_cst_elt (@1, i);
9684 tree elt_type = TREE_TYPE (elt);
9685 unsigned int elt_bits = tree_to_uhwi (TYPE_SIZE (elt_type));
9686 tree size = bitsize_int (elt_bits);
9687 tree pos = bitsize_int (elt_bits * i); }
9690 (BIT_FIELD_REF:elt_type @0 { size; } { pos; })
9693 /* Fold reduction of a single nonzero element constructor. */
9694 (for reduc (IFN_REDUC_PLUS IFN_REDUC_IOR IFN_REDUC_XOR)
9695 (simplify (reduc (CONSTRUCTOR@0))
9696 (with { tree ctor = (TREE_CODE (@0) == SSA_NAME
9697 ? gimple_assign_rhs1 (SSA_NAME_DEF_STMT (@0)) : @0);
9698 tree elt = ctor_single_nonzero_element (ctor); }
9700 && !HONOR_SNANS (type)
9701 && !HONOR_SIGNED_ZEROS (type))
9704 /* Fold REDUC (@0 op VECTOR_CST) as REDUC (@0) op REDUC (VECTOR_CST). */
9705 (for reduc (IFN_REDUC_PLUS IFN_REDUC_MAX IFN_REDUC_MIN IFN_REDUC_FMAX
9706 IFN_REDUC_FMIN IFN_REDUC_AND IFN_REDUC_IOR IFN_REDUC_XOR)
9707 op (plus max min IFN_FMAX IFN_FMIN bit_and bit_ior bit_xor)
9708 (simplify (reduc (op @0 VECTOR_CST@1))
9709 (op (reduc:type @0) (reduc:type @1))))
9711 /* Simplify vector floating point operations of alternating sub/add pairs
9712 into using an fneg of a wider element type followed by a normal add.
9713 Under IEEE 754 the fneg of the wider type will negate every even entry,
9714 and when doing an add we get a sub of the even and an add of every odd entry.  */
9716 (for plusminus (plus minus)
9717 minusplus (minus plus)
9719 (vec_perm (plusminus @0 @1) (minusplus @2 @3) VECTOR_CST@4)
9720 (if (!VECTOR_INTEGER_TYPE_P (type)
9721 && !FLOAT_WORDS_BIG_ENDIAN
9722 /* plus is commutative, while minus is not, so :c can't be used.
9723 Do equality comparisons by hand and at the end pick the operands
9725 && (operand_equal_p (@0, @2, 0)
9726 ? operand_equal_p (@1, @3, 0)
9727 : operand_equal_p (@0, @3, 0) && operand_equal_p (@1, @2, 0)))
9730 /* Build a vector of integers from the tree mask. */
9731 vec_perm_builder builder;
9733 (if (tree_to_vec_perm_builder (&builder, @4))
9736 /* Create a vec_perm_indices for the integer vector. */
9737 poly_uint64 nelts = TYPE_VECTOR_SUBPARTS (type);
9738 vec_perm_indices sel (builder, 2, nelts);
9739 machine_mode vec_mode = TYPE_MODE (type);
9740 machine_mode wide_mode;
9741 scalar_mode wide_elt_mode;
9742 poly_uint64 wide_nunits;
9743 scalar_mode inner_mode = GET_MODE_INNER (vec_mode);
9745 (if (VECTOR_MODE_P (vec_mode)
9746 && sel.series_p (0, 2, 0, 2)
9747 && sel.series_p (1, 2, nelts + 1, 2)
9748 && GET_MODE_2XWIDER_MODE (inner_mode).exists (&wide_elt_mode)
9749 && multiple_p (GET_MODE_NUNITS (vec_mode), 2, &wide_nunits)
9750 && related_vector_mode (vec_mode, wide_elt_mode,
9751 wide_nunits).exists (&wide_mode))
9755 = lang_hooks.types.type_for_mode (GET_MODE_INNER (wide_mode),
9756 TYPE_UNSIGNED (type));
9757 tree ntype = build_vector_type_for_mode (stype, wide_mode);
9759 /* The format has to be a non-extended IEEE format. */
9760 const struct real_format *fmt_old = FLOAT_MODE_FORMAT (vec_mode);
9761 const struct real_format *fmt_new = FLOAT_MODE_FORMAT (wide_mode);
9763 (if (TYPE_MODE (stype) != BLKmode
9764 && VECTOR_TYPE_P (ntype)
9769 /* If the target doesn't support v1xx vectors, try using
9770 scalar mode xx instead. */
9771 if (known_eq (GET_MODE_NUNITS (wide_mode), 1)
9772 && !target_supports_op_p (ntype, NEGATE_EXPR, optab_vector))
9775 (if (fmt_new->signbit_rw
9776 == fmt_old->signbit_rw + GET_MODE_UNIT_BITSIZE (vec_mode)
9777 && fmt_new->signbit_rw == fmt_new->signbit_ro
9778 && targetm.can_change_mode_class (TYPE_MODE (ntype),
9779 TYPE_MODE (type), ALL_REGS)
9780 && ((optimize_vectors_before_lowering_p ()
9781 && VECTOR_TYPE_P (ntype))
9782 || target_supports_op_p (ntype, NEGATE_EXPR, optab_vector)))
9783 (if (plusminus == PLUS_EXPR)
9784 (plus (view_convert:type (negate (view_convert:ntype @3))) @2)
9785 (minus @0 (view_convert:type
9786 (negate (view_convert:ntype @1))))))))))))))))
9789 (vec_perm @0 @1 VECTOR_CST@2)
9792 tree op0 = @0, op1 = @1, op2 = @2;
9793 machine_mode result_mode = TYPE_MODE (type);
9794 machine_mode op_mode = TYPE_MODE (TREE_TYPE (op0));
9796 /* Build a vector of integers from the tree mask. */
9797 vec_perm_builder builder;
9799 (if (tree_to_vec_perm_builder (&builder, op2))
9802 /* Create a vec_perm_indices for the integer vector. */
9803 poly_uint64 nelts = TYPE_VECTOR_SUBPARTS (type);
9804 bool single_arg = (op0 == op1);
9805 vec_perm_indices sel (builder, single_arg ? 1 : 2, nelts);
9807 (if (sel.series_p (0, 1, 0, 1))
9809 (if (sel.series_p (0, 1, nelts, 1))
9815 if (sel.all_from_input_p (0))
9817 else if (sel.all_from_input_p (1))
9820 sel.rotate_inputs (1);
9822 else if (known_ge (poly_uint64 (sel[0]), nelts))
9824 std::swap (op0, op1);
9825 sel.rotate_inputs (1);
9829 tree cop0 = op0, cop1 = op1;
9830 if (TREE_CODE (op0) == SSA_NAME
9831 && (def = dyn_cast <gassign *> (SSA_NAME_DEF_STMT (op0)))
9832 && gimple_assign_rhs_code (def) == CONSTRUCTOR)
9833 cop0 = gimple_assign_rhs1 (def);
9834 if (TREE_CODE (op1) == SSA_NAME
9835 && (def = dyn_cast <gassign *> (SSA_NAME_DEF_STMT (op1)))
9836 && gimple_assign_rhs_code (def) == CONSTRUCTOR)
9837 cop1 = gimple_assign_rhs1 (def);
9840 (if ((TREE_CODE (cop0) == VECTOR_CST
9841 || TREE_CODE (cop0) == CONSTRUCTOR)
9842 && (TREE_CODE (cop1) == VECTOR_CST
9843 || TREE_CODE (cop1) == CONSTRUCTOR)
9844 && (t = fold_vec_perm (type, cop0, cop1, sel)))
9848 bool changed = (op0 == op1 && !single_arg);
9849 tree ins = NULL_TREE;
9852 /* See if the permutation is performing a single element
9853 insert from a CONSTRUCTOR or constant and use a BIT_INSERT_EXPR
9854 in that case. But only if the vector mode is supported,
9855 otherwise this is invalid GIMPLE. */
9856 if (op_mode != BLKmode
9857 && (TREE_CODE (cop0) == VECTOR_CST
9858 || TREE_CODE (cop0) == CONSTRUCTOR
9859 || TREE_CODE (cop1) == VECTOR_CST
9860 || TREE_CODE (cop1) == CONSTRUCTOR))
9862 bool insert_first_p = sel.series_p (1, 1, nelts + 1, 1);
9865 /* After canonicalizing the first elt to come from the
9866 first vector we only can insert the first elt from
9867 the first vector. */
9869 if ((ins = fold_read_from_vector (cop0, sel[0])))
9872 /* The above can fail for two-element vectors which always
9873 appear to insert the first element, so try inserting
9874 into the second lane as well. For more than two
9875 elements that's wasted time. */
9876 if (!insert_first_p || (!ins && maybe_eq (nelts, 2u)))
9878 unsigned int encoded_nelts = sel.encoding ().encoded_nelts ();
9879 for (at = 0; at < encoded_nelts; ++at)
9880 if (maybe_ne (sel[at], at))
9882 if (at < encoded_nelts
9883 && (known_eq (at + 1, nelts)
9884 || sel.series_p (at + 1, 1, at + 1, 1)))
9886 if (known_lt (poly_uint64 (sel[at]), nelts))
9887 ins = fold_read_from_vector (cop0, sel[at]);
9889 ins = fold_read_from_vector (cop1, sel[at] - nelts);
9894 /* Generate a canonical form of the selector. */
9895 if (!ins && sel.encoding () != builder)
9897 /* Some targets are deficient and fail to expand a single
9898 argument permutation while still allowing an equivalent
9899 2-argument version. */
9901 if (sel.ninputs () == 2
9902 || can_vec_perm_const_p (result_mode, op_mode, sel, false))
9903 op2 = vec_perm_indices_to_tree (TREE_TYPE (op2), sel);
9906 vec_perm_indices sel2 (builder, 2, nelts);
9907 if (can_vec_perm_const_p (result_mode, op_mode, sel2, false))
9908 op2 = vec_perm_indices_to_tree (TREE_TYPE (op2), sel2);
9910 /* Not directly supported with either encoding,
9911 so use the preferred form. */
9912 op2 = vec_perm_indices_to_tree (TREE_TYPE (op2), sel);
9914 if (!operand_equal_p (op2, oldop2, 0))
9919 (bit_insert { op0; } { ins; }
9920 { bitsize_int (at * vector_element_bits (type)); })
9922 (vec_perm { op0; } { op1; } { op2; }))))))))))))
9924 /* VEC_PERM_EXPR (v, v, mask) -> v when every element of v is the same. */
9926 (match vec_same_elem_p
9929 (match vec_same_elem_p
9931 (if (TREE_CODE (@0) == SSA_NAME
9932 && uniform_vector_p (gimple_assign_rhs1 (SSA_NAME_DEF_STMT (@0))))))
9934 (match vec_same_elem_p
9936 (if (uniform_vector_p (@0))))
9940 (vec_perm vec_same_elem_p@0 @0 @1)
9941 (if (types_match (type, TREE_TYPE (@0)))
9945 tree elem = uniform_vector_p (@0);
9948 { build_vector_from_val (type, elem); }))))
9950 /* Push VEC_PERM earlier if that may help FMA detection (PR101895). */
9952 (plus:c (vec_perm:s (mult:c@0 @1 vec_same_elem_p@2) @0 @3) @4)
9953 (if (TREE_CODE (@0) == SSA_NAME && num_imm_uses (@0) == 2)
9954 (plus (mult (vec_perm @1 @1 @3) @2) @4)))
9956 (minus (vec_perm:s (mult:c@0 @1 vec_same_elem_p@2) @0 @3) @4)
9957 (if (TREE_CODE (@0) == SSA_NAME && num_imm_uses (@0) == 2)
9958 (minus (mult (vec_perm @1 @1 @3) @2) @4)))
9962 /* Merge   c = VEC_PERM_EXPR <a, b, VCST0>;
9963            d = VEC_PERM_EXPR <c, c, VCST1>;
9965    into    d = VEC_PERM_EXPR <a, b, NEW_VCST>;  */
9968 (vec_perm (vec_perm@0 @1 @2 VECTOR_CST@3) @0 VECTOR_CST@4)
9969 (if (TYPE_VECTOR_SUBPARTS (type).is_constant ())
9972 machine_mode result_mode = TYPE_MODE (type);
9973 machine_mode op_mode = TYPE_MODE (TREE_TYPE (@1));
9974 int nelts = TYPE_VECTOR_SUBPARTS (type).to_constant ();
9975 vec_perm_builder builder0;
9976 vec_perm_builder builder1;
9977 vec_perm_builder builder2 (nelts, nelts, 1);
9979 (if (tree_to_vec_perm_builder (&builder0, @3)
9980 && tree_to_vec_perm_builder (&builder1, @4))
9983 vec_perm_indices sel0 (builder0, 2, nelts);
9984 vec_perm_indices sel1 (builder1, 1, nelts);
9986 for (int i = 0; i < nelts; i++)
9987 builder2.quick_push (sel0[sel1[i].to_constant ()]);
9989 vec_perm_indices sel2 (builder2, 2, nelts);
9991 tree op0 = NULL_TREE;
9992 /* If the new VEC_PERM_EXPR can't be handled but both
9993 original VEC_PERM_EXPRs can, punt.
9994 If one or both of the original VEC_PERM_EXPRs can't be
9995 handled and the new one can't be either, don't increase
9996 number of VEC_PERM_EXPRs that can't be handled. */
9997 if (can_vec_perm_const_p (result_mode, op_mode, sel2, false)
9999 ? (!can_vec_perm_const_p (result_mode, op_mode, sel0, false)
10000 || !can_vec_perm_const_p (result_mode, op_mode, sel1, false))
10001 : !can_vec_perm_const_p (result_mode, op_mode, sel1, false)))
10002 op0 = vec_perm_indices_to_tree (TREE_TYPE (@4), sel2);
10005 (vec_perm @1 @2 { op0; })))))))
10008 /* Merge   c = VEC_PERM_EXPR <a, b, VCST0>;
10009            d = VEC_PERM_EXPR <x, c, VCST1>;
10011    into    d = VEC_PERM_EXPR <x, {a,b}, NEW_VCST>;
10012 when all elements from a or b are replaced by the later permutation.  */
10016 (vec_perm @5 (vec_perm@0 @1 @2 VECTOR_CST@3) VECTOR_CST@4)
10017 (if (TYPE_VECTOR_SUBPARTS (type).is_constant ())
10020 machine_mode result_mode = TYPE_MODE (type);
10021 machine_mode op_mode = TYPE_MODE (TREE_TYPE (@1));
10022 int nelts = TYPE_VECTOR_SUBPARTS (type).to_constant ();
10023 vec_perm_builder builder0;
10024 vec_perm_builder builder1;
10025 vec_perm_builder builder2 (nelts, nelts, 2);
10027 (if (tree_to_vec_perm_builder (&builder0, @3)
10028 && tree_to_vec_perm_builder (&builder1, @4))
10031 vec_perm_indices sel0 (builder0, 2, nelts);
10032 vec_perm_indices sel1 (builder1, 2, nelts);
10033 bool use_1 = false, use_2 = false;
10035 for (int i = 0; i < nelts; i++)
10037 if (known_lt ((poly_uint64)sel1[i], sel1.nelts_per_input ()))
10038 builder2.quick_push (sel1[i]);
10041 poly_uint64 j = sel0[(sel1[i] - sel1.nelts_per_input ())
10043 if (known_lt (j, sel0.nelts_per_input ()))
10048 j -= sel0.nelts_per_input ();
10050 builder2.quick_push (j + sel1.nelts_per_input ());
10054 (if (use_1 ^ use_2)
10057 vec_perm_indices sel2 (builder2, 2, nelts);
10058 tree op0 = NULL_TREE;
10059 /* If the new VEC_PERM_EXPR can't be handled but both
10060 original VEC_PERM_EXPRs can, punt.
10061 If one or both of the original VEC_PERM_EXPRs can't be
10062 handled and the new one can't be either, don't increase
10063 number of VEC_PERM_EXPRs that can't be handled. */
10064 if (can_vec_perm_const_p (result_mode, op_mode, sel2, false)
10065 || (single_use (@0)
10066 ? (!can_vec_perm_const_p (result_mode, op_mode, sel0, false)
10067 || !can_vec_perm_const_p (result_mode, op_mode, sel1, false))
10068 : !can_vec_perm_const_p (result_mode, op_mode, sel1, false)))
10069 op0 = vec_perm_indices_to_tree (TREE_TYPE (@4), sel2);
10074 (vec_perm @5 @1 { op0; }))
10076 (vec_perm @5 @2 { op0; })))))))))))
10078 /* And the case with swapped outer permute sources. */
10081 (vec_perm (vec_perm@0 @1 @2 VECTOR_CST@3) @5 VECTOR_CST@4)
10082 (if (TYPE_VECTOR_SUBPARTS (type).is_constant ())
10085 machine_mode result_mode = TYPE_MODE (type);
10086 machine_mode op_mode = TYPE_MODE (TREE_TYPE (@1));
10087 int nelts = TYPE_VECTOR_SUBPARTS (type).to_constant ();
10088 vec_perm_builder builder0;
10089 vec_perm_builder builder1;
10090 vec_perm_builder builder2 (nelts, nelts, 2);
10092 (if (tree_to_vec_perm_builder (&builder0, @3)
10093 && tree_to_vec_perm_builder (&builder1, @4))
10096 vec_perm_indices sel0 (builder0, 2, nelts);
10097 vec_perm_indices sel1 (builder1, 2, nelts);
10098 bool use_1 = false, use_2 = false;
10100 for (int i = 0; i < nelts; i++)
10102 if (known_ge ((poly_uint64)sel1[i], sel1.nelts_per_input ()))
10103 builder2.quick_push (sel1[i]);
10106 poly_uint64 j = sel0[sel1[i].to_constant ()];
10107 if (known_lt (j, sel0.nelts_per_input ()))
10112 j -= sel0.nelts_per_input ();
10114 builder2.quick_push (j);
10118 (if (use_1 ^ use_2)
10121 vec_perm_indices sel2 (builder2, 2, nelts);
10122 tree op0 = NULL_TREE;
10123 /* If the new VEC_PERM_EXPR can't be handled but both
10124 original VEC_PERM_EXPRs can, punt.
10125 If one or both of the original VEC_PERM_EXPRs can't be
10126 handled and the new one can't be either, don't increase
10127 number of VEC_PERM_EXPRs that can't be handled. */
10128 if (can_vec_perm_const_p (result_mode, op_mode, sel2, false)
10129 || (single_use (@0)
10130 ? (!can_vec_perm_const_p (result_mode, op_mode, sel0, false)
10131 || !can_vec_perm_const_p (result_mode, op_mode, sel1, false))
10132 : !can_vec_perm_const_p (result_mode, op_mode, sel1, false)))
10133 op0 = vec_perm_indices_to_tree (TREE_TYPE (@4), sel2);
10138 (vec_perm @1 @5 { op0; }))
10140 (vec_perm @2 @5 { op0; })))))))))))
10143 /* Match count trailing zeroes for simplify_count_trailing_zeroes in fwprop.
10144 The canonical form is array[((x & -x) * C) >> SHIFT] where C is a magic
10145 constant which when multiplied by a power of 2 contains a unique value
10146 in the top 5 or 6 bits. This is then indexed into a table which maps it
10147 to the number of trailing zeroes. */
10148 (match (ctz_table_index @1 @2 @3)
10149 (rshift (mult (bit_and:c (negate @1) @1) INTEGER_CST@2) INTEGER_CST@3))
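/* Editorial note: a minimal C sketch of the idiom this match is meant to
   recognize.  The constant 0x077CB531 and SHIFT == 27 are one commonly
   used 32-bit choice (an assumption of the sketch, not taken from the
   pattern); the table is derived from the constant instead of being
   hard-coded.

     #include <stdint.h>

     static int ctz_tab[32];

     static void
     ctz_tab_init (void)
     {
       for (int i = 0; i < 32; i++)
         ctz_tab[(uint32_t) (UINT32_C (0x077CB531) << i) >> 27] = i;
     }

     // Requires x != 0 and a prior call to ctz_tab_init.
     int
     ctz_by_table (uint32_t x)
     {
       return ctz_tab[(uint32_t) ((x & -x) * UINT32_C (0x077CB531)) >> 27];
     }
*/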
10151 (match (cond_expr_convert_p @0 @2 @3 @6)
10152 (cond (simple_comparison@6 @0 @1) (convert@4 @2) (convert@5 @3))
10153 (if (INTEGRAL_TYPE_P (type)
10154 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
10155 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
10156 && INTEGRAL_TYPE_P (TREE_TYPE (@3))
10157 && TYPE_PRECISION (type) != TYPE_PRECISION (TREE_TYPE (@0))
10158 && TYPE_PRECISION (TREE_TYPE (@0))
10159 == TYPE_PRECISION (TREE_TYPE (@2))
10160 && TYPE_PRECISION (TREE_TYPE (@0))
10161 == TYPE_PRECISION (TREE_TYPE (@3))
10162 /* For vect_recog_cond_expr_convert_pattern, @2 and @3 can differ in
10163 signedness when convert is truncation, but not for extension since
10164 it's sign_extend vs zero_extend. */
10165 && (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (type)
10166 || (TYPE_UNSIGNED (TREE_TYPE (@2))
10167 == TYPE_UNSIGNED (TREE_TYPE (@3))))
10169 && single_use (@5))))
10171 (for bit_op (bit_and bit_ior bit_xor)
10172 (match (bitwise_induction_p @0 @2 @3)
10174 (nop_convert1? (bit_not2?@0 (convert3? (lshift integer_onep@1 @2))))
10177 (match (bitwise_induction_p @0 @2 @3)
10179 (nop_convert1? (bit_xor@0 (convert2? (lshift integer_onep@1 @2)) @3))))
10181 /* n - (((n > C1) ? n : C1) & -C2) -> n & C1 for unsigned case.
10182 n - (((n > C1) ? n : C1) & -C2) -> (n <= C1) ? n : (n & C1) for signed case. */
10184 (minus @0 (bit_and (max @0 INTEGER_CST@1) INTEGER_CST@2))
10185 (with { auto i = wi::neg (wi::to_wide (@2)); }
10186 /* Check if -C2 is a power of 2 and C1 = -C2 - 1. */
10187 (if (wi::popcount (i) == 1
10188 && (wi::to_wide (@1)) == (i - 1))
10189 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
10191 (cond (le @0 @1) @0 (bit_and @0 @1))))))
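/* Editorial note: a C sketch of the unsigned form above with concrete
   constants chosen for the example: the bit_and constant is -16 (a negated
   power of two) and the max constant is 16 - 1 == 15.

     #include <assert.h>
     #include <stdint.h>

     void
     minus_of_clamped (uint32_t n)
     {
       uint32_t m = n > 15 ? n : 15;
       assert (n - (m & (uint32_t) -16) == (n & 15));
     }
*/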
10193 /* -x & 1 -> x & 1. */
10195 (bit_and (negate @0) integer_onep@1)
10196 (if (!TYPE_OVERFLOW_SANITIZED (type))
10199 /* `-a` is just `a` if the type is 1 bit wide or when converting
10200 to a 1-bit type; similar to the above transformation of `(-x)&1`.
10201 This is used mostly with the transformation of
10202 `a ? ~b : b` into `(-a)^b`.
10203 It can also show up with bitfields.  */
10205 (convert? (negate @0))
10206 (if (INTEGRAL_TYPE_P (type)
10207 && TYPE_PRECISION (type) == 1
10208 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0)))
10212 /* Optimize   c1 = VEC_PERM_EXPR (a, a, mask)
10213               c2 = VEC_PERM_EXPR (b, b, mask)
                    c3 = c1 op c2
      into          c  = a op b
10217               c3 = VEC_PERM_EXPR (c, c, mask)
10218 For all integer non-div operations.  */
10219 (for op (plus minus mult bit_and bit_ior bit_xor
10222 (op (vec_perm @0 @0 @2) (vec_perm @1 @1 @2))
10223 (if (VECTOR_INTEGER_TYPE_P (type))
10224 (vec_perm (op@3 @0 @1) @3 @2))))
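/* Editorial note: a sketch of the transform above using GCC's generic
   vector extension; permuting both operands with the same mask and then
   operating is the same as operating first and permuting the result once.

     typedef int v4si __attribute__ ((vector_size (16)));

     v4si
     add_of_permuted (v4si a, v4si b, v4si mask)
     {
       return __builtin_shuffle (a, mask) + __builtin_shuffle (b, mask);
       // Folded form:
       //   v4si c = a + b;
       //   return __builtin_shuffle (c, mask);
     }
*/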
10226 /* Similar for float arithmetic when permutation constant covers
10227 all vector elements. */
10228 (for op (plus minus mult)
10230 (op (vec_perm @0 @0 VECTOR_CST@2) (vec_perm @1 @1 VECTOR_CST@2))
10231 (if (VECTOR_FLOAT_TYPE_P (type)
10232 && TYPE_VECTOR_SUBPARTS (type).is_constant ())
10235 tree perm_cst = @2;
10236 vec_perm_builder builder;
10237 bool full_perm_p = false;
10238 if (tree_to_vec_perm_builder (&builder, perm_cst))
10240 unsigned HOST_WIDE_INT nelts;
10242 nelts = TYPE_VECTOR_SUBPARTS (type).to_constant ();
10243 /* Create a vec_perm_indices for the VECTOR_CST. */
10244 vec_perm_indices sel (builder, 1, nelts);
10246 /* Check if the perm indices cover all vector elements. */
10247 if (sel.encoding ().encoded_full_vector_p ())
10249 auto_sbitmap seen (nelts);
10250 bitmap_clear (seen);
10252 unsigned HOST_WIDE_INT count = 0, i;
10254 for (i = 0; i < nelts; i++)
10256 if (!bitmap_set_bit (seen, sel[i].to_constant ()))
10260 full_perm_p = count == nelts;
10265 (vec_perm (op@3 @0 @1) @3 @2))))))