1 /* Conditional constant propagation pass for the GNU compiler.
2 Copyright (C) 2000-2024 Free Software Foundation, Inc.
3 Adapted from original RTL SSA-CCP by Daniel Berlin <dberlin@dberlin.org>
4 Adapted to GIMPLE trees by Diego Novillo <dnovillo@redhat.com>
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it
9 under the terms of the GNU General Public License as published by the
10 Free Software Foundation; either version 3, or (at your option) any
13 GCC is distributed in the hope that it will be useful, but WITHOUT
14 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 /* Conditional constant propagation (CCP) is based on the SSA
23 propagation engine (tree-ssa-propagate.cc). Constant assignments of
24 the form VAR = CST are propagated from the assignments into uses of
25 VAR, which in turn may generate new constants. The simulation uses
26 a four level lattice to keep track of constant values associated
27 with SSA names. Given an SSA name V_i, it may take one of the
30 UNINITIALIZED -> the initial state of the value. This value
31 is replaced with a correct initial value
32 the first time the value is used, so the
33 rest of the pass does not need to care about
34 it. Using this value simplifies initialization
35 of the pass, and prevents us from needlessly
36 scanning statements that are never reached.
38 UNDEFINED -> V_i is a local variable whose definition
39 has not been processed yet. Therefore we
40 don't yet know if its value is a constant
43 CONSTANT -> V_i has been found to hold a constant
46 VARYING -> V_i cannot take a constant value, or if it
47 does, it is not possible to determine it
50 The core of SSA-CCP is in ccp_visit_stmt and ccp_visit_phi_node:
52 1- In ccp_visit_stmt, we are interested in assignments whose RHS
53 evaluates into a constant and conditional jumps whose predicate
54 evaluates into a boolean true or false. When an assignment of
55 the form V_i = CONST is found, V_i's lattice value is set to
56 CONSTANT and CONST is associated with it. This causes the
57 propagation engine to add all the SSA edges coming out the
58 assignment into the worklists, so that statements that use V_i
61 If the statement is a conditional with a constant predicate, we
62 mark the outgoing edges as executable or not executable
63 depending on the predicate's value. This is then used when
64 visiting PHI nodes to know when a PHI argument can be ignored.
67 2- In ccp_visit_phi_node, if all the PHI arguments evaluate to the
68 same constant C, then the LHS of the PHI is set to C. This
69 evaluation is known as the "meet operation". Since one of the
70 goals of this evaluation is to optimistically return constant
71 values as often as possible, it uses two main short cuts:
73 - If an argument is flowing in through a non-executable edge, it
74 is ignored. This is useful in cases like this:
80 a_11 = PHI (a_9, a_10)
82 If PRED is known to always evaluate to false, then we can
83 assume that a_11 will always take its value from a_10, meaning
84 that instead of considering it VARYING (a_9 and a_10 have
85 different values), we can consider it CONSTANT 100.
87 - If an argument has an UNDEFINED value, then it does not affect
88 the outcome of the meet operation. If a variable V_i has an
89 UNDEFINED value, it means that either its defining statement
90 hasn't been visited yet or V_i has no defining statement, in
91 which case the original symbol 'V' is being used
92 uninitialized. Since 'V' is a local variable, the compiler
93 may assume any initial value for it.
96 After propagation, every variable V_i that ends up with a lattice
97 value of CONSTANT will have the associated constant value in the
98 array CONST_VAL[i].VALUE. That is fed into substitute_and_fold for
99 final substitution and folding.
101 This algorithm uses wide-ints at the max precision of the target.
102 This means that, with one uninteresting exception, variables with
103 UNSIGNED types never go to VARYING because the bits above the
104 precision of the type of the variable are always zero. The
105 uninteresting case is a variable of UNSIGNED type that has the
106 maximum precision of the target. Such variables can go to VARYING,
107 but this causes no loss of information since these variables will
112 Constant propagation with conditional branches,
113 Wegman and Zadeck, ACM TOPLAS 13(2):181-210.
115 Building an Optimizing Compiler,
116 Robert Morgan, Butterworth-Heinemann, 1998, Section 8.9.
118 Advanced Compiler Design and Implementation,
119 Steven Muchnick, Morgan Kaufmann, 1997, Section 12.6 */
123 #include "coretypes.h"
128 #include "tree-pass.h"
130 #include "gimple-pretty-print.h"
131 #include "fold-const.h"
132 #include "gimple-iterator.h"
133 #include "gimple-fold.h"
135 #include "gimplify.h"
136 #include "tree-cfg.h"
137 #include "tree-ssa-propagate.h"
139 #include "builtins.h"
141 #include "stor-layout.h"
142 #include "optabs-query.h"
143 #include "tree-ssa-ccp.h"
144 #include "tree-dfa.h"
145 #include "diagnostic-core.h"
146 #include "stringpool.h"
148 #include "tree-vector-builder.h"
150 #include "alloc-pool.h"
151 #include "symbol-summary.h"
152 #include "ipa-utils.h"
155 #include "ipa-prop.h"
156 #include "internal-fn.h"
158 /* Possible lattice values. */
167 class ccp_prop_value_t
{
170 ccp_lattice_t lattice_val
;
172 /* Propagated value. */
175 /* Mask that applies to the propagated value during CCP. For X
176 with a CONSTANT lattice value X & ~mask == value & ~mask. The
177 zero bits in the mask cover constant values. The ones mean no
182 class ccp_propagate
: public ssa_propagation_engine
185 enum ssa_prop_result
visit_stmt (gimple
*, edge
*, tree
*) final override
;
186 enum ssa_prop_result
visit_phi (gphi
*) final override
;
189 /* Array of propagated constant values. After propagation,
190 CONST_VAL[I].VALUE holds the constant value for SSA_NAME(I). If
191 the constant is held in an SSA name representing a memory store
192 (i.e., a VDEF), CONST_VAL[I].MEM_REF will contain the actual
193 memory reference used to store (i.e., the LHS of the assignment
195 static ccp_prop_value_t
*const_val
;
196 static unsigned n_const_val
;
198 static void canonicalize_value (ccp_prop_value_t
*);
199 static void ccp_lattice_meet (ccp_prop_value_t
*, ccp_prop_value_t
*);
201 /* Dump constant propagation value VAL to file OUTF prefixed by PREFIX. */
204 dump_lattice_value (FILE *outf
, const char *prefix
, ccp_prop_value_t val
)
206 switch (val
.lattice_val
)
209 fprintf (outf
, "%sUNINITIALIZED", prefix
);
212 fprintf (outf
, "%sUNDEFINED", prefix
);
215 fprintf (outf
, "%sVARYING", prefix
);
218 if (TREE_CODE (val
.value
) != INTEGER_CST
221 fprintf (outf
, "%sCONSTANT ", prefix
);
222 print_generic_expr (outf
, val
.value
, dump_flags
);
226 widest_int cval
= wi::bit_and_not (wi::to_widest (val
.value
),
228 fprintf (outf
, "%sCONSTANT ", prefix
);
229 print_hex (cval
, outf
);
230 fprintf (outf
, " (");
231 print_hex (val
.mask
, outf
);
241 /* Print lattice value VAL to stderr. */
243 void debug_lattice_value (ccp_prop_value_t val
);
246 debug_lattice_value (ccp_prop_value_t val
)
248 dump_lattice_value (stderr
, "", val
);
249 fprintf (stderr
, "\n");
252 /* Extend NONZERO_BITS to a full mask, based on sgn. */
255 extend_mask (const wide_int
&nonzero_bits
, signop sgn
)
257 return widest_int::from (nonzero_bits
, sgn
);
260 /* Compute a default value for variable VAR and store it in the
261 CONST_VAL array. The following rules are used to get default
264 1- Global and static variables that are declared constant are
267 2- Any other value is considered UNDEFINED. This is useful when
268 considering PHI nodes. PHI arguments that are undefined do not
269 change the constant value of the PHI node, which allows for more
270 constants to be propagated.
272 3- Variables defined by statements other than assignments and PHI
273 nodes are considered VARYING.
275 4- Initial values of variables that are not GIMPLE registers are
276 considered VARYING. */
278 static ccp_prop_value_t
279 get_default_value (tree var
)
281 ccp_prop_value_t val
= { UNINITIALIZED
, NULL_TREE
, 0 };
284 stmt
= SSA_NAME_DEF_STMT (var
);
286 if (gimple_nop_p (stmt
))
288 /* Variables defined by an empty statement are those used
289 before being initialized. If VAR is a local variable, we
290 can assume initially that it is UNDEFINED, otherwise we must
291 consider it VARYING. */
292 if (!virtual_operand_p (var
)
293 && SSA_NAME_VAR (var
)
294 && VAR_P (SSA_NAME_VAR (var
)))
295 val
.lattice_val
= UNDEFINED
;
298 val
.lattice_val
= VARYING
;
300 if (flag_tree_bit_ccp
)
302 wide_int nonzero_bits
= get_nonzero_bits (var
);
306 if (SSA_NAME_VAR (var
)
307 && TREE_CODE (SSA_NAME_VAR (var
)) == PARM_DECL
308 && ipcp_get_parm_bits (SSA_NAME_VAR (var
), &value
, &mask
))
310 val
.lattice_val
= CONSTANT
;
312 widest_int ipa_value
= wi::to_widest (value
);
313 /* Unknown bits from IPA CP must be equal to zero. */
314 gcc_assert (wi::bit_and (ipa_value
, mask
) == 0);
316 if (nonzero_bits
!= -1)
317 val
.mask
&= extend_mask (nonzero_bits
,
318 TYPE_SIGN (TREE_TYPE (var
)));
320 else if (nonzero_bits
!= -1)
322 val
.lattice_val
= CONSTANT
;
323 val
.value
= build_zero_cst (TREE_TYPE (var
));
324 val
.mask
= extend_mask (nonzero_bits
,
325 TYPE_SIGN (TREE_TYPE (var
)));
330 else if (is_gimple_assign (stmt
))
333 if (gimple_assign_single_p (stmt
)
334 && DECL_P (gimple_assign_rhs1 (stmt
))
335 && (cst
= get_symbol_constant_value (gimple_assign_rhs1 (stmt
))))
337 val
.lattice_val
= CONSTANT
;
342 /* Any other variable defined by an assignment is considered
344 val
.lattice_val
= UNDEFINED
;
347 else if ((is_gimple_call (stmt
)
348 && gimple_call_lhs (stmt
) != NULL_TREE
)
349 || gimple_code (stmt
) == GIMPLE_PHI
)
351 /* A variable defined by a call or a PHI node is considered
353 val
.lattice_val
= UNDEFINED
;
357 /* Otherwise, VAR will never take on a constant value. */
358 val
.lattice_val
= VARYING
;
366 /* Get the constant value associated with variable VAR. */
368 static inline ccp_prop_value_t
*
371 ccp_prop_value_t
*val
;
373 if (const_val
== NULL
374 || SSA_NAME_VERSION (var
) >= n_const_val
)
377 val
= &const_val
[SSA_NAME_VERSION (var
)];
378 if (val
->lattice_val
== UNINITIALIZED
)
379 *val
= get_default_value (var
);
381 canonicalize_value (val
);
386 /* Return the constant tree value associated with VAR. */
389 get_constant_value (tree var
)
391 ccp_prop_value_t
*val
;
392 if (TREE_CODE (var
) != SSA_NAME
)
394 if (is_gimple_min_invariant (var
))
398 val
= get_value (var
);
400 && val
->lattice_val
== CONSTANT
401 && (TREE_CODE (val
->value
) != INTEGER_CST
407 /* Sets the value associated with VAR to VARYING. */
410 set_value_varying (tree var
)
412 ccp_prop_value_t
*val
= &const_val
[SSA_NAME_VERSION (var
)];
414 val
->lattice_val
= VARYING
;
415 val
->value
= NULL_TREE
;
419 /* For integer constants, make sure to drop TREE_OVERFLOW. */
422 canonicalize_value (ccp_prop_value_t
*val
)
424 if (val
->lattice_val
!= CONSTANT
)
427 if (TREE_OVERFLOW_P (val
->value
))
428 val
->value
= drop_tree_overflow (val
->value
);
431 /* Return whether the lattice transition is valid. */
434 valid_lattice_transition (ccp_prop_value_t old_val
, ccp_prop_value_t new_val
)
436 /* Lattice transitions must always be monotonically increasing in
438 if (old_val
.lattice_val
< new_val
.lattice_val
)
441 if (old_val
.lattice_val
!= new_val
.lattice_val
)
444 if (!old_val
.value
&& !new_val
.value
)
447 /* Now both lattice values are CONSTANT. */
449 /* Allow arbitrary copy changes as we might look through PHI <a_1, ...>
450 when only a single copy edge is executable. */
451 if (TREE_CODE (old_val
.value
) == SSA_NAME
452 && TREE_CODE (new_val
.value
) == SSA_NAME
)
455 /* Allow transitioning from a constant to a copy. */
456 if (is_gimple_min_invariant (old_val
.value
)
457 && TREE_CODE (new_val
.value
) == SSA_NAME
)
460 /* Allow transitioning from PHI <&x, not executable> == &x
461 to PHI <&x, &y> == common alignment. */
462 if (TREE_CODE (old_val
.value
) != INTEGER_CST
463 && TREE_CODE (new_val
.value
) == INTEGER_CST
)
466 /* Bit-lattices have to agree in the still valid bits. */
467 if (TREE_CODE (old_val
.value
) == INTEGER_CST
468 && TREE_CODE (new_val
.value
) == INTEGER_CST
)
469 return (wi::bit_and_not (wi::to_widest (old_val
.value
), new_val
.mask
)
470 == wi::bit_and_not (wi::to_widest (new_val
.value
), new_val
.mask
));
472 /* Otherwise constant values have to agree. */
473 if (operand_equal_p (old_val
.value
, new_val
.value
, 0))
476 /* At least the kinds and types should agree now. */
477 if (TREE_CODE (old_val
.value
) != TREE_CODE (new_val
.value
)
478 || !types_compatible_p (TREE_TYPE (old_val
.value
),
479 TREE_TYPE (new_val
.value
)))
482 /* For floats and !HONOR_NANS allow transitions from (partial) NaN
484 tree type
= TREE_TYPE (new_val
.value
);
485 if (SCALAR_FLOAT_TYPE_P (type
)
486 && !HONOR_NANS (type
))
488 if (REAL_VALUE_ISNAN (TREE_REAL_CST (old_val
.value
)))
491 else if (VECTOR_FLOAT_TYPE_P (type
)
492 && !HONOR_NANS (type
))
495 = tree_vector_builder::binary_encoded_nelts (old_val
.value
,
497 for (unsigned int i
= 0; i
< count
; ++i
)
498 if (!REAL_VALUE_ISNAN
499 (TREE_REAL_CST (VECTOR_CST_ENCODED_ELT (old_val
.value
, i
)))
500 && !operand_equal_p (VECTOR_CST_ENCODED_ELT (old_val
.value
, i
),
501 VECTOR_CST_ENCODED_ELT (new_val
.value
, i
), 0))
505 else if (COMPLEX_FLOAT_TYPE_P (type
)
506 && !HONOR_NANS (type
))
508 if (!REAL_VALUE_ISNAN (TREE_REAL_CST (TREE_REALPART (old_val
.value
)))
509 && !operand_equal_p (TREE_REALPART (old_val
.value
),
510 TREE_REALPART (new_val
.value
), 0))
512 if (!REAL_VALUE_ISNAN (TREE_REAL_CST (TREE_IMAGPART (old_val
.value
)))
513 && !operand_equal_p (TREE_IMAGPART (old_val
.value
),
514 TREE_IMAGPART (new_val
.value
), 0))
521 /* Set the value for variable VAR to NEW_VAL. Return true if the new
522 value is different from VAR's previous value. */
525 set_lattice_value (tree var
, ccp_prop_value_t
*new_val
)
527 /* We can deal with old UNINITIALIZED values just fine here. */
528 ccp_prop_value_t
*old_val
= &const_val
[SSA_NAME_VERSION (var
)];
530 canonicalize_value (new_val
);
532 /* We have to be careful to not go up the bitwise lattice
533 represented by the mask. Instead of dropping to VARYING
534 use the meet operator to retain a conservative value.
535 Missed optimizations like PR65851 makes this necessary.
536 It also ensures we converge to a stable lattice solution. */
537 if (old_val
->lattice_val
!= UNINITIALIZED
538 /* But avoid using meet for constant -> copy transitions. */
539 && !(old_val
->lattice_val
== CONSTANT
540 && CONSTANT_CLASS_P (old_val
->value
)
541 && new_val
->lattice_val
== CONSTANT
542 && TREE_CODE (new_val
->value
) == SSA_NAME
))
543 ccp_lattice_meet (new_val
, old_val
);
545 gcc_checking_assert (valid_lattice_transition (*old_val
, *new_val
));
547 /* If *OLD_VAL and NEW_VAL are the same, return false to inform the
548 caller that this was a non-transition. */
549 if (old_val
->lattice_val
!= new_val
->lattice_val
550 || (new_val
->lattice_val
== CONSTANT
551 && (TREE_CODE (new_val
->value
) != TREE_CODE (old_val
->value
)
552 || (TREE_CODE (new_val
->value
) == INTEGER_CST
553 && (new_val
->mask
!= old_val
->mask
554 || (wi::bit_and_not (wi::to_widest (old_val
->value
),
556 != wi::bit_and_not (wi::to_widest (new_val
->value
),
558 || (TREE_CODE (new_val
->value
) != INTEGER_CST
559 && !operand_equal_p (new_val
->value
, old_val
->value
, 0)))))
561 /* ??? We would like to delay creation of INTEGER_CSTs from
562 partially constants here. */
564 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
566 dump_lattice_value (dump_file
, "Lattice value changed to ", *new_val
);
567 fprintf (dump_file
, ". Adding SSA edges to worklist.\n");
572 gcc_assert (new_val
->lattice_val
!= UNINITIALIZED
);
579 static ccp_prop_value_t
get_value_for_expr (tree
, bool);
580 static ccp_prop_value_t
bit_value_binop (enum tree_code
, tree
, tree
, tree
);
581 void bit_value_binop (enum tree_code
, signop
, int, widest_int
*, widest_int
*,
582 signop
, int, const widest_int
&, const widest_int
&,
583 signop
, int, const widest_int
&, const widest_int
&);
585 /* Return a widest_int that can be used for bitwise simplifications
589 value_to_wide_int (ccp_prop_value_t val
)
592 && TREE_CODE (val
.value
) == INTEGER_CST
)
593 return wi::to_widest (val
.value
);
598 /* Return the value for the address expression EXPR based on alignment
601 static ccp_prop_value_t
602 get_value_from_alignment (tree expr
)
604 tree type
= TREE_TYPE (expr
);
605 ccp_prop_value_t val
;
606 unsigned HOST_WIDE_INT bitpos
;
609 gcc_assert (TREE_CODE (expr
) == ADDR_EXPR
);
611 get_pointer_alignment_1 (expr
, &align
, &bitpos
);
612 val
.mask
= wi::bit_and_not
613 (POINTER_TYPE_P (type
) || TYPE_UNSIGNED (type
)
614 ? wi::mask
<widest_int
> (TYPE_PRECISION (type
), false)
616 align
/ BITS_PER_UNIT
- 1);
618 = wi::sext (val
.mask
, TYPE_PRECISION (type
)) == -1 ? VARYING
: CONSTANT
;
619 if (val
.lattice_val
== CONSTANT
)
620 val
.value
= build_int_cstu (type
, bitpos
/ BITS_PER_UNIT
);
622 val
.value
= NULL_TREE
;
627 /* Return the value for the tree operand EXPR. If FOR_BITS_P is true
628 return constant bits extracted from alignment information for
629 invariant addresses. */
631 static ccp_prop_value_t
632 get_value_for_expr (tree expr
, bool for_bits_p
)
634 ccp_prop_value_t val
;
636 if (TREE_CODE (expr
) == SSA_NAME
)
638 ccp_prop_value_t
*val_
= get_value (expr
);
643 val
.lattice_val
= VARYING
;
644 val
.value
= NULL_TREE
;
648 && val
.lattice_val
== CONSTANT
)
650 if (TREE_CODE (val
.value
) == ADDR_EXPR
)
651 val
= get_value_from_alignment (val
.value
);
652 else if (TREE_CODE (val
.value
) != INTEGER_CST
)
654 val
.lattice_val
= VARYING
;
655 val
.value
= NULL_TREE
;
659 /* Fall back to a copy value. */
661 && val
.lattice_val
== VARYING
662 && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (expr
))
664 val
.lattice_val
= CONSTANT
;
669 else if (is_gimple_min_invariant (expr
)
670 && (!for_bits_p
|| TREE_CODE (expr
) == INTEGER_CST
))
672 val
.lattice_val
= CONSTANT
;
675 canonicalize_value (&val
);
677 else if (TREE_CODE (expr
) == ADDR_EXPR
)
678 val
= get_value_from_alignment (expr
);
681 val
.lattice_val
= VARYING
;
683 val
.value
= NULL_TREE
;
686 if (val
.lattice_val
== VARYING
687 && INTEGRAL_TYPE_P (TREE_TYPE (expr
))
688 && TYPE_UNSIGNED (TREE_TYPE (expr
)))
689 val
.mask
= wi::zext (val
.mask
, TYPE_PRECISION (TREE_TYPE (expr
)));
694 /* Return the likely CCP lattice value for STMT.
696 If STMT has no operands, then return CONSTANT.
698 Else if undefinedness of operands of STMT cause its value to be
699 undefined, then return UNDEFINED.
701 Else if any operands of STMT are constants, then return CONSTANT.
703 Else return VARYING. */
706 likely_value (gimple
*stmt
)
708 bool has_constant_operand
, has_undefined_operand
, all_undefined_operands
;
709 bool has_nsa_operand
;
714 enum gimple_code code
= gimple_code (stmt
);
716 /* This function appears to be called only for assignments, calls,
717 conditionals, and switches, due to the logic in visit_stmt. */
718 gcc_assert (code
== GIMPLE_ASSIGN
719 || code
== GIMPLE_CALL
720 || code
== GIMPLE_COND
721 || code
== GIMPLE_SWITCH
);
723 /* If the statement has volatile operands, it won't fold to a
725 if (gimple_has_volatile_ops (stmt
))
728 /* .DEFERRED_INIT produces undefined. */
729 if (gimple_call_internal_p (stmt
, IFN_DEFERRED_INIT
))
732 /* Arrive here for more complex cases. */
733 has_constant_operand
= false;
734 has_undefined_operand
= false;
735 all_undefined_operands
= true;
736 has_nsa_operand
= false;
737 FOR_EACH_SSA_TREE_OPERAND (use
, stmt
, iter
, SSA_OP_USE
)
739 ccp_prop_value_t
*val
= get_value (use
);
741 if (val
&& val
->lattice_val
== UNDEFINED
)
742 has_undefined_operand
= true;
744 all_undefined_operands
= false;
746 if (val
&& val
->lattice_val
== CONSTANT
)
747 has_constant_operand
= true;
749 if (SSA_NAME_IS_DEFAULT_DEF (use
)
750 || !prop_simulate_again_p (SSA_NAME_DEF_STMT (use
)))
751 has_nsa_operand
= true;
754 /* There may be constants in regular rhs operands. For calls we
755 have to ignore lhs, fndecl and static chain, otherwise only
757 for (i
= (is_gimple_call (stmt
) ? 2 : 0) + gimple_has_lhs (stmt
);
758 i
< gimple_num_ops (stmt
); ++i
)
760 tree op
= gimple_op (stmt
, i
);
761 if (!op
|| TREE_CODE (op
) == SSA_NAME
)
763 if (is_gimple_min_invariant (op
))
764 has_constant_operand
= true;
767 if (has_constant_operand
)
768 all_undefined_operands
= false;
770 if (has_undefined_operand
771 && code
== GIMPLE_CALL
772 && gimple_call_internal_p (stmt
))
773 switch (gimple_call_internal_fn (stmt
))
775 /* These 3 builtins use the first argument just as a magic
776 way how to find out a decl uid. */
777 case IFN_GOMP_SIMD_LANE
:
778 case IFN_GOMP_SIMD_VF
:
779 case IFN_GOMP_SIMD_LAST_LANE
:
780 has_undefined_operand
= false;
786 /* If the operation combines operands like COMPLEX_EXPR make sure to
787 not mark the result UNDEFINED if only one part of the result is
789 if (has_undefined_operand
&& all_undefined_operands
)
791 else if (code
== GIMPLE_ASSIGN
&& has_undefined_operand
)
793 switch (gimple_assign_rhs_code (stmt
))
795 /* Unary operators are handled with all_undefined_operands. */
798 case POINTER_PLUS_EXPR
:
800 /* Not MIN_EXPR, MAX_EXPR. One VARYING operand may be selected.
801 Not bitwise operators, one VARYING operand may specify the
803 Not logical operators for the same reason, apart from XOR.
804 Not COMPLEX_EXPR as one VARYING operand makes the result partly
805 not UNDEFINED. Not *DIV_EXPR, comparisons and shifts because
806 the undefined operand may be promoted. */
810 /* If any part of an address is UNDEFINED, like the index
811 of an ARRAY_EXPR, then treat the result as UNDEFINED. */
818 /* If there was an UNDEFINED operand but the result may be not UNDEFINED
819 fall back to CONSTANT. During iteration UNDEFINED may still drop
821 if (has_undefined_operand
)
824 /* We do not consider virtual operands here -- load from read-only
825 memory may have only VARYING virtual operands, but still be
826 constant. Also we can combine the stmt with definitions from
827 operands whose definitions are not simulated again. */
828 if (has_constant_operand
830 || gimple_references_memory_p (stmt
))
836 /* Returns true if STMT cannot be constant. */
839 surely_varying_stmt_p (gimple
*stmt
)
841 /* If the statement has operands that we cannot handle, it cannot be
843 if (gimple_has_volatile_ops (stmt
))
846 /* If it is a call and does not return a value or is not a
847 builtin and not an indirect call or a call to function with
848 assume_aligned/alloc_align attribute, it is varying. */
849 if (is_gimple_call (stmt
))
851 tree fndecl
, fntype
= gimple_call_fntype (stmt
);
852 if (!gimple_call_lhs (stmt
)
853 || ((fndecl
= gimple_call_fndecl (stmt
)) != NULL_TREE
854 && !fndecl_built_in_p (fndecl
)
855 && !lookup_attribute ("assume_aligned",
856 TYPE_ATTRIBUTES (fntype
))
857 && !lookup_attribute ("alloc_align",
858 TYPE_ATTRIBUTES (fntype
))))
862 /* Any other store operation is not interesting. */
863 else if (gimple_vdef (stmt
))
866 /* Anything other than assignments and conditional jumps are not
867 interesting for CCP. */
868 if (gimple_code (stmt
) != GIMPLE_ASSIGN
869 && gimple_code (stmt
) != GIMPLE_COND
870 && gimple_code (stmt
) != GIMPLE_SWITCH
871 && gimple_code (stmt
) != GIMPLE_CALL
)
877 /* Initialize local data structures for CCP. */
880 ccp_initialize (void)
884 n_const_val
= num_ssa_names
;
885 const_val
= XCNEWVEC (ccp_prop_value_t
, n_const_val
);
887 /* Initialize simulation flags for PHI nodes and statements. */
888 FOR_EACH_BB_FN (bb
, cfun
)
890 gimple_stmt_iterator i
;
892 for (i
= gsi_start_bb (bb
); !gsi_end_p (i
); gsi_next (&i
))
894 gimple
*stmt
= gsi_stmt (i
);
897 /* If the statement is a control insn, then we do not
898 want to avoid simulating the statement once. Failure
899 to do so means that those edges will never get added. */
900 if (stmt_ends_bb_p (stmt
))
903 is_varying
= surely_varying_stmt_p (stmt
);
910 /* If the statement will not produce a constant, mark
911 all its outputs VARYING. */
912 FOR_EACH_SSA_TREE_OPERAND (def
, stmt
, iter
, SSA_OP_ALL_DEFS
)
913 set_value_varying (def
);
915 prop_set_simulate_again (stmt
, !is_varying
);
919 /* Now process PHI nodes. We never clear the simulate_again flag on
920 phi nodes, since we do not know which edges are executable yet,
921 except for phi nodes for virtual operands when we do not do store ccp. */
922 FOR_EACH_BB_FN (bb
, cfun
)
926 for (i
= gsi_start_phis (bb
); !gsi_end_p (i
); gsi_next (&i
))
928 gphi
*phi
= i
.phi ();
930 if (virtual_operand_p (gimple_phi_result (phi
)))
931 prop_set_simulate_again (phi
, false);
933 prop_set_simulate_again (phi
, true);
938 /* Debug count support. Reset the values of ssa names
939 VARYING when the total number ssa names analyzed is
940 beyond the debug count specified. */
946 for (i
= 0; i
< num_ssa_names
; i
++)
950 const_val
[i
].lattice_val
= VARYING
;
951 const_val
[i
].mask
= -1;
952 const_val
[i
].value
= NULL_TREE
;
958 /* We want to provide our own GET_VALUE and FOLD_STMT virtual methods. */
959 class ccp_folder
: public substitute_and_fold_engine
962 tree
value_of_expr (tree
, gimple
*) final override
;
963 bool fold_stmt (gimple_stmt_iterator
*) final override
;
966 /* This method just wraps GET_CONSTANT_VALUE for now. Over time
967 naked calls to GET_CONSTANT_VALUE should be eliminated in favor
968 of calling member functions. */
971 ccp_folder::value_of_expr (tree op
, gimple
*)
973 return get_constant_value (op
);
976 /* Do final substitution of propagated values, cleanup the flowgraph and
977 free allocated storage. If NONZERO_P, record nonzero bits.
979 Return TRUE when something was optimized. */
982 ccp_finalize (bool nonzero_p
)
984 bool something_changed
;
990 /* Derive alignment and misalignment information from partially
991 constant pointers in the lattice or nonzero bits from partially
992 constant integers. */
993 FOR_EACH_SSA_NAME (i
, name
, cfun
)
995 ccp_prop_value_t
*val
;
996 unsigned int tem
, align
;
998 if (!POINTER_TYPE_P (TREE_TYPE (name
))
999 && (!INTEGRAL_TYPE_P (TREE_TYPE (name
))
1000 /* Don't record nonzero bits before IPA to avoid
1001 using too much memory. */
1005 val
= get_value (name
);
1006 if (val
->lattice_val
!= CONSTANT
1007 || TREE_CODE (val
->value
) != INTEGER_CST
1011 if (POINTER_TYPE_P (TREE_TYPE (name
)))
1013 /* Trailing mask bits specify the alignment, trailing value
1014 bits the misalignment. */
1015 tem
= val
->mask
.to_uhwi ();
1016 align
= least_bit_hwi (tem
);
1018 set_ptr_info_alignment (get_ptr_info (name
), align
,
1019 (TREE_INT_CST_LOW (val
->value
)
1024 unsigned int precision
= TYPE_PRECISION (TREE_TYPE (val
->value
));
1025 wide_int value
= wi::to_wide (val
->value
);
1026 wide_int mask
= wide_int::from (val
->mask
, precision
, UNSIGNED
);
1027 set_bitmask (name
, value
, mask
);
1031 /* Perform substitutions based on the known constant values. */
1032 class ccp_folder ccp_folder
;
1033 something_changed
= ccp_folder
.substitute_and_fold ();
1037 return something_changed
;
1041 /* Compute the meet operator between *VAL1 and *VAL2. Store the result
1044 any M UNDEFINED = any
1045 any M VARYING = VARYING
1046 Ci M Cj = Ci if (i == j)
1047 Ci M Cj = VARYING if (i != j)
1051 ccp_lattice_meet (ccp_prop_value_t
*val1
, ccp_prop_value_t
*val2
)
1053 if (val1
->lattice_val
== UNDEFINED
1054 /* For UNDEFINED M SSA we can't always SSA because its definition
1055 may not dominate the PHI node. Doing optimistic copy propagation
1056 also causes a lot of gcc.dg/uninit-pred*.c FAILs. */
1057 && (val2
->lattice_val
!= CONSTANT
1058 || TREE_CODE (val2
->value
) != SSA_NAME
))
1060 /* UNDEFINED M any = any */
1063 else if (val2
->lattice_val
== UNDEFINED
1065 && (val1
->lattice_val
!= CONSTANT
1066 || TREE_CODE (val1
->value
) != SSA_NAME
))
1068 /* any M UNDEFINED = any
1069 Nothing to do. VAL1 already contains the value we want. */
1072 else if (val1
->lattice_val
== VARYING
1073 || val2
->lattice_val
== VARYING
)
1075 /* any M VARYING = VARYING. */
1076 val1
->lattice_val
= VARYING
;
1078 val1
->value
= NULL_TREE
;
1080 else if (val1
->lattice_val
== CONSTANT
1081 && val2
->lattice_val
== CONSTANT
1082 && TREE_CODE (val1
->value
) == INTEGER_CST
1083 && TREE_CODE (val2
->value
) == INTEGER_CST
)
1085 /* Ci M Cj = Ci if (i == j)
1086 Ci M Cj = VARYING if (i != j)
1088 For INTEGER_CSTs mask unequal bits. If no equal bits remain,
1090 val1
->mask
= (val1
->mask
| val2
->mask
1091 | (wi::to_widest (val1
->value
)
1092 ^ wi::to_widest (val2
->value
)));
1093 if (wi::sext (val1
->mask
, TYPE_PRECISION (TREE_TYPE (val1
->value
))) == -1)
1095 val1
->lattice_val
= VARYING
;
1096 val1
->value
= NULL_TREE
;
1099 else if (val1
->lattice_val
== CONSTANT
1100 && val2
->lattice_val
== CONSTANT
1101 && operand_equal_p (val1
->value
, val2
->value
, 0))
1103 /* Ci M Cj = Ci if (i == j)
1104 Ci M Cj = VARYING if (i != j)
1106 VAL1 already contains the value we want for equivalent values. */
1108 else if (val1
->lattice_val
== CONSTANT
1109 && val2
->lattice_val
== CONSTANT
1110 && (TREE_CODE (val1
->value
) == ADDR_EXPR
1111 || TREE_CODE (val2
->value
) == ADDR_EXPR
))
1113 /* When not equal addresses are involved try meeting for
1115 ccp_prop_value_t tem
= *val2
;
1116 if (TREE_CODE (val1
->value
) == ADDR_EXPR
)
1117 *val1
= get_value_for_expr (val1
->value
, true);
1118 if (TREE_CODE (val2
->value
) == ADDR_EXPR
)
1119 tem
= get_value_for_expr (val2
->value
, true);
1120 ccp_lattice_meet (val1
, &tem
);
1124 /* Any other combination is VARYING. */
1125 val1
->lattice_val
= VARYING
;
1127 val1
->value
= NULL_TREE
;
1132 /* Loop through the PHI_NODE's parameters for BLOCK and compare their
1133 lattice values to determine PHI_NODE's lattice value. The value of a
1134 PHI node is determined calling ccp_lattice_meet with all the arguments
1135 of the PHI node that are incoming via executable edges. */
1137 enum ssa_prop_result
1138 ccp_propagate::visit_phi (gphi
*phi
)
1141 ccp_prop_value_t new_val
;
/* Optionally dump the PHI node being visited.  */
1143 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1145 fprintf (dump_file
, "\nVisiting PHI node: ");
1146 print_gimple_stmt (dump_file
, phi
, 0, dump_flags
);
/* Start from UNDEFINED (the optimistic lattice value) so the first
   argument seen on an executable edge supplies the initial meet
   value.  */
1149 new_val
.lattice_val
= UNDEFINED
;
1150 new_val
.value
= NULL_TREE
;
/* Records whether any incoming edge was not marked executable; this is
   consulted below before propagating an SSA-name copy.  */
1154 bool non_exec_edge
= false;
1155 for (i
= 0; i
< gimple_phi_num_args (phi
); i
++)
1157 /* Compute the meet operator over all the PHI arguments flowing
1158 through executable edges. */
1159 edge e
= gimple_phi_arg_edge (phi
, i
);
1161 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1164 "\tArgument #%d (%d -> %d %sexecutable)\n",
1165 i
, e
->src
->index
, e
->dest
->index
,
1166 (e
->flags
& EDGE_EXECUTABLE
) ? "" : "not ");
1169 /* If the incoming edge is executable, Compute the meet operator for
1170 the existing value of the PHI node and the current PHI argument. */
1171 if (e
->flags
& EDGE_EXECUTABLE
)
1173 tree arg
= gimple_phi_arg (phi
, i
)->def
;
1174 ccp_prop_value_t arg_val
= get_value_for_expr (arg
, false);
1182 ccp_lattice_meet (&new_val
, &arg_val
);
1184 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1186 fprintf (dump_file
, "\t");
1187 print_generic_expr (dump_file
, arg
, dump_flags
);
1188 dump_lattice_value (dump_file
, "\tValue: ", arg_val
);
1189 fprintf (dump_file
, "\n");
/* Once the meet reaches VARYING it can never improve; presumably the
   argument loop is exited early here (intervening lines are elided in
   this excerpt — TODO confirm against the full file).  */
1192 if (new_val
.lattice_val
== VARYING
)
1196 non_exec_edge
= true;
1199 /* In case there were non-executable edges and the value is a copy
1200 make sure its definition dominates the PHI node. */
1202 && new_val
.lattice_val
== CONSTANT
1203 && TREE_CODE (new_val
.value
) == SSA_NAME
1204 && ! SSA_NAME_IS_DEFAULT_DEF (new_val
.value
)
1205 && ! dominated_by_p (CDI_DOMINATORS
, gimple_bb (phi
),
1206 gimple_bb (SSA_NAME_DEF_STMT (new_val
.value
))))
/* The copied name's definition does not dominate the PHI node; drop to
   VARYING rather than propagate a copy that may not be available.  */
1208 new_val
.lattice_val
= VARYING
;
1209 new_val
.value
= NULL_TREE
;
1213 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1215 dump_lattice_value (dump_file
, "\n PHI node value: ", new_val
);
1216 fprintf (dump_file
, "\n\n");
1219 /* Make the transition to the new value. */
1220 if (set_lattice_value (gimple_phi_result (phi
), &new_val
))
1222 if (new_val
.lattice_val
== VARYING
)
1223 return SSA_PROP_VARYING
;
1225 return SSA_PROP_INTERESTING
;
/* The lattice value did not change; no uses need re-simulation.  */
1228 return SSA_PROP_NOT_INTERESTING
;
1231 /* Return the constant value for OP or OP otherwise. */
/* NOTE(review): the return-type line and the function tail are elided in
   this excerpt; only the SSA_NAME constant lookup is visible.  */
1234 valueize_op (tree op
)
1236 if (TREE_CODE (op
) == SSA_NAME
)
/* Look up the lattice constant recorded for this SSA name, if any.  */
1238 tree tem
= get_constant_value (op
);
1245 /* Return the constant value for OP, but signal to not follow SSA
1246 edges if the definition may be simulated again. */
1249 valueize_op_1 (tree op
)
1251 if (TREE_CODE (op
) == SSA_NAME
)
1253 /* If the definition may be simulated again we cannot follow
1254 this SSA edge as the SSA propagator does not necessarily
1255 re-visit the use. */
1256 gimple
*def_stmt
= SSA_NAME_DEF_STMT (op
);
1257 if (!gimple_nop_p (def_stmt
)
1258 && prop_simulate_again_p (def_stmt
))
/* The definition is final (not simulated again), so the recorded
   lattice constant may be used here.  */
1260 tree tem
= get_constant_value (op
);
1267 /* CCP specific front-end to the non-destructive constant folding
1270 Attempt to simplify the RHS of STMT knowing that one or more
1271 operands are constants.
1273 If simplification is possible, return the simplified RHS,
1274 otherwise return the original RHS or NULL_TREE. */
1277 ccp_fold (gimple
*stmt
)
1279 switch (gimple_code (stmt
))
1283 /* Return the constant switch index. */
1284 return valueize_op (gimple_switch_index (as_a
<gswitch
*> (stmt
)));
/* Fold via the generic statement folder, valueizing operands with
   valueize_op and valueize_op_1 (the latter refuses SSA edges whose
   definitions may be simulated again).  */
1290 return gimple_fold_stmt_to_constant_1 (stmt
,
1291 valueize_op
, valueize_op_1
);
1298 /* Determine the minimum and maximum values, *MIN and *MAX respectively,
1299 represented by the mask pair VAL and MASK with signedness SGN and
1300 precision PRECISION. */
1303 value_mask_to_min_max (widest_int
*min
, widest_int
*max
,
1304 const widest_int
&val
, const widest_int
&mask
,
1305 signop sgn
, int precision
)
/* Minimum candidate: all unknown (mask) bits cleared.  */
1307 *min
= wi::bit_and_not (val
, mask
);
/* If the sign bit itself is unknown for a signed value, the extremes
   straddle the sign bit (the adjustment lines are elided in this
   excerpt — TODO confirm against the full file).  */
1309 if (sgn
== SIGNED
&& wi::neg_p (mask
))
1311 widest_int sign_bit
= wi::lshift (1, precision
- 1);
1314 /* MAX is zero extended, and MIN is sign extended. */
1315 *min
= wi::ext (*min
, precision
, sgn
);
1316 *max
= wi::ext (*max
, precision
, sgn
);
1320 /* Apply the operation CODE in type TYPE to the value, mask pair
1321 RVAL and RMASK representing a value of type RTYPE and set
1322 the value, mask pair *VAL and *MASK to the result. */
1325 bit_value_unop (enum tree_code code
, signop type_sgn
, int type_precision
,
1326 widest_int
*val
, widest_int
*mask
,
1327 signop rtype_sgn
, int rtype_precision
,
1328 const widest_int
&rval
, const widest_int
&rmask
)
/* Negation case (enclosing case label elided in this excerpt):
   computed as ~rval + 1 using the BIT_NOT_EXPR and PLUS_EXPR
   helpers so mask propagation is reused.  */
1339 widest_int temv
, temm
;
1340 /* Return ~rval + 1. */
1341 bit_value_unop (BIT_NOT_EXPR
, type_sgn
, type_precision
, &temv
, &temm
,
1342 type_sgn
, type_precision
, rval
, rmask
);
1343 bit_value_binop (PLUS_EXPR
, type_sgn
, type_precision
, val
, mask
,
1344 type_sgn
, type_precision
, temv
, temm
,
1345 type_sgn
, type_precision
, 1, 0);
/* Conversion between precisions (case label elided): extend in the
   source type, then re-extend in the destination type.  */
1351 /* First extend mask and value according to the original type. */
1352 *mask
= wi::ext (rmask
, rtype_precision
, rtype_sgn
);
1353 *val
= wi::ext (rval
, rtype_precision
, rtype_sgn
);
1355 /* Then extend mask and value according to the target type. */
1356 *mask
= wi::ext (*mask
, type_precision
, type_sgn
);
1357 *val
= wi::ext (*val
, type_precision
, type_sgn
);
/* Absolute-value-style case (label elided): dispatch on what is known
   about the operand's sign.  A fully-unknown operand (mask all ones)
   is handled first.  */
1363 if (wi::sext (rmask
, rtype_precision
) == -1)
1368 else if (wi::neg_p (rmask
))
1370 /* Result is either rval or -rval. */
1371 widest_int temv
, temm
;
1372 bit_value_unop (NEGATE_EXPR
, rtype_sgn
, rtype_precision
, &temv
,
1373 &temm
, type_sgn
, type_precision
, rval
, rmask
);
/* Unknown bits are those unknown in either candidate plus any bit
   where rval and -rval disagree.  */
1374 temm
|= (rmask
| (rval
^ temv
));
1375 /* Extend the result. */
1376 *mask
= wi::ext (temm
, type_precision
, type_sgn
);
1377 *val
= wi::ext (temv
, type_precision
, type_sgn
);
/* Known-negative operand: the result is exactly the negation.  */
1379 else if (wi::neg_p (rval
))
1381 bit_value_unop (NEGATE_EXPR
, type_sgn
, type_precision
, val
, mask
,
1382 type_sgn
, type_precision
, rval
, rmask
);
1398 /* Determine the mask pair *VAL and *MASK from multiplying the
1399 argument mask pair RVAL, RMASK by the unsigned constant C.
   Implemented as shift-and-add over the set bits of C; carry
   uncertainty is detected by comparing the all-unknown-bits-zero sum
   (LO) against the all-unknown-bits-one sum (HI).  */
1401 bit_value_mult_const (signop sgn
, int width
,
1402 widest_int
*val
, widest_int
*mask
,
1403 const widest_int
&rval
, const widest_int
&rmask
,
1406 widest_int sum_mask
= 0;
1408 /* Ensure rval_lo only contains known bits. */
1409 widest_int rval_lo
= wi::bit_and_not (rval
, rmask
);
1413 /* General case (some bits of multiplicand are known set). */
1414 widest_int sum_val
= 0;
/* Loop over the set bits of C (loop header elided in this excerpt).  */
1417 /* Determine the lowest bit set in the multiplier. */
1418 int bitpos
= wi::ctz (c
);
1419 widest_int term_mask
= rmask
<< bitpos
;
1420 widest_int term_val
= rval_lo
<< bitpos
;
/* LO/HI bound the partial sum; bits where they differ had an unknown
   carry-in and become unknown in the accumulated mask.  */
1423 widest_int lo
= sum_val
+ term_val
;
1424 widest_int hi
= (sum_val
| sum_mask
) + (term_val
| term_mask
);
1425 sum_mask
|= term_mask
| (lo
^ hi
);
1428 /* Clear this bit in the multiplier. */
1429 c
^= wi::lshift (1, bitpos
);
1431 /* Correctly extend the result value. */
1432 *val
= wi::ext (sum_val
, width
, sgn
);
1436 /* Special case (no bits of multiplicand are known set). */
/* Only the mask needs accumulating here; the value stays zero.  */
1439 /* Determine the lowest bit set in the multiplier. */
1440 int bitpos
= wi::ctz (c
);
1441 widest_int term_mask
= rmask
<< bitpos
;
1444 widest_int hi
= sum_mask
+ term_mask
;
1445 sum_mask
|= term_mask
| hi
;
1447 /* Clear this bit in the multiplier. */
1448 c
^= wi::lshift (1, bitpos
);
1453 /* Correctly extend the result mask. */
1454 *mask
= wi::ext (sum_mask
, width
, sgn
);
1457 /* Fill up to MAX values in the BITS array with values representing
1458 each of the non-zero bits in the value X. Returns the number of
1459 bits in X (capped at the maximum value MAX). For example, an X
1460 value 11, places 1, 2 and 8 in BITS and returns the value 3. */
1463 get_individual_bits (widest_int
*bits
, widest_int x
, unsigned int max
)
1465 unsigned int count
= 0;
1466 while (count
< max
&& x
!= 0)
/* Peel off the lowest set bit of X each iteration.  NOTE(review): the
   lines clearing that bit from X and the final return of COUNT are
   elided in this excerpt.  */
1468 int bitpos
= wi::ctz (x
);
1469 bits
[count
] = wi::lshift (1, bitpos
);
1476 /* Array of 2^N - 1 values representing the bits flipped between
1477 consecutive Gray codes. This is used to efficiently enumerate
1478 all permutations on N bits using XOR.
   Each entry indexes into the array produced by get_individual_bits,
   so XOR-ing bits[gray_code_bit_flips[i]] into an accumulator walks
   through every combination of up to 6 individual bits.  */
1479 static const unsigned char gray_code_bit_flips
[63] = {
1480 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4,
1481 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5,
1482 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4,
1483 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0
1486 /* Apply the operation CODE in type TYPE to the value, mask pairs
1487 R1VAL, R1MASK and R2VAL, R2MASK representing a values of type R1TYPE
1488 and R2TYPE and set the value, mask pair *VAL and *MASK to the result.
   NOTE(review): many case labels and closing braces of this switch are
   elided in this excerpt; section comments below identify the cases
   from their bodies.  */
1491 bit_value_binop (enum tree_code code
, signop sgn
, int width
,
1492 widest_int
*val
, widest_int
*mask
,
1493 signop r1type_sgn
, int r1type_precision
,
1494 const widest_int
&r1val
, const widest_int
&r1mask
,
1495 signop r2type_sgn
, int r2type_precision ATTRIBUTE_UNUSED
,
1496 const widest_int
&r2val
, const widest_int
&r2mask
)
/* Set when a comparison below is canonicalized by swapping operands.  */
1498 bool swap_p
= false;
1500 /* Assume we'll get a constant result. Use an initial non varying
1501 value, we fall back to varying in the end if necessary. */
1503 /* Ensure that VAL is initialized (to any value). */
/* Bitwise AND.  */
1509 /* The mask is constant where there is a known not
1510 set bit, (m1 | m2) & ((v1 | m1) & (v2 | m2)) */
1511 *mask
= (r1mask
| r2mask
) & (r1val
| r1mask
) & (r2val
| r2mask
);
1512 *val
= r1val
& r2val
;
/* Bitwise inclusive OR.  */
1516 /* The mask is constant where there is a known
1517 set bit, (m1 | m2) & ~((v1 & ~m1) | (v2 & ~m2)). */
1518 *mask
= wi::bit_and_not (r1mask
| r2mask
,
1519 wi::bit_and_not (r1val
, r1mask
)
1520 | wi::bit_and_not (r2val
, r2mask
));
1521 *val
= r1val
| r2val
;
/* Bitwise XOR: a result bit is unknown iff it is unknown in either
   operand.  */
1526 *mask
= r1mask
| r2mask
;
1527 *val
= r1val
^ r2val
;
/* Rotates.  First the case of an exactly-known rotate count.  */
1534 widest_int shift
= r2val
;
1542 if (wi::neg_p (shift
, r2type_sgn
))
/* A negative count rotates the other way.  */
1545 if (code
== RROTATE_EXPR
)
1546 code
= LROTATE_EXPR
;
1548 code
= RROTATE_EXPR
;
1550 if (code
== RROTATE_EXPR
)
1552 *mask
= wi::rrotate (r1mask
, shift
, width
);
1553 *val
= wi::rrotate (r1val
, shift
, width
);
1557 *mask
= wi::lrotate (r1mask
, shift
, width
);
1558 *val
= wi::lrotate (r1val
, shift
, width
);
1560 *mask
= wi::ext (*mask
, width
, sgn
);
1561 *val
= wi::ext (*val
, width
, sgn
);
/* Partially-known rotate count with at most 4 unknown bits: enumerate
   all possible counts via a Gray-code walk and meet the results.  */
1564 else if (wi::ltu_p (r2val
| r2mask
, width
)
1565 && wi::popcount (r2mask
) <= 4)
1568 widest_int res_val
, res_mask
;
1569 widest_int tmp_val
, tmp_mask
;
1570 widest_int shift
= wi::bit_and_not (r2val
, r2mask
);
1571 unsigned int bit_count
= get_individual_bits (bits
, r2mask
, 4);
1572 unsigned int count
= (1 << bit_count
) - 1;
1574 /* Initialize result to rotate by smallest value of shift. */
1575 if (code
== RROTATE_EXPR
)
1577 res_mask
= wi::rrotate (r1mask
, shift
, width
);
1578 res_val
= wi::rrotate (r1val
, shift
, width
);
1582 res_mask
= wi::lrotate (r1mask
, shift
, width
);
1583 res_val
= wi::lrotate (r1val
, shift
, width
);
1586 /* Iterate through the remaining values of shift. */
1587 for (unsigned int i
=0; i
<count
; i
++)
1589 shift
^= bits
[gray_code_bit_flips
[i
]];
1590 if (code
== RROTATE_EXPR
)
1592 tmp_mask
= wi::rrotate (r1mask
, shift
, width
);
1593 tmp_val
= wi::rrotate (r1val
, shift
, width
);
1597 tmp_mask
= wi::lrotate (r1mask
, shift
, width
);
1598 tmp_val
= wi::lrotate (r1val
, shift
, width
);
1600 /* Accumulate the result. */
1601 res_mask
|= tmp_mask
| (res_val
^ tmp_val
);
1603 *val
= wi::ext (wi::bit_and_not (res_val
, res_mask
), width
, sgn
);
1604 *mask
= wi::ext (res_mask
, width
, sgn
);
/* Shifts (LSHIFT_EXPR / RSHIFT_EXPR).  Exactly-known shift count
   first.  */
1610 /* ??? We can handle partially known shift counts if we know
1611 its sign. That way we can tell that (x << (y | 8)) & 255
1615 widest_int shift
= r2val
;
1623 if (wi::neg_p (shift
, r2type_sgn
))
1625 if (code
== RSHIFT_EXPR
)
1627 *mask
= wi::rshift (wi::ext (r1mask
, width
, sgn
), shift
, sgn
);
1628 *val
= wi::rshift (wi::ext (r1val
, width
, sgn
), shift
, sgn
);
1632 *mask
= wi::ext (r1mask
<< shift
, width
, sgn
);
1633 *val
= wi::ext (r1val
<< shift
, width
, sgn
);
/* Shift count in range but not exactly known.  */
1637 else if (wi::ltu_p (r2val
| r2mask
, width
))
/* Few unknown count bits: enumerate the candidates as for rotates.  */
1639 if (wi::popcount (r2mask
) <= 4)
1642 widest_int arg_val
, arg_mask
;
1643 widest_int res_val
, res_mask
;
1644 widest_int tmp_val
, tmp_mask
;
1645 widest_int shift
= wi::bit_and_not (r2val
, r2mask
);
1646 unsigned int bit_count
= get_individual_bits (bits
, r2mask
, 4);
1647 unsigned int count
= (1 << bit_count
) - 1;
1649 /* Initialize result to shift by smallest value of shift. */
1650 if (code
== RSHIFT_EXPR
)
1652 arg_mask
= wi::ext (r1mask
, width
, sgn
);
1653 arg_val
= wi::ext (r1val
, width
, sgn
);
1654 res_mask
= wi::rshift (arg_mask
, shift
, sgn
);
1655 res_val
= wi::rshift (arg_val
, shift
, sgn
);
1661 res_mask
= arg_mask
<< shift
;
1662 res_val
= arg_val
<< shift
;
1665 /* Iterate through the remaining values of shift. */
1666 for (unsigned int i
=0; i
<count
; i
++)
1668 shift
^= bits
[gray_code_bit_flips
[i
]];
1669 if (code
== RSHIFT_EXPR
)
1671 tmp_mask
= wi::rshift (arg_mask
, shift
, sgn
);
1672 tmp_val
= wi::rshift (arg_val
, shift
, sgn
);
1676 tmp_mask
= arg_mask
<< shift
;
1677 tmp_val
= arg_val
<< shift
;
1679 /* Accumulate the result. */
1680 res_mask
|= tmp_mask
| (res_val
^ tmp_val
);
1682 res_mask
= wi::ext (res_mask
, width
, sgn
);
1683 res_val
= wi::ext (res_val
, width
, sgn
);
1684 *val
= wi::bit_and_not (res_val
, res_mask
);
1687 else if ((r1val
| r1mask
) == 0)
1689 /* Handle shifts of zero to avoid undefined wi::ctz below. */
/* Otherwise only bound which bits can possibly be set.  */
1693 else if (code
== LSHIFT_EXPR
)
1695 widest_int tmp
= wi::mask
<widest_int
> (width
, false);
1696 tmp
<<= wi::ctz (r1val
| r1mask
);
1697 tmp
<<= wi::bit_and_not (r2val
, r2mask
);
1698 *mask
= wi::ext (tmp
, width
, sgn
);
1701 else if (!wi::neg_p (r1val
| r1mask
, sgn
))
1703 /* Logical right shift, or zero sign bit. */
1704 widest_int arg
= r1val
| r1mask
;
1705 int lzcount
= wi::clz (arg
);
1707 lzcount
-= wi::get_precision (arg
) - width
;
1708 widest_int tmp
= wi::mask
<widest_int
> (width
, false);
1709 tmp
= wi::lrshift (tmp
, lzcount
);
1710 tmp
= wi::lrshift (tmp
, wi::bit_and_not (r2val
, r2mask
));
1711 *mask
= wi::ext (tmp
, width
, sgn
);
1714 else if (!wi::neg_p (r1mask
))
1716 /* Arithmetic right shift with set sign bit. */
1717 widest_int arg
= wi::bit_and_not (r1val
, r1mask
);
1718 int sbcount
= wi::clrsb (arg
);
1719 sbcount
-= wi::get_precision (arg
) - width
;
1720 widest_int tmp
= wi::mask
<widest_int
> (width
, false);
1721 tmp
= wi::lrshift (tmp
, sbcount
);
1722 tmp
= wi::lrshift (tmp
, wi::bit_and_not (r2val
, r2mask
));
1723 *mask
= wi::sext (tmp
, width
);
1724 tmp
= wi::bit_not (tmp
);
1725 *val
= wi::sext (tmp
, width
);
/* Addition (and pointer addition).  */
1731 case POINTER_PLUS_EXPR
:
1733 /* Do the addition with unknown bits set to zero, to give carry-ins of
1734 zero wherever possible. */
1735 widest_int lo
= (wi::bit_and_not (r1val
, r1mask
)
1736 + wi::bit_and_not (r2val
, r2mask
));
1737 lo
= wi::ext (lo
, width
, sgn
);
1738 /* Do the addition with unknown bits set to one, to give carry-ins of
1739 one wherever possible. */
1740 widest_int hi
= (r1val
| r1mask
) + (r2val
| r2mask
);
1741 hi
= wi::ext (hi
, width
, sgn
);
1742 /* Each bit in the result is known if (a) the corresponding bits in
1743 both inputs are known, and (b) the carry-in to that bit position
1744 is known. We can check condition (b) by seeing if we got the same
1745 result with minimised carries as with maximised carries. */
1746 *mask
= r1mask
| r2mask
| (lo
^ hi
);
1747 *mask
= wi::ext (*mask
, width
, sgn
);
1748 /* It shouldn't matter whether we choose lo or hi here. */
/* Subtraction (and pointer difference).  */
1754 case POINTER_DIFF_EXPR
:
1756 /* Subtraction is derived from the addition algorithm above. */
1757 widest_int lo
= wi::bit_and_not (r1val
, r1mask
) - (r2val
| r2mask
);
1758 lo
= wi::ext (lo
, width
, sgn
);
1759 widest_int hi
= (r1val
| r1mask
) - wi::bit_and_not (r2val
, r2mask
);
1760 hi
= wi::ext (hi
, width
, sgn
);
1761 *mask
= r1mask
| r2mask
| (lo
^ hi
);
1762 *mask
= wi::ext (*mask
, width
, sgn
);
/* Multiplication: when one operand is a fully-known, non-negative
   constant use bit_value_mult_const; the popcount limit bounds the
   cost unless expensive optimizations are enabled.  */
1769 && !wi::neg_p (r2val
, sgn
)
1770 && (flag_expensive_optimizations
|| wi::popcount (r2val
) < 8))
1771 bit_value_mult_const (sgn
, width
, val
, mask
, r1val
, r1mask
, r2val
);
1772 else if (r1mask
== 0
1773 && !wi::neg_p (r1val
, sgn
)
1774 && (flag_expensive_optimizations
|| wi::popcount (r1val
) < 8))
1775 bit_value_mult_const (sgn
, width
, val
, mask
, r2val
, r2mask
, r1val
);
1778 /* Just track trailing zeros in both operands and transfer
1779 them to the other. */
1780 int r1tz
= wi::ctz (r1val
| r1mask
);
1781 int r2tz
= wi::ctz (r2val
| r2mask
);
1782 if (r1tz
+ r2tz
>= width
)
1787 else if (r1tz
+ r2tz
> 0)
1789 *mask
= wi::ext (wi::mask
<widest_int
> (r1tz
+ r2tz
, true),
/* Equality comparisons (EQ_EXPR / NE_EXPR, labels elided): decidable
   when the commonly-known bits of the operands differ.  */
1799 widest_int m
= r1mask
| r2mask
;
1800 if (wi::bit_and_not (r1val
, m
) != wi::bit_and_not (r2val
, m
))
1803 *val
= ((code
== EQ_EXPR
) ? 0 : 1);
1807 /* We know the result of a comparison is always one or zero. */
/* Ordered comparisons: canonicalize GT/GE to LT/LE by swapping.  */
1817 code
= swap_tree_comparison (code
);
1822 widest_int min1
, max1
, min2
, max2
;
1825 const widest_int
&o1val
= swap_p
? r2val
: r1val
;
1826 const widest_int
&o1mask
= swap_p
? r2mask
: r1mask
;
1827 const widest_int
&o2val
= swap_p
? r1val
: r2val
;
1828 const widest_int
&o2mask
= swap_p
? r1mask
: r2mask
;
1830 value_mask_to_min_max (&min1
, &max1
, o1val
, o1mask
,
1831 r1type_sgn
, r1type_precision
);
1832 value_mask_to_min_max (&min2
, &max2
, o2val
, o2mask
,
1833 r1type_sgn
, r1type_precision
);
1835 /* For comparisons the signedness is in the comparison operands. */
1836 /* Do a cross comparison of the max/min pairs. */
1837 maxmin
= wi::cmp (max1
, min2
, r1type_sgn
);
1838 minmax
= wi::cmp (min1
, max2
, r1type_sgn
);
1839 if (maxmin
< (code
== LE_EXPR
? 1: 0)) /* o1 < or <= o2. */
1844 else if (minmax
> (code
== LT_EXPR
? -1 : 0)) /* o1 >= or > o2. */
1849 else if (maxmin
== minmax
) /* o1 and o2 are equal. */
1851 /* This probably should never happen as we'd have
1852 folded the thing during fully constant value folding. */
1854 *val
= (code
== LE_EXPR
? 1 : 0);
1858 /* We know the result of a comparison is always one or zero. */
/* MIN_EXPR / MAX_EXPR: use the value ranges to decide which operand
   is selected, otherwise the result is one of the two.  */
1868 widest_int min1
, max1
, min2
, max2
;
1870 value_mask_to_min_max (&min1
, &max1
, r1val
, r1mask
, sgn
, width
);
1871 value_mask_to_min_max (&min2
, &max2
, r2val
, r2mask
, sgn
, width
);
1873 if (wi::cmp (max1
, min2
, sgn
) <= 0) /* r1 is less than r2. */
1875 if (code
== MIN_EXPR
)
1886 else if (wi::cmp (min1
, max2
, sgn
) >= 0) /* r2 is less than r1. */
1888 if (code
== MIN_EXPR
)
1901 /* The result is either r1 or r2. */
1902 *mask
= r1mask
| r2mask
| (r1val
^ r2val
);
/* Unsigned (or provably non-negative) modulo.  */
1908 case TRUNC_MOD_EXPR
:
1910 widest_int r1max
= r1val
| r1mask
;
1911 widest_int r2max
= r2val
| r2mask
;
1913 || (!wi::neg_p (r1max
) && !wi::neg_p (r2max
)))
1915 /* Confirm R2 has some bits set, to avoid division by zero. */
1916 widest_int r2min
= wi::bit_and_not (r2val
, r2mask
);
1919 /* R1 % R2 is R1 if R1 is always less than R2. */
1920 if (wi::ltu_p (r1max
, r2min
))
1927 /* R1 % R2 is always less than the maximum of R2. */
1928 unsigned int lzcount
= wi::clz (r2max
);
1929 unsigned int bits
= wi::get_precision (r2max
) - lzcount
;
1930 if (r2max
== wi::lshift (1, bits
))
1932 *mask
= wi::mask
<widest_int
> (bits
, false);
/* Unsigned (or provably non-negative) division.  */
1940 case TRUNC_DIV_EXPR
:
1942 widest_int r1max
= r1val
| r1mask
;
1943 widest_int r2max
= r2val
| r2mask
;
1944 if (r2mask
== 0 && !wi::neg_p (r1max
))
1946 widest_int shift
= wi::exact_log2 (r2val
);
1949 // Handle division by a power of 2 as an rshift.
1950 bit_value_binop (RSHIFT_EXPR
, sgn
, width
, val
, mask
,
1951 r1type_sgn
, r1type_precision
, r1val
, r1mask
,
1952 r2type_sgn
, r2type_precision
, shift
, r2mask
);
1957 || (!wi::neg_p (r1max
) && !wi::neg_p (r2max
)))
1959 /* Confirm R2 has some bits set, to avoid division by zero. */
1960 widest_int r2min
= wi::bit_and_not (r2val
, r2mask
);
1963 /* R1 / R2 is zero if R1 is always less than R2. */
1964 if (wi::ltu_p (r1max
, r2min
))
/* Otherwise bound the quotient by r1max / r2min and mark only the
   bits below that bound as possibly set.  */
1972 = wi::udiv_trunc (wi::zext (r1max
, width
), r2min
);
1973 unsigned int lzcount
= wi::clz (upper
);
1974 unsigned int bits
= wi::get_precision (upper
) - lzcount
;
1975 *mask
= wi::mask
<widest_int
> (bits
, false);
1987 /* Return the propagation value when applying the operation CODE to
1988 the value RHS yielding type TYPE.
   Tree-level wrapper around the widest_int bit_value_unop above.  */
1990 static ccp_prop_value_t
1991 bit_value_unop (enum tree_code code
, tree type
, tree rhs
)
1993 ccp_prop_value_t rval
= get_value_for_expr (rhs
, true);
1994 widest_int value
, mask
;
1995 ccp_prop_value_t val
;
/* An UNDEFINED operand yields an early answer (body elided here).  */
1997 if (rval
.lattice_val
== UNDEFINED
)
/* The lattice invariant: either an INTEGER_CST constant, or a mask of
   all ones (nothing known).  */
2000 gcc_assert ((rval
.lattice_val
== CONSTANT
2001 && TREE_CODE (rval
.value
) == INTEGER_CST
)
2002 || wi::sext (rval
.mask
, TYPE_PRECISION (TREE_TYPE (rhs
))) == -1);
2003 bit_value_unop (code
, TYPE_SIGN (type
), TYPE_PRECISION (type
), &value
, &mask
,
2004 TYPE_SIGN (TREE_TYPE (rhs
)), TYPE_PRECISION (TREE_TYPE (rhs
)),
2005 value_to_wide_int (rval
), rval
.mask
);
/* A mask of all ones means no bit is known: the result is VARYING.  */
2006 if (wi::sext (mask
, TYPE_PRECISION (type
)) != -1)
2008 val
.lattice_val
= CONSTANT
;
2010 /* ??? Delay building trees here. */
2011 val
.value
= wide_int_to_tree (type
, value
);
2015 val
.lattice_val
= VARYING
;
2016 val
.value
= NULL_TREE
;
2022 /* Return the propagation value when applying the operation CODE to
2023 the values RHS1 and RHS2 yielding type TYPE.
   Tree-level wrapper around the widest_int bit_value_binop above.  */
2025 static ccp_prop_value_t
2026 bit_value_binop (enum tree_code code
, tree type
, tree rhs1
, tree rhs2
)
2028 ccp_prop_value_t r1val
= get_value_for_expr (rhs1
, true);
2029 ccp_prop_value_t r2val
= get_value_for_expr (rhs2
, true);
2030 widest_int value
, mask
;
2031 ccp_prop_value_t val
;
/* Any UNDEFINED operand makes the whole result VARYING here.  */
2033 if (r1val
.lattice_val
== UNDEFINED
2034 || r2val
.lattice_val
== UNDEFINED
)
2036 val
.lattice_val
= VARYING
;
2037 val
.value
= NULL_TREE
;
/* Lattice invariant for both operands: INTEGER_CST constant, or a
   mask of all ones (nothing known).  */
2042 gcc_assert ((r1val
.lattice_val
== CONSTANT
2043 && TREE_CODE (r1val
.value
) == INTEGER_CST
)
2044 || wi::sext (r1val
.mask
,
2045 TYPE_PRECISION (TREE_TYPE (rhs1
))) == -1);
2046 gcc_assert ((r2val
.lattice_val
== CONSTANT
2047 && TREE_CODE (r2val
.value
) == INTEGER_CST
)
2048 || wi::sext (r2val
.mask
,
2049 TYPE_PRECISION (TREE_TYPE (rhs2
))) == -1);
2050 bit_value_binop (code
, TYPE_SIGN (type
), TYPE_PRECISION (type
), &value
, &mask
,
2051 TYPE_SIGN (TREE_TYPE (rhs1
)), TYPE_PRECISION (TREE_TYPE (rhs1
)),
2052 value_to_wide_int (r1val
), r1val
.mask
,
2053 TYPE_SIGN (TREE_TYPE (rhs2
)), TYPE_PRECISION (TREE_TYPE (rhs2
)),
2054 value_to_wide_int (r2val
), r2val
.mask
);
/* A square always has bit 1 clear; refine value and mask accordingly
   (the computation of M is elided in this excerpt).  */
2056 /* (x * x) & 2 == 0. */
2057 if (code
== MULT_EXPR
&& rhs1
== rhs2
&& TYPE_PRECISION (type
) > 1)
2060 if (wi::sext (mask
, TYPE_PRECISION (type
)) != -1)
2061 value
= wi::bit_and_not (value
, m
);
2064 mask
= wi::bit_and_not (mask
, m
);
/* A mask of all ones means no bit is known: the result is VARYING.  */
2067 if (wi::sext (mask
, TYPE_PRECISION (type
)) != -1)
2069 val
.lattice_val
= CONSTANT
;
2071 /* ??? Delay building trees here. */
2072 val
.value
= wide_int_to_tree (type
, value
);
2076 val
.lattice_val
= VARYING
;
2077 val
.value
= NULL_TREE
;
2083 /* Return the propagation value for __builtin_assume_aligned
2084 and functions with assume_aligned or alloc_aligned attribute.
2085 For __builtin_assume_aligned, ATTR is NULL_TREE,
2086 for assume_aligned attribute ATTR is non-NULL and ALLOC_ALIGNED
2087 is false, for alloc_aligned attribute ATTR is non-NULL and
2088 ALLOC_ALIGNED is true. */
2090 static ccp_prop_value_t
2091 bit_value_assume_aligned (gimple
*stmt
, tree attr
, ccp_prop_value_t ptrval
,
2094 tree align
, misalign
= NULL_TREE
, type
;
2095 unsigned HOST_WIDE_INT aligni
, misaligni
= 0;
2096 ccp_prop_value_t alignval
;
2097 widest_int value
, mask
;
2098 ccp_prop_value_t val
;
/* For the builtin, the pointer operand is argument 0; otherwise the
   tracked value is the call's LHS and PTRVAL was supplied.  */
2100 if (attr
== NULL_TREE
)
2102 tree ptr
= gimple_call_arg (stmt
, 0);
2103 type
= TREE_TYPE (ptr
);
2104 ptrval
= get_value_for_expr (ptr
, true);
2108 tree lhs
= gimple_call_lhs (stmt
);
2109 type
= TREE_TYPE (lhs
);
2112 if (ptrval
.lattice_val
== UNDEFINED
)
/* Lattice invariant: INTEGER_CST constant or all-ones mask.  */
2114 gcc_assert ((ptrval
.lattice_val
== CONSTANT
2115 && TREE_CODE (ptrval
.value
) == INTEGER_CST
)
2116 || wi::sext (ptrval
.mask
, TYPE_PRECISION (type
)) == -1);
2117 if (attr
== NULL_TREE
)
2119 /* Get aligni and misaligni from __builtin_assume_aligned. */
2120 align
= gimple_call_arg (stmt
, 1);
2121 if (!tree_fits_uhwi_p (align
))
2123 aligni
= tree_to_uhwi (align
);
2124 if (gimple_call_num_args (stmt
) > 2)
2126 misalign
= gimple_call_arg (stmt
, 2);
2127 if (!tree_fits_uhwi_p (misalign
))
2129 misaligni
= tree_to_uhwi (misalign
);
2134 /* Get aligni and misaligni from assume_aligned or
2135 alloc_align attributes. */
2136 if (TREE_VALUE (attr
) == NULL_TREE
)
2138 attr
= TREE_VALUE (attr
);
2139 align
= TREE_VALUE (attr
);
2140 if (!tree_fits_uhwi_p (align
))
2142 aligni
= tree_to_uhwi (align
);
/* alloc_align: the attribute value is a 1-based argument index from
   which the actual alignment is fetched.  */
2145 if (aligni
== 0 || aligni
> gimple_call_num_args (stmt
))
2147 align
= gimple_call_arg (stmt
, aligni
- 1);
2148 if (!tree_fits_uhwi_p (align
))
2150 aligni
= tree_to_uhwi (align
);
2152 else if (TREE_CHAIN (attr
) && TREE_VALUE (TREE_CHAIN (attr
)))
2154 misalign
= TREE_VALUE (TREE_CHAIN (attr
));
2155 if (!tree_fits_uhwi_p (misalign
))
2157 misaligni
= tree_to_uhwi (misalign
);
/* Sanity: alignment must be a power of two > 1 and the misalignment
   must be smaller than it.  */
2160 if (aligni
<= 1 || (aligni
& (aligni
- 1)) != 0 || misaligni
>= aligni
)
/* Model the guarantee as PTR & -ALIGN through the bit lattice.  */
2163 align
= build_int_cst_type (type
, -aligni
);
2164 alignval
= get_value_for_expr (align
, true);
2165 bit_value_binop (BIT_AND_EXPR
, TYPE_SIGN (type
), TYPE_PRECISION (type
), &value
, &mask
,
2166 TYPE_SIGN (type
), TYPE_PRECISION (type
), value_to_wide_int (ptrval
), ptrval
.mask
,
2167 TYPE_SIGN (type
), TYPE_PRECISION (type
), value_to_wide_int (alignval
), alignval
.mask
);
2169 if (wi::sext (mask
, TYPE_PRECISION (type
)) != -1)
2171 val
.lattice_val
= CONSTANT
;
/* The low alignment bits must now be known-zero.  */
2173 gcc_assert ((mask
.to_uhwi () & (aligni
- 1)) == 0);
2174 gcc_assert ((value
.to_uhwi () & (aligni
- 1)) == 0);
2176 /* ??? Delay building trees here. */
2177 val
.value
= wide_int_to_tree (type
, value
);
2181 val
.lattice_val
= VARYING
;
2182 val
.value
= NULL_TREE
;
2188 /* Evaluate statement STMT.
2189 Valid only for assignments, calls, conditionals, and switches.
   Dispatches on likely_value: try full constant folding for likely
   CONSTANT statements, then fall back to bitwise (mask) tracking, and
   finally refine with recorded nonzero-bits information.  */
2191 static ccp_prop_value_t
2192 evaluate_stmt (gimple
*stmt
)
2194 ccp_prop_value_t val
;
2195 tree simplified
= NULL_TREE
;
2196 ccp_lattice_t likelyvalue
= likely_value (stmt
);
2197 bool is_constant
= false;
2199 bool ignore_return_flags
= false;
2201 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
2203 fprintf (dump_file
, "which is likely ");
2204 switch (likelyvalue
)
2207 fprintf (dump_file
, "CONSTANT");
2210 fprintf (dump_file
, "UNDEFINED");
2213 fprintf (dump_file
, "VARYING");
2217 fprintf (dump_file
, "\n");
2220 /* If the statement is likely to have a CONSTANT result, then try
2221 to fold the statement to determine the constant value. */
2222 /* FIXME. This is the only place that we call ccp_fold.
2223 Since likely_value never returns CONSTANT for calls, we will
2224 not attempt to fold them, including builtins that may profit. */
2225 if (likelyvalue
== CONSTANT
)
2227 fold_defer_overflow_warnings ();
2228 simplified
= ccp_fold (stmt
);
2230 && TREE_CODE (simplified
) == SSA_NAME
)
2232 /* We may not use values of something that may be simulated again,
2233 see valueize_op_1. */
2234 if (SSA_NAME_IS_DEFAULT_DEF (simplified
)
2235 || ! prop_simulate_again_p (SSA_NAME_DEF_STMT (simplified
)))
2237 ccp_prop_value_t
*val
= get_value (simplified
);
2238 if (val
&& val
->lattice_val
!= VARYING
)
2240 fold_undefer_overflow_warnings (true, stmt
, 0);
2245 /* We may also not place a non-valueized copy in the lattice
2246 as that might become stale if we never re-visit this stmt. */
2247 simplified
= NULL_TREE
;
2249 is_constant
= simplified
&& is_gimple_min_invariant (simplified
);
2250 fold_undefer_overflow_warnings (is_constant
, stmt
, 0);
2253 /* The statement produced a constant value. */
2254 val
.lattice_val
= CONSTANT
;
2255 val
.value
= simplified
;
2260 /* If the statement is likely to have a VARYING result, then do not
2261 bother folding the statement. */
2262 else if (likelyvalue
== VARYING
)
2264 enum gimple_code code
= gimple_code (stmt
);
2265 if (code
== GIMPLE_ASSIGN
)
2267 enum tree_code subcode
= gimple_assign_rhs_code (stmt
);
2269 /* Other cases cannot satisfy is_gimple_min_invariant
2271 if (get_gimple_rhs_class (subcode
) == GIMPLE_SINGLE_RHS
)
2272 simplified
= gimple_assign_rhs1 (stmt
);
2274 else if (code
== GIMPLE_SWITCH
)
2275 simplified
= gimple_switch_index (as_a
<gswitch
*> (stmt
));
2277 /* These cannot satisfy is_gimple_min_invariant without folding. */
2278 gcc_assert (code
== GIMPLE_CALL
|| code
== GIMPLE_COND
);
2279 is_constant
= simplified
&& is_gimple_min_invariant (simplified
);
2282 /* The statement produced a constant value. */
2283 val
.lattice_val
= CONSTANT
;
2284 val
.value
= simplified
;
2288 /* If the statement result is likely UNDEFINED, make it so. */
2289 else if (likelyvalue
== UNDEFINED
)
2291 val
.lattice_val
= UNDEFINED
;
2292 val
.value
= NULL_TREE
;
2297 /* Resort to simplification for bitwise tracking. */
2298 if (flag_tree_bit_ccp
2299 && (likelyvalue
== CONSTANT
|| is_gimple_call (stmt
)
2300 || (gimple_assign_single_p (stmt
)
2301 && gimple_assign_rhs_code (stmt
) == ADDR_EXPR
))
2304 enum gimple_code code
= gimple_code (stmt
);
2305 val
.lattice_val
= VARYING
;
2306 val
.value
= NULL_TREE
;
/* Integral/pointer assignments: route through the bit-value helpers
   according to the RHS class.  */
2308 if (code
== GIMPLE_ASSIGN
)
2310 enum tree_code subcode
= gimple_assign_rhs_code (stmt
);
2311 tree rhs1
= gimple_assign_rhs1 (stmt
);
2312 tree lhs
= gimple_assign_lhs (stmt
);
2313 if ((INTEGRAL_TYPE_P (TREE_TYPE (lhs
))
2314 || POINTER_TYPE_P (TREE_TYPE (lhs
)))
2315 && (INTEGRAL_TYPE_P (TREE_TYPE (rhs1
))
2316 || POINTER_TYPE_P (TREE_TYPE (rhs1
))))
2317 switch (get_gimple_rhs_class (subcode
))
2319 case GIMPLE_SINGLE_RHS
:
2320 val
= get_value_for_expr (rhs1
, true);
2323 case GIMPLE_UNARY_RHS
:
2324 val
= bit_value_unop (subcode
, TREE_TYPE (lhs
), rhs1
);
2327 case GIMPLE_BINARY_RHS
:
2328 val
= bit_value_binop (subcode
, TREE_TYPE (lhs
), rhs1
,
2329 gimple_assign_rhs2 (stmt
));
2335 else if (code
== GIMPLE_COND
)
2337 enum tree_code code
= gimple_cond_code (stmt
);
2338 tree rhs1
= gimple_cond_lhs (stmt
);
2339 tree rhs2
= gimple_cond_rhs (stmt
);
2340 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1
))
2341 || POINTER_TYPE_P (TREE_TYPE (rhs1
)))
2342 val
= bit_value_binop (code
, TREE_TYPE (rhs1
), rhs1
, rhs2
);
/* Builtins with known alignment / return-value properties.  */
2344 else if (gimple_call_builtin_p (stmt
, BUILT_IN_NORMAL
))
2346 tree fndecl
= gimple_call_fndecl (stmt
);
2347 switch (DECL_FUNCTION_CODE (fndecl
))
/* Allocators: the result is aligned to MALLOC_ABI_ALIGNMENT, so the
   low bits are known zero.  */
2349 case BUILT_IN_MALLOC
:
2350 case BUILT_IN_REALLOC
:
2351 case BUILT_IN_GOMP_REALLOC
:
2352 case BUILT_IN_CALLOC
:
2353 case BUILT_IN_STRDUP
:
2354 case BUILT_IN_STRNDUP
:
2355 val
.lattice_val
= CONSTANT
;
2356 val
.value
= build_int_cst (TREE_TYPE (gimple_get_lhs (stmt
)), 0);
2357 val
.mask
= ~((HOST_WIDE_INT
) MALLOC_ABI_ALIGNMENT
2358 / BITS_PER_UNIT
- 1);
2361 CASE_BUILT_IN_ALLOCA
:
2362 align
= (DECL_FUNCTION_CODE (fndecl
) == BUILT_IN_ALLOCA
2364 : TREE_INT_CST_LOW (gimple_call_arg (stmt
, 1)));
2365 val
.lattice_val
= CONSTANT
;
2366 val
.value
= build_int_cst (TREE_TYPE (gimple_get_lhs (stmt
)), 0);
2367 val
.mask
= ~((HOST_WIDE_INT
) align
/ BITS_PER_UNIT
- 1);
2370 case BUILT_IN_ASSUME_ALIGNED
:
2371 val
= bit_value_assume_aligned (stmt
, NULL_TREE
, val
, false);
2372 ignore_return_flags
= true;
2375 case BUILT_IN_ALIGNED_ALLOC
:
2376 case BUILT_IN_GOMP_ALLOC
:
2378 tree align
= get_constant_value (gimple_call_arg (stmt
, 0));
2380 && tree_fits_uhwi_p (align
))
2382 unsigned HOST_WIDE_INT aligni
= tree_to_uhwi (align
);
2384 /* align must be power-of-two */
2385 && (aligni
& (aligni
- 1)) == 0)
2387 val
.lattice_val
= CONSTANT
;
2388 val
.value
= build_int_cst (ptr_type_node
, 0);
/* Byte swaps: swap a known constant outright, otherwise swap the mask
   (several lines of this computation are elided in this excerpt).  */
2395 case BUILT_IN_BSWAP16
:
2396 case BUILT_IN_BSWAP32
:
2397 case BUILT_IN_BSWAP64
:
2398 case BUILT_IN_BSWAP128
:
2399 val
= get_value_for_expr (gimple_call_arg (stmt
, 0), true);
2400 if (val
.lattice_val
== UNDEFINED
)
2402 else if (val
.lattice_val
== CONSTANT
2404 && TREE_CODE (val
.value
) == INTEGER_CST
)
2406 tree type
= TREE_TYPE (gimple_call_lhs (stmt
));
2407 int prec
= TYPE_PRECISION (type
);
2408 wide_int wval
= wi::to_wide (val
.value
);
2410 = wide_int_to_tree (type
,
2411 wi::bswap (wide_int::from (wval
, prec
,
2414 = widest_int::from (wi::bswap (wide_int::from (val
.mask
,
2418 if (wi::sext (val
.mask
, prec
) != -1)
2421 val
.lattice_val
= VARYING
;
2422 val
.value
= NULL_TREE
;
/* Generic call post-processing: honor assume_aligned / alloc_align
   type attributes and returns-argument flags.  */
2429 if (is_gimple_call (stmt
) && gimple_call_lhs (stmt
))
2431 tree fntype
= gimple_call_fntype (stmt
);
2434 tree attrs
= lookup_attribute ("assume_aligned",
2435 TYPE_ATTRIBUTES (fntype
));
2437 val
= bit_value_assume_aligned (stmt
, attrs
, val
, false);
2438 attrs
= lookup_attribute ("alloc_align",
2439 TYPE_ATTRIBUTES (fntype
));
2441 val
= bit_value_assume_aligned (stmt
, attrs
, val
, true);
2443 int flags
= ignore_return_flags
2444 ? 0 : gimple_call_return_flags (as_a
<gcall
*> (stmt
));
2445 if (flags
& ERF_RETURNS_ARG
2446 && (flags
& ERF_RETURN_ARG_MASK
) < gimple_call_num_args (stmt
))
2448 val
= get_value_for_expr
2449 (gimple_call_arg (stmt
,
2450 flags
& ERF_RETURN_ARG_MASK
), true);
2453 is_constant
= (val
.lattice_val
== CONSTANT
);
/* Refine the result with nonzero-bits information recorded on the
   LHS SSA name, if any.  */
2456 if (flag_tree_bit_ccp
2457 && ((is_constant
&& TREE_CODE (val
.value
) == INTEGER_CST
)
2459 && gimple_get_lhs (stmt
)
2460 && TREE_CODE (gimple_get_lhs (stmt
)) == SSA_NAME
)
2462 tree lhs
= gimple_get_lhs (stmt
);
2463 wide_int nonzero_bits
= get_nonzero_bits (lhs
);
2464 if (nonzero_bits
!= -1)
2468 val
.lattice_val
= CONSTANT
;
2469 val
.value
= build_zero_cst (TREE_TYPE (lhs
));
2470 val
.mask
= extend_mask (nonzero_bits
, TYPE_SIGN (TREE_TYPE (lhs
)));
/* Clear any constant bits that nonzero_bits proves are zero.  */
2475 if (wi::bit_and_not (wi::to_wide (val
.value
), nonzero_bits
) != 0)
2476 val
.value
= wide_int_to_tree (TREE_TYPE (lhs
),
2478 & wi::to_wide (val
.value
))
2479 if (nonzero_bits
== 0)
2482 val
.mask
= val
.mask
& extend_mask (nonzero_bits
,
2483 TYPE_SIGN (TREE_TYPE (lhs
)));
2488 /* The statement produced a nonconstant value. */
2491 /* The statement produced a copy. */
2492 if (simplified
&& TREE_CODE (simplified
) == SSA_NAME
2493 && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (simplified
))
2495 val
.lattice_val
= CONSTANT
;
2496 val
.value
= simplified
;
2499 /* The statement is VARYING. */
2502 val
.lattice_val
= VARYING
;
2503 val
.value
= NULL_TREE
;
/* Hash table of gimple statement pointers (pointer-hashed, the table does
   not free its entries); used below to remember which PHI nodes have
   already been visited while chasing uses of a __builtin_stack_save
   result.  */
2511 typedef hash_table
<nofree_ptr_hash
<gimple
> > gimple_htab
;
2513 /* Given a BUILT_IN_STACK_SAVE value SAVED_VAL, insert a clobber of VAR before
2514 each matching BUILT_IN_STACK_RESTORE. Mark visited phis in VISITED. */
2517 insert_clobber_before_stack_restore (tree saved_val
, tree var
,
2518 gimple_htab
**visited
)
2521 gassign
*clobber_stmt
;
2523 imm_use_iterator iter
;
2524 gimple_stmt_iterator i
;
/* Walk every immediate use of SAVED_VAL and classify it.  */
2527 FOR_EACH_IMM_USE_STMT (stmt
, iter
, saved_val
)
2528 if (gimple_call_builtin_p (stmt
, BUILT_IN_STACK_RESTORE
))
/* A matching stack-restore: emit "VAR = CLOBBER" just before it so the
   storage of VAR is known dead once the stack is restored.  */
2530 clobber
= build_clobber (TREE_TYPE (var
), CLOBBER_STORAGE_END
);
2531 clobber_stmt
= gimple_build_assign (var
, clobber
);
2533 i
= gsi_for_stmt (stmt
);
2534 gsi_insert_before (&i
, clobber_stmt
, GSI_SAME_STMT
);
2536 else if (gimple_code (stmt
) == GIMPLE_PHI
)
/* The saved value flows through a PHI: lazily allocate the visited
   table, record the PHI in it (so cyclic PHI webs terminate), and
   recurse on the PHI result.  */
2539 *visited
= new gimple_htab (10);
2541 slot
= (*visited
)->find_slot (stmt
, INSERT
);
2546 insert_clobber_before_stack_restore (gimple_phi_result (stmt
), var
,
/* Plain SSA-name copies of the saved value are followed as well.  */
2549 else if (gimple_assign_ssa_name_copy_p (stmt
))
2550 insert_clobber_before_stack_restore (gimple_assign_lhs (stmt
), var
,
2554 /* Advance the iterator to the previous non-debug gimple statement in the same
2555 or dominating basic block. */
2558 gsi_prev_dom_bb_nondebug (gimple_stmt_iterator
*i
)
/* Step back one non-debug statement; when that walks off the start of the
   block, continue at the last statement of the immediate dominator,
   stopping once there is no dominator or we reach the entry block.  */
2562 gsi_prev_nondebug (i
);
2563 while (gsi_end_p (*i
))
2565 dom
= get_immediate_dominator (CDI_DOMINATORS
, gsi_bb (*i
));
2566 if (dom
== NULL
|| dom
== ENTRY_BLOCK_PTR_FOR_FN (cfun
))
2569 *i
= gsi_last_bb (dom
);
2573 /* Find a BUILT_IN_STACK_SAVE dominating gsi_stmt (I), and insert
2574 a clobber of VAR before each matching BUILT_IN_STACK_RESTORE.
2576 It is possible that BUILT_IN_STACK_SAVE cannot be found in a dominator when
2577 a previous pass (such as DOM) duplicated it along multiple paths to a BB.
2578 In that case the function gives up without inserting the clobbers. */
2581 insert_clobbers_for_var (gimple_stmt_iterator i
, tree var
)
2585 gimple_htab
*visited
= NULL
;
/* Scan backwards through dominating statements looking for the
   stack-save whose value pairs with the restores.  */
2587 for (; !gsi_end_p (i
); gsi_prev_dom_bb_nondebug (&i
))
2589 stmt
= gsi_stmt (i
);
2591 if (!gimple_call_builtin_p (stmt
, BUILT_IN_STACK_SAVE
))
2594 saved_val
= gimple_call_lhs (stmt
);
2595 if (saved_val
== NULL_TREE
)
/* Found the save; clobber VAR before every matching restore.  */
2598 insert_clobber_before_stack_restore (saved_val
, var
, &visited
);
2605 /* Detects a __builtin_alloca_with_align with constant size argument. Declares
2606 fixed-size array and returns the address, if found, otherwise returns
2610 fold_builtin_alloca_with_align (gimple
*stmt
)
2612 unsigned HOST_WIDE_INT size
, threshold
, n_elem
;
2613 tree lhs
, arg
, block
, var
, elem_type
, array_type
;
/* Nothing to do if the alloca result is unused.  */
2616 lhs
= gimple_call_lhs (stmt
);
2617 if (lhs
== NULL_TREE
)
2620 /* Detect constant argument. */
2621 arg
= get_constant_value (gimple_call_arg (stmt
, 0));
2622 if (arg
== NULL_TREE
2623 || TREE_CODE (arg
) != INTEGER_CST
2624 || !tree_fits_uhwi_p (arg
))
2627 size
= tree_to_uhwi (arg
);
2629 /* Heuristic: don't fold large allocas. */
2630 threshold
= (unsigned HOST_WIDE_INT
)param_large_stack_frame
;
2631 /* In case the alloca is located at function entry, it has the same lifetime
2632 as a declared array, so we allow a larger size. */
2633 block
= gimple_block (stmt
);
2634 if (!(cfun
->after_inlining
2636 && TREE_CODE (BLOCK_SUPERCONTEXT (block
)) == FUNCTION_DECL
))
2638 if (size
> threshold
)
2641 /* We have to be able to move points-to info. We used to assert
2642 that we can but IPA PTA might end up with two UIDs here
2643 as it might need to handle more than one instance being
2644 live at the same time. Instead of trying to detect this case
2645 (using the first UID would be OK) just give up for now. */
2646 struct ptr_info_def
*pi
= SSA_NAME_PTR_INFO (lhs
);
2650 && !pt_solution_singleton_or_null_p (&pi
->pt
, &uid
))
2653 /* Declare array. */
2654 elem_type
= build_nonstandard_integer_type (BITS_PER_UNIT
, 1);
/* NOTE(review): size is in bytes, so "size * 8 / BITS_PER_UNIT" is a
   no-op on BITS_PER_UNIT == 8 targets; presumably kept for unusual
   byte widths — confirm against upstream history.  */
2655 n_elem
= size
* 8 / BITS_PER_UNIT
;
2656 array_type
= build_array_type_nelts (elem_type
, n_elem
);
2658 if (tree ssa_name
= SSA_NAME_IDENTIFIER (lhs
))
2660 /* Give the temporary a name derived from the name of the VLA
2661 declaration so it can be referenced in diagnostics. */
2662 const char *name
= IDENTIFIER_POINTER (ssa_name
);
2663 var
= create_tmp_var (array_type
, name
);
2666 var
= create_tmp_var (array_type
);
2668 if (gimple
*lhsdef
= SSA_NAME_DEF_STMT (lhs
))
2670 /* Set the temporary's location to that of the VLA declaration
2671 so it can be pointed to in diagnostics. */
2672 location_t loc
= gimple_location (lhsdef
);
2673 DECL_SOURCE_LOCATION (var
) = loc
;
/* The alignment requested by the second call argument carries over to
   the replacement array, as does the points-to UID.  */
2676 SET_DECL_ALIGN (var
, TREE_INT_CST_LOW (gimple_call_arg (stmt
, 1)));
2678 SET_DECL_PT_UID (var
, uid
);
2680 /* Fold alloca to the address of the array. */
2681 return fold_convert (TREE_TYPE (lhs
), build_fold_addr_expr (var
));
2684 /* Fold the stmt at *GSI with CCP specific information that propagating
2685 and regular folding does not catch. */
2688 ccp_folder::fold_stmt (gimple_stmt_iterator
*gsi
)
2690 gimple
*stmt
= gsi_stmt (*gsi
);
2692 switch (gimple_code (stmt
))
/* GIMPLE_COND: re-evaluate the predicate through the CCP lattice and, if
   it is a known constant, rewrite the condition to a static true/false.  */
2696 gcond
*cond_stmt
= as_a
<gcond
*> (stmt
);
2697 ccp_prop_value_t val
;
2698 /* Statement evaluation will handle type mismatches in constants
2699 more gracefully than the final propagation. This allows us to
2700 fold more conditionals here. */
2701 val
= evaluate_stmt (stmt
);
2702 if (val
.lattice_val
!= CONSTANT
2708 fprintf (dump_file
, "Folding predicate ");
2709 print_gimple_expr (dump_file
, stmt
, 0);
2710 fprintf (dump_file
, " to ");
2711 print_generic_expr (dump_file
, val
.value
);
2712 fprintf (dump_file
, "\n");
2715 if (integer_zerop (val
.value
))
2716 gimple_cond_make_false (cond_stmt
);
2718 gimple_cond_make_true (cond_stmt
);
/* GIMPLE_CALL handling starts here.  */
2725 tree lhs
= gimple_call_lhs (stmt
);
2726 int flags
= gimple_call_flags (stmt
);
2729 bool changed
= false;
2732 /* If the call was folded into a constant make sure it goes
2733 away even if we cannot propagate into all uses because of
2736 && TREE_CODE (lhs
) == SSA_NAME
2737 && (val
= get_constant_value (lhs
))
2738 /* Don't optimize away calls that have side-effects. */
2739 && (flags
& (ECF_CONST
|ECF_PURE
)) != 0
2740 && (flags
& ECF_LOOPING_CONST_OR_PURE
) == 0)
2742 tree new_rhs
= unshare_expr (val
);
2743 if (!useless_type_conversion_p (TREE_TYPE (lhs
),
2744 TREE_TYPE (new_rhs
)))
2745 new_rhs
= fold_convert (TREE_TYPE (lhs
), new_rhs
);
2746 gimplify_and_update_call_from_tree (gsi
, new_rhs
);
2750 /* Internal calls provide no argument types, so the extra laxity
2751 for normal calls does not apply. */
2752 if (gimple_call_internal_p (stmt
))
2755 /* The heuristic of fold_builtin_alloca_with_align differs before and
2756 after inlining, so we don't require the arg to be changed into a
2757 constant for folding, but just to be constant. */
2758 if (gimple_call_builtin_p (stmt
, BUILT_IN_ALLOCA_WITH_ALIGN
)
2759 || gimple_call_builtin_p (stmt
, BUILT_IN_ALLOCA_WITH_ALIGN_AND_MAX
))
2761 tree new_rhs
= fold_builtin_alloca_with_align (stmt
);
2764 gimplify_and_update_call_from_tree (gsi
, new_rhs
);
/* The replacement is &ARRAY; dig out ARRAY and clobber it before each
   paired stack-restore so its lifetime stays bounded.  */
2765 tree var
= TREE_OPERAND (TREE_OPERAND (new_rhs
, 0),0);
2766 insert_clobbers_for_var (*gsi
, var
);
2771 /* If there's no extra info from an assume_aligned call,
2772 drop it so it doesn't act as otherwise useless dataflow
2774 if (gimple_call_builtin_p (stmt
, BUILT_IN_ASSUME_ALIGNED
))
2776 tree ptr
= gimple_call_arg (stmt
, 0);
2777 ccp_prop_value_t ptrval
= get_value_for_expr (ptr
, true);
2778 if (ptrval
.lattice_val
== CONSTANT
2779 && TREE_CODE (ptrval
.value
) == INTEGER_CST
2780 && ptrval
.mask
!= 0)
2782 ccp_prop_value_t val
2783 = bit_value_assume_aligned (stmt
, NULL_TREE
, ptrval
, false);
/* Compare the alignment knowledge with and without the builtin; if they
   agree the call adds nothing and can become a plain copy of PTR.  */
2784 unsigned int ptralign
= least_bit_hwi (ptrval
.mask
.to_uhwi ());
2785 unsigned int align
= least_bit_hwi (val
.mask
.to_uhwi ());
2786 if (ptralign
== align
2787 && ((TREE_INT_CST_LOW (ptrval
.value
) & (align
- 1))
2788 == (TREE_INT_CST_LOW (val
.value
) & (align
- 1))))
2790 replace_call_with_value (gsi
, ptr
);
2796 /* Propagate into the call arguments. Compared to replace_uses_in
2797 this can use the argument slot types for type verification
2798 instead of the current argument type. We also can safely
2799 drop qualifiers here as we are dealing with constants anyway. */
2800 argt
= TYPE_ARG_TYPES (gimple_call_fntype (stmt
));
2801 for (i
= 0; i
< gimple_call_num_args (stmt
) && argt
;
2802 ++i
, argt
= TREE_CHAIN (argt
))
2804 tree arg
= gimple_call_arg (stmt
, i
);
2805 if (TREE_CODE (arg
) == SSA_NAME
2806 && (val
= get_constant_value (arg
))
2807 && useless_type_conversion_p
2808 (TYPE_MAIN_VARIANT (TREE_VALUE (argt
)),
2809 TYPE_MAIN_VARIANT (TREE_TYPE (val
))))
2811 gimple_call_set_arg (stmt
, i
, unshare_expr (val
));
/* GIMPLE_ASSIGN handling starts here.  */
2821 tree lhs
= gimple_assign_lhs (stmt
);
2824 /* If we have a load that turned out to be constant replace it
2825 as we cannot propagate into all uses in all cases. */
2826 if (gimple_assign_single_p (stmt
)
2827 && TREE_CODE (lhs
) == SSA_NAME
2828 && (val
= get_constant_value (lhs
)))
2830 tree rhs
= unshare_expr (val
);
2831 if (!useless_type_conversion_p (TREE_TYPE (lhs
), TREE_TYPE (rhs
)))
2832 rhs
= fold_build1 (VIEW_CONVERT_EXPR
, TREE_TYPE (lhs
), rhs
);
2833 gimple_assign_set_rhs_from_tree (gsi
, rhs
);
2845 /* Visit the assignment statement STMT. Set the value of its LHS to the
2846 value computed by the RHS and store LHS in *OUTPUT_P. If STMT
2847 creates virtual definitions, set the value of each new name to that
2848 of the RHS (if we can derive a constant out of the RHS).
2849 Value-returning call statements also perform an assignment, and
2850 are handled here. */
2852 static enum ssa_prop_result
2853 visit_assignment (gimple
*stmt
, tree
*output_p
)
2855 ccp_prop_value_t val
;
2856 enum ssa_prop_result retval
= SSA_PROP_NOT_INTERESTING
;
2858 tree lhs
= gimple_get_lhs (stmt
);
2859 if (TREE_CODE (lhs
) == SSA_NAME
)
2861 /* Evaluate the statement, which could be
2862 either a GIMPLE_ASSIGN or a GIMPLE_CALL. */
2863 val
= evaluate_stmt (stmt
);
2865 /* If STMT is an assignment to an SSA_NAME, we only have one
2867 if (set_lattice_value (lhs
, &val
))
/* The lattice value changed: report VARYING when the new value is
   VARYING, INTERESTING otherwise so uses get re-simulated.  */
2870 if (val
.lattice_val
== VARYING
)
2871 retval
= SSA_PROP_VARYING
;
2873 retval
= SSA_PROP_INTERESTING
;
2881 /* Visit the conditional statement STMT. Return SSA_PROP_INTERESTING
2882 if it can determine which edge will be taken. Otherwise, return
2883 SSA_PROP_VARYING. */
2885 static enum ssa_prop_result
2886 visit_cond_stmt (gimple
*stmt
, edge
*taken_edge_p
)
2888 ccp_prop_value_t val
;
2891 block
= gimple_bb (stmt
);
2892 val
= evaluate_stmt (stmt
);
/* A non-constant predicate means both outgoing edges are feasible.  */
2893 if (val
.lattice_val
!= CONSTANT
2895 return SSA_PROP_VARYING
;
2897 /* Find which edge out of the conditional block will be taken and add it
2898 to the worklist. If no single edge can be determined statically,
2899 return SSA_PROP_VARYING to feed all the outgoing edges to the
2900 propagation engine. */
2901 *taken_edge_p
= find_taken_edge (block
, val
.value
);
2903 return SSA_PROP_INTERESTING
;
2905 return SSA_PROP_VARYING
;
2909 /* Evaluate statement STMT. If the statement produces an output value and
2910 its evaluation changes the lattice value of its output, return
2911 SSA_PROP_INTERESTING and set *OUTPUT_P to the SSA_NAME holding the
2914 If STMT is a conditional branch and we can determine its truth
2915 value, set *TAKEN_EDGE_P accordingly. If STMT produces a varying
2916 value, return SSA_PROP_VARYING. */
2918 enum ssa_prop_result
2919 ccp_propagate::visit_stmt (gimple
*stmt
, edge
*taken_edge_p
, tree
*output_p
)
2924 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
2926 fprintf (dump_file
, "\nVisiting statement:\n");
2927 print_gimple_stmt (dump_file
, stmt
, 0, dump_flags
);
/* Dispatch on statement kind: assignments, value-returning calls and
   conditionals are simulated; everything else defaults to VARYING.  */
2930 switch (gimple_code (stmt
))
2933 /* If the statement is an assignment that produces a single
2934 output value, evaluate its RHS to see if the lattice value of
2935 its output has changed. */
2936 return visit_assignment (stmt
, output_p
);
2939 /* A value-returning call also performs an assignment. */
2940 if (gimple_call_lhs (stmt
) != NULL_TREE
)
2941 return visit_assignment (stmt
, output_p
);
2946 /* If STMT is a conditional branch, see if we can determine
2947 which branch will be taken. */
2948 /* FIXME. It appears that we should be able to optimize
2949 computed GOTOs here as well. */
2950 return visit_cond_stmt (stmt
, taken_edge_p
);
2956 /* Any other kind of statement is not interesting for constant
2957 propagation and, therefore, not worth simulating. */
2958 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
2959 fprintf (dump_file
, "No interesting values produced. Marked VARYING.\n");
2961 /* Definitions made by statements other than assignments to
2962 SSA_NAMEs represent unknown modifications to their outputs.
2963 Mark them VARYING. */
2964 FOR_EACH_SSA_TREE_OPERAND (def
, stmt
, iter
, SSA_OP_ALL_DEFS
)
2965 set_value_varying (def
);
2967 return SSA_PROP_VARYING
;
2971 /* Main entry point for SSA Conditional Constant Propagation. If NONZERO_P,
2972 record nonzero bits. */
2975 do_ssa_ccp (bool nonzero_p
)
2977 unsigned int todo
= 0;
/* Dominator info is needed (e.g. for clobber insertion during folding);
   computed here and freed at the end.  */
2978 calculate_dominance_info (CDI_DOMINATORS
);
2981 class ccp_propagate ccp_propagate
;
2982 ccp_propagate
.ssa_propagate ();
/* ccp_finalize returning true means statements were changed and the
   CFG / SSA form must be cleaned up.  */
2983 if (ccp_finalize (nonzero_p
|| flag_ipa_bit_cp
))
2985 todo
= (TODO_cleanup_cfg
| TODO_update_ssa
);
2987 /* ccp_finalize does not preserve loop-closed ssa. */
2988 loops_state_clear (LOOP_CLOSED_SSA
);
2991 free_dominance_info (CDI_DOMINATORS
);
/* Static pass descriptor for the CCP gimple pass; consumed by the
   pass_ccp constructor below.  */
2998 const pass_data pass_data_ccp
=
3000 GIMPLE_PASS
, /* type */
3002 OPTGROUP_NONE
, /* optinfo_flags */
3003 TV_TREE_CCP
, /* tv_id */
3004 ( PROP_cfg
| PROP_ssa
), /* properties_required */
3005 0, /* properties_provided */
3006 0, /* properties_destroyed */
3007 0, /* todo_flags_start */
3008 TODO_update_address_taken
, /* todo_flags_finish */
/* Pass wrapper around do_ssa_ccp.  The single pass parameter selects
   whether this instance records nonzero bits (see set_pass_param).  */
3011 class pass_ccp
: public gimple_opt_pass
3014 pass_ccp (gcc::context
*ctxt
)
3015 : gimple_opt_pass (pass_data_ccp
, ctxt
), nonzero_p (false)
3018 /* opt_pass methods: */
3019 opt_pass
* clone () final override
{ return new pass_ccp (m_ctxt
); }
3020 void set_pass_param (unsigned int n
, bool param
) final override
3022 gcc_assert (n
== 0);
/* Gated on -ftree-ccp.  */
3025 bool gate (function
*) final override
{ return flag_tree_ccp
!= 0; }
3026 unsigned int execute (function
*) final override
3028 return do_ssa_ccp (nonzero_p
);
3032 /* Determines whether the pass instance records nonzero bits. */
3034 }; // class pass_ccp
/* Factory used by the pass manager to instantiate the CCP pass.  */
3039 make_pass_ccp (gcc::context
*ctxt
)
3041 return new pass_ccp (ctxt
);
3046 /* Try to optimize out __builtin_stack_restore. Optimize it out
3047 if there is another __builtin_stack_restore in the same basic
3048 block and no calls or ASM_EXPRs are in between, or if this block's
3049 only outgoing edge is to EXIT_BLOCK and there are no calls or
3050 ASM_EXPRs after this __builtin_stack_restore. */
3053 optimize_stack_restore (gimple_stmt_iterator i
)
3058 basic_block bb
= gsi_bb (i
);
3059 gimple
*call
= gsi_stmt (i
);
/* Sanity-check the call shape: one pointer-typed SSA argument.  */
3061 if (gimple_code (call
) != GIMPLE_CALL
3062 || gimple_call_num_args (call
) != 1
3063 || TREE_CODE (gimple_call_arg (call
, 0)) != SSA_NAME
3064 || !POINTER_TYPE_P (TREE_TYPE (gimple_call_arg (call
, 0))))
/* Scan the rest of the block: give up on asm or any call that might
   care about the stack pointer; a later stack-restore makes this one
   redundant.  */
3067 for (gsi_next (&i
); !gsi_end_p (i
); gsi_next (&i
))
3069 stmt
= gsi_stmt (i
);
3070 if (gimple_code (stmt
) == GIMPLE_ASM
)
3072 if (gimple_code (stmt
) != GIMPLE_CALL
)
3075 callee
= gimple_call_fndecl (stmt
);
3077 || !fndecl_built_in_p (callee
, BUILT_IN_NORMAL
)
3078 /* All regular builtins are ok, just obviously not alloca. */
3079 || ALLOCA_FUNCTION_CODE_P (DECL_FUNCTION_CODE (callee
))
3080 /* Do not remove stack updates before strub leave. */
3081 || fndecl_built_in_p (callee
, BUILT_IN___STRUB_LEAVE
))
3084 if (fndecl_built_in_p (callee
, BUILT_IN_STACK_RESTORE
))
3085 goto second_stack_restore
;
3091 /* Allow one successor of the exit block, or zero successors. */
3092 switch (EDGE_COUNT (bb
->succs
))
3097 if (single_succ_edge (bb
)->dest
!= EXIT_BLOCK_PTR_FOR_FN (cfun
))
3103 second_stack_restore
:
3105 /* If there's exactly one use, then zap the call to __builtin_stack_save.
3106 If there are multiple uses, then the last one should remove the call.
3107 In any case, whether the call to __builtin_stack_save can be removed
3108 or not is irrelevant to removing the call to __builtin_stack_restore. */
3109 if (has_single_use (gimple_call_arg (call
, 0)))
3111 gimple
*stack_save
= SSA_NAME_DEF_STMT (gimple_call_arg (call
, 0));
3112 if (is_gimple_call (stack_save
))
3114 callee
= gimple_call_fndecl (stack_save
);
3115 if (callee
&& fndecl_built_in_p (callee
, BUILT_IN_STACK_SAVE
))
3117 gimple_stmt_iterator stack_save_gsi
;
/* Replace the save's result with a null pointer so the save itself
   becomes dead and removable.  */
3120 stack_save_gsi
= gsi_for_stmt (stack_save
);
3121 rhs
= build_int_cst (TREE_TYPE (gimple_call_arg (call
, 0)), 0);
3122 replace_call_with_value (&stack_save_gsi
, rhs
);
3127 /* No effect, so the statement will be deleted. */
3128 return integer_zero_node
;
3131 /* If va_list type is a simple pointer and nothing special is needed,
3132 optimize __builtin_va_start (&ap, 0) into ap = __builtin_next_arg (0),
3133 __builtin_va_end (&ap) out as NOP and __builtin_va_copy into a simple
3134 pointer assignment. */
3137 optimize_stdarg_builtin (gimple
*call
)
3139 tree callee
, lhs
, rhs
, cfun_va_list
;
3140 bool va_list_simple_ptr
;
3141 location_t loc
= gimple_location (call
);
3143 callee
= gimple_call_fndecl (call
);
/* The optimization only applies when the target's va_list for this
   function is a plain void*/char* pointer.  */
3145 cfun_va_list
= targetm
.fn_abi_va_list (callee
);
3146 va_list_simple_ptr
= POINTER_TYPE_P (cfun_va_list
)
3147 && (TREE_TYPE (cfun_va_list
) == void_type_node
3148 || TREE_TYPE (cfun_va_list
) == char_type_node
);
3150 switch (DECL_FUNCTION_CODE (callee
))
3152 case BUILT_IN_VA_START
:
3153 if (!va_list_simple_ptr
3154 || targetm
.expand_builtin_va_start
!= NULL
3155 || !builtin_decl_explicit_p (BUILT_IN_NEXT_ARG
))
3158 if (gimple_call_num_args (call
) != 2)
/* First argument must be a pointer to the function's va_list type.  */
3161 lhs
= gimple_call_arg (call
, 0)
;
3162 if (!POINTER_TYPE_P (TREE_TYPE (lhs
))
3163 || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs
)))
3164 != TYPE_MAIN_VARIANT (cfun_va_list
))
/* Build "*ap = (va_list) __builtin_next_arg (0)".  */
3167 lhs
= build_fold_indirect_ref_loc (loc
, lhs
);
3168 rhs
= build_call_expr_loc (loc
, builtin_decl_explicit (BUILT_IN_NEXT_ARG
),
3169 1, integer_zero_node
);
3170 rhs
= fold_convert_loc (loc
, TREE_TYPE (lhs
), rhs
);
3171 return build2 (MODIFY_EXPR
, TREE_TYPE (lhs
), lhs
, rhs
);
3173 case BUILT_IN_VA_COPY
:
3174 if (!va_list_simple_ptr
)
3177 if (gimple_call_num_args (call
) != 2)
3180 lhs
= gimple_call_arg (call
, 0);
3181 if (!POINTER_TYPE_P (TREE_TYPE (lhs
))
3182 || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs
)))
3183 != TYPE_MAIN_VARIANT (cfun_va_list
))
/* Build "*dst = src" as a plain pointer assignment.  */
3186 lhs
= build_fold_indirect_ref_loc (loc
, lhs
);
3187 rhs
= gimple_call_arg (call
, 1);
3188 if (TYPE_MAIN_VARIANT (TREE_TYPE (rhs
))
3189 != TYPE_MAIN_VARIANT (cfun_va_list
))
3192 rhs
= fold_convert_loc (loc
, TREE_TYPE (lhs
), rhs
);
3193 return build2 (MODIFY_EXPR
, TREE_TYPE (lhs
), lhs
, rhs
);
3195 case BUILT_IN_VA_END
:
3196 /* No effect, so the statement will be deleted. */
3197 return integer_zero_node
;
3204 /* Attemp to make the block of __builtin_unreachable I unreachable by changing
3205 the incoming jumps. Return true if at least one jump was changed. */
3208 optimize_unreachable (gimple_stmt_iterator i
)
3210 basic_block bb
= gsi_bb (i
);
3211 gimple_stmt_iterator gsi
;
/* With -fsanitize=unreachable the trap must stay reachable.  */
3217 if (flag_sanitize
& SANITIZE_UNREACHABLE
)
/* Skip over debug stmts and non-forced labels to check whether the
   __builtin_unreachable is effectively first in the block.  */
3220 for (gsi
= gsi_start_bb (bb
); !gsi_end_p (gsi
); gsi_next (&gsi
))
3222 stmt
= gsi_stmt (gsi
);
3224 if (is_gimple_debug (stmt
))
3227 if (glabel
*label_stmt
= dyn_cast
<glabel
*> (stmt
))
3229 /* Verify we do not need to preserve the label. */
3230 if (FORCED_LABEL (gimple_label_label (label_stmt
)))
3236 /* Only handle the case that __builtin_unreachable is the first statement
3237 in the block. We rely on DCE to remove stmts without side-effects
3238 before __builtin_unreachable. */
3239 if (gsi_stmt (gsi
) != gsi_stmt (i
))
/* Redirect each incoming conditional edge away from this block by
   forcing its condition to the opposite constant.  */
3244 FOR_EACH_EDGE (e
, ei
, bb
->preds
)
3246 gsi
= gsi_last_bb (e
->src
)
;
3247 if (gsi_end_p (gsi
))
3250 stmt
= gsi_stmt (gsi
);
3251 if (gcond
*cond_stmt
= dyn_cast
<gcond
*> (stmt
))
3253 if (e
->flags
& EDGE_TRUE_VALUE
)
3254 gimple_cond_make_false (cond_stmt
);
3255 else if (e
->flags
& EDGE_FALSE_VALUE
)
3256 gimple_cond_make_true (cond_stmt
);
3259 update_stmt (cond_stmt
);
3263 /* Todo: handle other cases. Note that unreachable switch case
3264 statements have already been removed. */
3275 _1 = __atomic_fetch_or_* (ptr_6, 1, _3);
3279 _1 = __atomic_fetch_or_* (ptr_6, 1, _3);
3283 _1 = __atomic_fetch_and_* (ptr_6, ~1, _3);
3287 _1 = __atomic_fetch_and_* (ptr_6, ~1, _3);
3291 USE_STMT is the gimplt statement which uses the return value of
3292 __atomic_fetch_or_*. LHS is the return value of __atomic_fetch_or_*.
3293 MASK is the mask passed to __atomic_fetch_or_*.
3297 convert_atomic_bit_not (enum internal_fn fn
, gimple
*use_stmt
,
3298 tree lhs
, tree mask
)
/* FN selects whether this is a reset (fetch_and with ~1) or a set
   (fetch_or with 1); validate MASK accordingly.  */
3301 if (fn
== IFN_ATOMIC_BIT_TEST_AND_RESET
)
3303 /* MASK must be ~1. */
3304 if (!operand_equal_p (build_int_cst (TREE_TYPE (lhs
),
3305 ~HOST_WIDE_INT_1
), mask
, 0))
3307 and_mask
= build_int_cst (TREE_TYPE (lhs
), 1);
3311 /* MASK must be 1. */
3312 if (!operand_equal_p (build_int_cst (TREE_TYPE (lhs
), 1), mask
, 0))
3317 tree use_lhs
= gimple_assign_lhs (use_stmt
);
3319 use_operand_p use_p
;
3320 gimple
*use_not_stmt
;
/* The BIT_NOT result must feed exactly one conversion to bool.  */
3322 if (!single_imm_use (use_lhs
, &use_p
, &use_not_stmt
)
3323 || !is_gimple_assign (use_not_stmt
))
3326 if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (use_not_stmt
)))
3329 tree use_not_lhs
= gimple_assign_lhs (use_not_stmt
);
3330 if (TREE_CODE (TREE_TYPE (use_not_lhs
)) != BOOLEAN_TYPE
)
/* Rewrite: drop the old use, materialize "var = lhs & and_mask" and
   replace the conversion with "use_not_lhs = (var == 0)".  */
3333 gimple_stmt_iterator gsi
;
3334 gsi
= gsi_for_stmt (use_stmt
);
3335 gsi_remove (&gsi
, true);
3336 tree var
= make_ssa_name (TREE_TYPE (lhs
));
3337 use_stmt
= gimple_build_assign (var
, BIT_AND_EXPR
, lhs
, and_mask
);
3338 gsi
= gsi_for_stmt (use_not_stmt
);
3339 gsi_insert_before (&gsi
, use_stmt
, GSI_NEW_STMT
);
3340 lhs
= gimple_assign_lhs (use_not_stmt
);
3341 gimple
*g
= gimple_build_assign (lhs
, EQ_EXPR
, var
,
3342 build_zero_cst (TREE_TYPE (mask
)));
3343 gsi_insert_after (&gsi
, g
, GSI_NEW_STMT
);
3344 gsi
= gsi_for_stmt (use_not_stmt
);
3345 gsi_remove (&gsi
, true);
3349 /* match.pd function to match atomic_bit_test_and pattern which
3351 _1 = __atomic_fetch_or_4 (&v, 1, 0);
/* Forward declarations for the match.pd-generated predicates used by
   optimize_atomic_bit_test_and below.  */
3355 extern bool gimple_nop_atomic_bit_test_and_p (tree
, tree
*,
3357 extern bool gimple_nop_convert (tree
, tree
*, tree (*) (tree
));
3360 mask_2 = 1 << cnt_1;
3361 _4 = __atomic_fetch_or_* (ptr_6, mask_2, _3);
3364 _4 = .ATOMIC_BIT_TEST_AND_SET (ptr_6, cnt_1, 0, _3);
3366 If _5 is only used in _5 != 0 or _5 == 0 comparisons, 1
3367 is passed instead of 0, and the builtin just returns a zero
3368 or 1 value instead of the actual bit.
3369 Similarly for __sync_fetch_and_or_* (without the ", _3" part
3370 in there), and/or if mask_2 is a power of 2 constant.
3371 Similarly for xor instead of or, use ATOMIC_BIT_TEST_AND_COMPLEMENT
3372 in that case. And similarly for and instead of or, except that
3373 the second argument to the builtin needs to be one's complement
3374 of the mask instead of mask. */
3377 optimize_atomic_bit_test_and (gimple_stmt_iterator
*gsip
,
3378 enum internal_fn fn
, bool has_model_arg
,
3381 gimple
*call
= gsi_stmt (*gsip
);
3382 tree lhs
= gimple_call_lhs (call
);
3383 use_operand_p use_p
;
3388 if (!flag_inline_atomics
3390 || !gimple_call_builtin_p (call
, BUILT_IN_NORMAL
)
3392 || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs
)
3393 || !single_imm_use (lhs
, &use_p
, &use_stmt
)
3394 || !is_gimple_assign (use_stmt
)
3395 || !gimple_vdef (call
))
3400 case IFN_ATOMIC_BIT_TEST_AND_SET
:
3401 optab
= atomic_bit_test_and_set_optab
;
3403 case IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT
:
3404 optab
= atomic_bit_test_and_complement_optab
;
3406 case IFN_ATOMIC_BIT_TEST_AND_RESET
:
3407 optab
= atomic_bit_test_and_reset_optab
;
3415 mask
= gimple_call_arg (call
, 1);
3416 tree_code rhs_code
= gimple_assign_rhs_code (use_stmt
);
3417 if (rhs_code
!= BIT_AND_EXPR
)
3419 if (rhs_code
!= NOP_EXPR
&& rhs_code
!= BIT_NOT_EXPR
)
3422 tree use_lhs
= gimple_assign_lhs (use_stmt
);
3423 if (TREE_CODE (use_lhs
) == SSA_NAME
3424 && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use_lhs
))
3427 tree use_rhs
= gimple_assign_rhs1 (use_stmt
);
3431 if (optab_handler (optab
, TYPE_MODE (TREE_TYPE (lhs
)))
3432 == CODE_FOR_nothing
)
3436 gimple_stmt_iterator gsi
;
3440 if (rhs_code
== BIT_NOT_EXPR
)
3442 g
= convert_atomic_bit_not (fn
, use_stmt
, lhs
, mask
);
3448 else if (TREE_CODE (TREE_TYPE (use_lhs
)) == BOOLEAN_TYPE
)
3451 if (fn
== IFN_ATOMIC_BIT_TEST_AND_RESET
)
3453 /* MASK must be ~1. */
3454 if (!operand_equal_p (build_int_cst (TREE_TYPE (lhs
),
3460 _1 = __atomic_fetch_and_* (ptr_6, ~1, _3);
3463 _1 = __atomic_fetch_and_* (ptr_6, ~1, _3);
3467 and_mask
= build_int_cst (TREE_TYPE (lhs
), 1);
3471 and_mask
= build_int_cst (TREE_TYPE (lhs
), 1);
3472 if (!operand_equal_p (and_mask
, mask
, 0))
3476 _1 = __atomic_fetch_or_* (ptr_6, 1, _3);
3479 _1 = __atomic_fetch_or_* (ptr_6, 1, _3);
3484 var
= make_ssa_name (TREE_TYPE (use_rhs
));
3485 replace_uses_by (use_rhs
, var
);
3486 g
= gimple_build_assign (var
, BIT_AND_EXPR
, use_rhs
,
3488 gsi
= gsi_for_stmt (use_stmt
);
3489 gsi_insert_before (&gsi
, g
, GSI_NEW_STMT
);
3493 else if (TYPE_PRECISION (TREE_TYPE (use_lhs
))
3494 <= TYPE_PRECISION (TREE_TYPE (use_rhs
)))
3496 gimple
*use_nop_stmt
;
3497 if (!single_imm_use (use_lhs
, &use_p
, &use_nop_stmt
)
3498 || (!is_gimple_assign (use_nop_stmt
)
3499 && gimple_code (use_nop_stmt
) != GIMPLE_COND
))
3506 tree use_nop_lhs
= nullptr;
3507 rhs_code
= ERROR_MARK
;
3508 if (is_gimple_assign (use_nop_stmt
))
3510 use_nop_lhs
= gimple_assign_lhs (use_nop_stmt
);
3511 rhs_code
= gimple_assign_rhs_code (use_nop_stmt
);
3513 if (!use_nop_lhs
|| rhs_code
!= BIT_AND_EXPR
)
3519 && TREE_CODE (use_nop_lhs
) == SSA_NAME
3520 && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use_nop_lhs
))
3522 if (use_nop_lhs
&& rhs_code
== BIT_NOT_EXPR
)
3527 g
= convert_atomic_bit_not (fn
, use_nop_stmt
, lhs
,
3532 _1 = __atomic_fetch_or_4 (ptr_6, 1, _3);
3537 _1 = __atomic_fetch_or_4 (ptr_6, ~1, _3);
3541 _1 = __atomic_fetch_and_4 (ptr_6, ~1, _3);
3546 _1 = __atomic_fetch_and_4 (ptr_6, 1, _3);
3550 gsi
= gsi_for_stmt (use_stmt
);
3551 gsi_remove (&gsi
, true);
3557 tree cmp_rhs1
, cmp_rhs2
;
3563 if (TREE_CODE (TREE_TYPE (use_nop_lhs
))
3566 cmp_rhs1
= gimple_assign_rhs1 (use_nop_stmt
);
3567 cmp_rhs2
= gimple_assign_rhs2 (use_nop_stmt
);
3574 rhs_code
= gimple_cond_code (use_nop_stmt
);
3575 cmp_rhs1
= gimple_cond_lhs (use_nop_stmt
);
3576 cmp_rhs2
= gimple_cond_rhs (use_nop_stmt
);
3578 if (rhs_code
!= GE_EXPR
&& rhs_code
!= LT_EXPR
)
3580 if (use_lhs
!= cmp_rhs1
)
3582 if (!integer_zerop (cmp_rhs2
))
3587 unsigned HOST_WIDE_INT bytes
3588 = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (use_rhs
)));
3589 ibit
= bytes
* BITS_PER_UNIT
- 1;
3590 unsigned HOST_WIDE_INT highest
3591 = HOST_WIDE_INT_1U
<< ibit
;
3593 if (fn
== IFN_ATOMIC_BIT_TEST_AND_RESET
)
3595 /* Get the signed maximum of the USE_RHS type. */
3596 and_mask
= build_int_cst (TREE_TYPE (use_rhs
),
3598 if (!operand_equal_p (and_mask
, mask
, 0))
3602 _1 = __atomic_fetch_and_4 (ptr_6, 0x7fffffff, _3);
3603 _5 = (signed int) _1;
3604 _4 = _5 < 0 or _5 >= 0;
3606 _1 = __atomic_fetch_and_4 (ptr_6, 0x7fffffff, _3);
3607 _6 = _1 & 0x80000000;
3608 _4 = _6 != 0 or _6 == 0;
3610 _1 = __atomic_fetch_and_4 (ptr_6, 0x7fffffff, _3);
3611 _5 = (signed int) _1;
3612 if (_5 < 0 or _5 >= 0)
3614 _1 = __atomic_fetch_and_4 (ptr_6, 0x7fffffff, _3);
3615 _6 = _1 & 0x80000000;
3616 if (_6 != 0 or _6 == 0)
3618 and_mask
= build_int_cst (TREE_TYPE (use_rhs
),
3623 /* Get the signed minimum of the USE_RHS type. */
3624 and_mask
= build_int_cst (TREE_TYPE (use_rhs
),
3626 if (!operand_equal_p (and_mask
, mask
, 0))
3630 _1 = __atomic_fetch_or_4 (ptr_6, 0x80000000, _3);
3631 _5 = (signed int) _1;
3632 _4 = _5 < 0 or _5 >= 0;
3634 _1 = __atomic_fetch_or_4 (ptr_6, 0x80000000, _3);
3635 _6 = _1 & 0x80000000;
3636 _4 = _6 != 0 or _6 == 0;
3638 _1 = __atomic_fetch_or_4 (ptr_6, 0x80000000, _3);
3639 _5 = (signed int) _1;
3640 if (_5 < 0 or _5 >= 0)
3642 _1 = __atomic_fetch_or_4 (ptr_6, 0x80000000, _3);
3643 _6 = _1 & 0x80000000;
3644 if (_6 != 0 or _6 == 0)
3647 var
= make_ssa_name (TREE_TYPE (use_rhs
));
3648 gsi
= gsi_for_stmt (use_stmt
);
3649 gsi_remove (&gsi
, true);
3650 g
= gimple_build_assign (var
, BIT_AND_EXPR
, use_rhs
,
3652 gsi
= gsi_for_stmt (use_nop_stmt
);
3653 gsi_insert_before (&gsi
, g
, GSI_NEW_STMT
);
3655 rhs_code
= rhs_code
== GE_EXPR
? EQ_EXPR
: NE_EXPR
;
3656 tree const_zero
= build_zero_cst (TREE_TYPE (use_rhs
));
3658 g
= gimple_build_assign (use_nop_lhs
, rhs_code
,
3661 g
= gimple_build_cond (rhs_code
, var
, const_zero
,
3663 gsi_insert_after (&gsi
, g
, GSI_NEW_STMT
);
3664 gsi
= gsi_for_stmt (use_nop_stmt
);
3665 gsi_remove (&gsi
, true);
3672 if (!gimple_nop_atomic_bit_test_and_p (use_nop_lhs
,
3674 || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (match_op
[2])
3675 || !single_imm_use (match_op
[2], &use_p
, &g
)
3676 || !is_gimple_assign (g
))
3679 if (TREE_CODE (match_op
[1]) == INTEGER_CST
)
3681 ibit
= tree_log2 (match_op
[1]);
3682 gcc_assert (ibit
>= 0);
3686 g
= SSA_NAME_DEF_STMT (match_op
[1]);
3687 gcc_assert (is_gimple_assign (g
));
3688 bit
= gimple_assign_rhs2 (g
);
3691 _1 = __atomic_fetch_or_4 (ptr_6, mask, _3);
3695 _1 = __atomic_fetch_or_4 (ptr_6, mask, _3);
3700 _2 = (unsigned int) _1;
3701 _3 = __atomic_fetch_and_4 (ptr_6, _2, 0);
3705 _1 = __atomic_fetch_and_* (ptr_6, ~mask_7, _3);
3710 _1 = __atomic_fetch_and_4 (ptr_6, ~mask, _3);
3711 _2 = (short int) _1;
3714 _1 = __atomic_fetch_and_4 (ptr_6, ~mask, _3);
3716 _5 = (short int) _8;
3718 gimple_seq stmts
= NULL
;
3719 match_op
[1] = gimple_convert (&stmts
,
3720 TREE_TYPE (use_rhs
),
3722 var
= gimple_build (&stmts
, BIT_AND_EXPR
,
3723 TREE_TYPE (use_rhs
), use_rhs
, match_op
[1]);
3724 gsi
= gsi_for_stmt (use_stmt
);
3725 gsi_remove (&gsi
, true);
3726 release_defs (use_stmt
);
3727 use_stmt
= gimple_seq_last_stmt (stmts
);
3728 gsi
= gsi_for_stmt (use_nop_stmt
);
3729 gsi_insert_seq_before (&gsi
, stmts
, GSI_SAME_STMT
);
3730 gimple_assign_set_rhs_with_ops (&gsi
, CONVERT_EXPR
, var
);
3731 update_stmt (use_nop_stmt
);
3741 bit
= build_int_cst (TREE_TYPE (lhs
), ibit
);
3744 else if (optab_handler (optab
, TYPE_MODE (TREE_TYPE (lhs
)))
3745 == CODE_FOR_nothing
)
3748 tree use_lhs
= gimple_assign_lhs (use_stmt
);
3754 if (TREE_CODE (mask
) == INTEGER_CST
)
3756 if (fn
== IFN_ATOMIC_BIT_TEST_AND_RESET
)
3757 mask
= const_unop (BIT_NOT_EXPR
, TREE_TYPE (mask
), mask
);
3758 mask
= fold_convert (TREE_TYPE (lhs
), mask
);
3759 int ibit
= tree_log2 (mask
);
3762 bit
= build_int_cst (TREE_TYPE (lhs
), ibit
);
3764 else if (TREE_CODE (mask
) == SSA_NAME
)
3766 gimple
*g
= SSA_NAME_DEF_STMT (mask
);
3768 if (gimple_nop_convert (mask
, &match_op
, NULL
))
3771 if (TREE_CODE (mask
) != SSA_NAME
)
3773 g
= SSA_NAME_DEF_STMT (mask
);
3775 if (!is_gimple_assign (g
))
3778 if (fn
== IFN_ATOMIC_BIT_TEST_AND_RESET
)
3780 if (gimple_assign_rhs_code (g
) != BIT_NOT_EXPR
)
3782 mask
= gimple_assign_rhs1 (g
);
3783 if (TREE_CODE (mask
) != SSA_NAME
)
3785 g
= SSA_NAME_DEF_STMT (mask
);
3788 if (!is_gimple_assign (g
)
3789 || gimple_assign_rhs_code (g
) != LSHIFT_EXPR
3790 || !integer_onep (gimple_assign_rhs1 (g
)))
3792 bit
= gimple_assign_rhs2 (g
);
3798 if (gimple_assign_rhs1 (use_stmt
) == lhs
)
3799 cmp_mask
= gimple_assign_rhs2 (use_stmt
);
3801 cmp_mask
= gimple_assign_rhs1 (use_stmt
);
3804 if (gimple_nop_convert (cmp_mask
, &match_op
, NULL
))
3805 cmp_mask
= match_op
;
3807 if (!operand_equal_p (cmp_mask
, mask
, 0))
3811 bool use_bool
= true;
3812 bool has_debug_uses
= false;
3813 imm_use_iterator iter
;
3816 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use_lhs
))
3818 FOR_EACH_IMM_USE_STMT (g
, iter
, use_lhs
)
3820 enum tree_code code
= ERROR_MARK
;
3821 tree op0
= NULL_TREE
, op1
= NULL_TREE
;
3822 if (is_gimple_debug (g
))
3824 has_debug_uses
= true;
3827 else if (is_gimple_assign (g
))
3828 switch (gimple_assign_rhs_code (g
))
3831 op1
= gimple_assign_rhs1 (g
);
3832 code
= TREE_CODE (op1
);
3833 if (TREE_CODE_CLASS (code
) != tcc_comparison
)
3835 op0
= TREE_OPERAND (op1
, 0);
3836 op1
= TREE_OPERAND (op1
, 1);
3840 code
= gimple_assign_rhs_code (g
);
3841 op0
= gimple_assign_rhs1 (g
);
3842 op1
= gimple_assign_rhs2 (g
);
3847 else if (gimple_code (g
) == GIMPLE_COND
)
3849 code
= gimple_cond_code (g
);
3850 op0
= gimple_cond_lhs (g
);
3851 op1
= gimple_cond_rhs (g
);
3854 if ((code
== EQ_EXPR
|| code
== NE_EXPR
)
3856 && integer_zerop (op1
))
3858 use_operand_p use_p
;
3860 FOR_EACH_IMM_USE_ON_STMT (use_p
, iter
)
3870 tree new_lhs
= make_ssa_name (TREE_TYPE (lhs
));
3871 tree flag
= build_int_cst (TREE_TYPE (lhs
), use_bool
);
3873 g
= gimple_build_call_internal (fn
, 5, gimple_call_arg (call
, 0),
3874 bit
, flag
, gimple_call_arg (call
, 2),
3875 gimple_call_fn (call
));
3877 g
= gimple_build_call_internal (fn
, 4, gimple_call_arg (call
, 0),
3878 bit
, flag
, gimple_call_fn (call
));
3879 gimple_call_set_lhs (g
, new_lhs
);
3880 gimple_set_location (g
, gimple_location (call
));
3881 gimple_move_vops (g
, call
);
3882 bool throws
= stmt_can_throw_internal (cfun
, call
);
3883 gimple_call_set_nothrow (as_a
<gcall
*> (g
),
3884 gimple_call_nothrow_p (as_a
<gcall
*> (call
)));
3885 gimple_stmt_iterator gsi
= *gsip
;
3886 gsi_insert_after (&gsi
, g
, GSI_NEW_STMT
);
3890 maybe_clean_or_replace_eh_stmt (call
, g
);
3891 if (after
|| (use_bool
&& has_debug_uses
))
3892 e
= find_fallthru_edge (gsi_bb (gsi
)->succs
);
3896 /* The internal function returns the value of the specified bit
3897 before the atomic operation. If we are interested in the value
3898 of the specified bit after the atomic operation (makes only sense
3899 for xor, otherwise the bit content is compile time known),
3900 we need to invert the bit. */
3901 tree mask_convert
= mask
;
3902 gimple_seq stmts
= NULL
;
3904 mask_convert
= gimple_convert (&stmts
, TREE_TYPE (lhs
), mask
);
3905 new_lhs
= gimple_build (&stmts
, BIT_XOR_EXPR
, TREE_TYPE (lhs
), new_lhs
,
3906 use_bool
? build_int_cst (TREE_TYPE (lhs
), 1)
3910 gsi_insert_seq_on_edge_immediate (e
, stmts
);
3911 gsi
= gsi_for_stmt (gimple_seq_last (stmts
));
3914 gsi_insert_seq_after (&gsi
, stmts
, GSI_NEW_STMT
);
3916 if (use_bool
&& has_debug_uses
)
3918 tree temp
= NULL_TREE
;
3919 if (!throws
|| after
|| single_pred_p (e
->dest
))
3921 temp
= build_debug_expr_decl (TREE_TYPE (lhs
));
3922 tree t
= build2 (LSHIFT_EXPR
, TREE_TYPE (lhs
), new_lhs
, bit
);
3923 g
= gimple_build_debug_bind (temp
, t
, g
);
3924 if (throws
&& !after
)
3926 gsi
= gsi_after_labels (e
->dest
);
3927 gsi_insert_before (&gsi
, g
, GSI_SAME_STMT
);
3930 gsi_insert_after (&gsi
, g
, GSI_NEW_STMT
);
3932 FOR_EACH_IMM_USE_STMT (g
, iter
, use_lhs
)
3933 if (is_gimple_debug (g
))
3935 use_operand_p use_p
;
3936 if (temp
== NULL_TREE
)
3937 gimple_debug_bind_reset_value (g
);
3939 FOR_EACH_IMM_USE_ON_STMT (use_p
, iter
)
3940 SET_USE (use_p
, temp
);
3944 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_lhs
)
3945 = SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use_lhs
);
3946 replace_uses_by (use_lhs
, new_lhs
);
3947 gsi
= gsi_for_stmt (use_stmt
);
3948 gsi_remove (&gsi
, true);
3949 release_defs (use_stmt
);
3950 gsi_remove (gsip
, true);
3951 release_ssa_name (lhs
);
3956 _4 = __atomic_add_fetch_* (ptr_6, arg_2, _3);
3959 _4 = .ATOMIC_ADD_FETCH_CMP_0 (EQ_EXPR, ptr_6, arg_2, _3);
3961 Similarly for __sync_add_and_fetch_* (without the ", _3" part
3965 optimize_atomic_op_fetch_cmp_0 (gimple_stmt_iterator
*gsip
,
3966 enum internal_fn fn
, bool has_model_arg
)
3968 gimple
*call
= gsi_stmt (*gsip
);
3969 tree lhs
= gimple_call_lhs (call
);
3970 use_operand_p use_p
;
3973 if (!flag_inline_atomics
3975 || !gimple_call_builtin_p (call
, BUILT_IN_NORMAL
)
3977 || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs
)
3978 || !single_imm_use (lhs
, &use_p
, &use_stmt
)
3979 || !gimple_vdef (call
))
3985 case IFN_ATOMIC_ADD_FETCH_CMP_0
:
3986 optab
= atomic_add_fetch_cmp_0_optab
;
3988 case IFN_ATOMIC_SUB_FETCH_CMP_0
:
3989 optab
= atomic_sub_fetch_cmp_0_optab
;
3991 case IFN_ATOMIC_AND_FETCH_CMP_0
:
3992 optab
= atomic_and_fetch_cmp_0_optab
;
3994 case IFN_ATOMIC_OR_FETCH_CMP_0
:
3995 optab
= atomic_or_fetch_cmp_0_optab
;
3997 case IFN_ATOMIC_XOR_FETCH_CMP_0
:
3998 optab
= atomic_xor_fetch_cmp_0_optab
;
4004 if (optab_handler (optab
, TYPE_MODE (TREE_TYPE (lhs
)))
4005 == CODE_FOR_nothing
)
4009 if (gimple_assign_cast_p (use_stmt
))
4011 use_lhs
= gimple_assign_lhs (use_stmt
);
4012 if (!tree_nop_conversion_p (TREE_TYPE (use_lhs
), TREE_TYPE (lhs
))
4013 || (!INTEGRAL_TYPE_P (TREE_TYPE (use_lhs
))
4014 && !POINTER_TYPE_P (TREE_TYPE (use_lhs
)))
4015 || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use_lhs
)
4016 || !single_imm_use (use_lhs
, &use_p
, &use_stmt
))
4019 enum tree_code code
= ERROR_MARK
;
4020 tree op0
= NULL_TREE
, op1
= NULL_TREE
;
4021 if (is_gimple_assign (use_stmt
))
4022 switch (gimple_assign_rhs_code (use_stmt
))
4025 op1
= gimple_assign_rhs1 (use_stmt
);
4026 code
= TREE_CODE (op1
);
4027 if (TREE_CODE_CLASS (code
) == tcc_comparison
)
4029 op0
= TREE_OPERAND (op1
, 0);
4030 op1
= TREE_OPERAND (op1
, 1);
4034 code
= gimple_assign_rhs_code (use_stmt
);
4035 if (TREE_CODE_CLASS (code
) == tcc_comparison
)
4037 op0
= gimple_assign_rhs1 (use_stmt
);
4038 op1
= gimple_assign_rhs2 (use_stmt
);
4042 else if (gimple_code (use_stmt
) == GIMPLE_COND
)
4044 code
= gimple_cond_code (use_stmt
);
4045 op0
= gimple_cond_lhs (use_stmt
);
4046 op1
= gimple_cond_rhs (use_stmt
);
4055 if (!INTEGRAL_TYPE_P (TREE_TYPE (use_lhs
))
4056 || TREE_CODE (TREE_TYPE (use_lhs
)) == BOOLEAN_TYPE
4057 || TYPE_UNSIGNED (TREE_TYPE (use_lhs
)))
4062 if (op0
== use_lhs
&& integer_zerop (op1
))
4072 /* Use special encoding of the operation. We want to also
4073 encode the mode in the first argument and for neither EQ_EXPR
4074 etc. nor EQ etc. we can rely it will fit into QImode. */
4075 case EQ_EXPR
: encoded
= ATOMIC_OP_FETCH_CMP_0_EQ
; break;
4076 case NE_EXPR
: encoded
= ATOMIC_OP_FETCH_CMP_0_NE
; break;
4077 case LT_EXPR
: encoded
= ATOMIC_OP_FETCH_CMP_0_LT
; break;
4078 case LE_EXPR
: encoded
= ATOMIC_OP_FETCH_CMP_0_LE
; break;
4079 case GT_EXPR
: encoded
= ATOMIC_OP_FETCH_CMP_0_GT
; break;
4080 case GE_EXPR
: encoded
= ATOMIC_OP_FETCH_CMP_0_GE
; break;
4081 default: gcc_unreachable ();
4084 tree new_lhs
= make_ssa_name (boolean_type_node
);
4086 tree flag
= build_int_cst (TREE_TYPE (lhs
), encoded
);
4088 g
= gimple_build_call_internal (fn
, 5, flag
,
4089 gimple_call_arg (call
, 0),
4090 gimple_call_arg (call
, 1),
4091 gimple_call_arg (call
, 2),
4092 gimple_call_fn (call
));
4094 g
= gimple_build_call_internal (fn
, 4, flag
,
4095 gimple_call_arg (call
, 0),
4096 gimple_call_arg (call
, 1),
4097 gimple_call_fn (call
));
4098 gimple_call_set_lhs (g
, new_lhs
);
4099 gimple_set_location (g
, gimple_location (call
));
4100 gimple_move_vops (g
, call
);
4101 bool throws
= stmt_can_throw_internal (cfun
, call
);
4102 gimple_call_set_nothrow (as_a
<gcall
*> (g
),
4103 gimple_call_nothrow_p (as_a
<gcall
*> (call
)));
4104 gimple_stmt_iterator gsi
= *gsip
;
4105 gsi_insert_after (&gsi
, g
, GSI_SAME_STMT
);
4107 maybe_clean_or_replace_eh_stmt (call
, g
);
4108 if (is_gimple_assign (use_stmt
))
4109 switch (gimple_assign_rhs_code (use_stmt
))
4112 gimple_assign_set_rhs1 (use_stmt
, new_lhs
);
4115 gsi
= gsi_for_stmt (use_stmt
);
4116 if (tree ulhs
= gimple_assign_lhs (use_stmt
))
4117 if (useless_type_conversion_p (TREE_TYPE (ulhs
),
4120 gimple_assign_set_rhs_with_ops (&gsi
, SSA_NAME
, new_lhs
);
4123 gimple_assign_set_rhs_with_ops (&gsi
, NOP_EXPR
, new_lhs
);
4126 else if (gimple_code (use_stmt
) == GIMPLE_COND
)
4128 gcond
*use_cond
= as_a
<gcond
*> (use_stmt
);
4129 gimple_cond_set_code (use_cond
, NE_EXPR
);
4130 gimple_cond_set_lhs (use_cond
, new_lhs
);
4131 gimple_cond_set_rhs (use_cond
, boolean_false_node
);
4134 update_stmt (use_stmt
);
4137 gsi
= gsi_for_stmt (SSA_NAME_DEF_STMT (use_lhs
));
4138 gsi_remove (&gsi
, true);
4139 release_ssa_name (use_lhs
);
4141 gsi_remove (gsip
, true);
4142 release_ssa_name (lhs
);
4152 Similarly for memset (&a, ..., sizeof (a)); instead of a = {};
4153 and/or memcpy (&b, &a, sizeof (a)); instead of b = a; */
4156 optimize_memcpy (gimple_stmt_iterator
*gsip
, tree dest
, tree src
, tree len
)
4158 gimple
*stmt
= gsi_stmt (*gsip
);
4159 if (gimple_has_volatile_ops (stmt
))
4162 tree vuse
= gimple_vuse (stmt
);
4166 gimple
*defstmt
= SSA_NAME_DEF_STMT (vuse
);
4167 tree src2
= NULL_TREE
, len2
= NULL_TREE
;
4168 poly_int64 offset
, offset2
;
4169 tree val
= integer_zero_node
;
4170 if (gimple_store_p (defstmt
)
4171 && gimple_assign_single_p (defstmt
)
4172 && TREE_CODE (gimple_assign_rhs1 (defstmt
)) == CONSTRUCTOR
4173 && !gimple_clobber_p (defstmt
))
4174 src2
= gimple_assign_lhs (defstmt
);
4175 else if (gimple_call_builtin_p (defstmt
, BUILT_IN_MEMSET
)
4176 && TREE_CODE (gimple_call_arg (defstmt
, 0)) == ADDR_EXPR
4177 && TREE_CODE (gimple_call_arg (defstmt
, 1)) == INTEGER_CST
)
4179 src2
= TREE_OPERAND (gimple_call_arg (defstmt
, 0), 0);
4180 len2
= gimple_call_arg (defstmt
, 2);
4181 val
= gimple_call_arg (defstmt
, 1);
4182 /* For non-0 val, we'd have to transform stmt from assignment
4183 into memset (only if dest is addressable). */
4184 if (!integer_zerop (val
) && is_gimple_assign (stmt
))
4188 if (src2
== NULL_TREE
)
4191 if (len
== NULL_TREE
)
4192 len
= (TREE_CODE (src
) == COMPONENT_REF
4193 ? DECL_SIZE_UNIT (TREE_OPERAND (src
, 1))
4194 : TYPE_SIZE_UNIT (TREE_TYPE (src
)));
4195 if (len2
== NULL_TREE
)
4196 len2
= (TREE_CODE (src2
) == COMPONENT_REF
4197 ? DECL_SIZE_UNIT (TREE_OPERAND (src2
, 1))
4198 : TYPE_SIZE_UNIT (TREE_TYPE (src2
)));
4199 if (len
== NULL_TREE
4200 || !poly_int_tree_p (len
)
4201 || len2
== NULL_TREE
4202 || !poly_int_tree_p (len2
))
4205 src
= get_addr_base_and_unit_offset (src
, &offset
);
4206 src2
= get_addr_base_and_unit_offset (src2
, &offset2
);
4207 if (src
== NULL_TREE
4208 || src2
== NULL_TREE
4209 || maybe_lt (offset
, offset2
))
4212 if (!operand_equal_p (src
, src2
, 0))
4215 /* [ src + offset2, src + offset2 + len2 - 1 ] is set to val.
4217 [ src + offset, src + offset + len - 1 ] is a subset of that. */
4218 if (maybe_gt (wi::to_poly_offset (len
) + (offset
- offset2
),
4219 wi::to_poly_offset (len2
)))
4222 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
4224 fprintf (dump_file
, "Simplified\n ");
4225 print_gimple_stmt (dump_file
, stmt
, 0, dump_flags
);
4226 fprintf (dump_file
, "after previous\n ");
4227 print_gimple_stmt (dump_file
, defstmt
, 0, dump_flags
);
4230 /* For simplicity, don't change the kind of the stmt,
4231 turn dest = src; into dest = {}; and memcpy (&dest, &src, len);
4232 into memset (&dest, val, len);
4233 In theory we could change dest = src into memset if dest
4234 is addressable (maybe beneficial if val is not 0), or
4235 memcpy (&dest, &src, len) into dest = {} if len is the size
4236 of dest, dest isn't volatile. */
4237 if (is_gimple_assign (stmt
))
4239 tree ctor
= build_constructor (TREE_TYPE (dest
), NULL
);
4240 gimple_assign_set_rhs_from_tree (gsip
, ctor
);
4243 else /* If stmt is memcpy, transform it into memset. */
4245 gcall
*call
= as_a
<gcall
*> (stmt
);
4246 tree fndecl
= builtin_decl_implicit (BUILT_IN_MEMSET
);
4247 gimple_call_set_fndecl (call
, fndecl
);
4248 gimple_call_set_fntype (call
, TREE_TYPE (fndecl
));
4249 gimple_call_set_arg (call
, 1, val
);
4253 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
4255 fprintf (dump_file
, "into\n ");
4256 print_gimple_stmt (dump_file
, stmt
, 0, dump_flags
);
4260 /* A simple pass that attempts to fold all builtin functions. This pass
4261 is run after we've propagated as many constants as we can. */
4265 const pass_data pass_data_fold_builtins
=
4267 GIMPLE_PASS
, /* type */
4269 OPTGROUP_NONE
, /* optinfo_flags */
4270 TV_NONE
, /* tv_id */
4271 ( PROP_cfg
| PROP_ssa
), /* properties_required */
4272 0, /* properties_provided */
4273 0, /* properties_destroyed */
4274 0, /* todo_flags_start */
4275 TODO_update_ssa
, /* todo_flags_finish */
4278 class pass_fold_builtins
: public gimple_opt_pass
4281 pass_fold_builtins (gcc::context
*ctxt
)
4282 : gimple_opt_pass (pass_data_fold_builtins
, ctxt
)
4285 /* opt_pass methods: */
4286 opt_pass
* clone () final override
{ return new pass_fold_builtins (m_ctxt
); }
4287 unsigned int execute (function
*) final override
;
4289 }; // class pass_fold_builtins
4292 pass_fold_builtins::execute (function
*fun
)
4294 bool cfg_changed
= false;
4296 unsigned int todoflags
= 0;
4298 FOR_EACH_BB_FN (bb
, fun
)
4300 gimple_stmt_iterator i
;
4301 for (i
= gsi_start_bb (bb
); !gsi_end_p (i
); )
4303 gimple
*stmt
, *old_stmt
;
4305 enum built_in_function fcode
;
4307 stmt
= gsi_stmt (i
);
4309 if (gimple_code (stmt
) != GIMPLE_CALL
)
4311 if (gimple_assign_load_p (stmt
) && gimple_store_p (stmt
))
4312 optimize_memcpy (&i
, gimple_assign_lhs (stmt
),
4313 gimple_assign_rhs1 (stmt
), NULL_TREE
);
4318 callee
= gimple_call_fndecl (stmt
);
4320 && gimple_call_internal_p (stmt
, IFN_ASSUME
))
4322 gsi_remove (&i
, true);
4325 if (!callee
|| !fndecl_built_in_p (callee
, BUILT_IN_NORMAL
))
4331 fcode
= DECL_FUNCTION_CODE (callee
);
4336 tree result
= NULL_TREE
;
4337 switch (DECL_FUNCTION_CODE (callee
))
4339 case BUILT_IN_CONSTANT_P
:
4340 /* Resolve __builtin_constant_p. If it hasn't been
4341 folded to integer_one_node by now, it's fairly
4342 certain that the value simply isn't constant. */
4343 result
= integer_zero_node
;
4346 case BUILT_IN_ASSUME_ALIGNED
:
4347 /* Remove __builtin_assume_aligned. */
4348 result
= gimple_call_arg (stmt
, 0);
4351 case BUILT_IN_STACK_RESTORE
:
4352 result
= optimize_stack_restore (i
);
4358 case BUILT_IN_UNREACHABLE
:
4359 if (optimize_unreachable (i
))
4363 case BUILT_IN_ATOMIC_ADD_FETCH_1
:
4364 case BUILT_IN_ATOMIC_ADD_FETCH_2
:
4365 case BUILT_IN_ATOMIC_ADD_FETCH_4
:
4366 case BUILT_IN_ATOMIC_ADD_FETCH_8
:
4367 case BUILT_IN_ATOMIC_ADD_FETCH_16
:
4368 optimize_atomic_op_fetch_cmp_0 (&i
,
4369 IFN_ATOMIC_ADD_FETCH_CMP_0
,
4372 case BUILT_IN_SYNC_ADD_AND_FETCH_1
:
4373 case BUILT_IN_SYNC_ADD_AND_FETCH_2
:
4374 case BUILT_IN_SYNC_ADD_AND_FETCH_4
:
4375 case BUILT_IN_SYNC_ADD_AND_FETCH_8
:
4376 case BUILT_IN_SYNC_ADD_AND_FETCH_16
:
4377 optimize_atomic_op_fetch_cmp_0 (&i
,
4378 IFN_ATOMIC_ADD_FETCH_CMP_0
,
4382 case BUILT_IN_ATOMIC_SUB_FETCH_1
:
4383 case BUILT_IN_ATOMIC_SUB_FETCH_2
:
4384 case BUILT_IN_ATOMIC_SUB_FETCH_4
:
4385 case BUILT_IN_ATOMIC_SUB_FETCH_8
:
4386 case BUILT_IN_ATOMIC_SUB_FETCH_16
:
4387 optimize_atomic_op_fetch_cmp_0 (&i
,
4388 IFN_ATOMIC_SUB_FETCH_CMP_0
,
4391 case BUILT_IN_SYNC_SUB_AND_FETCH_1
:
4392 case BUILT_IN_SYNC_SUB_AND_FETCH_2
:
4393 case BUILT_IN_SYNC_SUB_AND_FETCH_4
:
4394 case BUILT_IN_SYNC_SUB_AND_FETCH_8
:
4395 case BUILT_IN_SYNC_SUB_AND_FETCH_16
:
4396 optimize_atomic_op_fetch_cmp_0 (&i
,
4397 IFN_ATOMIC_SUB_FETCH_CMP_0
,
4401 case BUILT_IN_ATOMIC_FETCH_OR_1
:
4402 case BUILT_IN_ATOMIC_FETCH_OR_2
:
4403 case BUILT_IN_ATOMIC_FETCH_OR_4
:
4404 case BUILT_IN_ATOMIC_FETCH_OR_8
:
4405 case BUILT_IN_ATOMIC_FETCH_OR_16
:
4406 optimize_atomic_bit_test_and (&i
,
4407 IFN_ATOMIC_BIT_TEST_AND_SET
,
4410 case BUILT_IN_SYNC_FETCH_AND_OR_1
:
4411 case BUILT_IN_SYNC_FETCH_AND_OR_2
:
4412 case BUILT_IN_SYNC_FETCH_AND_OR_4
:
4413 case BUILT_IN_SYNC_FETCH_AND_OR_8
:
4414 case BUILT_IN_SYNC_FETCH_AND_OR_16
:
4415 optimize_atomic_bit_test_and (&i
,
4416 IFN_ATOMIC_BIT_TEST_AND_SET
,
4420 case BUILT_IN_ATOMIC_FETCH_XOR_1
:
4421 case BUILT_IN_ATOMIC_FETCH_XOR_2
:
4422 case BUILT_IN_ATOMIC_FETCH_XOR_4
:
4423 case BUILT_IN_ATOMIC_FETCH_XOR_8
:
4424 case BUILT_IN_ATOMIC_FETCH_XOR_16
:
4425 optimize_atomic_bit_test_and
4426 (&i
, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT
, true, false);
4428 case BUILT_IN_SYNC_FETCH_AND_XOR_1
:
4429 case BUILT_IN_SYNC_FETCH_AND_XOR_2
:
4430 case BUILT_IN_SYNC_FETCH_AND_XOR_4
:
4431 case BUILT_IN_SYNC_FETCH_AND_XOR_8
:
4432 case BUILT_IN_SYNC_FETCH_AND_XOR_16
:
4433 optimize_atomic_bit_test_and
4434 (&i
, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT
, false, false);
4437 case BUILT_IN_ATOMIC_XOR_FETCH_1
:
4438 case BUILT_IN_ATOMIC_XOR_FETCH_2
:
4439 case BUILT_IN_ATOMIC_XOR_FETCH_4
:
4440 case BUILT_IN_ATOMIC_XOR_FETCH_8
:
4441 case BUILT_IN_ATOMIC_XOR_FETCH_16
:
4442 if (optimize_atomic_bit_test_and
4443 (&i
, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT
, true, true))
4445 optimize_atomic_op_fetch_cmp_0 (&i
,
4446 IFN_ATOMIC_XOR_FETCH_CMP_0
,
4449 case BUILT_IN_SYNC_XOR_AND_FETCH_1
:
4450 case BUILT_IN_SYNC_XOR_AND_FETCH_2
:
4451 case BUILT_IN_SYNC_XOR_AND_FETCH_4
:
4452 case BUILT_IN_SYNC_XOR_AND_FETCH_8
:
4453 case BUILT_IN_SYNC_XOR_AND_FETCH_16
:
4454 if (optimize_atomic_bit_test_and
4455 (&i
, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT
, false, true))
4457 optimize_atomic_op_fetch_cmp_0 (&i
,
4458 IFN_ATOMIC_XOR_FETCH_CMP_0
,
4462 case BUILT_IN_ATOMIC_FETCH_AND_1
:
4463 case BUILT_IN_ATOMIC_FETCH_AND_2
:
4464 case BUILT_IN_ATOMIC_FETCH_AND_4
:
4465 case BUILT_IN_ATOMIC_FETCH_AND_8
:
4466 case BUILT_IN_ATOMIC_FETCH_AND_16
:
4467 optimize_atomic_bit_test_and (&i
,
4468 IFN_ATOMIC_BIT_TEST_AND_RESET
,
4471 case BUILT_IN_SYNC_FETCH_AND_AND_1
:
4472 case BUILT_IN_SYNC_FETCH_AND_AND_2
:
4473 case BUILT_IN_SYNC_FETCH_AND_AND_4
:
4474 case BUILT_IN_SYNC_FETCH_AND_AND_8
:
4475 case BUILT_IN_SYNC_FETCH_AND_AND_16
:
4476 optimize_atomic_bit_test_and (&i
,
4477 IFN_ATOMIC_BIT_TEST_AND_RESET
,
4481 case BUILT_IN_ATOMIC_AND_FETCH_1
:
4482 case BUILT_IN_ATOMIC_AND_FETCH_2
:
4483 case BUILT_IN_ATOMIC_AND_FETCH_4
:
4484 case BUILT_IN_ATOMIC_AND_FETCH_8
:
4485 case BUILT_IN_ATOMIC_AND_FETCH_16
:
4486 optimize_atomic_op_fetch_cmp_0 (&i
,
4487 IFN_ATOMIC_AND_FETCH_CMP_0
,
4490 case BUILT_IN_SYNC_AND_AND_FETCH_1
:
4491 case BUILT_IN_SYNC_AND_AND_FETCH_2
:
4492 case BUILT_IN_SYNC_AND_AND_FETCH_4
:
4493 case BUILT_IN_SYNC_AND_AND_FETCH_8
:
4494 case BUILT_IN_SYNC_AND_AND_FETCH_16
:
4495 optimize_atomic_op_fetch_cmp_0 (&i
,
4496 IFN_ATOMIC_AND_FETCH_CMP_0
,
4500 case BUILT_IN_ATOMIC_OR_FETCH_1
:
4501 case BUILT_IN_ATOMIC_OR_FETCH_2
:
4502 case BUILT_IN_ATOMIC_OR_FETCH_4
:
4503 case BUILT_IN_ATOMIC_OR_FETCH_8
:
4504 case BUILT_IN_ATOMIC_OR_FETCH_16
:
4505 optimize_atomic_op_fetch_cmp_0 (&i
,
4506 IFN_ATOMIC_OR_FETCH_CMP_0
,
4509 case BUILT_IN_SYNC_OR_AND_FETCH_1
:
4510 case BUILT_IN_SYNC_OR_AND_FETCH_2
:
4511 case BUILT_IN_SYNC_OR_AND_FETCH_4
:
4512 case BUILT_IN_SYNC_OR_AND_FETCH_8
:
4513 case BUILT_IN_SYNC_OR_AND_FETCH_16
:
4514 optimize_atomic_op_fetch_cmp_0 (&i
,
4515 IFN_ATOMIC_OR_FETCH_CMP_0
,
4519 case BUILT_IN_MEMCPY
:
4520 if (gimple_call_builtin_p (stmt
, BUILT_IN_NORMAL
)
4521 && TREE_CODE (gimple_call_arg (stmt
, 0)) == ADDR_EXPR
4522 && TREE_CODE (gimple_call_arg (stmt
, 1)) == ADDR_EXPR
4523 && TREE_CODE (gimple_call_arg (stmt
, 2)) == INTEGER_CST
)
4525 tree dest
= TREE_OPERAND (gimple_call_arg (stmt
, 0), 0);
4526 tree src
= TREE_OPERAND (gimple_call_arg (stmt
, 1), 0);
4527 tree len
= gimple_call_arg (stmt
, 2);
4528 optimize_memcpy (&i
, dest
, src
, len
);
4532 case BUILT_IN_VA_START
:
4533 case BUILT_IN_VA_END
:
4534 case BUILT_IN_VA_COPY
:
4535 /* These shouldn't be folded before pass_stdarg. */
4536 result
= optimize_stdarg_builtin (stmt
);
4548 gimplify_and_update_call_from_tree (&i
, result
);
4551 todoflags
|= TODO_update_address_taken
;
4553 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
4555 fprintf (dump_file
, "Simplified\n ");
4556 print_gimple_stmt (dump_file
, stmt
, 0, dump_flags
);
4560 stmt
= gsi_stmt (i
);
4563 if (maybe_clean_or_replace_eh_stmt (old_stmt
, stmt
)
4564 && gimple_purge_dead_eh_edges (bb
))
4567 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
4569 fprintf (dump_file
, "to\n ");
4570 print_gimple_stmt (dump_file
, stmt
, 0, dump_flags
);
4571 fprintf (dump_file
, "\n");
4574 /* Retry the same statement if it changed into another
4575 builtin, there might be new opportunities now. */
4576 if (gimple_code (stmt
) != GIMPLE_CALL
)
4581 callee
= gimple_call_fndecl (stmt
);
4583 || !fndecl_built_in_p (callee
, fcode
))
4588 /* Delete unreachable blocks. */
4590 todoflags
|= TODO_cleanup_cfg
;
4598 make_pass_fold_builtins (gcc::context
*ctxt
)
4600 return new pass_fold_builtins (ctxt
);
4603 /* A simple pass that emits some warnings post IPA. */
4607 const pass_data pass_data_post_ipa_warn
=
4609 GIMPLE_PASS
, /* type */
4610 "post_ipa_warn", /* name */
4611 OPTGROUP_NONE
, /* optinfo_flags */
4612 TV_NONE
, /* tv_id */
4613 ( PROP_cfg
| PROP_ssa
), /* properties_required */
4614 0, /* properties_provided */
4615 0, /* properties_destroyed */
4616 0, /* todo_flags_start */
4617 0, /* todo_flags_finish */
4620 class pass_post_ipa_warn
: public gimple_opt_pass
4623 pass_post_ipa_warn (gcc::context
*ctxt
)
4624 : gimple_opt_pass (pass_data_post_ipa_warn
, ctxt
)
4627 /* opt_pass methods: */
4628 opt_pass
* clone () final override
{ return new pass_post_ipa_warn (m_ctxt
); }
4629 bool gate (function
*) final override
{ return warn_nonnull
!= 0; }
4630 unsigned int execute (function
*) final override
;
4632 }; // class pass_fold_builtins
4635 pass_post_ipa_warn::execute (function
*fun
)
4639 FOR_EACH_BB_FN (bb
, fun
)
4641 gimple_stmt_iterator gsi
;
4642 for (gsi
= gsi_start_bb (bb
); !gsi_end_p (gsi
); gsi_next (&gsi
))
4644 gimple
*stmt
= gsi_stmt (gsi
);
4645 if (!is_gimple_call (stmt
) || warning_suppressed_p (stmt
, OPT_Wnonnull
))
4648 tree fntype
= gimple_call_fntype (stmt
);
4649 bitmap nonnullargs
= get_nonnull_args (fntype
);
4653 tree fndecl
= gimple_call_fndecl (stmt
);
4654 const bool closure
= fndecl
&& DECL_LAMBDA_FUNCTION_P (fndecl
);
4656 for (unsigned i
= 0; i
< gimple_call_num_args (stmt
); i
++)
4658 tree arg
= gimple_call_arg (stmt
, i
);
4659 if (TREE_CODE (TREE_TYPE (arg
)) != POINTER_TYPE
)
4661 if (!integer_zerop (arg
))
4663 if (i
== 0 && closure
)
4664 /* Avoid warning for the first argument to lambda functions. */
4666 if (!bitmap_empty_p (nonnullargs
)
4667 && !bitmap_bit_p (nonnullargs
, i
))
4670 /* In C++ non-static member functions argument 0 refers
4671 to the implicit this pointer. Use the same one-based
4672 numbering for ordinary arguments. */
4673 unsigned argno
= TREE_CODE (fntype
) == METHOD_TYPE
? i
: i
+ 1;
4674 location_t loc
= (EXPR_HAS_LOCATION (arg
)
4675 ? EXPR_LOCATION (arg
)
4676 : gimple_location (stmt
));
4677 auto_diagnostic_group d
;
4680 if (warning_at (loc
, OPT_Wnonnull
,
4681 "%qs pointer is null", "this")
4683 inform (DECL_SOURCE_LOCATION (fndecl
),
4684 "in a call to non-static member function %qD",
4689 if (!warning_at (loc
, OPT_Wnonnull
,
4690 "argument %u null where non-null "
4694 tree fndecl
= gimple_call_fndecl (stmt
);
4695 if (fndecl
&& DECL_IS_UNDECLARED_BUILTIN (fndecl
))
4696 inform (loc
, "in a call to built-in function %qD",
4699 inform (DECL_SOURCE_LOCATION (fndecl
),
4700 "in a call to function %qD declared %qs",
4703 BITMAP_FREE (nonnullargs
);
4712 make_pass_post_ipa_warn (gcc::context
*ctxt
)
4714 return new pass_post_ipa_warn (ctxt
);