/* Conditional constant propagation pass for the GNU compiler.
   Copyright (C) 2000-2020 Free Software Foundation, Inc.
   Adapted from original RTL SSA-CCP by Daniel Berlin <dberlin@dberlin.org>
   Adapted to GIMPLE trees by Diego Novillo <dnovillo@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* Conditional constant propagation (CCP) is based on the SSA
   propagation engine (tree-ssa-propagate.c).  Constant assignments of
   the form VAR = CST are propagated from the assignments into uses of
   VAR, which in turn may generate new constants.  The simulation uses
   a four-level lattice to keep track of constant values associated
   with SSA names.  Given an SSA name V_i, it may take one of the
   following values:

	UNINITIALIZED   ->  the initial state of the value.  This value
			    is replaced with a correct initial value
			    the first time the value is used, so the
			    rest of the pass does not need to care about
			    it.  Using this value simplifies initialization
			    of the pass, and prevents us from needlessly
			    scanning statements that are never reached.

	UNDEFINED	->  V_i is a local variable whose definition
			    has not been processed yet.  Therefore we
			    don't yet know if its value is a constant
			    or not.

	CONSTANT	->  V_i has been found to hold a constant
			    value C.

	VARYING		->  V_i cannot take a constant value, or if it
			    does, it is not possible to determine it
			    at compile time.

   The core of SSA-CCP is in ccp_visit_stmt and ccp_visit_phi_node:

   1- In ccp_visit_stmt, we are interested in assignments whose RHS
      evaluates into a constant and conditional jumps whose predicate
      evaluates into a boolean true or false.  When an assignment of
      the form V_i = CONST is found, V_i's lattice value is set to
      CONSTANT and CONST is associated with it.  This causes the
      propagation engine to add all the SSA edges coming out of the
      assignment into the worklists, so that statements that use V_i
      can be visited again.

      If the statement is a conditional with a constant predicate, we
      mark the outgoing edges as executable or not executable
      depending on the predicate's value.  This is then used when
      visiting PHI nodes to know when a PHI argument can be ignored.

   2- In ccp_visit_phi_node, if all the PHI arguments evaluate to the
      same constant C, then the LHS of the PHI is set to C.  This
      evaluation is known as the "meet operation".  Since one of the
      goals of this evaluation is to optimistically return constant
      values as often as possible, it uses two main shortcuts:

      - If an argument is flowing in through a non-executable edge, it
	is ignored.  This is useful in cases like this:

			if (PRED)
			  a_9 = 3;
			else
			  a_10 = 100;
			a_11 = PHI (a_9, a_10)

	If PRED is known to always evaluate to false, then we can
	assume that a_11 will always take its value from a_10, meaning
	that instead of considering it VARYING (a_9 and a_10 have
	different values), we can consider it CONSTANT 100.

      - If an argument has an UNDEFINED value, then it does not affect
	the outcome of the meet operation.  If a variable V_i has an
	UNDEFINED value, it means that either its defining statement
	hasn't been visited yet or V_i has no defining statement, in
	which case the original symbol 'V' is being used
	uninitialized.  Since 'V' is a local variable, the compiler
	may assume any initial value for it.

   After propagation, every variable V_i that ends up with a lattice
   value of CONSTANT will have the associated constant value in the
   array CONST_VAL[i].VALUE.  That is fed into substitute_and_fold for
   final substitution and folding.

   This algorithm uses wide-ints at the max precision of the target.
   This means that, with one uninteresting exception, variables with
   UNSIGNED types never go to VARYING because the bits above the
   precision of the type of the variable are always zero.  The
   uninteresting case is a variable of UNSIGNED type that has the
   maximum precision of the target.  Such variables can go to VARYING,
   but this causes no loss of information since these variables will
   never be extended.

   References:

     Constant propagation with conditional branches,
     Wegman and Zadeck, ACM TOPLAS 13(2):181-210.

     Building an Optimizing Compiler,
     Robert Morgan, Butterworth-Heinemann, 1998, Section 8.9.

     Advanced Compiler Design and Implementation,
     Steven Muchnick, Morgan Kaufmann, 1997, Section 12.6  */
#include "coretypes.h"
#include "tree-pass.h"
#include "gimple-pretty-print.h"
#include "fold-const.h"
#include "gimple-fold.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "tree-ssa-propagate.h"
#include "builtins.h"
#include "stor-layout.h"
#include "optabs-query.h"
#include "tree-ssa-ccp.h"
#include "tree-dfa.h"
#include "diagnostic-core.h"
#include "stringpool.h"
#include "tree-vector-builder.h"
#include "alloc-pool.h"
#include "symbol-summary.h"
#include "ipa-utils.h"
#include "ipa-prop.h"
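/* Illustrative sketch, not part of the pass: a tiny function that the
   propagation described in the file comment above resolves completely.
   The variable names and constants are made up; the comments describe
   what CCP does to the corresponding SSA names.  */

static int
ccp_toy_example (void)
{
  int v = 4;		/* v_1 gets lattice value CONSTANT 4.  */
  int w = v + 6;	/* The RHS folds, so w_2 becomes CONSTANT 10.  */
  if (w > 20)		/* The predicate folds to false, so the edge into
			   the then-block is marked not executable.  */
    return 0;
  return w;		/* Substitution turns this into "return 10".  */
}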
/* Possible lattice values.  */

class ccp_prop_value_t
{
public:
  /* Lattice value.  */
  ccp_lattice_t lattice_val;

  /* Propagated value.  */
  tree value;

  /* Mask that applies to the propagated value during CCP.  For X
     with a CONSTANT lattice value X & ~mask == value & ~mask.  The
     zero bits in the mask cover constant values.  The ones mean no
     information.  */
  widest_int mask;
};
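/* Illustrative sketch, not part of the pass: the value/mask encoding
   above on a plain unsigned int instead of a widest_int.  A zero mask
   bit means the corresponding value bit is known; a one bit means it
   is unknown.  */

static bool
ccp_toy_value_mask_invariant (void)
{
  unsigned int value = 0x10;	/* The bit pattern is known to be 0001????.  */
  unsigned int mask = 0x0f;	/* The low four bits are unknown.  */
  bool ok = true;

  /* Every concrete number X covered by the pair agrees with VALUE on
     the known bits: X & ~mask == value & ~mask.  */
  for (unsigned int x = 0x10; x <= 0x1f; x++)
    ok &= (x & ~mask) == (value & ~mask);
  return ok;
}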
class ccp_propagate : public ssa_propagation_engine
{
 public:
  enum ssa_prop_result visit_stmt (gimple *, edge *, tree *) FINAL OVERRIDE;
  enum ssa_prop_result visit_phi (gphi *) FINAL OVERRIDE;
};
/* Array of propagated constant values.  After propagation,
   CONST_VAL[I].VALUE holds the constant value for SSA_NAME(I).  If
   the constant is held in an SSA name representing a memory store
   (i.e., a VDEF), CONST_VAL[I].MEM_REF will contain the actual
   memory reference used to store (i.e., the LHS of the assignment
   doing the store).  */
static ccp_prop_value_t *const_val;
static unsigned n_const_val;

static void canonicalize_value (ccp_prop_value_t *);
static void ccp_lattice_meet (ccp_prop_value_t *, ccp_prop_value_t *);
/* Dump constant propagation value VAL to file OUTF prefixed by PREFIX.  */

static void
dump_lattice_value (FILE *outf, const char *prefix, ccp_prop_value_t val)
{
  switch (val.lattice_val)
    {
    case UNINITIALIZED:
      fprintf (outf, "%sUNINITIALIZED", prefix);
      break;
    case UNDEFINED:
      fprintf (outf, "%sUNDEFINED", prefix);
      break;
    case VARYING:
      fprintf (outf, "%sVARYING", prefix);
      break;
    case CONSTANT:
      if (TREE_CODE (val.value) != INTEGER_CST
	  || val.mask == 0)
	{
	  fprintf (outf, "%sCONSTANT ", prefix);
	  print_generic_expr (outf, val.value, dump_flags);
	}
      else
	{
	  widest_int cval = wi::bit_and_not (wi::to_widest (val.value),
					     val.mask);
	  fprintf (outf, "%sCONSTANT ", prefix);
	  print_hex (cval, outf);
	  fprintf (outf, " (");
	  print_hex (val.mask, outf);
	  fprintf (outf, ")");
	}
      break;
    default:
      gcc_unreachable ();
    }
}
/* Print lattice value VAL to stderr.  */

void debug_lattice_value (ccp_prop_value_t val);

DEBUG_FUNCTION void
debug_lattice_value (ccp_prop_value_t val)
{
  dump_lattice_value (stderr, "", val);
  fprintf (stderr, "\n");
}
/* Extend NONZERO_BITS to a full mask, based on sgn.  */

static widest_int
extend_mask (const wide_int &nonzero_bits, signop sgn)
{
  return widest_int::from (nonzero_bits, sgn);
}
/* Compute a default value for variable VAR and store it in the
   CONST_VAL array.  The following rules are used to get default
   values:

   1- Global and static variables that are declared constant are
      considered CONSTANT.

   2- Any other value is considered UNDEFINED.  This is useful when
      considering PHI nodes.  PHI arguments that are undefined do not
      change the constant value of the PHI node, which allows for more
      constants to be propagated.

   3- Variables defined by statements other than assignments and PHI
      nodes are considered VARYING.

   4- Initial values of variables that are not GIMPLE registers are
      considered VARYING.  */

static ccp_prop_value_t
get_default_value (tree var)
{
  ccp_prop_value_t val = { UNINITIALIZED, NULL_TREE, 0 };
281 stmt
= SSA_NAME_DEF_STMT (var
);
283 if (gimple_nop_p (stmt
))
285 /* Variables defined by an empty statement are those used
286 before being initialized. If VAR is a local variable, we
287 can assume initially that it is UNDEFINED, otherwise we must
288 consider it VARYING. */
289 if (!virtual_operand_p (var
)
290 && SSA_NAME_VAR (var
)
291 && TREE_CODE (SSA_NAME_VAR (var
)) == VAR_DECL
)
292 val
.lattice_val
= UNDEFINED
;
295 val
.lattice_val
= VARYING
;
297 if (flag_tree_bit_ccp
)
299 wide_int nonzero_bits
= get_nonzero_bits (var
);
303 if (SSA_NAME_VAR (var
)
304 && TREE_CODE (SSA_NAME_VAR (var
)) == PARM_DECL
305 && ipcp_get_parm_bits (SSA_NAME_VAR (var
), &value
, &mask
))
307 val
.lattice_val
= CONSTANT
;
309 widest_int ipa_value
= wi::to_widest (value
);
310 /* Unknown bits from IPA CP must be equal to zero. */
311 gcc_assert (wi::bit_and (ipa_value
, mask
) == 0);
313 if (nonzero_bits
!= -1)
314 val
.mask
&= extend_mask (nonzero_bits
,
315 TYPE_SIGN (TREE_TYPE (var
)));
317 else if (nonzero_bits
!= -1)
319 val
.lattice_val
= CONSTANT
;
320 val
.value
= build_zero_cst (TREE_TYPE (var
));
321 val
.mask
= extend_mask (nonzero_bits
,
322 TYPE_SIGN (TREE_TYPE (var
)));
327 else if (is_gimple_assign (stmt
))
330 if (gimple_assign_single_p (stmt
)
331 && DECL_P (gimple_assign_rhs1 (stmt
))
332 && (cst
= get_symbol_constant_value (gimple_assign_rhs1 (stmt
))))
334 val
.lattice_val
= CONSTANT
;
339 /* Any other variable defined by an assignment is considered
341 val
.lattice_val
= UNDEFINED
;
344 else if ((is_gimple_call (stmt
)
345 && gimple_call_lhs (stmt
) != NULL_TREE
)
346 || gimple_code (stmt
) == GIMPLE_PHI
)
348 /* A variable defined by a call or a PHI node is considered
350 val
.lattice_val
= UNDEFINED
;
354 /* Otherwise, VAR will never take on a constant value. */
355 val
.lattice_val
= VARYING
;
363 /* Get the constant value associated with variable VAR. */
365 static inline ccp_prop_value_t
*
368 ccp_prop_value_t
*val
;
370 if (const_val
== NULL
371 || SSA_NAME_VERSION (var
) >= n_const_val
)
374 val
= &const_val
[SSA_NAME_VERSION (var
)];
375 if (val
->lattice_val
== UNINITIALIZED
)
376 *val
= get_default_value (var
);
378 canonicalize_value (val
);
383 /* Return the constant tree value associated with VAR. */
386 get_constant_value (tree var
)
388 ccp_prop_value_t
*val
;
389 if (TREE_CODE (var
) != SSA_NAME
)
391 if (is_gimple_min_invariant (var
))
395 val
= get_value (var
);
397 && val
->lattice_val
== CONSTANT
398 && (TREE_CODE (val
->value
) != INTEGER_CST
404 /* Sets the value associated with VAR to VARYING. */
407 set_value_varying (tree var
)
409 ccp_prop_value_t
*val
= &const_val
[SSA_NAME_VERSION (var
)];
411 val
->lattice_val
= VARYING
;
412 val
->value
= NULL_TREE
;
416 /* For integer constants, make sure to drop TREE_OVERFLOW. */
419 canonicalize_value (ccp_prop_value_t
*val
)
421 if (val
->lattice_val
!= CONSTANT
)
424 if (TREE_OVERFLOW_P (val
->value
))
425 val
->value
= drop_tree_overflow (val
->value
);
428 /* Return whether the lattice transition is valid. */
431 valid_lattice_transition (ccp_prop_value_t old_val
, ccp_prop_value_t new_val
)
433 /* Lattice transitions must always be monotonically increasing in
435 if (old_val
.lattice_val
< new_val
.lattice_val
)
438 if (old_val
.lattice_val
!= new_val
.lattice_val
)
441 if (!old_val
.value
&& !new_val
.value
)
444 /* Now both lattice values are CONSTANT. */
446 /* Allow arbitrary copy changes as we might look through PHI <a_1, ...>
447 when only a single copy edge is executable. */
448 if (TREE_CODE (old_val
.value
) == SSA_NAME
449 && TREE_CODE (new_val
.value
) == SSA_NAME
)
452 /* Allow transitioning from a constant to a copy. */
453 if (is_gimple_min_invariant (old_val
.value
)
454 && TREE_CODE (new_val
.value
) == SSA_NAME
)
457 /* Allow transitioning from PHI <&x, not executable> == &x
458 to PHI <&x, &y> == common alignment. */
459 if (TREE_CODE (old_val
.value
) != INTEGER_CST
460 && TREE_CODE (new_val
.value
) == INTEGER_CST
)
463 /* Bit-lattices have to agree in the still valid bits. */
464 if (TREE_CODE (old_val
.value
) == INTEGER_CST
465 && TREE_CODE (new_val
.value
) == INTEGER_CST
)
466 return (wi::bit_and_not (wi::to_widest (old_val
.value
), new_val
.mask
)
467 == wi::bit_and_not (wi::to_widest (new_val
.value
), new_val
.mask
));
469 /* Otherwise constant values have to agree. */
470 if (operand_equal_p (old_val
.value
, new_val
.value
, 0))
473 /* At least the kinds and types should agree now. */
474 if (TREE_CODE (old_val
.value
) != TREE_CODE (new_val
.value
)
475 || !types_compatible_p (TREE_TYPE (old_val
.value
),
476 TREE_TYPE (new_val
.value
)))
479 /* For floats and !HONOR_NANS allow transitions from (partial) NaN
481 tree type
= TREE_TYPE (new_val
.value
);
482 if (SCALAR_FLOAT_TYPE_P (type
)
483 && !HONOR_NANS (type
))
485 if (REAL_VALUE_ISNAN (TREE_REAL_CST (old_val
.value
)))
488 else if (VECTOR_FLOAT_TYPE_P (type
)
489 && !HONOR_NANS (type
))
492 = tree_vector_builder::binary_encoded_nelts (old_val
.value
,
494 for (unsigned int i
= 0; i
< count
; ++i
)
495 if (!REAL_VALUE_ISNAN
496 (TREE_REAL_CST (VECTOR_CST_ENCODED_ELT (old_val
.value
, i
)))
497 && !operand_equal_p (VECTOR_CST_ENCODED_ELT (old_val
.value
, i
),
498 VECTOR_CST_ENCODED_ELT (new_val
.value
, i
), 0))
502 else if (COMPLEX_FLOAT_TYPE_P (type
)
503 && !HONOR_NANS (type
))
505 if (!REAL_VALUE_ISNAN (TREE_REAL_CST (TREE_REALPART (old_val
.value
)))
506 && !operand_equal_p (TREE_REALPART (old_val
.value
),
507 TREE_REALPART (new_val
.value
), 0))
509 if (!REAL_VALUE_ISNAN (TREE_REAL_CST (TREE_IMAGPART (old_val
.value
)))
510 && !operand_equal_p (TREE_IMAGPART (old_val
.value
),
511 TREE_IMAGPART (new_val
.value
), 0))
518 /* Set the value for variable VAR to NEW_VAL. Return true if the new
519 value is different from VAR's previous value. */
522 set_lattice_value (tree var
, ccp_prop_value_t
*new_val
)
524 /* We can deal with old UNINITIALIZED values just fine here. */
525 ccp_prop_value_t
*old_val
= &const_val
[SSA_NAME_VERSION (var
)];
527 canonicalize_value (new_val
);
529 /* We have to be careful to not go up the bitwise lattice
530 represented by the mask. Instead of dropping to VARYING
531 use the meet operator to retain a conservative value.
532 Missed optimizations like PR65851 makes this necessary.
533 It also ensures we converge to a stable lattice solution. */
534 if (old_val
->lattice_val
!= UNINITIALIZED
)
535 ccp_lattice_meet (new_val
, old_val
);
537 gcc_checking_assert (valid_lattice_transition (*old_val
, *new_val
));
539 /* If *OLD_VAL and NEW_VAL are the same, return false to inform the
540 caller that this was a non-transition. */
541 if (old_val
->lattice_val
!= new_val
->lattice_val
542 || (new_val
->lattice_val
== CONSTANT
543 && (TREE_CODE (new_val
->value
) != TREE_CODE (old_val
->value
)
544 || (TREE_CODE (new_val
->value
) == INTEGER_CST
545 && (new_val
->mask
!= old_val
->mask
546 || (wi::bit_and_not (wi::to_widest (old_val
->value
),
548 != wi::bit_and_not (wi::to_widest (new_val
->value
),
550 || (TREE_CODE (new_val
->value
) != INTEGER_CST
551 && !operand_equal_p (new_val
->value
, old_val
->value
, 0)))))
553 /* ??? We would like to delay creation of INTEGER_CSTs from
554 partially constants here. */
556 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
558 dump_lattice_value (dump_file
, "Lattice value changed to ", *new_val
);
559 fprintf (dump_file
, ". Adding SSA edges to worklist.\n");
564 gcc_assert (new_val
->lattice_val
!= UNINITIALIZED
);
571 static ccp_prop_value_t
get_value_for_expr (tree
, bool);
572 static ccp_prop_value_t
bit_value_binop (enum tree_code
, tree
, tree
, tree
);
573 void bit_value_binop (enum tree_code
, signop
, int, widest_int
*, widest_int
*,
574 signop
, int, const widest_int
&, const widest_int
&,
575 signop
, int, const widest_int
&, const widest_int
&);
577 /* Return a widest_int that can be used for bitwise simplifications
581 value_to_wide_int (ccp_prop_value_t val
)
584 && TREE_CODE (val
.value
) == INTEGER_CST
)
585 return wi::to_widest (val
.value
);
590 /* Return the value for the address expression EXPR based on alignment
593 static ccp_prop_value_t
594 get_value_from_alignment (tree expr
)
596 tree type
= TREE_TYPE (expr
);
597 ccp_prop_value_t val
;
598 unsigned HOST_WIDE_INT bitpos
;
601 gcc_assert (TREE_CODE (expr
) == ADDR_EXPR
);
603 get_pointer_alignment_1 (expr
, &align
, &bitpos
);
604 val
.mask
= wi::bit_and_not
605 (POINTER_TYPE_P (type
) || TYPE_UNSIGNED (type
)
606 ? wi::mask
<widest_int
> (TYPE_PRECISION (type
), false)
608 align
/ BITS_PER_UNIT
- 1);
610 = wi::sext (val
.mask
, TYPE_PRECISION (type
)) == -1 ? VARYING
: CONSTANT
;
611 if (val
.lattice_val
== CONSTANT
)
612 val
.value
= build_int_cstu (type
, bitpos
/ BITS_PER_UNIT
);
614 val
.value
= NULL_TREE
;
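/* Illustrative sketch, not part of the pass: how the alignment and
   misalignment computed above map onto a value/mask pair, using plain
   unsigned ints.  ALIGN_BYTES is assumed to be a power of two.  */

static void
ccp_toy_value_from_alignment (unsigned int align_bytes,
			      unsigned int misalign_bytes,
			      unsigned int *value, unsigned int *mask)
{
  /* The low log2(ALIGN_BYTES) bits are known (zero mask bits); all
     higher bits of the pointer are unknown (one mask bits).  */
  *mask = ~(align_bytes - 1);
  /* The known low bits hold the byte misalignment.  */
  *value = misalign_bytes;
}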
619 /* Return the value for the tree operand EXPR. If FOR_BITS_P is true
620 return constant bits extracted from alignment information for
621 invariant addresses. */
623 static ccp_prop_value_t
624 get_value_for_expr (tree expr
, bool for_bits_p
)
626 ccp_prop_value_t val
;
628 if (TREE_CODE (expr
) == SSA_NAME
)
630 ccp_prop_value_t
*val_
= get_value (expr
);
635 val
.lattice_val
= VARYING
;
636 val
.value
= NULL_TREE
;
640 && val
.lattice_val
== CONSTANT
)
642 if (TREE_CODE (val
.value
) == ADDR_EXPR
)
643 val
= get_value_from_alignment (val
.value
);
644 else if (TREE_CODE (val
.value
) != INTEGER_CST
)
646 val
.lattice_val
= VARYING
;
647 val
.value
= NULL_TREE
;
651 /* Fall back to a copy value. */
653 && val
.lattice_val
== VARYING
654 && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (expr
))
656 val
.lattice_val
= CONSTANT
;
661 else if (is_gimple_min_invariant (expr
)
662 && (!for_bits_p
|| TREE_CODE (expr
) == INTEGER_CST
))
664 val
.lattice_val
= CONSTANT
;
667 canonicalize_value (&val
);
669 else if (TREE_CODE (expr
) == ADDR_EXPR
)
670 val
= get_value_from_alignment (expr
);
673 val
.lattice_val
= VARYING
;
675 val
.value
= NULL_TREE
;
678 if (val
.lattice_val
== VARYING
679 && TYPE_UNSIGNED (TREE_TYPE (expr
)))
680 val
.mask
= wi::zext (val
.mask
, TYPE_PRECISION (TREE_TYPE (expr
)));
685 /* Return the likely CCP lattice value for STMT.
687 If STMT has no operands, then return CONSTANT.
689 Else if undefinedness of operands of STMT cause its value to be
690 undefined, then return UNDEFINED.
692 Else if any operands of STMT are constants, then return CONSTANT.
694 Else return VARYING. */
697 likely_value (gimple
*stmt
)
699 bool has_constant_operand
, has_undefined_operand
, all_undefined_operands
;
700 bool has_nsa_operand
;
705 enum gimple_code code
= gimple_code (stmt
);
707 /* This function appears to be called only for assignments, calls,
708 conditionals, and switches, due to the logic in visit_stmt. */
709 gcc_assert (code
== GIMPLE_ASSIGN
710 || code
== GIMPLE_CALL
711 || code
== GIMPLE_COND
712 || code
== GIMPLE_SWITCH
);
714 /* If the statement has volatile operands, it won't fold to a
716 if (gimple_has_volatile_ops (stmt
))
719 /* Arrive here for more complex cases. */
720 has_constant_operand
= false;
721 has_undefined_operand
= false;
722 all_undefined_operands
= true;
723 has_nsa_operand
= false;
724 FOR_EACH_SSA_TREE_OPERAND (use
, stmt
, iter
, SSA_OP_USE
)
726 ccp_prop_value_t
*val
= get_value (use
);
728 if (val
&& val
->lattice_val
== UNDEFINED
)
729 has_undefined_operand
= true;
731 all_undefined_operands
= false;
733 if (val
&& val
->lattice_val
== CONSTANT
)
734 has_constant_operand
= true;
736 if (SSA_NAME_IS_DEFAULT_DEF (use
)
737 || !prop_simulate_again_p (SSA_NAME_DEF_STMT (use
)))
738 has_nsa_operand
= true;
  /* There may be constants in regular rhs operands.  For calls we
     have to ignore lhs, fndecl and static chain, otherwise only
     the lhs.  */
744 for (i
= (is_gimple_call (stmt
) ? 2 : 0) + gimple_has_lhs (stmt
);
745 i
< gimple_num_ops (stmt
); ++i
)
747 tree op
= gimple_op (stmt
, i
);
748 if (!op
|| TREE_CODE (op
) == SSA_NAME
)
750 if (is_gimple_min_invariant (op
))
751 has_constant_operand
= true;
754 if (has_constant_operand
)
755 all_undefined_operands
= false;
757 if (has_undefined_operand
758 && code
== GIMPLE_CALL
759 && gimple_call_internal_p (stmt
))
760 switch (gimple_call_internal_fn (stmt
))
      /* These three builtins use the first argument just as a magic
	 way to find out a decl uid.  */
764 case IFN_GOMP_SIMD_LANE
:
765 case IFN_GOMP_SIMD_VF
:
766 case IFN_GOMP_SIMD_LAST_LANE
:
767 has_undefined_operand
= false;
  /* If the operation combines operands like COMPLEX_EXPR make sure to
     not mark the result UNDEFINED if only one part of the result is
     undefined.  */
776 if (has_undefined_operand
&& all_undefined_operands
)
778 else if (code
== GIMPLE_ASSIGN
&& has_undefined_operand
)
780 switch (gimple_assign_rhs_code (stmt
))
782 /* Unary operators are handled with all_undefined_operands. */
785 case POINTER_PLUS_EXPR
:
787 /* Not MIN_EXPR, MAX_EXPR. One VARYING operand may be selected.
788 Not bitwise operators, one VARYING operand may specify the
790 Not logical operators for the same reason, apart from XOR.
791 Not COMPLEX_EXPR as one VARYING operand makes the result partly
792 not UNDEFINED. Not *DIV_EXPR, comparisons and shifts because
793 the undefined operand may be promoted. */
797 /* If any part of an address is UNDEFINED, like the index
798 of an ARRAY_EXPR, then treat the result as UNDEFINED. */
  /* If there was an UNDEFINED operand but the result may be not UNDEFINED
     fall back to CONSTANT.  During iteration UNDEFINED may still drop
     to CONSTANT.  */
808 if (has_undefined_operand
)
811 /* We do not consider virtual operands here -- load from read-only
812 memory may have only VARYING virtual operands, but still be
813 constant. Also we can combine the stmt with definitions from
814 operands whose definitions are not simulated again. */
815 if (has_constant_operand
817 || gimple_references_memory_p (stmt
))
823 /* Returns true if STMT cannot be constant. */
826 surely_varying_stmt_p (gimple
*stmt
)
828 /* If the statement has operands that we cannot handle, it cannot be
830 if (gimple_has_volatile_ops (stmt
))
833 /* If it is a call and does not return a value or is not a
834 builtin and not an indirect call or a call to function with
835 assume_aligned/alloc_align attribute, it is varying. */
836 if (is_gimple_call (stmt
))
838 tree fndecl
, fntype
= gimple_call_fntype (stmt
);
839 if (!gimple_call_lhs (stmt
)
840 || ((fndecl
= gimple_call_fndecl (stmt
)) != NULL_TREE
841 && !fndecl_built_in_p (fndecl
)
842 && !lookup_attribute ("assume_aligned",
843 TYPE_ATTRIBUTES (fntype
))
844 && !lookup_attribute ("alloc_align",
845 TYPE_ATTRIBUTES (fntype
))))
849 /* Any other store operation is not interesting. */
850 else if (gimple_vdef (stmt
))
853 /* Anything other than assignments and conditional jumps are not
854 interesting for CCP. */
855 if (gimple_code (stmt
) != GIMPLE_ASSIGN
856 && gimple_code (stmt
) != GIMPLE_COND
857 && gimple_code (stmt
) != GIMPLE_SWITCH
858 && gimple_code (stmt
) != GIMPLE_CALL
)
864 /* Initialize local data structures for CCP. */
867 ccp_initialize (void)
871 n_const_val
= num_ssa_names
;
872 const_val
= XCNEWVEC (ccp_prop_value_t
, n_const_val
);
874 /* Initialize simulation flags for PHI nodes and statements. */
875 FOR_EACH_BB_FN (bb
, cfun
)
877 gimple_stmt_iterator i
;
879 for (i
= gsi_start_bb (bb
); !gsi_end_p (i
); gsi_next (&i
))
881 gimple
*stmt
= gsi_stmt (i
);
	  /* If the statement is a control insn, then we have to
	     simulate the statement at least once; otherwise its
	     outgoing edges would never get added.  */
887 if (stmt_ends_bb_p (stmt
))
890 is_varying
= surely_varying_stmt_p (stmt
);
897 /* If the statement will not produce a constant, mark
898 all its outputs VARYING. */
899 FOR_EACH_SSA_TREE_OPERAND (def
, stmt
, iter
, SSA_OP_ALL_DEFS
)
900 set_value_varying (def
);
902 prop_set_simulate_again (stmt
, !is_varying
);
906 /* Now process PHI nodes. We never clear the simulate_again flag on
907 phi nodes, since we do not know which edges are executable yet,
908 except for phi nodes for virtual operands when we do not do store ccp. */
909 FOR_EACH_BB_FN (bb
, cfun
)
913 for (i
= gsi_start_phis (bb
); !gsi_end_p (i
); gsi_next (&i
))
915 gphi
*phi
= i
.phi ();
917 if (virtual_operand_p (gimple_phi_result (phi
)))
918 prop_set_simulate_again (phi
, false);
920 prop_set_simulate_again (phi
, true);
  /* Debug count support.  Reset the values of SSA names to
     VARYING when the total number of SSA names analyzed is
     beyond the specified debug count.  */
933 for (i
= 0; i
< num_ssa_names
; i
++)
937 const_val
[i
].lattice_val
= VARYING
;
938 const_val
[i
].mask
= -1;
939 const_val
[i
].value
= NULL_TREE
;
945 /* We want to provide our own GET_VALUE and FOLD_STMT virtual methods. */
946 class ccp_folder
: public substitute_and_fold_engine
949 tree
value_of_expr (tree
, gimple
*) FINAL OVERRIDE
;
950 bool fold_stmt (gimple_stmt_iterator
*) FINAL OVERRIDE
;
953 /* This method just wraps GET_CONSTANT_VALUE for now. Over time
954 naked calls to GET_CONSTANT_VALUE should be eliminated in favor
955 of calling member functions. */
958 ccp_folder::value_of_expr (tree op
, gimple
*)
960 return get_constant_value (op
);
963 /* Do final substitution of propagated values, cleanup the flowgraph and
964 free allocated storage. If NONZERO_P, record nonzero bits.
966 Return TRUE when something was optimized. */
969 ccp_finalize (bool nonzero_p
)
971 bool something_changed
;
977 /* Derive alignment and misalignment information from partially
978 constant pointers in the lattice or nonzero bits from partially
979 constant integers. */
980 FOR_EACH_SSA_NAME (i
, name
, cfun
)
982 ccp_prop_value_t
*val
;
983 unsigned int tem
, align
;
985 if (!POINTER_TYPE_P (TREE_TYPE (name
))
986 && (!INTEGRAL_TYPE_P (TREE_TYPE (name
))
987 /* Don't record nonzero bits before IPA to avoid
988 using too much memory. */
992 val
= get_value (name
);
993 if (val
->lattice_val
!= CONSTANT
994 || TREE_CODE (val
->value
) != INTEGER_CST
998 if (POINTER_TYPE_P (TREE_TYPE (name
)))
1000 /* Trailing mask bits specify the alignment, trailing value
1001 bits the misalignment. */
1002 tem
= val
->mask
.to_uhwi ();
1003 align
= least_bit_hwi (tem
);
1005 set_ptr_info_alignment (get_ptr_info (name
), align
,
1006 (TREE_INT_CST_LOW (val
->value
)
1011 unsigned int precision
= TYPE_PRECISION (TREE_TYPE (val
->value
));
1012 wide_int nonzero_bits
1013 = (wide_int::from (val
->mask
, precision
, UNSIGNED
)
1014 | wi::to_wide (val
->value
));
1015 nonzero_bits
&= get_nonzero_bits (name
);
1016 set_nonzero_bits (name
, nonzero_bits
);
1020 /* Perform substitutions based on the known constant values. */
1021 class ccp_folder ccp_folder
;
1022 something_changed
= ccp_folder
.substitute_and_fold ();
1026 return something_changed
;
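/* Illustrative sketch, not part of the pass: recovering alignment and
   misalignment from a pointer's value/mask pair as ccp_finalize does
   above, with plain unsigned ints.  Assumes MASK is nonzero, as the
   caller guarantees on this path.  */

static void
ccp_toy_align_from_value_mask (unsigned int value, unsigned int mask,
			       unsigned int *align, unsigned int *misalign)
{
  /* The lowest unknown (set) mask bit bounds the provable alignment.  */
  *align = mask & -mask;
  /* The known trailing value bits give the misalignment.  */
  *misalign = value & (*align - 1);
}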
/* Compute the meet operator between *VAL1 and *VAL2.  Store the result
   in VAL1.

		any  M UNDEFINED   = any
		any  M VARYING     = VARYING
		Ci   M Cj	   = Ci		if (i == j)
		Ci   M Cj	   = VARYING	if (i != j)  */

static void
ccp_lattice_meet (ccp_prop_value_t *val1, ccp_prop_value_t *val2)
{
1042 if (val1
->lattice_val
== UNDEFINED
      /* For UNDEFINED M SSA we can't always return the SSA name because
	 its definition may not dominate the PHI node.  Doing optimistic
	 copy propagation also causes a lot of gcc.dg/uninit-pred*.c FAILs.  */
1046 && (val2
->lattice_val
!= CONSTANT
1047 || TREE_CODE (val2
->value
) != SSA_NAME
))
1049 /* UNDEFINED M any = any */
1052 else if (val2
->lattice_val
== UNDEFINED
1054 && (val1
->lattice_val
!= CONSTANT
1055 || TREE_CODE (val1
->value
) != SSA_NAME
))
1057 /* any M UNDEFINED = any
1058 Nothing to do. VAL1 already contains the value we want. */
1061 else if (val1
->lattice_val
== VARYING
1062 || val2
->lattice_val
== VARYING
)
1064 /* any M VARYING = VARYING. */
1065 val1
->lattice_val
= VARYING
;
1067 val1
->value
= NULL_TREE
;
1069 else if (val1
->lattice_val
== CONSTANT
1070 && val2
->lattice_val
== CONSTANT
1071 && TREE_CODE (val1
->value
) == INTEGER_CST
1072 && TREE_CODE (val2
->value
) == INTEGER_CST
)
1074 /* Ci M Cj = Ci if (i == j)
1075 Ci M Cj = VARYING if (i != j)
1077 For INTEGER_CSTs mask unequal bits. If no equal bits remain,
1079 val1
->mask
= (val1
->mask
| val2
->mask
1080 | (wi::to_widest (val1
->value
)
1081 ^ wi::to_widest (val2
->value
)));
1082 if (wi::sext (val1
->mask
, TYPE_PRECISION (TREE_TYPE (val1
->value
))) == -1)
1084 val1
->lattice_val
= VARYING
;
1085 val1
->value
= NULL_TREE
;
1088 else if (val1
->lattice_val
== CONSTANT
1089 && val2
->lattice_val
== CONSTANT
1090 && operand_equal_p (val1
->value
, val2
->value
, 0))
1092 /* Ci M Cj = Ci if (i == j)
1093 Ci M Cj = VARYING if (i != j)
1095 VAL1 already contains the value we want for equivalent values. */
1097 else if (val1
->lattice_val
== CONSTANT
1098 && val2
->lattice_val
== CONSTANT
1099 && (TREE_CODE (val1
->value
) == ADDR_EXPR
1100 || TREE_CODE (val2
->value
) == ADDR_EXPR
))
      /* When unequal addresses are involved try meeting for
	 alignment.  */
1104 ccp_prop_value_t tem
= *val2
;
1105 if (TREE_CODE (val1
->value
) == ADDR_EXPR
)
1106 *val1
= get_value_for_expr (val1
->value
, true);
1107 if (TREE_CODE (val2
->value
) == ADDR_EXPR
)
1108 tem
= get_value_for_expr (val2
->value
, true);
1109 ccp_lattice_meet (val1
, &tem
);
1113 /* Any other combination is VARYING. */
1114 val1
->lattice_val
= VARYING
;
1116 val1
->value
= NULL_TREE
;
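/* Illustrative sketch, not part of the pass: the meet rules above on a
   toy lattice with plain ints standing in for constants.  The mask and
   address handling of the real ccp_lattice_meet is omitted.  */

enum ccp_toy_lattice { CCP_TOY_UNDEFINED, CCP_TOY_CONSTANT, CCP_TOY_VARYING };
struct ccp_toy_value { enum ccp_toy_lattice kind; int cst; };

static struct ccp_toy_value
ccp_toy_meet (struct ccp_toy_value a, struct ccp_toy_value b)
{
  struct ccp_toy_value varying = { CCP_TOY_VARYING, 0 };

  if (a.kind == CCP_TOY_UNDEFINED)	/* UNDEFINED M any = any.  */
    return b;
  if (b.kind == CCP_TOY_UNDEFINED)	/* any M UNDEFINED = any.  */
    return a;
  if (a.kind == CCP_TOY_VARYING || b.kind == CCP_TOY_VARYING)
    return varying;			/* any M VARYING = VARYING.  */
  if (a.cst == b.cst)			/* Ci M Cj = Ci if (i == j).  */
    return a;
  return varying;			/* Ci M Cj = VARYING if (i != j).  */
}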
/* Loop through the PHI_NODE's parameters for BLOCK and compare their
   lattice values to determine PHI_NODE's lattice value.  The value of a
   PHI node is determined by calling ccp_lattice_meet with all the arguments
   of the PHI node that are incoming via executable edges.  */
1126 enum ssa_prop_result
1127 ccp_propagate::visit_phi (gphi
*phi
)
1130 ccp_prop_value_t new_val
;
1132 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1134 fprintf (dump_file
, "\nVisiting PHI node: ");
1135 print_gimple_stmt (dump_file
, phi
, 0, dump_flags
);
1138 new_val
.lattice_val
= UNDEFINED
;
1139 new_val
.value
= NULL_TREE
;
1143 bool non_exec_edge
= false;
1144 for (i
= 0; i
< gimple_phi_num_args (phi
); i
++)
1146 /* Compute the meet operator over all the PHI arguments flowing
1147 through executable edges. */
1148 edge e
= gimple_phi_arg_edge (phi
, i
);
1150 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1153 "\tArgument #%d (%d -> %d %sexecutable)\n",
1154 i
, e
->src
->index
, e
->dest
->index
,
1155 (e
->flags
& EDGE_EXECUTABLE
) ? "" : "not ");
      /* If the incoming edge is executable, compute the meet operator for
	 the existing value of the PHI node and the current PHI argument.  */
1160 if (e
->flags
& EDGE_EXECUTABLE
)
1162 tree arg
= gimple_phi_arg (phi
, i
)->def
;
1163 ccp_prop_value_t arg_val
= get_value_for_expr (arg
, false);
1171 ccp_lattice_meet (&new_val
, &arg_val
);
1173 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1175 fprintf (dump_file
, "\t");
1176 print_generic_expr (dump_file
, arg
, dump_flags
);
1177 dump_lattice_value (dump_file
, "\tValue: ", arg_val
);
1178 fprintf (dump_file
, "\n");
1181 if (new_val
.lattice_val
== VARYING
)
1185 non_exec_edge
= true;
1188 /* In case there were non-executable edges and the value is a copy
1189 make sure its definition dominates the PHI node. */
1191 && new_val
.lattice_val
== CONSTANT
1192 && TREE_CODE (new_val
.value
) == SSA_NAME
1193 && ! SSA_NAME_IS_DEFAULT_DEF (new_val
.value
)
1194 && ! dominated_by_p (CDI_DOMINATORS
, gimple_bb (phi
),
1195 gimple_bb (SSA_NAME_DEF_STMT (new_val
.value
))))
1197 new_val
.lattice_val
= VARYING
;
1198 new_val
.value
= NULL_TREE
;
1202 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1204 dump_lattice_value (dump_file
, "\n PHI node value: ", new_val
);
1205 fprintf (dump_file
, "\n\n");
1208 /* Make the transition to the new value. */
1209 if (set_lattice_value (gimple_phi_result (phi
), &new_val
))
1211 if (new_val
.lattice_val
== VARYING
)
1212 return SSA_PROP_VARYING
;
1214 return SSA_PROP_INTERESTING
;
1217 return SSA_PROP_NOT_INTERESTING
;
1220 /* Return the constant value for OP or OP otherwise. */
1223 valueize_op (tree op
)
1225 if (TREE_CODE (op
) == SSA_NAME
)
1227 tree tem
= get_constant_value (op
);
1234 /* Return the constant value for OP, but signal to not follow SSA
1235 edges if the definition may be simulated again. */
1238 valueize_op_1 (tree op
)
1240 if (TREE_CODE (op
) == SSA_NAME
)
1242 /* If the definition may be simulated again we cannot follow
1243 this SSA edge as the SSA propagator does not necessarily
1244 re-visit the use. */
1245 gimple
*def_stmt
= SSA_NAME_DEF_STMT (op
);
1246 if (!gimple_nop_p (def_stmt
)
1247 && prop_simulate_again_p (def_stmt
))
1249 tree tem
= get_constant_value (op
);
/* CCP specific front-end to the non-destructive constant folding
   routines.

   Attempt to simplify the RHS of STMT knowing that one or more
   operands are constants.

   If simplification is possible, return the simplified RHS,
   otherwise return the original RHS or NULL_TREE.  */
1266 ccp_fold (gimple
*stmt
)
1268 location_t loc
= gimple_location (stmt
);
1269 switch (gimple_code (stmt
))
1273 /* Handle comparison operators that can appear in GIMPLE form. */
1274 tree op0
= valueize_op (gimple_cond_lhs (stmt
));
1275 tree op1
= valueize_op (gimple_cond_rhs (stmt
));
1276 enum tree_code code
= gimple_cond_code (stmt
);
1277 return fold_binary_loc (loc
, code
, boolean_type_node
, op0
, op1
);
1282 /* Return the constant switch index. */
1283 return valueize_op (gimple_switch_index (as_a
<gswitch
*> (stmt
)));
1288 return gimple_fold_stmt_to_constant_1 (stmt
,
1289 valueize_op
, valueize_op_1
);
1296 /* Apply the operation CODE in type TYPE to the value, mask pair
1297 RVAL and RMASK representing a value of type RTYPE and set
1298 the value, mask pair *VAL and *MASK to the result. */
1301 bit_value_unop (enum tree_code code
, signop type_sgn
, int type_precision
,
1302 widest_int
*val
, widest_int
*mask
,
1303 signop rtype_sgn
, int rtype_precision
,
1304 const widest_int
&rval
, const widest_int
&rmask
)
1315 widest_int temv
, temm
;
1316 /* Return ~rval + 1. */
1317 bit_value_unop (BIT_NOT_EXPR
, type_sgn
, type_precision
, &temv
, &temm
,
1318 type_sgn
, type_precision
, rval
, rmask
);
1319 bit_value_binop (PLUS_EXPR
, type_sgn
, type_precision
, val
, mask
,
1320 type_sgn
, type_precision
, temv
, temm
,
1321 type_sgn
, type_precision
, 1, 0);
1327 /* First extend mask and value according to the original type. */
1328 *mask
= wi::ext (rmask
, rtype_precision
, rtype_sgn
);
1329 *val
= wi::ext (rval
, rtype_precision
, rtype_sgn
);
1331 /* Then extend mask and value according to the target type. */
1332 *mask
= wi::ext (*mask
, type_precision
, type_sgn
);
1333 *val
= wi::ext (*val
, type_precision
, type_sgn
);
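/* Illustrative sketch, not part of the pass: the conversion handling
   above for an 8-bit signed source widened to 16 bits, using plain
   unsigned ints for the bit patterns.  Extending the mask with the
   same signedness as the value means that an unknown sign bit makes
   every new high bit unknown as well.  */

static unsigned int
ccp_toy_sext8 (unsigned int x)
{
  return (x & 0x80) ? ((x & 0xff) | 0xff00) : (x & 0xff);
}

static void
ccp_toy_widen_s8_to_16 (unsigned int val8, unsigned int mask8,
			unsigned int *val16, unsigned int *mask16)
{
  *val16 = ccp_toy_sext8 (val8);	/* Sign-extend the known bits.  */
  *mask16 = ccp_toy_sext8 (mask8);	/* And the unknown-bit mask.  */
}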
/* Apply the operation CODE in type TYPE to the value, mask pairs
   R1VAL, R1MASK and R2VAL, R2MASK representing values of type R1TYPE
   and R2TYPE and set the value, mask pair *VAL and *MASK to the result.  */
1348 bit_value_binop (enum tree_code code
, signop sgn
, int width
,
1349 widest_int
*val
, widest_int
*mask
,
1350 signop r1type_sgn
, int r1type_precision
,
1351 const widest_int
&r1val
, const widest_int
&r1mask
,
1352 signop r2type_sgn
, int r2type_precision
,
1353 const widest_int
&r2val
, const widest_int
&r2mask
)
1355 bool swap_p
= false;
1357 /* Assume we'll get a constant result. Use an initial non varying
1358 value, we fall back to varying in the end if necessary. */
1364 /* The mask is constant where there is a known not
1365 set bit, (m1 | m2) & ((v1 | m1) & (v2 | m2)) */
1366 *mask
= (r1mask
| r2mask
) & (r1val
| r1mask
) & (r2val
| r2mask
);
1367 *val
= r1val
& r2val
;
1371 /* The mask is constant where there is a known
1372 set bit, (m1 | m2) & ~((v1 & ~m1) | (v2 & ~m2)). */
1373 *mask
= wi::bit_and_not (r1mask
| r2mask
,
1374 wi::bit_and_not (r1val
, r1mask
)
1375 | wi::bit_and_not (r2val
, r2mask
));
1376 *val
= r1val
| r2val
;
1381 *mask
= r1mask
| r2mask
;
1382 *val
= r1val
^ r2val
;
1389 widest_int shift
= r2val
;
1397 if (wi::neg_p (shift
))
1400 if (code
== RROTATE_EXPR
)
1401 code
= LROTATE_EXPR
;
1403 code
= RROTATE_EXPR
;
1405 if (code
== RROTATE_EXPR
)
1407 *mask
= wi::rrotate (r1mask
, shift
, width
);
1408 *val
= wi::rrotate (r1val
, shift
, width
);
1412 *mask
= wi::lrotate (r1mask
, shift
, width
);
1413 *val
= wi::lrotate (r1val
, shift
, width
);
	/* ???  We can handle partially known shift counts if we know
	   the sign of the shift count.  That way we can tell that
	   (x << (y | 8)) & 255 is zero.  */
1426 widest_int shift
= r2val
;
1434 if (wi::neg_p (shift
))
1437 if (code
== RSHIFT_EXPR
)
1442 if (code
== RSHIFT_EXPR
)
1444 *mask
= wi::rshift (wi::ext (r1mask
, width
, sgn
), shift
, sgn
);
1445 *val
= wi::rshift (wi::ext (r1val
, width
, sgn
), shift
, sgn
);
1449 *mask
= wi::ext (r1mask
<< shift
, width
, sgn
);
1450 *val
= wi::ext (r1val
<< shift
, width
, sgn
);
1457 case POINTER_PLUS_EXPR
:
1459 /* Do the addition with unknown bits set to zero, to give carry-ins of
1460 zero wherever possible. */
1461 widest_int lo
= (wi::bit_and_not (r1val
, r1mask
)
1462 + wi::bit_and_not (r2val
, r2mask
));
1463 lo
= wi::ext (lo
, width
, sgn
);
1464 /* Do the addition with unknown bits set to one, to give carry-ins of
1465 one wherever possible. */
1466 widest_int hi
= (r1val
| r1mask
) + (r2val
| r2mask
);
1467 hi
= wi::ext (hi
, width
, sgn
);
1468 /* Each bit in the result is known if (a) the corresponding bits in
1469 both inputs are known, and (b) the carry-in to that bit position
1470 is known. We can check condition (b) by seeing if we got the same
1471 result with minimised carries as with maximised carries. */
1472 *mask
= r1mask
| r2mask
| (lo
^ hi
);
1473 *mask
= wi::ext (*mask
, width
, sgn
);
1474 /* It shouldn't matter whether we choose lo or hi here. */
1481 widest_int temv
, temm
;
1482 bit_value_unop (NEGATE_EXPR
, r2type_sgn
, r2type_precision
, &temv
, &temm
,
1483 r2type_sgn
, r2type_precision
, r2val
, r2mask
);
1484 bit_value_binop (PLUS_EXPR
, sgn
, width
, val
, mask
,
1485 r1type_sgn
, r1type_precision
, r1val
, r1mask
,
1486 r2type_sgn
, r2type_precision
, temv
, temm
);
1492 /* Just track trailing zeros in both operands and transfer
1493 them to the other. */
1494 int r1tz
= wi::ctz (r1val
| r1mask
);
1495 int r2tz
= wi::ctz (r2val
| r2mask
);
1496 if (r1tz
+ r2tz
>= width
)
1501 else if (r1tz
+ r2tz
> 0)
1503 *mask
= wi::ext (wi::mask
<widest_int
> (r1tz
+ r2tz
, true),
1513 widest_int m
= r1mask
| r2mask
;
1514 if (wi::bit_and_not (r1val
, m
) != wi::bit_and_not (r2val
, m
))
1517 *val
= ((code
== EQ_EXPR
) ? 0 : 1);
1521 /* We know the result of a comparison is always one or zero. */
1531 code
= swap_tree_comparison (code
);
1538 const widest_int
&o1val
= swap_p
? r2val
: r1val
;
1539 const widest_int
&o1mask
= swap_p
? r2mask
: r1mask
;
1540 const widest_int
&o2val
= swap_p
? r1val
: r2val
;
1541 const widest_int
&o2mask
= swap_p
? r1mask
: r2mask
;
1543 /* If the most significant bits are not known we know nothing. */
1544 if (wi::neg_p (o1mask
) || wi::neg_p (o2mask
))
1547 /* For comparisons the signedness is in the comparison operands. */
	/* If we know the most significant bits we know the value
	   ranges by means of treating varying bits as zero
	   or one.  Do a cross comparison of the max/min pairs.  */
1553 maxmin
= wi::cmp (o1val
| o1mask
,
1554 wi::bit_and_not (o2val
, o2mask
), sgn
);
1555 minmax
= wi::cmp (wi::bit_and_not (o1val
, o1mask
),
1556 o2val
| o2mask
, sgn
);
1557 if (maxmin
< 0) /* o1 is less than o2. */
1562 else if (minmax
> 0) /* o1 is not less or equal to o2. */
1567 else if (maxmin
== minmax
) /* o1 and o2 are equal. */
1569 /* This probably should never happen as we'd have
1570 folded the thing during fully constant value folding. */
1572 *val
= (code
== LE_EXPR
? 1 : 0);
1576 /* We know the result of a comparison is always one or zero. */
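/* Illustrative sketch, not part of the pass: the PLUS_EXPR known-bits
   computation above, on plain unsigned ints.  Adding once with every
   unknown bit cleared and once with every unknown bit set bounds the
   carries; bits where the two sums differ have an unknown carry-in.  */

static void
ccp_toy_bit_value_plus (unsigned int v1, unsigned int m1,
			unsigned int v2, unsigned int m2,
			unsigned int *val, unsigned int *mask)
{
  unsigned int lo = (v1 & ~m1) + (v2 & ~m2);	/* Minimal carries.  */
  unsigned int hi = (v1 | m1) + (v2 | m2);	/* Maximal carries.  */

  /* A result bit is unknown if either input bit is unknown or the
     carry into that position is.  */
  *mask = m1 | m2 | (lo ^ hi);
  /* The unknown bits are covered by *MASK, so either sum will do.  */
  *val = lo;
}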
1587 /* Return the propagation value when applying the operation CODE to
1588 the value RHS yielding type TYPE. */
1590 static ccp_prop_value_t
1591 bit_value_unop (enum tree_code code
, tree type
, tree rhs
)
1593 ccp_prop_value_t rval
= get_value_for_expr (rhs
, true);
1594 widest_int value
, mask
;
1595 ccp_prop_value_t val
;
1597 if (rval
.lattice_val
== UNDEFINED
)
1600 gcc_assert ((rval
.lattice_val
== CONSTANT
1601 && TREE_CODE (rval
.value
) == INTEGER_CST
)
1602 || wi::sext (rval
.mask
, TYPE_PRECISION (TREE_TYPE (rhs
))) == -1);
1603 bit_value_unop (code
, TYPE_SIGN (type
), TYPE_PRECISION (type
), &value
, &mask
,
1604 TYPE_SIGN (TREE_TYPE (rhs
)), TYPE_PRECISION (TREE_TYPE (rhs
)),
1605 value_to_wide_int (rval
), rval
.mask
);
1606 if (wi::sext (mask
, TYPE_PRECISION (type
)) != -1)
1608 val
.lattice_val
= CONSTANT
;
1610 /* ??? Delay building trees here. */
1611 val
.value
= wide_int_to_tree (type
, value
);
1615 val
.lattice_val
= VARYING
;
1616 val
.value
= NULL_TREE
;
1622 /* Return the propagation value when applying the operation CODE to
1623 the values RHS1 and RHS2 yielding type TYPE. */
1625 static ccp_prop_value_t
1626 bit_value_binop (enum tree_code code
, tree type
, tree rhs1
, tree rhs2
)
1628 ccp_prop_value_t r1val
= get_value_for_expr (rhs1
, true);
1629 ccp_prop_value_t r2val
= get_value_for_expr (rhs2
, true);
1630 widest_int value
, mask
;
1631 ccp_prop_value_t val
;
1633 if (r1val
.lattice_val
== UNDEFINED
1634 || r2val
.lattice_val
== UNDEFINED
)
1636 val
.lattice_val
= VARYING
;
1637 val
.value
= NULL_TREE
;
1642 gcc_assert ((r1val
.lattice_val
== CONSTANT
1643 && TREE_CODE (r1val
.value
) == INTEGER_CST
)
1644 || wi::sext (r1val
.mask
,
1645 TYPE_PRECISION (TREE_TYPE (rhs1
))) == -1);
1646 gcc_assert ((r2val
.lattice_val
== CONSTANT
1647 && TREE_CODE (r2val
.value
) == INTEGER_CST
)
1648 || wi::sext (r2val
.mask
,
1649 TYPE_PRECISION (TREE_TYPE (rhs2
))) == -1);
1650 bit_value_binop (code
, TYPE_SIGN (type
), TYPE_PRECISION (type
), &value
, &mask
,
1651 TYPE_SIGN (TREE_TYPE (rhs1
)), TYPE_PRECISION (TREE_TYPE (rhs1
)),
1652 value_to_wide_int (r1val
), r1val
.mask
,
1653 TYPE_SIGN (TREE_TYPE (rhs2
)), TYPE_PRECISION (TREE_TYPE (rhs2
)),
1654 value_to_wide_int (r2val
), r2val
.mask
);
1656 /* (x * x) & 2 == 0. */
1657 if (code
== MULT_EXPR
&& rhs1
== rhs2
&& TYPE_PRECISION (type
) > 1)
1660 if (wi::sext (mask
, TYPE_PRECISION (type
)) != -1)
1661 value
= wi::bit_and_not (value
, m
);
1664 mask
= wi::bit_and_not (mask
, m
);
1667 if (wi::sext (mask
, TYPE_PRECISION (type
)) != -1)
1669 val
.lattice_val
= CONSTANT
;
1671 /* ??? Delay building trees here. */
1672 val
.value
= wide_int_to_tree (type
, value
);
1676 val
.lattice_val
= VARYING
;
1677 val
.value
= NULL_TREE
;
1683 /* Return the propagation value for __builtin_assume_aligned
1684 and functions with assume_aligned or alloc_aligned attribute.
1685 For __builtin_assume_aligned, ATTR is NULL_TREE,
1686 for assume_aligned attribute ATTR is non-NULL and ALLOC_ALIGNED
1687 is false, for alloc_aligned attribute ATTR is non-NULL and
1688 ALLOC_ALIGNED is true. */
1690 static ccp_prop_value_t
1691 bit_value_assume_aligned (gimple
*stmt
, tree attr
, ccp_prop_value_t ptrval
,
1694 tree align
, misalign
= NULL_TREE
, type
;
1695 unsigned HOST_WIDE_INT aligni
, misaligni
= 0;
1696 ccp_prop_value_t alignval
;
1697 widest_int value
, mask
;
1698 ccp_prop_value_t val
;
1700 if (attr
== NULL_TREE
)
1702 tree ptr
= gimple_call_arg (stmt
, 0);
1703 type
= TREE_TYPE (ptr
);
1704 ptrval
= get_value_for_expr (ptr
, true);
1708 tree lhs
= gimple_call_lhs (stmt
);
1709 type
= TREE_TYPE (lhs
);
1712 if (ptrval
.lattice_val
== UNDEFINED
)
1714 gcc_assert ((ptrval
.lattice_val
== CONSTANT
1715 && TREE_CODE (ptrval
.value
) == INTEGER_CST
)
1716 || wi::sext (ptrval
.mask
, TYPE_PRECISION (type
)) == -1);
1717 if (attr
== NULL_TREE
)
1719 /* Get aligni and misaligni from __builtin_assume_aligned. */
1720 align
= gimple_call_arg (stmt
, 1);
1721 if (!tree_fits_uhwi_p (align
))
1723 aligni
= tree_to_uhwi (align
);
1724 if (gimple_call_num_args (stmt
) > 2)
1726 misalign
= gimple_call_arg (stmt
, 2);
1727 if (!tree_fits_uhwi_p (misalign
))
1729 misaligni
= tree_to_uhwi (misalign
);
1734 /* Get aligni and misaligni from assume_aligned or
1735 alloc_align attributes. */
1736 if (TREE_VALUE (attr
) == NULL_TREE
)
1738 attr
= TREE_VALUE (attr
);
1739 align
= TREE_VALUE (attr
);
1740 if (!tree_fits_uhwi_p (align
))
1742 aligni
= tree_to_uhwi (align
);
1745 if (aligni
== 0 || aligni
> gimple_call_num_args (stmt
))
1747 align
= gimple_call_arg (stmt
, aligni
- 1);
1748 if (!tree_fits_uhwi_p (align
))
1750 aligni
= tree_to_uhwi (align
);
1752 else if (TREE_CHAIN (attr
) && TREE_VALUE (TREE_CHAIN (attr
)))
1754 misalign
= TREE_VALUE (TREE_CHAIN (attr
));
1755 if (!tree_fits_uhwi_p (misalign
))
1757 misaligni
= tree_to_uhwi (misalign
);
1760 if (aligni
<= 1 || (aligni
& (aligni
- 1)) != 0 || misaligni
>= aligni
)
1763 align
= build_int_cst_type (type
, -aligni
);
1764 alignval
= get_value_for_expr (align
, true);
1765 bit_value_binop (BIT_AND_EXPR
, TYPE_SIGN (type
), TYPE_PRECISION (type
), &value
, &mask
,
1766 TYPE_SIGN (type
), TYPE_PRECISION (type
), value_to_wide_int (ptrval
), ptrval
.mask
,
1767 TYPE_SIGN (type
), TYPE_PRECISION (type
), value_to_wide_int (alignval
), alignval
.mask
);
1769 if (wi::sext (mask
, TYPE_PRECISION (type
)) != -1)
1771 val
.lattice_val
= CONSTANT
;
1773 gcc_assert ((mask
.to_uhwi () & (aligni
- 1)) == 0);
1774 gcc_assert ((value
.to_uhwi () & (aligni
- 1)) == 0);
1776 /* ??? Delay building trees here. */
1777 val
.value
= wide_int_to_tree (type
, value
);
1781 val
.lattice_val
= VARYING
;
1782 val
.value
= NULL_TREE
;
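/* Illustrative sketch, not part of the pass: the effect of
   __builtin_assume_aligned on a pointer's value/mask pair, with plain
   unsigned ints and the misalignment argument omitted.  ANDing with
   -ALIGN is what the bit_value_binop call above computes when its
   second operand is the constant -ALIGN.  */

static void
ccp_toy_assume_aligned (unsigned int ptr_val, unsigned int ptr_mask,
			unsigned int align,
			unsigned int *val, unsigned int *mask)
{
  unsigned int keep = -align;	/* E.g. align 16 gives ...fffffff0.  */

  /* The low log2(ALIGN) bits become known zeros; the rest keeps
     whatever was known about the pointer before.  */
  *val = ptr_val & keep;
  *mask = ptr_mask & keep;
}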
1788 /* Evaluate statement STMT.
1789 Valid only for assignments, calls, conditionals, and switches. */
1791 static ccp_prop_value_t
1792 evaluate_stmt (gimple
*stmt
)
1794 ccp_prop_value_t val
;
1795 tree simplified
= NULL_TREE
;
1796 ccp_lattice_t likelyvalue
= likely_value (stmt
);
1797 bool is_constant
= false;
1799 bool ignore_return_flags
= false;
1801 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1803 fprintf (dump_file
, "which is likely ");
1804 switch (likelyvalue
)
1807 fprintf (dump_file
, "CONSTANT");
1810 fprintf (dump_file
, "UNDEFINED");
1813 fprintf (dump_file
, "VARYING");
1817 fprintf (dump_file
, "\n");
1820 /* If the statement is likely to have a CONSTANT result, then try
1821 to fold the statement to determine the constant value. */
1822 /* FIXME. This is the only place that we call ccp_fold.
1823 Since likely_value never returns CONSTANT for calls, we will
1824 not attempt to fold them, including builtins that may profit. */
1825 if (likelyvalue
== CONSTANT
)
1827 fold_defer_overflow_warnings ();
1828 simplified
= ccp_fold (stmt
);
1830 && TREE_CODE (simplified
) == SSA_NAME
)
1832 /* We may not use values of something that may be simulated again,
1833 see valueize_op_1. */
1834 if (SSA_NAME_IS_DEFAULT_DEF (simplified
)
1835 || ! prop_simulate_again_p (SSA_NAME_DEF_STMT (simplified
)))
1837 ccp_prop_value_t
*val
= get_value (simplified
);
1838 if (val
&& val
->lattice_val
!= VARYING
)
1840 fold_undefer_overflow_warnings (true, stmt
, 0);
1845 /* We may also not place a non-valueized copy in the lattice
1846 as that might become stale if we never re-visit this stmt. */
1847 simplified
= NULL_TREE
;
1849 is_constant
= simplified
&& is_gimple_min_invariant (simplified
);
1850 fold_undefer_overflow_warnings (is_constant
, stmt
, 0);
1853 /* The statement produced a constant value. */
1854 val
.lattice_val
= CONSTANT
;
1855 val
.value
= simplified
;
1860 /* If the statement is likely to have a VARYING result, then do not
1861 bother folding the statement. */
1862 else if (likelyvalue
== VARYING
)
1864 enum gimple_code code
= gimple_code (stmt
);
1865 if (code
== GIMPLE_ASSIGN
)
1867 enum tree_code subcode
= gimple_assign_rhs_code (stmt
);
	  /* Other cases cannot satisfy is_gimple_min_invariant
	     without folding.  */
1871 if (get_gimple_rhs_class (subcode
) == GIMPLE_SINGLE_RHS
)
1872 simplified
= gimple_assign_rhs1 (stmt
);
1874 else if (code
== GIMPLE_SWITCH
)
1875 simplified
= gimple_switch_index (as_a
<gswitch
*> (stmt
));
1877 /* These cannot satisfy is_gimple_min_invariant without folding. */
1878 gcc_assert (code
== GIMPLE_CALL
|| code
== GIMPLE_COND
);
1879 is_constant
= simplified
&& is_gimple_min_invariant (simplified
);
1882 /* The statement produced a constant value. */
1883 val
.lattice_val
= CONSTANT
;
1884 val
.value
= simplified
;
1888 /* If the statement result is likely UNDEFINED, make it so. */
1889 else if (likelyvalue
== UNDEFINED
)
1891 val
.lattice_val
= UNDEFINED
;
1892 val
.value
= NULL_TREE
;
1897 /* Resort to simplification for bitwise tracking. */
1898 if (flag_tree_bit_ccp
1899 && (likelyvalue
== CONSTANT
|| is_gimple_call (stmt
)
1900 || (gimple_assign_single_p (stmt
)
1901 && gimple_assign_rhs_code (stmt
) == ADDR_EXPR
))
1904 enum gimple_code code
= gimple_code (stmt
);
1905 val
.lattice_val
= VARYING
;
1906 val
.value
= NULL_TREE
;
1908 if (code
== GIMPLE_ASSIGN
)
1910 enum tree_code subcode
= gimple_assign_rhs_code (stmt
);
1911 tree rhs1
= gimple_assign_rhs1 (stmt
);
1912 tree lhs
= gimple_assign_lhs (stmt
);
1913 if ((INTEGRAL_TYPE_P (TREE_TYPE (lhs
))
1914 || POINTER_TYPE_P (TREE_TYPE (lhs
)))
1915 && (INTEGRAL_TYPE_P (TREE_TYPE (rhs1
))
1916 || POINTER_TYPE_P (TREE_TYPE (rhs1
))))
1917 switch (get_gimple_rhs_class (subcode
))
1919 case GIMPLE_SINGLE_RHS
:
1920 val
= get_value_for_expr (rhs1
, true);
1923 case GIMPLE_UNARY_RHS
:
1924 val
= bit_value_unop (subcode
, TREE_TYPE (lhs
), rhs1
);
1927 case GIMPLE_BINARY_RHS
:
1928 val
= bit_value_binop (subcode
, TREE_TYPE (lhs
), rhs1
,
1929 gimple_assign_rhs2 (stmt
));
1935 else if (code
== GIMPLE_COND
)
1937 enum tree_code code
= gimple_cond_code (stmt
);
1938 tree rhs1
= gimple_cond_lhs (stmt
);
1939 tree rhs2
= gimple_cond_rhs (stmt
);
1940 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1
))
1941 || POINTER_TYPE_P (TREE_TYPE (rhs1
)))
1942 val
= bit_value_binop (code
, TREE_TYPE (rhs1
), rhs1
, rhs2
);
1944 else if (gimple_call_builtin_p (stmt
, BUILT_IN_NORMAL
))
1946 tree fndecl
= gimple_call_fndecl (stmt
);
1947 switch (DECL_FUNCTION_CODE (fndecl
))
1949 case BUILT_IN_MALLOC
:
1950 case BUILT_IN_REALLOC
:
1951 case BUILT_IN_CALLOC
:
1952 case BUILT_IN_STRDUP
:
1953 case BUILT_IN_STRNDUP
:
1954 val
.lattice_val
= CONSTANT
;
1955 val
.value
= build_int_cst (TREE_TYPE (gimple_get_lhs (stmt
)), 0);
1956 val
.mask
= ~((HOST_WIDE_INT
) MALLOC_ABI_ALIGNMENT
1957 / BITS_PER_UNIT
- 1);
1960 CASE_BUILT_IN_ALLOCA
:
1961 align
= (DECL_FUNCTION_CODE (fndecl
) == BUILT_IN_ALLOCA
1963 : TREE_INT_CST_LOW (gimple_call_arg (stmt
, 1)));
1964 val
.lattice_val
= CONSTANT
;
1965 val
.value
= build_int_cst (TREE_TYPE (gimple_get_lhs (stmt
)), 0);
1966 val
.mask
= ~((HOST_WIDE_INT
) align
/ BITS_PER_UNIT
- 1);
1969 case BUILT_IN_ASSUME_ALIGNED
:
1970 val
= bit_value_assume_aligned (stmt
, NULL_TREE
, val
, false);
1971 ignore_return_flags
= true;
1974 case BUILT_IN_ALIGNED_ALLOC
:
1976 tree align
= get_constant_value (gimple_call_arg (stmt
, 0));
1978 && tree_fits_uhwi_p (align
))
1980 unsigned HOST_WIDE_INT aligni
= tree_to_uhwi (align
);
1982 /* align must be power-of-two */
1983 && (aligni
& (aligni
- 1)) == 0)
1985 val
.lattice_val
= CONSTANT
;
1986 val
.value
= build_int_cst (ptr_type_node
, 0);
1993 case BUILT_IN_BSWAP16
:
1994 case BUILT_IN_BSWAP32
:
1995 case BUILT_IN_BSWAP64
:
1996 case BUILT_IN_BSWAP128
:
1997 val
= get_value_for_expr (gimple_call_arg (stmt
, 0), true);
1998 if (val
.lattice_val
== UNDEFINED
)
2000 else if (val
.lattice_val
== CONSTANT
2002 && TREE_CODE (val
.value
) == INTEGER_CST
)
2004 tree type
= TREE_TYPE (gimple_call_lhs (stmt
));
2005 int prec
= TYPE_PRECISION (type
);
2006 wide_int wval
= wi::to_wide (val
.value
);
2008 = wide_int_to_tree (type
,
2009 wide_int::from (wval
, prec
,
2010 UNSIGNED
).bswap ());
2012 = widest_int::from (wide_int::from (val
.mask
, prec
,
2015 if (wi::sext (val
.mask
, prec
) != -1)
2018 val
.lattice_val
= VARYING
;
2019 val
.value
= NULL_TREE
;
2026 if (is_gimple_call (stmt
) && gimple_call_lhs (stmt
))
2028 tree fntype
= gimple_call_fntype (stmt
);
2031 tree attrs
= lookup_attribute ("assume_aligned",
2032 TYPE_ATTRIBUTES (fntype
));
2034 val
= bit_value_assume_aligned (stmt
, attrs
, val
, false);
2035 attrs
= lookup_attribute ("alloc_align",
2036 TYPE_ATTRIBUTES (fntype
));
2038 val
= bit_value_assume_aligned (stmt
, attrs
, val
, true);
2040 int flags
= ignore_return_flags
2041 ? 0 : gimple_call_return_flags (as_a
<gcall
*> (stmt
));
2042 if (flags
& ERF_RETURNS_ARG
2043 && (flags
& ERF_RETURN_ARG_MASK
) < gimple_call_num_args (stmt
))
2045 val
= get_value_for_expr
2046 (gimple_call_arg (stmt
,
2047 flags
& ERF_RETURN_ARG_MASK
), true);
2050 is_constant
= (val
.lattice_val
== CONSTANT
);
2053 if (flag_tree_bit_ccp
2054 && ((is_constant
&& TREE_CODE (val
.value
) == INTEGER_CST
)
2056 && gimple_get_lhs (stmt
)
2057 && TREE_CODE (gimple_get_lhs (stmt
)) == SSA_NAME
)
2059 tree lhs
= gimple_get_lhs (stmt
);
2060 wide_int nonzero_bits
= get_nonzero_bits (lhs
);
2061 if (nonzero_bits
!= -1)
2065 val
.lattice_val
= CONSTANT
;
2066 val
.value
= build_zero_cst (TREE_TYPE (lhs
));
2067 val
.mask
= extend_mask (nonzero_bits
, TYPE_SIGN (TREE_TYPE (lhs
)));
2072 if (wi::bit_and_not (wi::to_wide (val
.value
), nonzero_bits
) != 0)
2073 val
.value
= wide_int_to_tree (TREE_TYPE (lhs
),
2075 & wi::to_wide (val
.value
));
2076 if (nonzero_bits
== 0)
2079 val
.mask
= val
.mask
& extend_mask (nonzero_bits
,
2080 TYPE_SIGN (TREE_TYPE (lhs
)));
2085 /* The statement produced a nonconstant value. */
2088 /* The statement produced a copy. */
2089 if (simplified
&& TREE_CODE (simplified
) == SSA_NAME
2090 && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (simplified
))
2092 val
.lattice_val
= CONSTANT
;
2093 val
.value
= simplified
;
2096 /* The statement is VARYING. */
2099 val
.lattice_val
= VARYING
;
2100 val
.value
= NULL_TREE
;
2108 typedef hash_table
<nofree_ptr_hash
<gimple
> > gimple_htab
;
2110 /* Given a BUILT_IN_STACK_SAVE value SAVED_VAL, insert a clobber of VAR before
2111 each matching BUILT_IN_STACK_RESTORE. Mark visited phis in VISITED. */
2114 insert_clobber_before_stack_restore (tree saved_val
, tree var
,
2115 gimple_htab
**visited
)
2118 gassign
*clobber_stmt
;
2120 imm_use_iterator iter
;
2121 gimple_stmt_iterator i
;
2124 FOR_EACH_IMM_USE_STMT (stmt
, iter
, saved_val
)
2125 if (gimple_call_builtin_p (stmt
, BUILT_IN_STACK_RESTORE
))
2127 clobber
= build_clobber (TREE_TYPE (var
));
2128 clobber_stmt
= gimple_build_assign (var
, clobber
);
2130 i
= gsi_for_stmt (stmt
);
2131 gsi_insert_before (&i
, clobber_stmt
, GSI_SAME_STMT
);
2133 else if (gimple_code (stmt
) == GIMPLE_PHI
)
2136 *visited
= new gimple_htab (10);
2138 slot
= (*visited
)->find_slot (stmt
, INSERT
);
2143 insert_clobber_before_stack_restore (gimple_phi_result (stmt
), var
,
2146 else if (gimple_assign_ssa_name_copy_p (stmt
))
2147 insert_clobber_before_stack_restore (gimple_assign_lhs (stmt
), var
,
2151 /* Advance the iterator to the previous non-debug gimple statement in the same
2152 or dominating basic block. */
2155 gsi_prev_dom_bb_nondebug (gimple_stmt_iterator
*i
)
2159 gsi_prev_nondebug (i
);
2160 while (gsi_end_p (*i
))
2162 dom
= get_immediate_dominator (CDI_DOMINATORS
, gsi_bb (*i
));
2163 if (dom
== NULL
|| dom
== ENTRY_BLOCK_PTR_FOR_FN (cfun
))
2166 *i
= gsi_last_bb (dom
);
2170 /* Find a BUILT_IN_STACK_SAVE dominating gsi_stmt (I), and insert
2171 a clobber of VAR before each matching BUILT_IN_STACK_RESTORE.
2173 It is possible that BUILT_IN_STACK_SAVE cannot be found in a dominator when
2174 a previous pass (such as DOM) duplicated it along multiple paths to a BB.
2175 In that case the function gives up without inserting the clobbers. */
2178 insert_clobbers_for_var (gimple_stmt_iterator i
, tree var
)
2182 gimple_htab
*visited
= NULL
;
2184 for (; !gsi_end_p (i
); gsi_prev_dom_bb_nondebug (&i
))
2186 stmt
= gsi_stmt (i
);
2188 if (!gimple_call_builtin_p (stmt
, BUILT_IN_STACK_SAVE
))
2191 saved_val
= gimple_call_lhs (stmt
);
2192 if (saved_val
== NULL_TREE
)
2195 insert_clobber_before_stack_restore (saved_val
, var
, &visited
);
/* Detects a __builtin_alloca_with_align with constant size argument.  Declares
   a fixed-size array and returns the address, if found, otherwise returns
   NULL_TREE.  */
2207 fold_builtin_alloca_with_align (gimple
*stmt
)
2209 unsigned HOST_WIDE_INT size
, threshold
, n_elem
;
2210 tree lhs
, arg
, block
, var
, elem_type
, array_type
;
2213 lhs
= gimple_call_lhs (stmt
);
2214 if (lhs
== NULL_TREE
)
2217 /* Detect constant argument. */
2218 arg
= get_constant_value (gimple_call_arg (stmt
, 0));
2219 if (arg
== NULL_TREE
2220 || TREE_CODE (arg
) != INTEGER_CST
2221 || !tree_fits_uhwi_p (arg
))
2224 size
= tree_to_uhwi (arg
);
2226 /* Heuristic: don't fold large allocas. */
2227 threshold
= (unsigned HOST_WIDE_INT
)param_large_stack_frame
;
2228 /* In case the alloca is located at function entry, it has the same lifetime
2229 as a declared array, so we allow a larger size. */
2230 block
= gimple_block (stmt
);
2231 if (!(cfun
->after_inlining
2233 && TREE_CODE (BLOCK_SUPERCONTEXT (block
)) == FUNCTION_DECL
))
2235 if (size
> threshold
)
2238 /* We have to be able to move points-to info. We used to assert
2239 that we can but IPA PTA might end up with two UIDs here
2240 as it might need to handle more than one instance being
2241 live at the same time. Instead of trying to detect this case
2242 (using the first UID would be OK) just give up for now. */
2243 struct ptr_info_def
*pi
= SSA_NAME_PTR_INFO (lhs
);
2247 && !pt_solution_singleton_or_null_p (&pi
->pt
, &uid
))
2250 /* Declare array. */
2251 elem_type
= build_nonstandard_integer_type (BITS_PER_UNIT
, 1);
2252 n_elem
= size
* 8 / BITS_PER_UNIT
;
2253 array_type
= build_array_type_nelts (elem_type
, n_elem
);
2255 if (tree ssa_name
= SSA_NAME_IDENTIFIER (lhs
))
2257 /* Give the temporary a name derived from the name of the VLA
2258 declaration so it can be referenced in diagnostics. */
2259 const char *name
= IDENTIFIER_POINTER (ssa_name
);
2260 var
= create_tmp_var (array_type
, name
);
2263 var
= create_tmp_var (array_type
);
2265 if (gimple
*lhsdef
= SSA_NAME_DEF_STMT (lhs
))
2267 /* Set the temporary's location to that of the VLA declaration
2268 so it can be pointed to in diagnostics. */
2269 location_t loc
= gimple_location (lhsdef
);
2270 DECL_SOURCE_LOCATION (var
) = loc
;
2273 SET_DECL_ALIGN (var
, TREE_INT_CST_LOW (gimple_call_arg (stmt
, 1)));
2275 SET_DECL_PT_UID (var
, uid
);
2277 /* Fold alloca to the address of the array. */
2278 return fold_convert (TREE_TYPE (lhs
), build_fold_addr_expr (var
));
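
/* Illustrative input and output (made-up names, not from a testcase): once
   CCP has shown the size argument to be the constant 16,

     _3 = __builtin_alloca_with_align (16, 64);

   is folded into the address of a fresh 16-byte array whose DECL_ALIGN is
   taken from the second argument, conceptually

     unsigned char buf[16];
     _3 = &buf;

   so the dynamic stack adjustment disappears entirely.  */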
/* Fold the stmt at *GSI with CCP specific information that propagating
   and regular folding does not catch.  */

bool
ccp_folder::fold_stmt (gimple_stmt_iterator *gsi)
{
  gimple *stmt = gsi_stmt (*gsi);

  switch (gimple_code (stmt))
    {
    case GIMPLE_COND:
      {
	gcond *cond_stmt = as_a <gcond *> (stmt);
	ccp_prop_value_t val;
	/* Statement evaluation will handle type mismatches in constants
	   more gracefully than the final propagation.  This allows us to
	   fold more conditionals here.  */
	val = evaluate_stmt (stmt);
	if (val.lattice_val != CONSTANT
	    || val.mask != 0)
	  return false;

	if (dump_file)
	  {
	    fprintf (dump_file, "Folding predicate ");
	    print_gimple_expr (dump_file, stmt, 0);
	    fprintf (dump_file, " to ");
	    print_generic_expr (dump_file, val.value);
	    fprintf (dump_file, "\n");
	  }

	if (integer_zerop (val.value))
	  gimple_cond_make_false (cond_stmt);
	else
	  gimple_cond_make_true (cond_stmt);

	return true;
      }

    case GIMPLE_CALL:
      {
	tree lhs = gimple_call_lhs (stmt);
	int flags = gimple_call_flags (stmt);
	tree val;
	tree argt;
	bool changed = false;
	unsigned i;

	/* If the call was folded into a constant make sure it goes
	   away even if we cannot propagate into all uses because of
	   type issues.  */
	if (lhs
	    && TREE_CODE (lhs) == SSA_NAME
	    && (val = get_constant_value (lhs))
	    /* Don't optimize away calls that have side-effects.  */
	    && (flags & (ECF_CONST|ECF_PURE)) != 0
	    && (flags & ECF_LOOPING_CONST_OR_PURE) == 0)
	  {
	    tree new_rhs = unshare_expr (val);
	    bool res;
	    if (!useless_type_conversion_p (TREE_TYPE (lhs),
					    TREE_TYPE (new_rhs)))
	      new_rhs = fold_convert (TREE_TYPE (lhs), new_rhs);
	    res = update_call_from_tree (gsi, new_rhs);
	    gcc_assert (res);
	    return true;
	  }

	/* Internal calls provide no argument types, so the extra laxity
	   for normal calls does not apply.  */
	if (gimple_call_internal_p (stmt))
	  return false;

	/* The heuristic of fold_builtin_alloca_with_align differs before and
	   after inlining, so we don't require the arg to be changed into a
	   constant for folding, but just to be constant.  */
	if (gimple_call_builtin_p (stmt, BUILT_IN_ALLOCA_WITH_ALIGN)
	    || gimple_call_builtin_p (stmt, BUILT_IN_ALLOCA_WITH_ALIGN_AND_MAX))
	  {
	    tree new_rhs = fold_builtin_alloca_with_align (stmt);
	    if (new_rhs)
	      {
		bool res = update_call_from_tree (gsi, new_rhs);
		tree var = TREE_OPERAND (TREE_OPERAND (new_rhs, 0), 0);
		gcc_assert (res);
		insert_clobbers_for_var (*gsi, var);
		return true;
	      }
	  }

	/* If there's no extra info from an assume_aligned call,
	   drop it so it doesn't act as otherwise useless dataflow
	   barrier.  */
	if (gimple_call_builtin_p (stmt, BUILT_IN_ASSUME_ALIGNED))
	  {
	    tree ptr = gimple_call_arg (stmt, 0);
	    ccp_prop_value_t ptrval = get_value_for_expr (ptr, true);
	    if (ptrval.lattice_val == CONSTANT
		&& TREE_CODE (ptrval.value) == INTEGER_CST
		&& ptrval.mask != 0)
	      {
		ccp_prop_value_t val
		  = bit_value_assume_aligned (stmt, NULL_TREE, ptrval, false);
		unsigned int ptralign = least_bit_hwi (ptrval.mask.to_uhwi ());
		unsigned int align = least_bit_hwi (val.mask.to_uhwi ());
		if (ptralign == align
		    && ((TREE_INT_CST_LOW (ptrval.value) & (align - 1))
			== (TREE_INT_CST_LOW (val.value) & (align - 1))))
		  {
		    bool res = update_call_from_tree (gsi, ptr);
		    gcc_assert (res);
		    return true;
		  }
	      }
	  }

	/* Propagate into the call arguments.  Compared to replace_uses_in
	   this can use the argument slot types for type verification
	   instead of the current argument type.  We also can safely
	   drop qualifiers here as we are dealing with constants anyway.  */
	argt = TYPE_ARG_TYPES (gimple_call_fntype (stmt));
	for (i = 0; i < gimple_call_num_args (stmt) && argt;
	     ++i, argt = TREE_CHAIN (argt))
	  {
	    tree arg = gimple_call_arg (stmt, i);
	    if (TREE_CODE (arg) == SSA_NAME
		&& (val = get_constant_value (arg))
		&& useless_type_conversion_p
		     (TYPE_MAIN_VARIANT (TREE_VALUE (argt)),
		      TYPE_MAIN_VARIANT (TREE_TYPE (val))))
	      {
		gimple_call_set_arg (stmt, i, unshare_expr (val));
		changed = true;
	      }
	  }

	return changed;
      }

    case GIMPLE_ASSIGN:
      {
	tree lhs = gimple_assign_lhs (stmt);
	tree val;

	/* If we have a load that turned out to be constant replace it
	   as we cannot propagate into all uses in all cases.  */
	if (gimple_assign_single_p (stmt)
	    && TREE_CODE (lhs) == SSA_NAME
	    && (val = get_constant_value (lhs)))
	  {
	    tree rhs = unshare_expr (val);
	    if (!useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (rhs)))
	      rhs = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (lhs), rhs);
	    gimple_assign_set_rhs_from_tree (gsi, rhs);
	    return true;
	  }

	return false;
      }

    default:
      return false;
    }
}
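
/* A concrete example of the GIMPLE_COND case (hypothetical SSA names): if
   the lattice says x_3 is the constant 0 with an all-zero mask, then for

     if (x_3 != 0) goto <bb 3>; else goto <bb 4>;

   evaluate_stmt yields the constant 0, gimple_cond_make_false rewrites the
   predicate, and the following CFG cleanup can delete <bb 3> if nothing
   else reaches it.  */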
/* Visit the assignment statement STMT.  Set the value of its LHS to the
   value computed by the RHS and store LHS in *OUTPUT_P.  If STMT
   creates virtual definitions, set the value of each new name to that
   of the RHS (if we can derive a constant out of the RHS).
   Value-returning call statements also perform an assignment, and
   are handled here.  */

static enum ssa_prop_result
visit_assignment (gimple *stmt, tree *output_p)
{
  ccp_prop_value_t val;
  enum ssa_prop_result retval = SSA_PROP_NOT_INTERESTING;

  tree lhs = gimple_get_lhs (stmt);
  if (TREE_CODE (lhs) == SSA_NAME)
    {
      /* Evaluate the statement, which could be
	 either a GIMPLE_ASSIGN or a GIMPLE_CALL.  */
      val = evaluate_stmt (stmt);

      /* If STMT is an assignment to an SSA_NAME, we only have one
	 value to set.  */
      if (set_lattice_value (lhs, &val))
	{
	  *output_p = lhs;
	  if (val.lattice_val == VARYING)
	    retval = SSA_PROP_VARYING;
	  else
	    retval = SSA_PROP_INTERESTING;
	}
    }

  return retval;
}
/* Visit the conditional statement STMT.  Return SSA_PROP_INTERESTING
   if it can determine which edge will be taken.  Otherwise, return
   SSA_PROP_VARYING.  */

static enum ssa_prop_result
visit_cond_stmt (gimple *stmt, edge *taken_edge_p)
{
  ccp_prop_value_t val;
  basic_block block;

  block = gimple_bb (stmt);
  val = evaluate_stmt (stmt);
  if (val.lattice_val != CONSTANT
      || val.mask != 0)
    return SSA_PROP_VARYING;

  /* Find which edge out of the conditional block will be taken and add it
     to the worklist.  If no single edge can be determined statically,
     return SSA_PROP_VARYING to feed all the outgoing edges to the
     propagation engine.  */
  *taken_edge_p = find_taken_edge (block, val.value);
  if (*taken_edge_p)
    return SSA_PROP_INTERESTING;
  else
    return SSA_PROP_VARYING;
}
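
/* For example (made-up names): with flag_5 known to be CONSTANT 3 and an
   all-zero mask, the predicate of

     if (flag_5 > 10) goto <bb 5>; else goto <bb 6>;

   evaluates to 0, find_taken_edge returns the edge to <bb 6>, and only
   that edge is reported back to the propagation engine, so statements in
   <bb 5> need not be simulated unless some other path reaches them.  */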
/* Evaluate statement STMT.  If the statement produces an output value and
   its evaluation changes the lattice value of its output, return
   SSA_PROP_INTERESTING and set *OUTPUT_P to the SSA_NAME holding the
   output value.

   If STMT is a conditional branch and we can determine its truth
   value, set *TAKEN_EDGE_P accordingly.  If STMT produces a varying
   value, return SSA_PROP_VARYING.  */

enum ssa_prop_result
ccp_propagate::visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p)
{
  tree def;
  ssa_op_iter iter;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nVisiting statement:\n");
      print_gimple_stmt (dump_file, stmt, 0, dump_flags);
    }

  switch (gimple_code (stmt))
    {
    case GIMPLE_ASSIGN:
      /* If the statement is an assignment that produces a single
	 output value, evaluate its RHS to see if the lattice value of
	 its output has changed.  */
      return visit_assignment (stmt, output_p);

    case GIMPLE_CALL:
      /* A value-returning call also performs an assignment.  */
      if (gimple_call_lhs (stmt) != NULL_TREE)
	return visit_assignment (stmt, output_p);
      break;

    case GIMPLE_COND:
    case GIMPLE_SWITCH:
      /* If STMT is a conditional branch, see if we can determine
	 which branch will be taken.  */
      /* FIXME.  It appears that we should be able to optimize
	 computed GOTOs here as well.  */
      return visit_cond_stmt (stmt, taken_edge_p);

    default:
      break;
    }

  /* Any other kind of statement is not interesting for constant
     propagation and, therefore, not worth simulating.  */
  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "No interesting values produced.  Marked VARYING.\n");

  /* Definitions made by statements other than assignments to
     SSA_NAMEs represent unknown modifications to their outputs.
     Mark them VARYING.  */
  FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
    set_value_varying (def);

  return SSA_PROP_VARYING;
}
/* Main entry point for SSA Conditional Constant Propagation.  If NONZERO_P,
   record nonzero bits.  */

static unsigned int
do_ssa_ccp (bool nonzero_p)
{
  unsigned int todo = 0;
  calculate_dominance_info (CDI_DOMINATORS);

  ccp_initialize ();
  class ccp_propagate ccp_propagate;
  ccp_propagate.ssa_propagate ();
  if (ccp_finalize (nonzero_p || flag_ipa_bit_cp))
    {
      todo = (TODO_cleanup_cfg | TODO_update_ssa);

      /* ccp_finalize does not preserve loop-closed ssa.  */
      loops_state_clear (LOOP_CLOSED_SSA);
    }

  free_dominance_info (CDI_DOMINATORS);
  return todo;
}
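
/* End-to-end sketch (hypothetical input, simplified SSA): for

     int f (void) { int x = 4; int y = x + 2; return y; }

   the simulation assigns x_1 the CONSTANT value 4 and y_2 the CONSTANT
   value 6, and ccp_finalize substitutes the constants into their uses, so
   the body reduces to "return 6;".  Folded conditionals are what make the
   TODO_cleanup_cfg requested above worthwhile.  */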
namespace {

const pass_data pass_data_ccp =
{
  GIMPLE_PASS, /* type */
  "ccp", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_CCP, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_address_taken, /* todo_flags_finish */
};

class pass_ccp : public gimple_opt_pass
{
public:
  pass_ccp (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_ccp, ctxt), nonzero_p (false)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_ccp (m_ctxt); }
  void set_pass_param (unsigned int n, bool param)
    {
      gcc_assert (n == 0);
      nonzero_p = param;
    }
  virtual bool gate (function *) { return flag_tree_ccp != 0; }
  virtual unsigned int execute (function *) { return do_ssa_ccp (nonzero_p); }

private:
  /* Determines whether the pass instance records nonzero bits.  */
  bool nonzero_p;
}; // class pass_ccp

} // anon namespace

gimple_opt_pass *
make_pass_ccp (gcc::context *ctxt)
{
  return new pass_ccp (ctxt);
}
/* Try to optimize out __builtin_stack_restore.  Optimize it out
   if there is another __builtin_stack_restore in the same basic
   block and no calls or ASM_EXPRs are in between, or if this block's
   only outgoing edge is to EXIT_BLOCK and there are no calls or
   ASM_EXPRs after this __builtin_stack_restore.  */

static tree
optimize_stack_restore (gimple_stmt_iterator i)
{
  tree callee;
  gimple *stmt;

  basic_block bb = gsi_bb (i);
  gimple *call = gsi_stmt (i);

  if (gimple_code (call) != GIMPLE_CALL
      || gimple_call_num_args (call) != 1
      || TREE_CODE (gimple_call_arg (call, 0)) != SSA_NAME
      || !POINTER_TYPE_P (TREE_TYPE (gimple_call_arg (call, 0))))
    return NULL_TREE;

  for (gsi_next (&i); !gsi_end_p (i); gsi_next (&i))
    {
      stmt = gsi_stmt (i);
      if (gimple_code (stmt) == GIMPLE_ASM)
	return NULL_TREE;
      if (gimple_code (stmt) != GIMPLE_CALL)
	continue;

      callee = gimple_call_fndecl (stmt);
      if (!callee
	  || !fndecl_built_in_p (callee, BUILT_IN_NORMAL)
	  /* All regular builtins are ok, just obviously not alloca.  */
	  || ALLOCA_FUNCTION_CODE_P (DECL_FUNCTION_CODE (callee)))
	return NULL_TREE;

      if (fndecl_built_in_p (callee, BUILT_IN_STACK_RESTORE))
	goto second_stack_restore;
    }

  if (!gsi_end_p (i))
    return NULL_TREE;

  /* Allow one successor of the exit block, or zero successors.  */
  switch (EDGE_COUNT (bb->succs))
    {
    case 0:
      break;
    case 1:
      if (single_succ_edge (bb)->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
	return NULL_TREE;
      break;
    default:
      return NULL_TREE;
    }

 second_stack_restore:

  /* If there's exactly one use, then zap the call to __builtin_stack_save.
     If there are multiple uses, then the last one should remove the call.
     In any case, whether the call to __builtin_stack_save can be removed
     or not is irrelevant to removing the call to __builtin_stack_restore.  */
  if (has_single_use (gimple_call_arg (call, 0)))
    {
      gimple *stack_save = SSA_NAME_DEF_STMT (gimple_call_arg (call, 0));
      if (is_gimple_call (stack_save))
	{
	  callee = gimple_call_fndecl (stack_save);
	  if (callee && fndecl_built_in_p (callee, BUILT_IN_STACK_SAVE))
	    {
	      gimple_stmt_iterator stack_save_gsi;
	      tree rhs;

	      stack_save_gsi = gsi_for_stmt (stack_save);
	      rhs = build_int_cst (TREE_TYPE (gimple_call_arg (call, 0)), 0);
	      update_call_from_tree (&stack_save_gsi, rhs);
	    }
	}
    }

  /* No effect, so the statement will be deleted.  */
  return integer_zero_node;
}
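
/* Sketch of the redundant case (illustrative, not from a testcase): two
   consecutive VLA scopes produce

     p_1 = __builtin_stack_save ();   ...   __builtin_stack_restore (p_1);
     p_2 = __builtin_stack_save ();   ...   __builtin_stack_restore (p_2);

   If only ordinary built-ins (in particular no alloca and no asm) appear
   between the two restores, the first one is superseded by the second and
   is deleted by returning integer_zero_node above.  */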
/* If va_list type is a simple pointer and nothing special is needed,
   optimize __builtin_va_start (&ap, 0) into ap = __builtin_next_arg (0),
   __builtin_va_end (&ap) out as NOP and __builtin_va_copy into a simple
   pointer assignment.  */

static tree
optimize_stdarg_builtin (gimple *call)
{
  tree callee, lhs, rhs, cfun_va_list;
  bool va_list_simple_ptr;
  location_t loc = gimple_location (call);

  callee = gimple_call_fndecl (call);

  cfun_va_list = targetm.fn_abi_va_list (callee);
  va_list_simple_ptr = POINTER_TYPE_P (cfun_va_list)
		       && (TREE_TYPE (cfun_va_list) == void_type_node
			   || TREE_TYPE (cfun_va_list) == char_type_node);

  switch (DECL_FUNCTION_CODE (callee))
    {
    case BUILT_IN_VA_START:
      if (!va_list_simple_ptr
	  || targetm.expand_builtin_va_start != NULL
	  || !builtin_decl_explicit_p (BUILT_IN_NEXT_ARG))
	return NULL_TREE;

      if (gimple_call_num_args (call) != 2)
	return NULL_TREE;

      lhs = gimple_call_arg (call, 0);
      if (!POINTER_TYPE_P (TREE_TYPE (lhs))
	  || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs)))
	     != TYPE_MAIN_VARIANT (cfun_va_list))
	return NULL_TREE;

      lhs = build_fold_indirect_ref_loc (loc, lhs);
      rhs = build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_NEXT_ARG),
				 1, integer_zero_node);
      rhs = fold_convert_loc (loc, TREE_TYPE (lhs), rhs);
      return build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, rhs);

    case BUILT_IN_VA_COPY:
      if (!va_list_simple_ptr)
	return NULL_TREE;

      if (gimple_call_num_args (call) != 2)
	return NULL_TREE;

      lhs = gimple_call_arg (call, 0);
      if (!POINTER_TYPE_P (TREE_TYPE (lhs))
	  || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs)))
	     != TYPE_MAIN_VARIANT (cfun_va_list))
	return NULL_TREE;

      lhs = build_fold_indirect_ref_loc (loc, lhs);
      rhs = gimple_call_arg (call, 1);
      if (TYPE_MAIN_VARIANT (TREE_TYPE (rhs))
	  != TYPE_MAIN_VARIANT (cfun_va_list))
	return NULL_TREE;

      rhs = fold_convert_loc (loc, TREE_TYPE (lhs), rhs);
      return build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, rhs);

    case BUILT_IN_VA_END:
      /* No effect, so the statement will be deleted.  */
      return integer_zero_node;

    default:
      gcc_unreachable ();
    }
}
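
/* For illustration (hypothetical user code): on a target whose va_list is
   a plain character pointer, a sequence like

     va_list ap, ap2;
     va_start (ap, fmt);
     va_copy (ap2, ap);
     va_end (ap);

   reduces to an assignment from __builtin_next_arg (0), a plain pointer
   copy, and a deleted va_end, which is also why pass_fold_builtins only
   tries this after pass_stdarg has run.  */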
/* Attempt to make the block of __builtin_unreachable I unreachable by changing
   the incoming jumps.  Return true if at least one jump was changed.  */

static bool
optimize_unreachable (gimple_stmt_iterator i)
{
  basic_block bb = gsi_bb (i);
  gimple_stmt_iterator gsi;
  gimple *stmt;
  edge_iterator ei;
  edge e;
  bool ret;

  if (flag_sanitize & SANITIZE_UNREACHABLE)
    return false;

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      stmt = gsi_stmt (gsi);

      if (is_gimple_debug (stmt))
	continue;

      if (glabel *label_stmt = dyn_cast <glabel *> (stmt))
	{
	  /* Verify we do not need to preserve the label.  */
	  if (FORCED_LABEL (gimple_label_label (label_stmt)))
	    return false;

	  continue;
	}

      /* Only handle the case that __builtin_unreachable is the first statement
	 in the block.  We rely on DCE to remove stmts without side-effects
	 before __builtin_unreachable.  */
      if (gsi_stmt (gsi) != gsi_stmt (i))
	return false;
    }

  ret = false;
  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      gsi = gsi_last_bb (e->src);
      if (gsi_end_p (gsi))
	continue;

      stmt = gsi_stmt (gsi);
      if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
	{
	  if (e->flags & EDGE_TRUE_VALUE)
	    gimple_cond_make_false (cond_stmt);
	  else if (e->flags & EDGE_FALSE_VALUE)
	    gimple_cond_make_true (cond_stmt);
	  else
	    continue;
	  update_stmt (cond_stmt);
	}
      else
	{
	  /* Todo: handle other cases.  Note that unreachable switch case
	     statements have already been removed.  */
	  continue;
	}

      ret = true;
    }

  return ret;
}
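
/* Example of the rewrite (made-up block numbers): given a predecessor that
   ends in

     if (x_2 > 0) goto <bb 7>; else goto <bb 8>;

   where <bb 7> starts with __builtin_unreachable (), the true edge is dead
   by definition, so the condition is forced false with
   gimple_cond_make_false and later CFG cleanup removes <bb 7>.  */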
/* Optimize
     mask_2 = 1 << cnt_1;
     _4 = __atomic_fetch_or_* (ptr_6, mask_2, _3);
     _5 = _4 & mask_2;
   to
     _4 = ATOMIC_BIT_TEST_AND_SET (ptr_6, cnt_1, 0, _3);
     _5 = _4;
   If _5 is only used in _5 != 0 or _5 == 0 comparisons, 1
   is passed instead of 0, and the builtin just returns a zero
   or 1 value instead of the actual bit.
   Similarly for __sync_fetch_and_or_* (without the ", _3" part
   in there), and/or if mask_2 is a power of 2 constant.
   Similarly for xor instead of or, use ATOMIC_BIT_TEST_AND_COMPLEMENT
   in that case.  And similarly for and instead of or, except that
   the second argument to the builtin needs to be one's complement
   of the mask instead of mask.  */

static void
optimize_atomic_bit_test_and (gimple_stmt_iterator *gsip,
			      enum internal_fn fn, bool has_model_arg,
			      bool after)
{
  gimple *call = gsi_stmt (*gsip);
  tree lhs = gimple_call_lhs (call);
  use_operand_p use_p;
  gimple *use_stmt;
  tree mask, bit;
  optab optab;

  if (!flag_inline_atomics
      || optimize_debug
      || !gimple_call_builtin_p (call, BUILT_IN_NORMAL)
      || !lhs
      || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs)
      || !single_imm_use (lhs, &use_p, &use_stmt)
      || !is_gimple_assign (use_stmt)
      || gimple_assign_rhs_code (use_stmt) != BIT_AND_EXPR
      || !gimple_vdef (call))
    return;

  switch (fn)
    {
    case IFN_ATOMIC_BIT_TEST_AND_SET:
      optab = atomic_bit_test_and_set_optab;
      break;
    case IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT:
      optab = atomic_bit_test_and_complement_optab;
      break;
    case IFN_ATOMIC_BIT_TEST_AND_RESET:
      optab = atomic_bit_test_and_reset_optab;
      break;
    default:
      return;
    }

  if (optab_handler (optab, TYPE_MODE (TREE_TYPE (lhs))) == CODE_FOR_nothing)
    return;

  mask = gimple_call_arg (call, 1);
  tree use_lhs = gimple_assign_lhs (use_stmt);
  if (!use_lhs)
    return;

  if (TREE_CODE (mask) == INTEGER_CST)
    {
      if (fn == IFN_ATOMIC_BIT_TEST_AND_RESET)
	mask = const_unop (BIT_NOT_EXPR, TREE_TYPE (mask), mask);
      mask = fold_convert (TREE_TYPE (lhs), mask);
      int ibit = tree_log2 (mask);
      if (ibit < 0)
	return;
      bit = build_int_cst (TREE_TYPE (lhs), ibit);
    }
  else if (TREE_CODE (mask) == SSA_NAME)
    {
      gimple *g = SSA_NAME_DEF_STMT (mask);
      if (fn == IFN_ATOMIC_BIT_TEST_AND_RESET)
	{
	  if (!is_gimple_assign (g)
	      || gimple_assign_rhs_code (g) != BIT_NOT_EXPR)
	    return;
	  mask = gimple_assign_rhs1 (g);
	  if (TREE_CODE (mask) != SSA_NAME)
	    return;
	  g = SSA_NAME_DEF_STMT (mask);
	}
      if (!is_gimple_assign (g)
	  || gimple_assign_rhs_code (g) != LSHIFT_EXPR
	  || !integer_onep (gimple_assign_rhs1 (g)))
	return;
      bit = gimple_assign_rhs2 (g);
    }
  else
    return;

  if (gimple_assign_rhs1 (use_stmt) == lhs)
    {
      if (!operand_equal_p (gimple_assign_rhs2 (use_stmt), mask, 0))
	return;
    }
  else if (gimple_assign_rhs2 (use_stmt) != lhs
	   || !operand_equal_p (gimple_assign_rhs1 (use_stmt), mask, 0))
    return;

  bool use_bool = true;
  bool has_debug_uses = false;
  imm_use_iterator iter;
  gimple *g;

  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use_lhs))
    use_bool = false;
  FOR_EACH_IMM_USE_STMT (g, iter, use_lhs)
    {
      enum tree_code code = ERROR_MARK;
      tree op0 = NULL_TREE, op1 = NULL_TREE;
      if (is_gimple_debug (g))
	{
	  has_debug_uses = true;
	  continue;
	}
      else if (is_gimple_assign (g))
	switch (gimple_assign_rhs_code (g))
	  {
	  case COND_EXPR:
	    op1 = gimple_assign_rhs1 (g);
	    code = TREE_CODE (op1);
	    op0 = TREE_OPERAND (op1, 0);
	    op1 = TREE_OPERAND (op1, 1);
	    break;
	  case EQ_EXPR:
	  case NE_EXPR:
	    code = gimple_assign_rhs_code (g);
	    op0 = gimple_assign_rhs1 (g);
	    op1 = gimple_assign_rhs2 (g);
	    break;
	  default:
	    break;
	  }
      else if (gimple_code (g) == GIMPLE_COND)
	{
	  code = gimple_cond_code (g);
	  op0 = gimple_cond_lhs (g);
	  op1 = gimple_cond_rhs (g);
	}

      if ((code == EQ_EXPR || code == NE_EXPR)
	  && op0 == use_lhs
	  && integer_zerop (op1))
	{
	  use_operand_p use_p;
	  int n = 0;
	  FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
	    n++;
	  if (n == 1)
	    continue;
	}

      use_bool = false;
      BREAK_FROM_IMM_USE_STMT (iter);
    }

  tree new_lhs = make_ssa_name (TREE_TYPE (lhs));
  tree flag = build_int_cst (TREE_TYPE (lhs), use_bool);
  if (has_model_arg)
    g = gimple_build_call_internal (fn, 4, gimple_call_arg (call, 0),
				    bit, flag, gimple_call_arg (call, 2));
  else
    g = gimple_build_call_internal (fn, 3, gimple_call_arg (call, 0),
				    bit, flag);
  gimple_call_set_lhs (g, new_lhs);
  gimple_set_location (g, gimple_location (call));
  gimple_move_vops (g, call);
  bool throws = stmt_can_throw_internal (cfun, call);
  gimple_call_set_nothrow (as_a <gcall *> (g),
			   gimple_call_nothrow_p (as_a <gcall *> (call)));
  gimple_stmt_iterator gsi = *gsip;
  gsi_insert_after (&gsi, g, GSI_NEW_STMT);
  edge e = NULL;
  if (throws)
    {
      maybe_clean_or_replace_eh_stmt (call, g);
      if (after || (use_bool && has_debug_uses))
	e = find_fallthru_edge (gsi_bb (gsi)->succs);
    }
  if (after)
    {
      /* The internal function returns the value of the specified bit
	 before the atomic operation.  If we are interested in the value
	 of the specified bit after the atomic operation (makes only sense
	 for xor, otherwise the bit content is compile time known),
	 we need to invert the bit.  */
      g = gimple_build_assign (make_ssa_name (TREE_TYPE (lhs)),
			       BIT_XOR_EXPR, new_lhs,
			       use_bool ? build_int_cst (TREE_TYPE (lhs), 1)
					: mask);
      new_lhs = gimple_assign_lhs (g);
      if (throws)
	{
	  gsi_insert_on_edge_immediate (e, g);
	  gsi = gsi_for_stmt (g);
	}
      else
	gsi_insert_after (&gsi, g, GSI_NEW_STMT);
    }
  if (use_bool && has_debug_uses)
    {
      tree temp = NULL_TREE;
      if (!throws || after || single_pred_p (e->dest))
	{
	  temp = make_node (DEBUG_EXPR_DECL);
	  DECL_ARTIFICIAL (temp) = 1;
	  TREE_TYPE (temp) = TREE_TYPE (lhs);
	  SET_DECL_MODE (temp, TYPE_MODE (TREE_TYPE (lhs)));
	  tree t = build2 (LSHIFT_EXPR, TREE_TYPE (lhs), new_lhs, bit);
	  g = gimple_build_debug_bind (temp, t, g);
	  if (throws && !after)
	    {
	      gsi = gsi_after_labels (e->dest);
	      gsi_insert_before (&gsi, g, GSI_SAME_STMT);
	    }
	  else
	    gsi_insert_after (&gsi, g, GSI_NEW_STMT);
	}
      FOR_EACH_IMM_USE_STMT (g, iter, use_lhs)
	if (is_gimple_debug (g))
	  {
	    use_operand_p use_p;
	    if (temp == NULL_TREE)
	      gimple_debug_bind_reset_value (g);
	    else
	      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
		SET_USE (use_p, temp);
	    update_stmt (g);
	  }
    }
  SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_lhs)
    = SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use_lhs);
  replace_uses_by (use_lhs, new_lhs);
  gsi = gsi_for_stmt (use_stmt);
  gsi_remove (&gsi, true);
  release_defs (use_stmt);
  gsi_remove (gsip, true);
  release_ssa_name (lhs);
}
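
/* Typical source pattern this catches (illustrative only):

     if (__atomic_fetch_or (&flags, 4, __ATOMIC_SEQ_CST) & 4)
       do_something ();

   The fetch-or plus mask test becomes one ATOMIC_BIT_TEST_AND_SET internal
   call; on targets with a suitable optab (x86's lock bts, for instance)
   that can expand to a single bit-test-and-set instruction followed by a
   branch on the resulting flag instead of an or-and-compare sequence.  */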
/* Optimize
     a = {};
     b = a;
   into
     a = {};
     b = {};
   Similarly for memset (&a, ..., sizeof (a)); instead of a = {};
   and/or memcpy (&b, &a, sizeof (a)); instead of b = a;  */

static void
optimize_memcpy (gimple_stmt_iterator *gsip, tree dest, tree src, tree len)
{
  gimple *stmt = gsi_stmt (*gsip);
  if (gimple_has_volatile_ops (stmt))
    return;

  tree vuse = gimple_vuse (stmt);
  if (vuse == NULL)
    return;

  gimple *defstmt = SSA_NAME_DEF_STMT (vuse);
  tree src2 = NULL_TREE, len2 = NULL_TREE;
  poly_int64 offset, offset2;
  tree val = integer_zero_node;
  if (gimple_store_p (defstmt)
      && gimple_assign_single_p (defstmt)
      && TREE_CODE (gimple_assign_rhs1 (defstmt)) == CONSTRUCTOR
      && !gimple_clobber_p (defstmt))
    src2 = gimple_assign_lhs (defstmt);
  else if (gimple_call_builtin_p (defstmt, BUILT_IN_MEMSET)
	   && TREE_CODE (gimple_call_arg (defstmt, 0)) == ADDR_EXPR
	   && TREE_CODE (gimple_call_arg (defstmt, 1)) == INTEGER_CST)
    {
      src2 = TREE_OPERAND (gimple_call_arg (defstmt, 0), 0);
      len2 = gimple_call_arg (defstmt, 2);
      val = gimple_call_arg (defstmt, 1);
      /* For non-0 val, we'd have to transform stmt from assignment
	 into memset (only if dest is addressable).  */
      if (!integer_zerop (val) && is_gimple_assign (stmt))
	src2 = NULL_TREE;
    }

  if (src2 == NULL_TREE)
    return;

  if (len == NULL_TREE)
    len = (TREE_CODE (src) == COMPONENT_REF
	   ? DECL_SIZE_UNIT (TREE_OPERAND (src, 1))
	   : TYPE_SIZE_UNIT (TREE_TYPE (src)));
  if (len2 == NULL_TREE)
    len2 = (TREE_CODE (src2) == COMPONENT_REF
	    ? DECL_SIZE_UNIT (TREE_OPERAND (src2, 1))
	    : TYPE_SIZE_UNIT (TREE_TYPE (src2)));
  if (len == NULL_TREE
      || !poly_int_tree_p (len)
      || len2 == NULL_TREE
      || !poly_int_tree_p (len2))
    return;

  src = get_addr_base_and_unit_offset (src, &offset);
  src2 = get_addr_base_and_unit_offset (src2, &offset2);
  if (src == NULL_TREE
      || src2 == NULL_TREE
      || maybe_lt (offset, offset2))
    return;

  if (!operand_equal_p (src, src2, 0))
    return;

  /* [ src + offset2, src + offset2 + len2 - 1 ] is set to val.
     Make sure that
     [ src + offset, src + offset + len - 1 ] is a subset of that.  */
  if (maybe_gt (wi::to_poly_offset (len) + (offset - offset2),
		wi::to_poly_offset (len2)))
    return;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Simplified\n  ");
      print_gimple_stmt (dump_file, stmt, 0, dump_flags);
      fprintf (dump_file, "after previous\n  ");
      print_gimple_stmt (dump_file, defstmt, 0, dump_flags);
    }

  /* For simplicity, don't change the kind of the stmt,
     turn dest = src; into dest = {}; and memcpy (&dest, &src, len);
     into memset (&dest, val, len);
     In theory we could change dest = src into memset if dest
     is addressable (maybe beneficial if val is not 0), or
     memcpy (&dest, &src, len) into dest = {} if len is the size
     of dest, dest isn't volatile.  */
  if (is_gimple_assign (stmt))
    {
      tree ctor = build_constructor (TREE_TYPE (dest), NULL);
      gimple_assign_set_rhs_from_tree (gsip, ctor);
      update_stmt (stmt);
    }
  else /* If stmt is memcpy, transform it into memset.  */
    {
      gcall *call = as_a <gcall *> (stmt);
      tree fndecl = builtin_decl_implicit (BUILT_IN_MEMSET);
      gimple_call_set_fndecl (call, fndecl);
      gimple_call_set_fntype (call, TREE_TYPE (fndecl));
      gimple_call_set_arg (call, 1, val);
      update_stmt (stmt);
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "into\n  ");
      print_gimple_stmt (dump_file, stmt, 0, dump_flags);
    }
}
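
/* Worked example (names invented): given

     memset (&a, 0, sizeof (a));
     memcpy (&b, &a, sizeof (a));

   the copy only moves bytes already known to be zero, so the memcpy is
   rewritten into memset (&b, 0, sizeof (a)); if nothing else reads A, a
   later dead store elimination pass may then delete the first memset.  */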
/* A simple pass that attempts to fold all builtin functions.  This pass
   is run after we've propagated as many constants as we can.  */

namespace {

const pass_data pass_data_fold_builtins =
{
  GIMPLE_PASS, /* type */
  "fab", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_fold_builtins : public gimple_opt_pass
{
public:
  pass_fold_builtins (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_fold_builtins, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_fold_builtins (m_ctxt); }
  virtual unsigned int execute (function *);

}; // class pass_fold_builtins
unsigned int
pass_fold_builtins::execute (function *fun)
{
  bool cfg_changed = false;
  basic_block bb;
  unsigned int todoflags = 0;

  FOR_EACH_BB_FN (bb, fun)
    {
      gimple_stmt_iterator i;
      for (i = gsi_start_bb (bb); !gsi_end_p (i); )
	{
	  gimple *stmt, *old_stmt;
	  tree callee;
	  enum built_in_function fcode;

	  stmt = gsi_stmt (i);

	  if (gimple_code (stmt) != GIMPLE_CALL)
	    {
	      /* Remove all *ssaname_N ={v} {CLOBBER}; stmts,
		 after the last GIMPLE DSE they aren't needed and might
		 unnecessarily keep the SSA_NAMEs live.  */
	      if (gimple_clobber_p (stmt))
		{
		  tree lhs = gimple_assign_lhs (stmt);
		  if (TREE_CODE (lhs) == MEM_REF
		      && TREE_CODE (TREE_OPERAND (lhs, 0)) == SSA_NAME)
		    {
		      unlink_stmt_vdef (stmt);
		      gsi_remove (&i, true);
		      release_defs (stmt);
		      continue;
		    }
		}
	      else if (gimple_assign_load_p (stmt) && gimple_store_p (stmt))
		optimize_memcpy (&i, gimple_assign_lhs (stmt),
				 gimple_assign_rhs1 (stmt), NULL_TREE);
	      gsi_next (&i);
	      continue;
	    }

	  callee = gimple_call_fndecl (stmt);
	  if (!callee || !fndecl_built_in_p (callee, BUILT_IN_NORMAL))
	    {
	      gsi_next (&i);
	      continue;
	    }

	  fcode = DECL_FUNCTION_CODE (callee);
	  if (fold_stmt (&i))
	    ;
	  else
	    {
	      tree result = NULL_TREE;
	      switch (DECL_FUNCTION_CODE (callee))
		{
		case BUILT_IN_CONSTANT_P:
		  /* Resolve __builtin_constant_p.  If it hasn't been
		     folded to integer_one_node by now, it's fairly
		     certain that the value simply isn't constant.  */
		  result = integer_zero_node;
		  break;

		case BUILT_IN_ASSUME_ALIGNED:
		  /* Remove __builtin_assume_aligned.  */
		  result = gimple_call_arg (stmt, 0);
		  break;

		case BUILT_IN_STACK_RESTORE:
		  result = optimize_stack_restore (i);
		  if (result)
		    break;
		  gsi_next (&i);
		  continue;

		case BUILT_IN_UNREACHABLE:
		  if (optimize_unreachable (i))
		    cfg_changed = true;
		  break;

		case BUILT_IN_ATOMIC_FETCH_OR_1:
		case BUILT_IN_ATOMIC_FETCH_OR_2:
		case BUILT_IN_ATOMIC_FETCH_OR_4:
		case BUILT_IN_ATOMIC_FETCH_OR_8:
		case BUILT_IN_ATOMIC_FETCH_OR_16:
		  optimize_atomic_bit_test_and (&i,
						IFN_ATOMIC_BIT_TEST_AND_SET,
						true, false);
		  break;
		case BUILT_IN_SYNC_FETCH_AND_OR_1:
		case BUILT_IN_SYNC_FETCH_AND_OR_2:
		case BUILT_IN_SYNC_FETCH_AND_OR_4:
		case BUILT_IN_SYNC_FETCH_AND_OR_8:
		case BUILT_IN_SYNC_FETCH_AND_OR_16:
		  optimize_atomic_bit_test_and (&i,
						IFN_ATOMIC_BIT_TEST_AND_SET,
						false, false);
		  break;

		case BUILT_IN_ATOMIC_FETCH_XOR_1:
		case BUILT_IN_ATOMIC_FETCH_XOR_2:
		case BUILT_IN_ATOMIC_FETCH_XOR_4:
		case BUILT_IN_ATOMIC_FETCH_XOR_8:
		case BUILT_IN_ATOMIC_FETCH_XOR_16:
		  optimize_atomic_bit_test_and
			(&i, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, true, false);
		  break;
		case BUILT_IN_SYNC_FETCH_AND_XOR_1:
		case BUILT_IN_SYNC_FETCH_AND_XOR_2:
		case BUILT_IN_SYNC_FETCH_AND_XOR_4:
		case BUILT_IN_SYNC_FETCH_AND_XOR_8:
		case BUILT_IN_SYNC_FETCH_AND_XOR_16:
		  optimize_atomic_bit_test_and
			(&i, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, false, false);
		  break;

		case BUILT_IN_ATOMIC_XOR_FETCH_1:
		case BUILT_IN_ATOMIC_XOR_FETCH_2:
		case BUILT_IN_ATOMIC_XOR_FETCH_4:
		case BUILT_IN_ATOMIC_XOR_FETCH_8:
		case BUILT_IN_ATOMIC_XOR_FETCH_16:
		  optimize_atomic_bit_test_and
			(&i, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, true, true);
		  break;
		case BUILT_IN_SYNC_XOR_AND_FETCH_1:
		case BUILT_IN_SYNC_XOR_AND_FETCH_2:
		case BUILT_IN_SYNC_XOR_AND_FETCH_4:
		case BUILT_IN_SYNC_XOR_AND_FETCH_8:
		case BUILT_IN_SYNC_XOR_AND_FETCH_16:
		  optimize_atomic_bit_test_and
			(&i, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, false, true);
		  break;

		case BUILT_IN_ATOMIC_FETCH_AND_1:
		case BUILT_IN_ATOMIC_FETCH_AND_2:
		case BUILT_IN_ATOMIC_FETCH_AND_4:
		case BUILT_IN_ATOMIC_FETCH_AND_8:
		case BUILT_IN_ATOMIC_FETCH_AND_16:
		  optimize_atomic_bit_test_and (&i,
						IFN_ATOMIC_BIT_TEST_AND_RESET,
						true, false);
		  break;
		case BUILT_IN_SYNC_FETCH_AND_AND_1:
		case BUILT_IN_SYNC_FETCH_AND_AND_2:
		case BUILT_IN_SYNC_FETCH_AND_AND_4:
		case BUILT_IN_SYNC_FETCH_AND_AND_8:
		case BUILT_IN_SYNC_FETCH_AND_AND_16:
		  optimize_atomic_bit_test_and (&i,
						IFN_ATOMIC_BIT_TEST_AND_RESET,
						false, false);
		  break;

		case BUILT_IN_MEMCPY:
		  if (gimple_call_builtin_p (stmt, BUILT_IN_NORMAL)
		      && TREE_CODE (gimple_call_arg (stmt, 0)) == ADDR_EXPR
		      && TREE_CODE (gimple_call_arg (stmt, 1)) == ADDR_EXPR
		      && TREE_CODE (gimple_call_arg (stmt, 2)) == INTEGER_CST)
		    {
		      tree dest = TREE_OPERAND (gimple_call_arg (stmt, 0), 0);
		      tree src = TREE_OPERAND (gimple_call_arg (stmt, 1), 0);
		      tree len = gimple_call_arg (stmt, 2);
		      optimize_memcpy (&i, dest, src, len);
		    }
		  break;

		case BUILT_IN_VA_START:
		case BUILT_IN_VA_END:
		case BUILT_IN_VA_COPY:
		  /* These shouldn't be folded before pass_stdarg.  */
		  result = optimize_stdarg_builtin (stmt);
		  break;

		default:
		  break;
		}

	      if (!result)
		{
		  gsi_next (&i);
		  continue;
		}

	      if (!update_call_from_tree (&i, result))
		gimplify_and_update_call_from_tree (&i, result);
	    }

	  todoflags |= TODO_update_address_taken;

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Simplified\n  ");
	      print_gimple_stmt (dump_file, stmt, 0, dump_flags);
	    }

	  old_stmt = stmt;
	  stmt = gsi_stmt (i);
	  update_stmt (stmt);

	  if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt)
	      && gimple_purge_dead_eh_edges (bb))
	    cfg_changed = true;

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "to\n  ");
	      print_gimple_stmt (dump_file, stmt, 0, dump_flags);
	      fprintf (dump_file, "\n");
	    }

	  /* Retry the same statement if it changed into another
	     builtin, there might be new opportunities now.  */
	  if (gimple_code (stmt) != GIMPLE_CALL)
	    {
	      gsi_next (&i);
	      continue;
	    }
	  callee = gimple_call_fndecl (stmt);
	  if (!callee
	      || !fndecl_built_in_p (callee, fcode))
	    gsi_next (&i);
	}
    }

  /* Delete unreachable blocks.  */
  if (cfg_changed)
    todoflags |= TODO_cleanup_cfg;

  return todoflags;
}

} // anon namespace
gimple_opt_pass *
make_pass_fold_builtins (gcc::context *ctxt)
{
  return new pass_fold_builtins (ctxt);
}
/* A simple pass that emits some warnings post IPA.  */

namespace {

const pass_data pass_data_post_ipa_warn =
{
  GIMPLE_PASS, /* type */
  "post_ipa_warn", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_post_ipa_warn : public gimple_opt_pass
{
public:
  pass_post_ipa_warn (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_post_ipa_warn, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_post_ipa_warn (m_ctxt); }
  virtual bool gate (function *) { return warn_nonnull != 0; }
  virtual unsigned int execute (function *);

}; // class pass_post_ipa_warn

unsigned int
pass_post_ipa_warn::execute (function *fun)
{
  basic_block bb;

  FOR_EACH_BB_FN (bb, fun)
    {
      gimple_stmt_iterator gsi;
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);
	  if (!is_gimple_call (stmt) || gimple_no_warning_p (stmt))
	    continue;

	  tree fntype = gimple_call_fntype (stmt);
	  bitmap nonnullargs = get_nonnull_args (fntype);
	  if (!nonnullargs)
	    continue;

	  tree fndecl = gimple_call_fndecl (stmt);

	  for (unsigned i = 0; i < gimple_call_num_args (stmt); i++)
	    {
	      tree arg = gimple_call_arg (stmt, i);
	      if (TREE_CODE (TREE_TYPE (arg)) != POINTER_TYPE)
		continue;
	      if (!integer_zerop (arg))
		continue;
	      if (!bitmap_empty_p (nonnullargs)
		  && !bitmap_bit_p (nonnullargs, i))
		continue;

	      /* In C++ non-static member functions argument 0 refers
		 to the implicit this pointer.  Use the same one-based
		 numbering for ordinary arguments.  */
	      unsigned argno = TREE_CODE (fntype) == METHOD_TYPE ? i : i + 1;
	      location_t loc = (EXPR_HAS_LOCATION (arg)
				? EXPR_LOCATION (arg)
				: gimple_location (stmt));
	      auto_diagnostic_group d;
	      if (argno == 0)
		{
		  if (warning_at (loc, OPT_Wnonnull,
				  "%G%qs pointer null", stmt, "this")
		      && fndecl)
		    inform (DECL_SOURCE_LOCATION (fndecl),
			    "in a call to non-static member function %qD",
			    fndecl);
		  continue;
		}

	      if (!warning_at (loc, OPT_Wnonnull,
			       "%Gargument %u null where non-null "
			       "expected", stmt, argno))
		continue;

	      tree fndecl = gimple_call_fndecl (stmt);
	      if (fndecl && DECL_IS_BUILTIN (fndecl))
		inform (loc, "in a call to built-in function %qD",
			fndecl);
	      else if (fndecl)
		inform (DECL_SOURCE_LOCATION (fndecl),
			"in a call to function %qD declared %qs",
			fndecl, "nonnull");
	    }
	  BITMAP_FREE (nonnullargs);
	}
    }
  return 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_post_ipa_warn (gcc::context *ctxt)
{
  return new pass_post_ipa_warn (ctxt);
}
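
/* Example diagnostic this pass produces (illustrative user code): for

     extern void copy (void *dst, const void *src)
       __attribute__ ((nonnull));
     void f (void *p) { copy (p, 0); }

   the literal null second argument yields
   "warning: argument 2 null where non-null expected", emitted only after
   IPA propagation has had its chance to expose such constants.  */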