/* Conditional constant propagation pass for the GNU compiler.
   Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009,
   2010, 2011, 2012 Free Software Foundation, Inc.
   Adapted from original RTL SSA-CCP by Daniel Berlin <dberlin@dberlin.org>
   Adapted to GIMPLE trees by Diego Novillo <dnovillo@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* Conditional constant propagation (CCP) is based on the SSA
   propagation engine (tree-ssa-propagate.c).  Constant assignments of
   the form VAR = CST are propagated from the assignments into uses of
   VAR, which in turn may generate new constants.  The simulation uses
   a four level lattice to keep track of constant values associated
   with SSA names.  Given an SSA name V_i, it may take one of the
   following values:

	UNINITIALIZED   ->  the initial state of the value.  This value
			    is replaced with a correct initial value
			    the first time the value is used, so the
			    rest of the pass does not need to care about
			    it.  Using this value simplifies initialization
			    of the pass, and prevents us from needlessly
			    scanning statements that are never reached.

	UNDEFINED	->  V_i is a local variable whose definition
			    has not been processed yet.  Therefore we
			    don't yet know if its value is a constant
			    or not.

	CONSTANT	->  V_i has been found to hold a constant
			    value C.

	VARYING		->  V_i cannot take a constant value, or if it
			    does, it is not possible to determine it
			    at compile time.

   The core of SSA-CCP is in ccp_visit_stmt and ccp_visit_phi_node:

   1- In ccp_visit_stmt, we are interested in assignments whose RHS
      evaluates into a constant and conditional jumps whose predicate
      evaluates into a boolean true or false.  When an assignment of
      the form V_i = CONST is found, V_i's lattice value is set to
      CONSTANT and CONST is associated with it.  This causes the
      propagation engine to add all the SSA edges coming out the
      assignment into the worklists, so that statements that use V_i
      can be visited.

      If the statement is a conditional with a constant predicate, we
      mark the outgoing edges as executable or not executable
      depending on the predicate's value.  This is then used when
      visiting PHI nodes to know when a PHI argument can be ignored.

   2- In ccp_visit_phi_node, if all the PHI arguments evaluate to the
      same constant C, then the LHS of the PHI is set to C.  This
      evaluation is known as the "meet operation".  Since one of the
      goals of this evaluation is to optimistically return constant
      values as often as possible, it uses two main short cuts:

      - If an argument is flowing in through a non-executable edge, it
	is ignored.  This is useful in cases like this:

			if (PRED)
			  a_9 = 3;
			else
			  a_10 = 100;
			a_11 = PHI (a_9, a_10)

	If PRED is known to always evaluate to false, then we can
	assume that a_11 will always take its value from a_10, meaning
	that instead of considering it VARYING (a_9 and a_10 have
	different values), we can consider it CONSTANT 100.

      - If an argument has an UNDEFINED value, then it does not affect
	the outcome of the meet operation.  If a variable V_i has an
	UNDEFINED value, it means that either its defining statement
	hasn't been visited yet or V_i has no defining statement, in
	which case the original symbol 'V' is being used
	uninitialized.  Since 'V' is a local variable, the compiler
	may assume any initial value for it.

   After propagation, every variable V_i that ends up with a lattice
   value of CONSTANT will have the associated constant value in the
   array CONST_VAL[i].VALUE.  That is fed into substitute_and_fold for
   final substitution and folding.

   References:

     Constant propagation with conditional branches,
     Wegman and Zadeck, ACM TOPLAS 13(2):181-210.

     Building an Optimizing Compiler,
     Robert Morgan, Butterworth-Heinemann, 1998, Section 8.9.

     Advanced Compiler Design and Implementation,
     Steven Muchnick, Morgan Kaufmann, 1997, Section 12.6  */
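/* Editorial illustration (added for exposition; not part of the
   original comment -- the SSA names below are hypothetical):

	i_1 = 10;
	j_2 = i_1 + 5;
	if (j_2 > 20) ...

   Visiting the first assignment sets i_1 to CONSTANT 10 and places the
   SSA edge to the second statement on the worklist; j_2 then folds to
   CONSTANT 15.  The predicate j_2 > 20 folds to false, so only the
   false outgoing edge is marked executable, and downstream PHI nodes
   may ignore arguments flowing in through the dead true edge.  */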
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "flags.h"
#include "tm_p.h"
#include "basic-block.h"
#include "function.h"
#include "gimple-pretty-print.h"
#include "tree-flow.h"
#include "tree-pass.h"
#include "tree-ssa-propagate.h"
#include "value-prof.h"
#include "langhooks.h"
#include "target.h"
#include "diagnostic-core.h"
#include "dbgcnt.h"
#include "gimple-fold.h"
#include "params.h"
#include "hash-table.h"
/* Possible lattice values.  */
typedef enum
{
  UNINITIALIZED,
  UNDEFINED,
  CONSTANT,
  VARYING
} ccp_lattice_t;

struct prop_value_d {
    /* Lattice value.  */
    ccp_lattice_t lattice_val;

    /* Propagated value.  */
    tree value;

    /* Mask that applies to the propagated value during CCP.  For
       X with a CONSTANT lattice value X & ~mask == value & ~mask.  */
    double_int mask;
};

typedef struct prop_value_d prop_value_t;
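/* Editorial illustration of the value/mask encoding above (the numbers
   are hypothetical): a CONSTANT entry with value == 0x4 and mask == 0x3
   has its two low bits unknown, so it stands for any of 0x4, 0x5, 0x6
   or 0x7; only the bits where the mask is zero are known.  A mask of
   zero denotes a fully known constant, and a mask of all ones carries
   no bit information at all.  */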
/* Array of propagated constant values.  After propagation,
   CONST_VAL[I].VALUE holds the constant value for SSA_NAME(I).  If
   the constant is held in an SSA name representing a memory store
   (i.e., a VDEF), CONST_VAL[I].MEM_REF will contain the actual
   memory reference used to store (i.e., the LHS of the assignment
   doing the store).  */
static prop_value_t *const_val;

static void canonicalize_float_value (prop_value_t *);
static bool ccp_fold_stmt (gimple_stmt_iterator *);
/* Dump constant propagation value VAL to file OUTF prefixed by PREFIX.  */

static void
dump_lattice_value (FILE *outf, const char *prefix, prop_value_t val)
{
  switch (val.lattice_val)
    {
    case UNINITIALIZED:
      fprintf (outf, "%sUNINITIALIZED", prefix);
      break;
    case UNDEFINED:
      fprintf (outf, "%sUNDEFINED", prefix);
      break;
    case VARYING:
      fprintf (outf, "%sVARYING", prefix);
      break;
    case CONSTANT:
      if (TREE_CODE (val.value) != INTEGER_CST
	  || val.mask.is_zero ())
	{
	  fprintf (outf, "%sCONSTANT ", prefix);
	  print_generic_expr (outf, val.value, dump_flags);
	}
      else
	{
	  double_int cval = tree_to_double_int (val.value).and_not (val.mask);
	  fprintf (outf, "%sCONSTANT " HOST_WIDE_INT_PRINT_DOUBLE_HEX,
		   prefix, cval.high, cval.low);
	  fprintf (outf, " (" HOST_WIDE_INT_PRINT_DOUBLE_HEX ")",
		   val.mask.high, val.mask.low);
	}
      break;
    default:
      gcc_unreachable ();
    }
}
/* Print lattice value VAL to stderr.  */

void debug_lattice_value (prop_value_t val);

DEBUG_FUNCTION void
debug_lattice_value (prop_value_t val)
{
  dump_lattice_value (stderr, "", val);
  fprintf (stderr, "\n");
}
/* Compute a default value for variable VAR and store it in the
   CONST_VAL array.  The following rules are used to get default
   values:

   1- Global and static variables that are declared constant are
      considered CONSTANT.

   2- Any other value is considered UNDEFINED.  This is useful when
      considering PHI nodes.  PHI arguments that are undefined do not
      change the constant value of the PHI node, which allows for more
      constants to be propagated.

   3- Variables defined by statements other than assignments and PHI
      nodes are considered VARYING.

   4- Initial values of variables that are not GIMPLE registers are
      considered VARYING.  */

static prop_value_t
get_default_value (tree var)
{
  prop_value_t val = { UNINITIALIZED, NULL_TREE, { 0, 0 } };
  gimple stmt;

  stmt = SSA_NAME_DEF_STMT (var);

  if (gimple_nop_p (stmt))
    {
      /* Variables defined by an empty statement are those used
	 before being initialized.  If VAR is a local variable, we
	 can assume initially that it is UNDEFINED, otherwise we must
	 consider it VARYING.  */
      if (!virtual_operand_p (var)
	  && TREE_CODE (SSA_NAME_VAR (var)) == VAR_DECL)
	val.lattice_val = UNDEFINED;
      else
	{
	  val.lattice_val = VARYING;
	  val.mask = double_int_minus_one;
	}
    }
  else if (is_gimple_assign (stmt)
	   /* Value-returning GIMPLE_CALL statements assign to
	      a variable, and are treated similarly to GIMPLE_ASSIGN.  */
	   || (is_gimple_call (stmt)
	       && gimple_call_lhs (stmt) != NULL_TREE)
	   || gimple_code (stmt) == GIMPLE_PHI)
    {
      tree cst;
      if (gimple_assign_single_p (stmt)
	  && DECL_P (gimple_assign_rhs1 (stmt))
	  && (cst = get_symbol_constant_value (gimple_assign_rhs1 (stmt))))
	{
	  val.lattice_val = CONSTANT;
	  val.value = cst;
	}
      else
	/* Any other variable defined by an assignment or a PHI node
	   is considered UNDEFINED.  */
	val.lattice_val = UNDEFINED;
    }
  else
    {
      /* Otherwise, VAR will never take on a constant value.  */
      val.lattice_val = VARYING;
      val.mask = double_int_minus_one;
    }

  return val;
}
/* Get the constant value associated with variable VAR.  */

static inline prop_value_t *
get_value (tree var)
{
  prop_value_t *val;

  if (const_val == NULL)
    return NULL;

  val = &const_val[SSA_NAME_VERSION (var)];
  if (val->lattice_val == UNINITIALIZED)
    *val = get_default_value (var);

  canonicalize_float_value (val);

  return val;
}
/* Return the constant tree value associated with VAR.  */

static inline tree
get_constant_value (tree var)
{
  prop_value_t *val;
  if (TREE_CODE (var) != SSA_NAME)
    {
      if (is_gimple_min_invariant (var))
	return var;
      return NULL_TREE;
    }
  val = get_value (var);
  if (val
      && val->lattice_val == CONSTANT
      && (TREE_CODE (val->value) != INTEGER_CST
	  || val->mask.is_zero ()))
    return val->value;
  return NULL_TREE;
}
/* Sets the value associated with VAR to VARYING.  */

static inline void
set_value_varying (tree var)
{
  prop_value_t *val = &const_val[SSA_NAME_VERSION (var)];

  val->lattice_val = VARYING;
  val->value = NULL_TREE;
  val->mask = double_int_minus_one;
}
/* For float types, modify the value of VAL to make ccp work correctly
   for non-standard values (-0, NaN):

   If HONOR_SIGNED_ZEROS is false, and VAL = -0, we canonicalize it to 0.
   If HONOR_NANS is false, and VAL is NaN, we canonicalize it to UNDEFINED.
     This is to fix the following problem (see PR 29921): Suppose we have

     x = 0.0 * y

   and we set value of y to NaN.  This causes value of x to be set to NaN.
   When we later determine that y is in fact VARYING, fold uses the fact
   that HONOR_NANS is false, and we try to change the value of x to 0,
   causing an ICE.  With HONOR_NANS being false, the real appearance of
   NaN would cause undefined behavior, though, so claiming that y (and x)
   are UNDEFINED initially is correct.  */

static void
canonicalize_float_value (prop_value_t *val)
{
  enum machine_mode mode;
  tree type;
  REAL_VALUE_TYPE d;

  if (val->lattice_val != CONSTANT
      || TREE_CODE (val->value) != REAL_CST)
    return;

  d = TREE_REAL_CST (val->value);
  type = TREE_TYPE (val->value);
  mode = TYPE_MODE (type);

  if (!HONOR_SIGNED_ZEROS (mode)
      && REAL_VALUE_MINUS_ZERO (d))
    {
      val->value = build_real (type, dconst0);
      return;
    }

  if (!HONOR_NANS (mode)
      && REAL_VALUE_ISNAN (d))
    {
      val->lattice_val = UNDEFINED;
      val->value = NULL;
      return;
    }
}
/* Return whether the lattice transition is valid.  */

static bool
valid_lattice_transition (prop_value_t old_val, prop_value_t new_val)
{
  /* Lattice transitions must always be monotonically increasing in
     value.  */
  if (old_val.lattice_val < new_val.lattice_val)
    return true;

  if (old_val.lattice_val != new_val.lattice_val)
    return false;

  if (!old_val.value && !new_val.value)
    return true;

  /* Now both lattice values are CONSTANT.  */

  /* Allow transitioning from PHI <&x, not executable> == &x
     to PHI <&x, &y> == common alignment.  */
  if (TREE_CODE (old_val.value) != INTEGER_CST
      && TREE_CODE (new_val.value) == INTEGER_CST)
    return true;

  /* Bit-lattices have to agree in the still valid bits.  */
  if (TREE_CODE (old_val.value) == INTEGER_CST
      && TREE_CODE (new_val.value) == INTEGER_CST)
    return tree_to_double_int (old_val.value).and_not (new_val.mask)
	   == tree_to_double_int (new_val.value).and_not (new_val.mask);

  /* Otherwise constant values have to agree.  */
  return operand_equal_p (old_val.value, new_val.value, 0);
}
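/* Editorial illustration of the INTEGER_CST agreement check above (the
   numbers are hypothetical): moving from value 0x4/mask 0x3 to value
   0x4/mask 0x7 is a valid transition -- the surviving known bits still
   agree, 0x4 & ~0x7 == 0x4 & ~0x7 == 0 -- while moving to value
   0x8/mask 0x3 is not, because a bit previously known to be zero would
   suddenly be claimed to be one.  */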
/* Set the value for variable VAR to NEW_VAL.  Return true if the new
   value is different from VAR's previous value.  */

static bool
set_lattice_value (tree var, prop_value_t new_val)
{
  /* We can deal with old UNINITIALIZED values just fine here.  */
  prop_value_t *old_val = &const_val[SSA_NAME_VERSION (var)];

  canonicalize_float_value (&new_val);

  /* We have to be careful to not go up the bitwise lattice
     represented by the mask.
     ???  This doesn't seem to be the best place to enforce this.  */
  if (new_val.lattice_val == CONSTANT
      && old_val->lattice_val == CONSTANT
      && TREE_CODE (new_val.value) == INTEGER_CST
      && TREE_CODE (old_val->value) == INTEGER_CST)
    {
      double_int diff;
      diff = tree_to_double_int (new_val.value)
	     ^ tree_to_double_int (old_val->value);
      new_val.mask = new_val.mask | old_val->mask | diff;
    }

  gcc_assert (valid_lattice_transition (*old_val, new_val));

  /* If *OLD_VAL and NEW_VAL are the same, return false to inform the
     caller that this was a non-transition.  */
  if (old_val->lattice_val != new_val.lattice_val
      || (new_val.lattice_val == CONSTANT
	  && TREE_CODE (new_val.value) == INTEGER_CST
	  && (TREE_CODE (old_val->value) != INTEGER_CST
	      || new_val.mask != old_val->mask)))
    {
      /* ???  We would like to delay creation of INTEGER_CSTs from
	 partially constants here.  */

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  dump_lattice_value (dump_file, "Lattice value changed to ", new_val);
	  fprintf (dump_file, ".  Adding SSA edges to worklist.\n");
	}

      *old_val = new_val;

      gcc_assert (new_val.lattice_val != UNINITIALIZED);
      return true;
    }

  return false;
}
static prop_value_t get_value_for_expr (tree, bool);
static prop_value_t bit_value_binop (enum tree_code, tree, tree, tree);
static void bit_value_binop_1 (enum tree_code, tree, double_int *, double_int *,
			       tree, double_int, double_int,
			       tree, double_int, double_int);
/* Return a double_int that can be used for bitwise simplifications
   from VAL.  */

static double_int
value_to_double_int (prop_value_t val)
{
  if (val.value
      && TREE_CODE (val.value) == INTEGER_CST)
    return tree_to_double_int (val.value);
  else
    return double_int_zero;
}
/* Return the value for the address expression EXPR based on alignment
   information.  */

static prop_value_t
get_value_from_alignment (tree expr)
{
  tree type = TREE_TYPE (expr);
  prop_value_t val;
  unsigned HOST_WIDE_INT bitpos;
  unsigned int align;

  gcc_assert (TREE_CODE (expr) == ADDR_EXPR);

  get_pointer_alignment_1 (expr, &align, &bitpos);
  val.mask = (POINTER_TYPE_P (type) || TYPE_UNSIGNED (type)
	      ? double_int::mask (TYPE_PRECISION (type))
	      : double_int_minus_one)
	     .and_not (double_int::from_uhwi (align / BITS_PER_UNIT - 1));
  val.lattice_val = val.mask.is_minus_one () ? VARYING : CONSTANT;
  if (val.lattice_val == CONSTANT)
    val.value
      = double_int_to_tree (type,
			    double_int::from_uhwi (bitpos / BITS_PER_UNIT));
  else
    val.value = NULL_TREE;

  return val;
}
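/* Editorial illustration (hypothetical declaration): for &s.f where s
   is known 16-byte aligned and field f sits at byte offset 4, the code
   above yields mask == ~(unsigned HOST_WIDE_INT) 0xf and value == 4,
   i.e. the address is known to be congruent to 4 modulo 16 even though
   the address itself is unknown.  */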
/* Return the value for the tree operand EXPR.  If FOR_BITS_P is true
   return constant bits extracted from alignment information for
   invariant addresses.  */

static prop_value_t
get_value_for_expr (tree expr, bool for_bits_p)
{
  prop_value_t val;

  if (TREE_CODE (expr) == SSA_NAME)
    {
      val = *get_value (expr);
      if (for_bits_p
	  && val.lattice_val == CONSTANT
	  && TREE_CODE (val.value) == ADDR_EXPR)
	val = get_value_from_alignment (val.value);
    }
  else if (is_gimple_min_invariant (expr)
	   && (!for_bits_p || TREE_CODE (expr) != ADDR_EXPR))
    {
      val.lattice_val = CONSTANT;
      val.value = expr;
      val.mask = double_int_zero;
      canonicalize_float_value (&val);
    }
  else if (TREE_CODE (expr) == ADDR_EXPR)
    val = get_value_from_alignment (expr);
  else
    {
      val.lattice_val = VARYING;
      val.mask = double_int_minus_one;
      val.value = NULL_TREE;
    }
  return val;
}
/* Return the likely CCP lattice value for STMT.

   If STMT has no operands, then return CONSTANT.

   Else if undefinedness of operands of STMT causes its value to be
   undefined, then return UNDEFINED.

   Else if any operands of STMT are constants, then return CONSTANT.

   Else return VARYING.  */

static ccp_lattice_t
likely_value (gimple stmt)
{
  bool has_constant_operand, has_undefined_operand, all_undefined_operands;
  tree use;
  ssa_op_iter iter;
  unsigned i;

  enum gimple_code code = gimple_code (stmt);

  /* This function appears to be called only for assignments, calls,
     conditionals, and switches, due to the logic in visit_stmt.  */
  gcc_assert (code == GIMPLE_ASSIGN
	      || code == GIMPLE_CALL
	      || code == GIMPLE_COND
	      || code == GIMPLE_SWITCH);

  /* If the statement has volatile operands, it won't fold to a
     constant value.  */
  if (gimple_has_volatile_ops (stmt))
    return VARYING;

  /* Arrive here for more complex cases.  */
  has_constant_operand = false;
  has_undefined_operand = false;
  all_undefined_operands = true;
  FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
    {
      prop_value_t *val = get_value (use);

      if (val->lattice_val == UNDEFINED)
	has_undefined_operand = true;
      else
	all_undefined_operands = false;

      if (val->lattice_val == CONSTANT)
	has_constant_operand = true;
    }

  /* There may be constants in regular rhs operands.  For calls we
     have to ignore lhs, fndecl and static chain, otherwise only
     the lhs.  */
  for (i = (is_gimple_call (stmt) ? 2 : 0) + gimple_has_lhs (stmt);
       i < gimple_num_ops (stmt); ++i)
    {
      tree op = gimple_op (stmt, i);
      if (!op || TREE_CODE (op) == SSA_NAME)
	continue;
      if (is_gimple_min_invariant (op))
	has_constant_operand = true;
    }

  if (has_constant_operand)
    all_undefined_operands = false;

  /* If the operation combines operands like COMPLEX_EXPR make sure to
     not mark the result UNDEFINED if only one part of the result is
     undefined.  */
  if (has_undefined_operand && all_undefined_operands)
    return UNDEFINED;
  else if (code == GIMPLE_ASSIGN && has_undefined_operand)
    {
      switch (gimple_assign_rhs_code (stmt))
	{
	/* Unary operators are handled with all_undefined_operands.  */
	case PLUS_EXPR:
	case MINUS_EXPR:
	case POINTER_PLUS_EXPR:
	  /* Not MIN_EXPR, MAX_EXPR.  One VARYING operand may be selected.
	     Not bitwise operators, one VARYING operand may specify the
	     result completely.  Not logical operators for the same reason.
	     Not COMPLEX_EXPR as one VARYING operand makes the result partly
	     not UNDEFINED.  Not *DIV_EXPR, comparisons and shifts because
	     the undefined operand may be promoted.  */
	  return UNDEFINED;

	case ADDR_EXPR:
	  /* If any part of an address is UNDEFINED, like the index
	     of an ARRAY_EXPR, then treat the result as UNDEFINED.  */
	  return UNDEFINED;

	default:
	  ;
	}
    }
  /* If there was an UNDEFINED operand but the result may be not UNDEFINED
     fall back to CONSTANT.  During iteration UNDEFINED may still drop
     to CONSTANT.  */
  if (has_undefined_operand)
    return CONSTANT;

  /* We do not consider virtual operands here -- load from read-only
     memory may have only VARYING virtual operands, but still be
     constant.  */
  if (has_constant_operand
      || gimple_references_memory_p (stmt))
    return CONSTANT;

  return VARYING;
}
/* Returns true if STMT cannot be constant.  */

static bool
surely_varying_stmt_p (gimple stmt)
{
  /* If the statement has operands that we cannot handle, it cannot be
     constant.  */
  if (gimple_has_volatile_ops (stmt))
    return true;

  /* If it is a call and does not return a value or is not a
     builtin and not an indirect call, it is varying.  */
  if (is_gimple_call (stmt))
    {
      tree fndecl;
      if (!gimple_call_lhs (stmt)
	  || ((fndecl = gimple_call_fndecl (stmt)) != NULL_TREE
	      && !DECL_BUILT_IN (fndecl)))
	return true;
    }

  /* Any other store operation is not interesting.  */
  else if (gimple_vdef (stmt))
    return true;

  /* Anything other than assignments and conditional jumps are not
     interesting for CCP.  */
  if (gimple_code (stmt) != GIMPLE_ASSIGN
      && gimple_code (stmt) != GIMPLE_COND
      && gimple_code (stmt) != GIMPLE_SWITCH
      && gimple_code (stmt) != GIMPLE_CALL)
    return true;

  return false;
}
/* Initialize local data structures for CCP.  */

static void
ccp_initialize (void)
{
  basic_block bb;

  const_val = XCNEWVEC (prop_value_t, num_ssa_names);

  /* Initialize simulation flags for PHI nodes and statements.  */
  FOR_EACH_BB (bb)
    {
      gimple_stmt_iterator i;

      for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i))
	{
	  gimple stmt = gsi_stmt (i);
	  bool is_varying;

	  /* If the statement is a control insn, then we do not
	     want to avoid simulating the statement once.  Failure
	     to do so means that those edges will never get added.  */
	  if (stmt_ends_bb_p (stmt))
	    is_varying = false;
	  else
	    is_varying = surely_varying_stmt_p (stmt);

	  if (is_varying)
	    {
	      tree def;
	      ssa_op_iter iter;

	      /* If the statement will not produce a constant, mark
		 all its outputs VARYING.  */
	      FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
		set_value_varying (def);
	    }
	  prop_set_simulate_again (stmt, !is_varying);
	}
    }

  /* Now process PHI nodes.  We never clear the simulate_again flag on
     phi nodes, since we do not know which edges are executable yet,
     except for phi nodes for virtual operands when we do not do store ccp.  */
  FOR_EACH_BB (bb)
    {
      gimple_stmt_iterator i;

      for (i = gsi_start_phis (bb); !gsi_end_p (i); gsi_next (&i))
	{
	  gimple phi = gsi_stmt (i);

	  if (virtual_operand_p (gimple_phi_result (phi)))
	    prop_set_simulate_again (phi, false);
	  else
	    prop_set_simulate_again (phi, true);
	}
    }
}

/* Debug count support.  Reset the values of ssa names
   VARYING when the total number ssa names analyzed is
   beyond the debug count specified.  */

static void
do_dbg_cnt (void)
{
  unsigned i;
  for (i = 0; i < num_ssa_names; i++)
    {
      if (!dbg_cnt (ccp))
	{
	  const_val[i].lattice_val = VARYING;
	  const_val[i].mask = double_int_minus_one;
	  const_val[i].value = NULL_TREE;
	}
    }
}
/* Do final substitution of propagated values, cleanup the flowgraph and
   free allocated storage.

   Return TRUE when something was optimized.  */

static bool
ccp_finalize (void)
{
  bool something_changed;
  unsigned i;

  do_dbg_cnt ();

  /* Derive alignment and misalignment information from partially
     constant pointers in the lattice.  */
  for (i = 1; i < num_ssa_names; ++i)
    {
      tree name = ssa_name (i);
      prop_value_t *val;
      unsigned int tem, align;

      if (!name
	  || !POINTER_TYPE_P (TREE_TYPE (name)))
	continue;

      val = get_value (name);
      if (val->lattice_val != CONSTANT
	  || TREE_CODE (val->value) != INTEGER_CST)
	continue;

      /* Trailing constant bits specify the alignment, trailing value
	 bits the misalignment.  */
      tem = val->mask.low;
      align = (tem & -tem);
      if (align > 1)
	set_ptr_info_alignment (get_ptr_info (name), align,
				TREE_INT_CST_LOW (val->value) & (align - 1));
    }

  /* Perform substitutions based on the known constant values.  */
  something_changed = substitute_and_fold (get_constant_value,
					   ccp_fold_stmt, true);

  free (const_val);
  const_val = NULL;
  return something_changed;
}
/* Compute the meet operator between *VAL1 and *VAL2.  Store the result
   in VAL1.

		any  M UNDEFINED   = any
		any  M VARYING     = VARYING
		Ci   M Cj	   = Ci		if (i == j)
		Ci   M Cj	   = VARYING	if (i != j)
   */

static void
ccp_lattice_meet (prop_value_t *val1, prop_value_t *val2)
{
  if (val1->lattice_val == UNDEFINED)
    {
      /* UNDEFINED M any = any   */
      *val1 = *val2;
    }
  else if (val2->lattice_val == UNDEFINED)
    {
      /* any M UNDEFINED = any
	 Nothing to do.  VAL1 already contains the value we want.  */
      ;
    }
  else if (val1->lattice_val == VARYING
	   || val2->lattice_val == VARYING)
    {
      /* any M VARYING = VARYING.  */
      val1->lattice_val = VARYING;
      val1->mask = double_int_minus_one;
      val1->value = NULL_TREE;
    }
  else if (val1->lattice_val == CONSTANT
	   && val2->lattice_val == CONSTANT
	   && TREE_CODE (val1->value) == INTEGER_CST
	   && TREE_CODE (val2->value) == INTEGER_CST)
    {
      /* Ci M Cj = Ci		if (i == j)
	 Ci M Cj = VARYING	if (i != j)

	 For INTEGER_CSTs mask unequal bits.  If no equal bits remain,
	 drop to varying.  */
      val1->mask = val1->mask | val2->mask
		   | (tree_to_double_int (val1->value)
		      ^ tree_to_double_int (val2->value));
      if (val1->mask.is_minus_one ())
	{
	  val1->lattice_val = VARYING;
	  val1->value = NULL_TREE;
	}
    }
  else if (val1->lattice_val == CONSTANT
	   && val2->lattice_val == CONSTANT
	   && simple_cst_equal (val1->value, val2->value) == 1)
    {
      /* Ci M Cj = Ci		if (i == j)
	 Ci M Cj = VARYING	if (i != j)

	 VAL1 already contains the value we want for equivalent values.  */
    }
  else if (val1->lattice_val == CONSTANT
	   && val2->lattice_val == CONSTANT
	   && (TREE_CODE (val1->value) == ADDR_EXPR
	       || TREE_CODE (val2->value) == ADDR_EXPR))
    {
      /* When not equal addresses are involved try meeting for
	 alignment.  */
      prop_value_t tem = *val2;
      if (TREE_CODE (val1->value) == ADDR_EXPR)
	*val1 = get_value_for_expr (val1->value, true);
      if (TREE_CODE (val2->value) == ADDR_EXPR)
	tem = get_value_for_expr (val2->value, true);
      ccp_lattice_meet (val1, &tem);
    }
  else
    {
      /* Any other combination is VARYING.  */
      val1->lattice_val = VARYING;
      val1->mask = double_int_minus_one;
      val1->value = NULL_TREE;
    }
}
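/* Editorial illustration of the INTEGER_CST meet above (hypothetical
   numbers): meeting CONSTANT 5/mask 0 with CONSTANT 7/mask 0 yields
   mask 0 | 0 | (5 ^ 7) == 2, i.e. "CONSTANT 5 with bit 1 unknown" --
   in binary 1x1, which covers exactly 5 and 7.  Only when every bit
   disagrees (mask of all ones) does the result drop to VARYING.  */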
/* Loop through the PHI_NODE's parameters for BLOCK and compare their
   lattice values to determine PHI_NODE's lattice value.  The value of a
   PHI node is determined by calling ccp_lattice_meet with all the arguments
   of the PHI node that are incoming via executable edges.  */

static enum ssa_prop_result
ccp_visit_phi_node (gimple phi)
{
  unsigned i;
  prop_value_t *old_val, new_val;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nVisiting PHI node: ");
      print_gimple_stmt (dump_file, phi, 0, dump_flags);
    }

  old_val = get_value (gimple_phi_result (phi));
  switch (old_val->lattice_val)
    {
    case VARYING:
      return SSA_PROP_VARYING;

    case CONSTANT:
    case UNDEFINED:
      break;

    default:
      gcc_unreachable ();
    }

  new_val.lattice_val = UNDEFINED;
  new_val.value = NULL_TREE;

  for (i = 0; i < gimple_phi_num_args (phi); i++)
    {
      /* Compute the meet operator over all the PHI arguments flowing
	 through executable edges.  */
      edge e = gimple_phi_arg_edge (phi, i);

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file,
		   "\n    Argument #%d (%d -> %d %sexecutable)\n",
		   i, e->src->index, e->dest->index,
		   (e->flags & EDGE_EXECUTABLE) ? "" : "not ");
	}

      /* If the incoming edge is executable, compute the meet operator for
	 the existing value of the PHI node and the current PHI argument.  */
      if (e->flags & EDGE_EXECUTABLE)
	{
	  tree arg = gimple_phi_arg (phi, i)->def;
	  prop_value_t arg_val = get_value_for_expr (arg, false);

	  ccp_lattice_meet (&new_val, &arg_val);

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "\t");
	      print_generic_expr (dump_file, arg, dump_flags);
	      dump_lattice_value (dump_file, "\tValue: ", arg_val);
	      fprintf (dump_file, "\n");
	    }

	  if (new_val.lattice_val == VARYING)
	    break;
	}
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      dump_lattice_value (dump_file, "\n    PHI node value: ", new_val);
      fprintf (dump_file, "\n\n");
    }

  /* Make the transition to the new value.  */
  if (set_lattice_value (gimple_phi_result (phi), new_val))
    {
      if (new_val.lattice_val == VARYING)
	return SSA_PROP_VARYING;
      else
	return SSA_PROP_INTERESTING;
    }
  else
    return SSA_PROP_NOT_INTERESTING;
}
/* Return the constant value for OP or OP otherwise.  */

static tree
valueize_op (tree op)
{
  if (TREE_CODE (op) == SSA_NAME)
    {
      tree tem = get_constant_value (op);
      if (tem)
	return tem;
    }
  return op;
}

/* CCP specific front-end to the non-destructive constant folding
   routines.

   Attempt to simplify the RHS of STMT knowing that one or more
   operands are constants.

   If simplification is possible, return the simplified RHS,
   otherwise return the original RHS or NULL_TREE.  */

static tree
ccp_fold (gimple stmt)
{
  location_t loc = gimple_location (stmt);
  switch (gimple_code (stmt))
    {
    case GIMPLE_COND:
      {
	/* Handle comparison operators that can appear in GIMPLE form.  */
	tree op0 = valueize_op (gimple_cond_lhs (stmt));
	tree op1 = valueize_op (gimple_cond_rhs (stmt));
	enum tree_code code = gimple_cond_code (stmt);
	return fold_binary_loc (loc, code, boolean_type_node, op0, op1);
      }

    case GIMPLE_SWITCH:
      {
	/* Return the constant switch index.  */
	return valueize_op (gimple_switch_index (stmt));
      }

    case GIMPLE_ASSIGN:
    case GIMPLE_CALL:
      return gimple_fold_stmt_to_constant_1 (stmt, valueize_op);

    default:
      gcc_unreachable ();
    }
}
/* Apply the operation CODE in type TYPE to the value, mask pair
   RVAL and RMASK representing a value of type RTYPE and set
   the value, mask pair *VAL and *MASK to the result.  */

static void
bit_value_unop_1 (enum tree_code code, tree type,
		  double_int *val, double_int *mask,
		  tree rtype, double_int rval, double_int rmask)
{
  switch (code)
    {
    case BIT_NOT_EXPR:
      *mask = rmask;
      *val = ~rval;
      break;

    case NEGATE_EXPR:
      {
	double_int temv, temm;
	/* Return ~rval + 1.  */
	bit_value_unop_1 (BIT_NOT_EXPR, type, &temv, &temm, type, rval, rmask);
	bit_value_binop_1 (PLUS_EXPR, type, val, mask,
			   type, temv, temm,
			   type, double_int_one, double_int_zero);
	break;
      }

    CASE_CONVERT:
      {
	bool uns;

	/* First extend mask and value according to the original type.  */
	uns = TYPE_UNSIGNED (rtype);
	*mask = rmask.ext (TYPE_PRECISION (rtype), uns);
	*val = rval.ext (TYPE_PRECISION (rtype), uns);

	/* Then extend mask and value according to the target type.  */
	uns = TYPE_UNSIGNED (type);
	*mask = (*mask).ext (TYPE_PRECISION (type), uns);
	*val = (*val).ext (TYPE_PRECISION (type), uns);
	break;
      }

    default:
      *mask = double_int_minus_one;
      break;
    }
}
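/* Editorial note on the BIT_NOT_EXPR case above: complementing an
   unknown bit yields an unknown bit, so the mask is simply carried
   over while the known bits flip.  For instance (hypothetical numbers),
   ~x for x == value 0x1/mask 0x2 gives value ~0x1 with mask still 0x2:
   bit 1 remains the only unknown bit.  NEGATE_EXPR then builds on this
   via the two's-complement identity -x == ~x + 1.  */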
/* Apply the operation CODE in type TYPE to the value, mask pairs
   R1VAL, R1MASK and R2VAL, R2MASK representing values of types R1TYPE
   and R2TYPE and set the value, mask pair *VAL and *MASK to the result.  */

static void
bit_value_binop_1 (enum tree_code code, tree type,
		   double_int *val, double_int *mask,
		   tree r1type, double_int r1val, double_int r1mask,
		   tree r2type, double_int r2val, double_int r2mask)
{
  bool uns = TYPE_UNSIGNED (type);
  /* Assume we'll get a constant result.  Use an initial varying value,
     we fall back to varying in the end if necessary.  */
  *mask = double_int_minus_one;
  switch (code)
    {
    case BIT_AND_EXPR:
      /* The mask is constant where there is a known not
	 set bit, (m1 | m2) & ((v1 | m1) & (v2 | m2)) */
      *mask = (r1mask | r2mask) & (r1val | r1mask) & (r2val | r2mask);
      *val = r1val & r2val;
      break;

    case BIT_IOR_EXPR:
      /* The mask is constant where there is a known
	 set bit, (m1 | m2) & ~((v1 & ~m1) | (v2 & ~m2)).  */
      *mask = (r1mask | r2mask)
	      .and_not (r1val.and_not (r1mask) | r2val.and_not (r2mask));
      *val = r1val | r2val;
      break;

    case BIT_XOR_EXPR:
      /* m1 | m2  */
      *mask = r1mask | r2mask;
      *val = r1val ^ r2val;
      break;

    case LROTATE_EXPR:
    case RROTATE_EXPR:
      if (r2mask.is_zero ())
	{
	  HOST_WIDE_INT shift = r2val.low;
	  if (code == RROTATE_EXPR)
	    shift = -shift;
	  *mask = r1mask.lrotate (shift, TYPE_PRECISION (type));
	  *val = r1val.lrotate (shift, TYPE_PRECISION (type));
	}
      break;

    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
      /* ???  We can handle partially known shift counts if we know
	 its sign.  That way we can tell that (x << (y | 8)) & 255
	 is zero.  */
      if (r2mask.is_zero ())
	{
	  HOST_WIDE_INT shift = r2val.low;
	  if (code == RSHIFT_EXPR)
	    shift = -shift;
	  /* We need to know if we are doing a left or a right shift
	     to properly shift in zeros for left shift and unsigned
	     right shifts and the sign bit for signed right shifts.
	     For signed right shifts we shift in varying in case
	     the sign bit was varying.  */
	  if (shift > 0)
	    {
	      *mask = r1mask.llshift (shift, TYPE_PRECISION (type));
	      *val = r1val.llshift (shift, TYPE_PRECISION (type));
	    }
	  else if (shift < 0)
	    {
	      shift = -shift;
	      *mask = r1mask.rshift (shift, TYPE_PRECISION (type), !uns);
	      *val = r1val.rshift (shift, TYPE_PRECISION (type), !uns);
	    }
	  else
	    {
	      *mask = r1mask;
	      *val = r1val;
	    }
	}
      break;

    case PLUS_EXPR:
    case POINTER_PLUS_EXPR:
      {
	double_int lo, hi;
	/* Do the addition with unknown bits set to zero, to give carry-ins of
	   zero wherever possible.  */
	lo = r1val.and_not (r1mask) + r2val.and_not (r2mask);
	lo = lo.ext (TYPE_PRECISION (type), uns);
	/* Do the addition with unknown bits set to one, to give carry-ins of
	   one wherever possible.  */
	hi = (r1val | r1mask) + (r2val | r2mask);
	hi = hi.ext (TYPE_PRECISION (type), uns);
	/* Each bit in the result is known if (a) the corresponding bits in
	   both inputs are known, and (b) the carry-in to that bit position
	   is known.  We can check condition (b) by seeing if we got the same
	   result with minimised carries as with maximised carries.  */
	*mask = r1mask | r2mask | (lo ^ hi);
	*mask = (*mask).ext (TYPE_PRECISION (type), uns);
	/* It shouldn't matter whether we choose lo or hi here.  */
	*val = lo;
	break;
      }

    case MINUS_EXPR:
      {
	double_int temv, temm;
	bit_value_unop_1 (NEGATE_EXPR, r2type, &temv, &temm,
			  r2type, r2val, r2mask);
	bit_value_binop_1 (PLUS_EXPR, type, val, mask,
			   r1type, r1val, r1mask,
			   r2type, temv, temm);
	break;
      }

    case MULT_EXPR:
      {
	/* Just track trailing zeros in both operands and transfer
	   them to the other.  */
	int r1tz = (r1val | r1mask).trailing_zeros ();
	int r2tz = (r2val | r2mask).trailing_zeros ();
	if (r1tz + r2tz >= HOST_BITS_PER_DOUBLE_INT)
	  {
	    *mask = double_int_zero;
	    *val = double_int_zero;
	  }
	else if (r1tz + r2tz > 0)
	  {
	    *mask = ~double_int::mask (r1tz + r2tz);
	    *mask = (*mask).ext (TYPE_PRECISION (type), uns);
	    *val = double_int_zero;
	  }
	break;
      }

    case EQ_EXPR:
    case NE_EXPR:
      {
	double_int m = r1mask | r2mask;
	if (r1val.and_not (m) != r2val.and_not (m))
	  {
	    *mask = double_int_zero;
	    *val = ((code == EQ_EXPR) ? double_int_zero : double_int_one);
	  }
	else
	  {
	    /* We know the result of a comparison is always one or zero.  */
	    *mask = double_int_one;
	    *val = double_int_zero;
	  }
	break;
      }

    case GE_EXPR:
    case GT_EXPR:
      {
	double_int tem = r1val;
	r1val = r2val;
	r2val = tem;
	tem = r1mask;
	r1mask = r2mask;
	r2mask = tem;
	code = swap_tree_comparison (code);
      }
      /* Fallthru.  */
    case LT_EXPR:
    case LE_EXPR:
      {
	int minmax, maxmin;
	/* If the most significant bits are not known we know nothing.  */
	if (r1mask.is_negative () || r2mask.is_negative ())
	  break;

	/* For comparisons the signedness is in the comparison operands.  */
	uns = TYPE_UNSIGNED (r1type);

	/* If we know the most significant bits we know the value
	   ranges by means of treating varying bits as zero
	   or one.  Do a cross comparison of the max/min pairs.  */
	maxmin = (r1val | r1mask).cmp (r2val.and_not (r2mask), uns);
	minmax = r1val.and_not (r1mask).cmp (r2val | r2mask, uns);
	if (maxmin < 0)  /* r1 is less than r2.  */
	  {
	    *mask = double_int_zero;
	    *val = double_int_one;
	  }
	else if (minmax > 0)  /* r1 is not less or equal to r2.  */
	  {
	    *mask = double_int_zero;
	    *val = double_int_zero;
	  }
	else if (maxmin == minmax)  /* r1 and r2 are equal.  */
	  {
	    /* This probably should never happen as we'd have
	       folded the thing during fully constant value folding.  */
	    *mask = double_int_zero;
	    *val = (code == LE_EXPR ? double_int_one : double_int_zero);
	  }
	else
	  {
	    /* We know the result of a comparison is always one or zero.  */
	    *mask = double_int_one;
	    *val = double_int_zero;
	  }
	break;
      }

    default:;
    }
}
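/* Editorial worked instance of the PLUS_EXPR carry trick above
   (hypothetical numbers): for r1 = value 0x4/mask 0x1 (i.e. 4 or 5)
   plus the constant 1, lo = 4 + 1 = 5 simulates all-zero carry-ins and
   hi = 5 + 1 = 6 simulates all-one carry-ins.  The result mask is
   0x1 | 0x0 | (5 ^ 6) == 0x3 with value 5: bit 2 is known set and all
   higher bits known clear, which soundly covers the two possible
   results 5 and 6.  */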
/* Return the propagation value when applying the operation CODE to
   the value RHS yielding type TYPE.  */

static prop_value_t
bit_value_unop (enum tree_code code, tree type, tree rhs)
{
  prop_value_t rval = get_value_for_expr (rhs, true);
  double_int value, mask;
  prop_value_t val;

  if (rval.lattice_val == UNDEFINED)
    return rval;

  gcc_assert ((rval.lattice_val == CONSTANT
	       && TREE_CODE (rval.value) == INTEGER_CST)
	      || rval.mask.is_minus_one ());
  bit_value_unop_1 (code, type, &value, &mask,
		    TREE_TYPE (rhs), value_to_double_int (rval), rval.mask);
  if (!mask.is_minus_one ())
    {
      val.lattice_val = CONSTANT;
      val.mask = mask;
      /* ???  Delay building trees here.  */
      val.value = double_int_to_tree (type, value);
    }
  else
    {
      val.lattice_val = VARYING;
      val.value = NULL_TREE;
      val.mask = double_int_minus_one;
    }
  return val;
}
/* Return the propagation value when applying the operation CODE to
   the values RHS1 and RHS2 yielding type TYPE.  */

static prop_value_t
bit_value_binop (enum tree_code code, tree type, tree rhs1, tree rhs2)
{
  prop_value_t r1val = get_value_for_expr (rhs1, true);
  prop_value_t r2val = get_value_for_expr (rhs2, true);
  double_int value, mask;
  prop_value_t val;

  if (r1val.lattice_val == UNDEFINED
      || r2val.lattice_val == UNDEFINED)
    {
      val.lattice_val = VARYING;
      val.value = NULL_TREE;
      val.mask = double_int_minus_one;
      return val;
    }

  gcc_assert ((r1val.lattice_val == CONSTANT
	       && TREE_CODE (r1val.value) == INTEGER_CST)
	      || r1val.mask.is_minus_one ());
  gcc_assert ((r2val.lattice_val == CONSTANT
	       && TREE_CODE (r2val.value) == INTEGER_CST)
	      || r2val.mask.is_minus_one ());
  bit_value_binop_1 (code, type, &value, &mask,
		     TREE_TYPE (rhs1), value_to_double_int (r1val), r1val.mask,
		     TREE_TYPE (rhs2), value_to_double_int (r2val), r2val.mask);
  if (!mask.is_minus_one ())
    {
      val.lattice_val = CONSTANT;
      val.mask = mask;
      /* ???  Delay building trees here.  */
      val.value = double_int_to_tree (type, value);
    }
  else
    {
      val.lattice_val = VARYING;
      val.value = NULL_TREE;
      val.mask = double_int_minus_one;
    }
  return val;
}
/* Return the propagation value when applying __builtin_assume_aligned to
   its arguments.  */

static prop_value_t
bit_value_assume_aligned (gimple stmt)
{
  tree ptr = gimple_call_arg (stmt, 0), align, misalign = NULL_TREE;
  tree type = TREE_TYPE (ptr);
  unsigned HOST_WIDE_INT aligni, misaligni = 0;
  prop_value_t ptrval = get_value_for_expr (ptr, true);
  prop_value_t alignval;
  double_int value, mask;
  prop_value_t val;

  if (ptrval.lattice_val == UNDEFINED)
    return ptrval;

  gcc_assert ((ptrval.lattice_val == CONSTANT
	       && TREE_CODE (ptrval.value) == INTEGER_CST)
	      || ptrval.mask.is_minus_one ());
  align = gimple_call_arg (stmt, 1);
  if (!host_integerp (align, 1))
    return ptrval;
  aligni = tree_low_cst (align, 1);
  if (aligni <= 1
      || (aligni & (aligni - 1)) != 0)
    return ptrval;
  if (gimple_call_num_args (stmt) > 2)
    {
      misalign = gimple_call_arg (stmt, 2);
      if (!host_integerp (misalign, 1))
	return ptrval;
      misaligni = tree_low_cst (misalign, 1);
      if (misaligni >= aligni)
	return ptrval;
    }
  align = build_int_cst_type (type, -aligni);
  alignval = get_value_for_expr (align, true);
  bit_value_binop_1 (BIT_AND_EXPR, type, &value, &mask,
		     type, value_to_double_int (ptrval), ptrval.mask,
		     type, value_to_double_int (alignval), alignval.mask);
  if (!mask.is_minus_one ())
    {
      val.lattice_val = CONSTANT;
      val.mask = mask;
      gcc_assert ((mask.low & (aligni - 1)) == 0);
      gcc_assert ((value.low & (aligni - 1)) == 0);
      value.low |= misaligni;
      /* ???  Delay building trees here.  */
      val.value = double_int_to_tree (type, value);
    }
  else
    {
      val.lattice_val = VARYING;
      val.value = NULL_TREE;
      val.mask = double_int_minus_one;
    }
  return val;
}
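/* Editorial illustration (hypothetical GIMPLE): for
   p2_3 = __builtin_assume_aligned (p_1, 16) the function above yields
   value 0/mask ~0xf for p2_3 (low four bits known zero); for the
   three-argument form __builtin_assume_aligned (p_1, 16, 4) it yields
   value 4/mask ~0xf, i.e. the pointer is known to sit 4 bytes past a
   16-byte boundary.  */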
/* Evaluate statement STMT.
   Valid only for assignments, calls, conditionals, and switches.  */

static prop_value_t
evaluate_stmt (gimple stmt)
{
  prop_value_t val;
  tree simplified = NULL_TREE;
  ccp_lattice_t likelyvalue = likely_value (stmt);
  bool is_constant = false;
  unsigned int align;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "which is likely ");
      switch (likelyvalue)
	{
	case CONSTANT:
	  fprintf (dump_file, "CONSTANT");
	  break;
	case UNDEFINED:
	  fprintf (dump_file, "UNDEFINED");
	  break;
	case VARYING:
	  fprintf (dump_file, "VARYING");
	  break;
	default:;
	}
      fprintf (dump_file, "\n");
    }

  /* If the statement is likely to have a CONSTANT result, then try
     to fold the statement to determine the constant value.  */
  /* FIXME.  This is the only place that we call ccp_fold.
     Since likely_value never returns CONSTANT for calls, we will
     not attempt to fold them, including builtins that may profit.  */
  if (likelyvalue == CONSTANT)
    {
      fold_defer_overflow_warnings ();
      simplified = ccp_fold (stmt);
      is_constant = simplified && is_gimple_min_invariant (simplified);
      fold_undefer_overflow_warnings (is_constant, stmt, 0);
      if (is_constant)
	{
	  /* The statement produced a constant value.  */
	  val.lattice_val = CONSTANT;
	  val.value = simplified;
	  val.mask = double_int_zero;
	}
    }
  /* If the statement is likely to have a VARYING result, then do not
     bother folding the statement.  */
  else if (likelyvalue == VARYING)
    {
      enum gimple_code code = gimple_code (stmt);
      if (code == GIMPLE_ASSIGN)
	{
	  enum tree_code subcode = gimple_assign_rhs_code (stmt);

	  /* Other cases cannot satisfy is_gimple_min_invariant
	     without folding.  */
	  if (get_gimple_rhs_class (subcode) == GIMPLE_SINGLE_RHS)
	    simplified = gimple_assign_rhs1 (stmt);
	}
      else if (code == GIMPLE_SWITCH)
	simplified = gimple_switch_index (stmt);
      else
	/* These cannot satisfy is_gimple_min_invariant without folding.  */
	gcc_assert (code == GIMPLE_CALL || code == GIMPLE_COND);
      is_constant = simplified && is_gimple_min_invariant (simplified);
      if (is_constant)
	{
	  /* The statement produced a constant value.  */
	  val.lattice_val = CONSTANT;
	  val.value = simplified;
	  val.mask = double_int_zero;
	}
    }

  /* Resort to simplification for bitwise tracking.  */
  if (flag_tree_bit_ccp
      && (likelyvalue == CONSTANT || is_gimple_call (stmt))
      && !is_constant)
    {
      enum gimple_code code = gimple_code (stmt);
      tree fndecl;
      val.lattice_val = VARYING;
      val.value = NULL_TREE;
      val.mask = double_int_minus_one;
      if (code == GIMPLE_ASSIGN)
	{
	  enum tree_code subcode = gimple_assign_rhs_code (stmt);
	  tree rhs1 = gimple_assign_rhs1 (stmt);
	  switch (get_gimple_rhs_class (subcode))
	    {
	    case GIMPLE_SINGLE_RHS:
	      if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
		  || POINTER_TYPE_P (TREE_TYPE (rhs1)))
		val = get_value_for_expr (rhs1, true);
	      break;

	    case GIMPLE_UNARY_RHS:
	      if ((INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
		   || POINTER_TYPE_P (TREE_TYPE (rhs1)))
		  && (INTEGRAL_TYPE_P (gimple_expr_type (stmt))
		      || POINTER_TYPE_P (gimple_expr_type (stmt))))
		val = bit_value_unop (subcode, gimple_expr_type (stmt), rhs1);
	      break;

	    case GIMPLE_BINARY_RHS:
	      if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
		  || POINTER_TYPE_P (TREE_TYPE (rhs1)))
		{
		  tree lhs = gimple_assign_lhs (stmt);
		  tree rhs2 = gimple_assign_rhs2 (stmt);
		  val = bit_value_binop (subcode,
					 TREE_TYPE (lhs), rhs1, rhs2);
		}
	      break;

	    default:;
	    }
	}
      else if (code == GIMPLE_COND)
	{
	  enum tree_code code = gimple_cond_code (stmt);
	  tree rhs1 = gimple_cond_lhs (stmt);
	  tree rhs2 = gimple_cond_rhs (stmt);
	  if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
	      || POINTER_TYPE_P (TREE_TYPE (rhs1)))
	    val = bit_value_binop (code, TREE_TYPE (rhs1), rhs1, rhs2);
	}
      else if (code == GIMPLE_CALL
	       && (fndecl = gimple_call_fndecl (stmt))
	       && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
	{
	  switch (DECL_FUNCTION_CODE (fndecl))
	    {
	    case BUILT_IN_MALLOC:
	    case BUILT_IN_REALLOC:
	    case BUILT_IN_CALLOC:
	    case BUILT_IN_STRDUP:
	    case BUILT_IN_STRNDUP:
	      val.lattice_val = CONSTANT;
	      val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0);
	      val.mask = double_int::from_shwi
			   (~(((HOST_WIDE_INT) MALLOC_ABI_ALIGNMENT)
			      / BITS_PER_UNIT - 1));
	      break;

	    case BUILT_IN_ALLOCA:
	    case BUILT_IN_ALLOCA_WITH_ALIGN:
	      align = (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_ALLOCA_WITH_ALIGN
		       ? TREE_INT_CST_LOW (gimple_call_arg (stmt, 1))
		       : BIGGEST_ALIGNMENT);
	      val.lattice_val = CONSTANT;
	      val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0);
	      val.mask = double_int::from_shwi (~(((HOST_WIDE_INT) align)
						  / BITS_PER_UNIT - 1));
	      break;

	    /* These builtins return their first argument, unmodified.  */
	    case BUILT_IN_MEMCPY:
	    case BUILT_IN_MEMMOVE:
	    case BUILT_IN_MEMSET:
	    case BUILT_IN_STRCPY:
	    case BUILT_IN_STRNCPY:
	    case BUILT_IN_MEMCPY_CHK:
	    case BUILT_IN_MEMMOVE_CHK:
	    case BUILT_IN_MEMSET_CHK:
	    case BUILT_IN_STRCPY_CHK:
	    case BUILT_IN_STRNCPY_CHK:
	      val = get_value_for_expr (gimple_call_arg (stmt, 0), true);
	      break;

	    case BUILT_IN_ASSUME_ALIGNED:
	      val = bit_value_assume_aligned (stmt);
	      break;

	    default:;
	    }
	}
      is_constant = (val.lattice_val == CONSTANT);
    }

  if (!is_constant)
    {
      /* The statement produced a nonconstant value.  If the statement
	 had UNDEFINED operands, then the result of the statement
	 should be UNDEFINED.  Otherwise, the statement is VARYING.  */
      if (likelyvalue == UNDEFINED)
	{
	  val.lattice_val = likelyvalue;
	  val.mask = double_int_zero;
	}
      else
	{
	  val.lattice_val = VARYING;
	  val.mask = double_int_minus_one;
	}

      val.value = NULL_TREE;
    }

  return val;
}

typedef hash_table <pointer_hash <gimple_statement_d> > gimple_htab;

/* Given a BUILT_IN_STACK_SAVE value SAVED_VAL, insert a clobber of VAR before
   each matching BUILT_IN_STACK_RESTORE.  Mark visited phis in VISITED.  */

static void
insert_clobber_before_stack_restore (tree saved_val, tree var,
				     gimple_htab *visited)
{
  gimple stmt, clobber_stmt;
  tree clobber;
  imm_use_iterator iter;
  gimple_stmt_iterator i;
  gimple *slot;

  FOR_EACH_IMM_USE_STMT (stmt, iter, saved_val)
    if (gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
      {
	clobber = build_constructor (TREE_TYPE (var), NULL);
	TREE_THIS_VOLATILE (clobber) = 1;
	clobber_stmt = gimple_build_assign (var, clobber);

	i = gsi_for_stmt (stmt);
	gsi_insert_before (&i, clobber_stmt, GSI_SAME_STMT);
      }
    else if (gimple_code (stmt) == GIMPLE_PHI)
      {
	if (!visited->is_created ())
	  visited->create (10);

	slot = visited->find_slot (stmt, INSERT);
	if (*slot != NULL)
	  continue;

	*slot = stmt;
	insert_clobber_before_stack_restore (gimple_phi_result (stmt), var,
					     visited);
      }
    else
      gcc_assert (is_gimple_debug (stmt));
}

/* Advance the iterator to the previous non-debug gimple statement in the same
   or dominating basic block.  */

static inline void
gsi_prev_dom_bb_nondebug (gimple_stmt_iterator *i)
{
  basic_block dom;

  gsi_prev_nondebug (i);
  while (gsi_end_p (*i))
    {
      dom = get_immediate_dominator (CDI_DOMINATORS, i->bb);
      if (dom == NULL || dom == ENTRY_BLOCK_PTR)
	return;

      *i = gsi_last_bb (dom);
    }
}

/* Find a BUILT_IN_STACK_SAVE dominating gsi_stmt (I), and insert
   a clobber of VAR before each matching BUILT_IN_STACK_RESTORE.

   It is possible that BUILT_IN_STACK_SAVE cannot be found in a dominator when
   a previous pass (such as DOM) duplicated it along multiple paths to a BB.
   In that case the function gives up without inserting the clobbers.  */

static void
insert_clobbers_for_var (gimple_stmt_iterator i, tree var)
{
  gimple stmt;
  tree saved_val;
  gimple_htab visited;

  for (; !gsi_end_p (i); gsi_prev_dom_bb_nondebug (&i))
    {
      stmt = gsi_stmt (i);

      if (!gimple_call_builtin_p (stmt, BUILT_IN_STACK_SAVE))
	continue;

      saved_val = gimple_call_lhs (stmt);
      if (saved_val == NULL_TREE)
	continue;

      insert_clobber_before_stack_restore (saved_val, var, &visited);
      break;
    }

  if (visited.is_created ())
    visited.dispose ();
}
/* Detects a __builtin_alloca_with_align with constant size argument.  Declares
   fixed-size array and returns the address, if found, otherwise returns
   NULL_TREE.  */

static tree
fold_builtin_alloca_with_align (gimple stmt)
{
  unsigned HOST_WIDE_INT size, threshold, n_elem;
  tree lhs, arg, block, var, elem_type, array_type;

  /* Get lhs.  */
  lhs = gimple_call_lhs (stmt);
  if (lhs == NULL_TREE)
    return NULL_TREE;

  /* Detect constant argument.  */
  arg = get_constant_value (gimple_call_arg (stmt, 0));
  if (arg == NULL_TREE
      || TREE_CODE (arg) != INTEGER_CST
      || !host_integerp (arg, 1))
    return NULL_TREE;

  size = TREE_INT_CST_LOW (arg);

  /* Heuristic: don't fold large allocas.  */
  threshold = (unsigned HOST_WIDE_INT)PARAM_VALUE (PARAM_LARGE_STACK_FRAME);
  /* In case the alloca is located at function entry, it has the same lifetime
     as a declared array, so we allow a larger size.  */
  block = gimple_block (stmt);
  if (!(cfun->after_inlining
	&& TREE_CODE (BLOCK_SUPERCONTEXT (block)) == FUNCTION_DECL))
    threshold /= 10;
  if (size > threshold)
    return NULL_TREE;

  /* Declare array.  */
  elem_type = build_nonstandard_integer_type (BITS_PER_UNIT, 1);
  n_elem = size * 8 / BITS_PER_UNIT;
  array_type = build_array_type_nelts (elem_type, n_elem);
  var = create_tmp_var (array_type, NULL);
  DECL_ALIGN (var) = TREE_INT_CST_LOW (gimple_call_arg (stmt, 1));
  {
    struct ptr_info_def *pi = SSA_NAME_PTR_INFO (lhs);
    if (pi != NULL && !pi->pt.anything)
      {
	bool singleton_p;
	unsigned uid;
	singleton_p = pt_solution_singleton_p (&pi->pt, &uid);
	gcc_assert (singleton_p);
	SET_DECL_PT_UID (var, uid);
      }
  }

  /* Fold alloca to the address of the array.  */
  return fold_convert (TREE_TYPE (lhs), build_fold_addr_expr (var));
}
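/* Editorial sketch of the transformation above, on hypothetical GIMPLE
   (D.1234 is a made-up temporary):

	p_1 = __builtin_alloca_with_align (16, 256);

   becomes, once the size argument folds to the constant 16,

	unsigned char D.1234[16];	\* DECL_ALIGN of 256 bits *\
	p_1 = &D.1234;

   modulo exact representation: DECL_ALIGN is taken from the second
   call argument, which is expressed in bits.  */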
/* Fold the stmt at *GSI with CCP specific information that propagating
   and regular folding does not catch.  */

static bool
ccp_fold_stmt (gimple_stmt_iterator *gsi)
{
  gimple stmt = gsi_stmt (*gsi);

  switch (gimple_code (stmt))
    {
    case GIMPLE_COND:
      {
	prop_value_t val;
	/* Statement evaluation will handle type mismatches in constants
	   more gracefully than the final propagation.  This allows us to
	   fold more conditionals here.  */
	val = evaluate_stmt (stmt);
	if (val.lattice_val != CONSTANT
	    || !val.mask.is_zero ())
	  return false;

	if (dump_file)
	  {
	    fprintf (dump_file, "Folding predicate ");
	    print_gimple_expr (dump_file, stmt, 0, 0);
	    fprintf (dump_file, " to ");
	    print_generic_expr (dump_file, val.value, 0);
	    fprintf (dump_file, "\n");
	  }

	if (integer_zerop (val.value))
	  gimple_cond_make_false (stmt);
	else
	  gimple_cond_make_true (stmt);

	return true;
      }

    case GIMPLE_CALL:
      {
	tree lhs = gimple_call_lhs (stmt);
	int flags = gimple_call_flags (stmt);
	tree val;
	tree argt;
	bool changed = false;
	unsigned i;

	/* If the call was folded into a constant make sure it goes
	   away even if we cannot propagate into all uses because of
	   type issues.  */
	if (lhs
	    && TREE_CODE (lhs) == SSA_NAME
	    && (val = get_constant_value (lhs))
	    /* Don't optimize away calls that have side-effects.  */
	    && (flags & (ECF_CONST|ECF_PURE)) != 0
	    && (flags & ECF_LOOPING_CONST_OR_PURE) == 0)
	  {
	    tree new_rhs = unshare_expr (val);
	    bool res;
	    if (!useless_type_conversion_p (TREE_TYPE (lhs),
					    TREE_TYPE (new_rhs)))
	      new_rhs = fold_convert (TREE_TYPE (lhs), new_rhs);
	    res = update_call_from_tree (gsi, new_rhs);
	    gcc_assert (res);
	    return true;
	  }

	/* Internal calls provide no argument types, so the extra laxity
	   for normal calls does not apply.  */
	if (gimple_call_internal_p (stmt))
	  return false;

	/* The heuristic of fold_builtin_alloca_with_align differs before and
	   after inlining, so we don't require the arg to be changed into a
	   constant for folding, but just to be constant.  */
	if (gimple_call_builtin_p (stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
	  {
	    tree new_rhs = fold_builtin_alloca_with_align (stmt);
	    if (new_rhs)
	      {
		bool res = update_call_from_tree (gsi, new_rhs);
		tree var = TREE_OPERAND (TREE_OPERAND (new_rhs, 0),0);
		gcc_assert (res);
		insert_clobbers_for_var (*gsi, var);
		return true;
	      }
	  }

	/* Propagate into the call arguments.  Compared to replace_uses_in
	   this can use the argument slot types for type verification
	   instead of the current argument type.  We also can safely
	   drop qualifiers here as we are dealing with constants anyway.  */
	argt = TYPE_ARG_TYPES (gimple_call_fntype (stmt));
	for (i = 0; i < gimple_call_num_args (stmt) && argt;
	     ++i, argt = TREE_CHAIN (argt))
	  {
	    tree arg = gimple_call_arg (stmt, i);
	    if (TREE_CODE (arg) == SSA_NAME
		&& (val = get_constant_value (arg))
		&& useless_type_conversion_p
		     (TYPE_MAIN_VARIANT (TREE_VALUE (argt)),
		      TYPE_MAIN_VARIANT (TREE_TYPE (val))))
	      {
		gimple_call_set_arg (stmt, i, unshare_expr (val));
		changed = true;
	      }
	  }

	return changed;
      }

    case GIMPLE_ASSIGN:
      {
	tree lhs = gimple_assign_lhs (stmt);
	tree val;

	/* If we have a load that turned out to be constant replace it
	   as we cannot propagate into all uses in all cases.  */
	if (gimple_assign_single_p (stmt)
	    && TREE_CODE (lhs) == SSA_NAME
	    && (val = get_constant_value (lhs)))
	  {
	    tree rhs = unshare_expr (val);
	    if (!useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (rhs)))
	      rhs = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (lhs), rhs);
	    gimple_assign_set_rhs_from_tree (gsi, rhs);
	    return true;
	  }

	return false;
      }

    default:
      return false;
    }
}
/* Visit the assignment statement STMT.  Set the value of its LHS to the
   value computed by the RHS and store LHS in *OUTPUT_P.  If STMT
   creates virtual definitions, set the value of each new name to that
   of the RHS (if we can derive a constant out of the RHS).
   Value-returning call statements also perform an assignment, and
   are handled here.  */

static enum ssa_prop_result
visit_assignment (gimple stmt, tree *output_p)
{
  prop_value_t val;
  enum ssa_prop_result retval;

  tree lhs = gimple_get_lhs (stmt);

  gcc_assert (gimple_code (stmt) != GIMPLE_CALL
	      || gimple_call_lhs (stmt) != NULL_TREE);

  if (gimple_assign_single_p (stmt)
      && gimple_assign_rhs_code (stmt) == SSA_NAME)
    /* For a simple copy operation, we copy the lattice values.  */
    val = *get_value (gimple_assign_rhs1 (stmt));
  else
    /* Evaluate the statement, which could be
       either a GIMPLE_ASSIGN or a GIMPLE_CALL.  */
    val = evaluate_stmt (stmt);

  retval = SSA_PROP_NOT_INTERESTING;

  /* Set the lattice value of the statement's output.  */
  if (TREE_CODE (lhs) == SSA_NAME)
    {
      /* If STMT is an assignment to an SSA_NAME, we only have one
	 value to set.  */
      if (set_lattice_value (lhs, val))
	{
	  *output_p = lhs;
	  if (val.lattice_val == VARYING)
	    retval = SSA_PROP_VARYING;
	  else
	    retval = SSA_PROP_INTERESTING;
	}
    }

  return retval;
}
/* Visit the conditional statement STMT.  Return SSA_PROP_INTERESTING
   if it can determine which edge will be taken.  Otherwise, return
   SSA_PROP_VARYING.  */

static enum ssa_prop_result
visit_cond_stmt (gimple stmt, edge *taken_edge_p)
{
  prop_value_t val;
  basic_block block;

  block = gimple_bb (stmt);
  val = evaluate_stmt (stmt);
  if (val.lattice_val != CONSTANT
      || !val.mask.is_zero ())
    return SSA_PROP_VARYING;

  /* Find which edge out of the conditional block will be taken and add it
     to the worklist.  If no single edge can be determined statically,
     return SSA_PROP_VARYING to feed all the outgoing edges to the
     propagation engine.  */
  *taken_edge_p = find_taken_edge (block, val.value);
  if (*taken_edge_p)
    return SSA_PROP_INTERESTING;
  else
    return SSA_PROP_VARYING;
}
/* Evaluate statement STMT.  If the statement produces an output value and
   its evaluation changes the lattice value of its output, return
   SSA_PROP_INTERESTING and set *OUTPUT_P to the SSA_NAME holding the
   output value.

   If STMT is a conditional branch and we can determine its truth
   value, set *TAKEN_EDGE_P accordingly.  If STMT produces a varying
   value, return SSA_PROP_VARYING.  */

static enum ssa_prop_result
ccp_visit_stmt (gimple stmt, edge *taken_edge_p, tree *output_p)
{
  tree def;
  ssa_op_iter iter;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nVisiting statement:\n");
      print_gimple_stmt (dump_file, stmt, 0, dump_flags);
    }

  switch (gimple_code (stmt))
    {
    case GIMPLE_ASSIGN:
      /* If the statement is an assignment that produces a single
	 output value, evaluate its RHS to see if the lattice value of
	 its output has changed.  */
      return visit_assignment (stmt, output_p);

    case GIMPLE_CALL:
      /* A value-returning call also performs an assignment.  */
      if (gimple_call_lhs (stmt) != NULL_TREE)
	return visit_assignment (stmt, output_p);
      break;

    case GIMPLE_COND:
    case GIMPLE_SWITCH:
      /* If STMT is a conditional branch, see if we can determine
	 which branch will be taken.  */
      /* FIXME.  It appears that we should be able to optimize
	 computed GOTOs here as well.  */
      return visit_cond_stmt (stmt, taken_edge_p);

    default:
      break;
    }

  /* Any other kind of statement is not interesting for constant
     propagation and, therefore, not worth simulating.  */
  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "No interesting values produced.  Marked VARYING.\n");

  /* Definitions made by statements other than assignments to
     SSA_NAMEs represent unknown modifications to their outputs.
     Mark them VARYING.  */
  FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
    {
      prop_value_t v = { VARYING, NULL_TREE, { -1, (HOST_WIDE_INT) -1 } };
      set_lattice_value (def, v);
    }

  return SSA_PROP_VARYING;
}
/* Main entry point for SSA Conditional Constant Propagation.  */

static unsigned int
do_ssa_ccp (void)
{
  unsigned int todo = 0;
  calculate_dominance_info (CDI_DOMINATORS);
  ccp_initialize ();
  ssa_propagate (ccp_visit_stmt, ccp_visit_phi_node);
  if (ccp_finalize ())
    todo = (TODO_cleanup_cfg | TODO_update_ssa | TODO_remove_unused_locals);
  free_dominance_info (CDI_DOMINATORS);
  return todo;
}


static bool
gate_ccp (void)
{
  return flag_tree_ccp != 0;
}
struct gimple_opt_pass pass_ccp =
{
 {
  GIMPLE_PASS,
  "ccp",				/* name */
  OPTGROUP_NONE,			/* optinfo_flags */
  gate_ccp,				/* gate */
  do_ssa_ccp,				/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_TREE_CCP,				/* tv_id */
  PROP_cfg | PROP_ssa,			/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  TODO_verify_ssa
  | TODO_update_address_taken
  | TODO_verify_stmts | TODO_ggc_collect/* todo_flags_finish */
 }
};
/* Try to optimize out __builtin_stack_restore.  Optimize it out
   if there is another __builtin_stack_restore in the same basic
   block and no calls or ASM_EXPRs are in between, or if this block's
   only outgoing edge is to EXIT_BLOCK and there are no calls or
   ASM_EXPRs after this __builtin_stack_restore.  */

static tree
optimize_stack_restore (gimple_stmt_iterator i)
{
  tree callee;
  gimple stmt;

  basic_block bb = gsi_bb (i);
  gimple call = gsi_stmt (i);

  if (gimple_code (call) != GIMPLE_CALL
      || gimple_call_num_args (call) != 1
      || TREE_CODE (gimple_call_arg (call, 0)) != SSA_NAME
      || !POINTER_TYPE_P (TREE_TYPE (gimple_call_arg (call, 0))))
    return NULL_TREE;

  for (gsi_next (&i); !gsi_end_p (i); gsi_next (&i))
    {
      stmt = gsi_stmt (i);
      if (gimple_code (stmt) == GIMPLE_ASM)
	return NULL_TREE;
      if (gimple_code (stmt) != GIMPLE_CALL)
	continue;

      callee = gimple_call_fndecl (stmt);
      if (!callee
	  || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL
	  /* All regular builtins are ok, just obviously not alloca.  */
	  || DECL_FUNCTION_CODE (callee) == BUILT_IN_ALLOCA
	  || DECL_FUNCTION_CODE (callee) == BUILT_IN_ALLOCA_WITH_ALIGN)
	return NULL_TREE;

      if (DECL_FUNCTION_CODE (callee) == BUILT_IN_STACK_RESTORE)
	goto second_stack_restore;
    }

  if (!gsi_end_p (i))
    return NULL_TREE;

  /* Allow one successor of the exit block, or zero successors.  */
  switch (EDGE_COUNT (bb->succs))
    {
    case 0:
      break;
    case 1:
      if (single_succ_edge (bb)->dest != EXIT_BLOCK_PTR)
	return NULL_TREE;
      break;
    default:
      return NULL_TREE;
    }
 second_stack_restore:

  /* If there's exactly one use, then zap the call to __builtin_stack_save.
     If there are multiple uses, then the last one should remove the call.
     In any case, whether the call to __builtin_stack_save can be removed
     or not is irrelevant to removing the call to __builtin_stack_restore.  */
  if (has_single_use (gimple_call_arg (call, 0)))
    {
      gimple stack_save = SSA_NAME_DEF_STMT (gimple_call_arg (call, 0));
      if (is_gimple_call (stack_save))
	{
	  callee = gimple_call_fndecl (stack_save);
	  if (callee
	      && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
	      && DECL_FUNCTION_CODE (callee) == BUILT_IN_STACK_SAVE)
	    {
	      gimple_stmt_iterator stack_save_gsi;
	      tree rhs;

	      stack_save_gsi = gsi_for_stmt (stack_save);
	      rhs = build_int_cst (TREE_TYPE (gimple_call_arg (call, 0)), 0);
	      update_call_from_tree (&stack_save_gsi, rhs);
	    }
	}
    }

  /* No effect, so the statement will be deleted.  */
  return integer_zero_node;
}
/* If va_list type is a simple pointer and nothing special is needed,
   optimize __builtin_va_start (&ap, 0) into ap = __builtin_next_arg (0),
   __builtin_va_end (&ap) out as NOP and __builtin_va_copy into a simple
   pointer assignment.  */
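/* Hand-written examples of the rewrites performed here, assuming the
   target's va_list really is a plain char/void pointer (illustrative
   only, not from the original sources):

     __builtin_va_start (&ap, 0)   ->   ap = __builtin_next_arg (0)
     __builtin_va_copy (&d, s)     ->   d = s
     __builtin_va_end (&ap)        ->   (statement deleted)  */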
static tree
optimize_stdarg_builtin (gimple call)
{
  tree callee, lhs, rhs, cfun_va_list;
  bool va_list_simple_ptr;
  location_t loc = gimple_location (call);

  if (gimple_code (call) != GIMPLE_CALL)
    return NULL_TREE;

  callee = gimple_call_fndecl (call);

  cfun_va_list = targetm.fn_abi_va_list (callee);
  va_list_simple_ptr = POINTER_TYPE_P (cfun_va_list)
		       && (TREE_TYPE (cfun_va_list) == void_type_node
			   || TREE_TYPE (cfun_va_list) == char_type_node);

  switch (DECL_FUNCTION_CODE (callee))
    {
    case BUILT_IN_VA_START:
      if (!va_list_simple_ptr
	  || targetm.expand_builtin_va_start != NULL
	  || !builtin_decl_explicit_p (BUILT_IN_NEXT_ARG))
	return NULL_TREE;

      if (gimple_call_num_args (call) != 2)
	return NULL_TREE;

      lhs = gimple_call_arg (call, 0);
      if (!POINTER_TYPE_P (TREE_TYPE (lhs))
	  || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs)))
	     != TYPE_MAIN_VARIANT (cfun_va_list))
	return NULL_TREE;

      lhs = build_fold_indirect_ref_loc (loc, lhs);
      rhs = build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_NEXT_ARG),
				 1, integer_zero_node);
      rhs = fold_convert_loc (loc, TREE_TYPE (lhs), rhs);
      return build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, rhs);

    case BUILT_IN_VA_COPY:
      if (!va_list_simple_ptr)
	return NULL_TREE;

      if (gimple_call_num_args (call) != 2)
	return NULL_TREE;

      lhs = gimple_call_arg (call, 0);
      if (!POINTER_TYPE_P (TREE_TYPE (lhs))
	  || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs)))
	     != TYPE_MAIN_VARIANT (cfun_va_list))
	return NULL_TREE;

      lhs = build_fold_indirect_ref_loc (loc, lhs);
      rhs = gimple_call_arg (call, 1);
      if (TYPE_MAIN_VARIANT (TREE_TYPE (rhs))
	  != TYPE_MAIN_VARIANT (cfun_va_list))
	return NULL_TREE;

      rhs = fold_convert_loc (loc, TREE_TYPE (lhs), rhs);
      return build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, rhs);

    case BUILT_IN_VA_END:
      /* No effect, so the statement will be deleted.  */
      return integer_zero_node;

    default:
      gcc_unreachable ();
    }
}
/* Attempt to make the block containing the call to __builtin_unreachable
   at iterator I unreachable by changing the incoming jumps.  Return true
   if at least one jump was changed.  */
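/* Illustrative only (hand-written GIMPLE, not from the original
   sources): for a predecessor ending in

     if (x_1 != 0)
       goto <bb 3>;   <-- <bb 3> starts with __builtin_unreachable ()
     else
       goto <bb 4>;

   the condition is rewritten to constant false, so <bb 3> loses its
   incoming edge and becomes unreachable.  */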
static bool
optimize_unreachable (gimple_stmt_iterator i)
{
  basic_block bb = gsi_bb (i);
  gimple_stmt_iterator gsi;
  gimple stmt;
  edge_iterator ei;
  edge e;
  bool ret;

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      stmt = gsi_stmt (gsi);

      if (is_gimple_debug (stmt))
	continue;

      if (gimple_code (stmt) == GIMPLE_LABEL)
	{
	  /* Verify we do not need to preserve the label.  */
	  if (FORCED_LABEL (gimple_label_label (stmt)))
	    return false;

	  continue;
	}

      /* Only handle the case that __builtin_unreachable is the first statement
	 in the block.  We rely on DCE to remove stmts without side-effects
	 before __builtin_unreachable.  */
      if (gsi_stmt (gsi) != gsi_stmt (i))
	return false;
    }

  ret = false;
  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      gsi = gsi_last_bb (e->src);
      if (gsi_end_p (gsi))
	continue;

      stmt = gsi_stmt (gsi);
      if (gimple_code (stmt) == GIMPLE_COND)
	{
	  if (e->flags & EDGE_TRUE_VALUE)
	    gimple_cond_make_false (stmt);
	  else if (e->flags & EDGE_FALSE_VALUE)
	    gimple_cond_make_true (stmt);
	  else
	    gcc_unreachable ();
	  update_stmt (stmt);
	}
      else
	{
	  /* TODO: handle other cases, e.g. a switch statement.  */
	  continue;
	}

      ret = true;
    }

  return ret;
}
/* A simple pass that attempts to fold all builtin functions.  This pass
   is run after we've propagated as many constants as we can.  */
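/* Two hand-written examples of what this pass cleans up (illustrative
   only, not from the original sources):

     _5 = __builtin_constant_p (n_2);           ->   _5 = 0
     _7 = __builtin_assume_aligned (p_3, 16);   ->   _7 = p_3

   The first resolves a __builtin_constant_p that survived constant
   propagation; the second strips the alignment hint and keeps the
   pointer.  */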
static unsigned int
execute_fold_all_builtins (void)
{
  bool cfg_changed = false;
  basic_block bb;
  unsigned int todoflags = 0;

  FOR_EACH_BB (bb)
    {
      gimple_stmt_iterator i;
      for (i = gsi_start_bb (bb); !gsi_end_p (i); )
	{
	  gimple stmt, old_stmt;
	  tree callee, result;
	  enum built_in_function fcode;

	  stmt = gsi_stmt (i);

	  if (gimple_code (stmt) != GIMPLE_CALL)
	    {
	      gsi_next (&i);
	      continue;
	    }

	  callee = gimple_call_fndecl (stmt);
	  if (!callee || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL)
	    {
	      gsi_next (&i);
	      continue;
	    }

	  fcode = DECL_FUNCTION_CODE (callee);

	  result = gimple_fold_builtin (stmt);

	  if (result)
	    gimple_remove_stmt_histograms (cfun, stmt);

	  if (!result)
	    switch (DECL_FUNCTION_CODE (callee))
	      {
	      case BUILT_IN_CONSTANT_P:
		/* Resolve __builtin_constant_p.  If it hasn't been
		   folded to integer_one_node by now, it's fairly
		   certain that the value simply isn't constant.  */
		result = integer_zero_node;
		break;

	      case BUILT_IN_ASSUME_ALIGNED:
		/* Remove __builtin_assume_aligned.  */
		result = gimple_call_arg (stmt, 0);
		break;

	      case BUILT_IN_STACK_RESTORE:
		result = optimize_stack_restore (i);
		break;

	      case BUILT_IN_UNREACHABLE:
		if (optimize_unreachable (i))
		  cfg_changed = true;
		break;

	      case BUILT_IN_VA_START:
	      case BUILT_IN_VA_END:
	      case BUILT_IN_VA_COPY:
		/* These shouldn't be folded before pass_stdarg.  */
		result = optimize_stdarg_builtin (stmt);
		break;

	      default:
		break;
	      }

	  if (result == NULL_TREE)
	    {
	      gsi_next (&i);
	      continue;
	    }

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Simplified\n  ");
	      print_gimple_stmt (dump_file, stmt, 0, dump_flags);
	    }

	  old_stmt = stmt;
	  if (!update_call_from_tree (&i, result))
	    {
	      gimplify_and_update_call_from_tree (&i, result);
	      todoflags |= TODO_update_address_taken;
	    }

	  stmt = gsi_stmt (i);

	  if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt)
	      && gimple_purge_dead_eh_edges (bb))
	    cfg_changed = true;

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "to\n  ");
	      print_gimple_stmt (dump_file, stmt, 0, dump_flags);
	      fprintf (dump_file, "\n");
	    }

	  /* Retry the same statement if it changed into another
	     builtin, there might be new opportunities now.  */
	  if (gimple_code (stmt) != GIMPLE_CALL)
	    {
	      gsi_next (&i);
	      continue;
	    }
	  callee = gimple_call_fndecl (stmt);
	  if (!callee
	      || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL
	      || DECL_FUNCTION_CODE (callee) == fcode)
	    gsi_next (&i);
	}
    }

  /* Delete unreachable blocks.  */
  if (cfg_changed)
    todoflags |= TODO_cleanup_cfg;

  return todoflags;
}
struct gimple_opt_pass pass_fold_builtins =
{
 {
  GIMPLE_PASS,
  "fab",                                /* name */
  OPTGROUP_NONE,                        /* optinfo_flags */
  NULL,                                 /* gate */
  execute_fold_all_builtins,            /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_NONE,                              /* tv_id */
  PROP_cfg | PROP_ssa,                  /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_verify_ssa
  | TODO_update_ssa                     /* todo_flags_finish */
 }
};