/* Conditional constant propagation pass for the GNU compiler.
   Copyright (C) 2000-2013 Free Software Foundation, Inc.
   Adapted from original RTL SSA-CCP by Daniel Berlin <dberlin@dberlin.org>
   Adapted to GIMPLE trees by Diego Novillo <dnovillo@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* Conditional constant propagation (CCP) is based on the SSA
   propagation engine (tree-ssa-propagate.c).  Constant assignments of
   the form VAR = CST are propagated from the assignments into uses of
   VAR, which in turn may generate new constants.  The simulation uses
   a four level lattice to keep track of constant values associated
   with SSA names.  Given an SSA name V_i, it may take one of the
   following values:

	UNINITIALIZED   ->  the initial state of the value.  This value
			    is replaced with a correct initial value
			    the first time the value is used, so the
			    rest of the pass does not need to care about
			    it.  Using this value simplifies initialization
			    of the pass, and prevents us from needlessly
			    scanning statements that are never reached.

	UNDEFINED	->  V_i is a local variable whose definition
			    has not been processed yet.  Therefore we
			    don't yet know if its value is a constant
			    or not.

	CONSTANT	->  V_i has been found to hold a constant
			    value C.

	VARYING		->  V_i cannot take a constant value, or if it
			    does, it is not possible to determine it
			    at compile time.

   The core of SSA-CCP is in ccp_visit_stmt and ccp_visit_phi_node:

   1- In ccp_visit_stmt, we are interested in assignments whose RHS
      evaluates into a constant and conditional jumps whose predicate
      evaluates into a boolean true or false.  When an assignment of
      the form V_i = CONST is found, V_i's lattice value is set to
      CONSTANT and CONST is associated with it.  This causes the
      propagation engine to add all the SSA edges coming out of the
      assignment into the worklists, so that statements that use V_i
      can be visited.

      If the statement is a conditional with a constant predicate, we
      mark the outgoing edges as executable or not executable
      depending on the predicate's value.  This is then used when
      visiting PHI nodes to know when a PHI argument can be ignored.

   2- In ccp_visit_phi_node, if all the PHI arguments evaluate to the
      same constant C, then the LHS of the PHI is set to C.  This
      evaluation is known as the "meet operation".  Since one of the
      goals of this evaluation is to optimistically return constant
      values as often as possible, it uses two main short cuts:

      - If an argument is flowing in through a non-executable edge, it
	is ignored.  This is useful in cases like this:

			if (PRED)
			  a_9 = 3;
			else
			  a_10 = 100;
			a_11 = PHI (a_9, a_10)

	If PRED is known to always evaluate to false, then we can
	assume that a_11 will always take its value from a_10, meaning
	that instead of considering it VARYING (a_9 and a_10 have
	different values), we can consider it CONSTANT 100.

      - If an argument has an UNDEFINED value, then it does not affect
	the outcome of the meet operation.  If a variable V_i has an
	UNDEFINED value, it means that either its defining statement
	hasn't been visited yet or V_i has no defining statement, in
	which case the original symbol 'V' is being used
	uninitialized.  Since 'V' is a local variable, the compiler
	may assume any initial value for it.

   After propagation, every variable V_i that ends up with a lattice
   value of CONSTANT will have the associated constant value in the
   array CONST_VAL[i].VALUE.  That is fed into substitute_and_fold for
   final substitution and folding.

   References:

     Constant propagation with conditional branches,
     Wegman and Zadeck, ACM TOPLAS 13(2):181-210.

     Building an Optimizing Compiler,
     Robert Morgan, Butterworth-Heinemann, 1998, Section 8.9.

     Advanced Compiler Design and Implementation,
     Steven Muchnick, Morgan Kaufmann, 1997, Section 12.6  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tree.h"
#include "flags.h"
#include "basic-block.h"
#include "function.h"
#include "gimple-pretty-print.h"
#include "tree-ssa.h"
#include "tree-pass.h"
#include "tree-ssa-propagate.h"
#include "value-prof.h"
#include "langhooks.h"
#include "target.h"
#include "diagnostic-core.h"
#include "dbgcnt.h"
#include "params.h"
#include "hash-table.h"
/* Possible lattice values.  */
typedef enum
{
  UNINITIALIZED,
  UNDEFINED,
  CONSTANT,
  VARYING
} ccp_lattice_t;

struct prop_value_d {
    /* Lattice value.  */
    ccp_lattice_t lattice_val;

    /* Propagated value.  */
    tree value;

    /* Mask that applies to the propagated value during CCP.  For
       X with a CONSTANT lattice value X & ~mask == value & ~mask.  */
    double_int mask;
};

typedef struct prop_value_d prop_value_t;
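
/* Illustrative example (added for exposition, not part of the original
   sources): a lattice value with value == 0x10 and mask == 0x0f
   describes every run-time X with (X & ~0x0f) == 0x10 -- the low four
   bits are unknown, all remaining bits are known to match 0x10.  A
   zero mask is a fully known constant; an all-ones mask carries no
   information.  */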
/* Array of propagated constant values.  After propagation,
   CONST_VAL[I].VALUE holds the constant value for SSA_NAME(I).  If
   the constant is held in an SSA name representing a memory store
   (i.e., a VDEF), CONST_VAL[I].MEM_REF will contain the actual
   memory reference used to store (i.e., the LHS of the assignment
   doing the store).  */
static prop_value_t *const_val;
static unsigned n_const_val;

static void canonicalize_float_value (prop_value_t *);
static bool ccp_fold_stmt (gimple_stmt_iterator *);
/* Dump constant propagation value VAL to file OUTF prefixed by PREFIX.  */

static void
dump_lattice_value (FILE *outf, const char *prefix, prop_value_t val)
{
  switch (val.lattice_val)
    {
    case UNINITIALIZED:
      fprintf (outf, "%sUNINITIALIZED", prefix);
      break;
    case UNDEFINED:
      fprintf (outf, "%sUNDEFINED", prefix);
      break;
    case VARYING:
      fprintf (outf, "%sVARYING", prefix);
      break;
    case CONSTANT:
      if (TREE_CODE (val.value) != INTEGER_CST
	  || val.mask.is_zero ())
	{
	  fprintf (outf, "%sCONSTANT ", prefix);
	  print_generic_expr (outf, val.value, dump_flags);
	}
      else
	{
	  double_int cval = tree_to_double_int (val.value).and_not (val.mask);
	  fprintf (outf, "%sCONSTANT " HOST_WIDE_INT_PRINT_DOUBLE_HEX,
		   prefix, cval.high, cval.low);
	  fprintf (outf, " (" HOST_WIDE_INT_PRINT_DOUBLE_HEX ")",
		   val.mask.high, val.mask.low);
	}
      break;
    default:
      gcc_unreachable ();
    }
}
/* Print lattice value VAL to stderr.  */

void debug_lattice_value (prop_value_t val);

DEBUG_FUNCTION void
debug_lattice_value (prop_value_t val)
{
  dump_lattice_value (stderr, "", val);
  fprintf (stderr, "\n");
}
/* Compute a default value for variable VAR and store it in the
   CONST_VAL array.  The following rules are used to get default
   values:

   1- Global and static variables that are declared constant are
      considered CONSTANT.

   2- Any other value is considered UNDEFINED.  This is useful when
      considering PHI nodes.  PHI arguments that are undefined do not
      change the constant value of the PHI node, which allows for more
      constants to be propagated.

   3- Variables defined by statements other than assignments and PHI
      nodes are considered VARYING.

   4- Initial values of variables that are not GIMPLE registers are
      considered VARYING.  */
static prop_value_t
get_default_value (tree var)
{
  prop_value_t val = { UNINITIALIZED, NULL_TREE, { 0, 0 } };
  gimple stmt;

  stmt = SSA_NAME_DEF_STMT (var);

  if (gimple_nop_p (stmt))
    {
      /* Variables defined by an empty statement are those used
	 before being initialized.  If VAR is a local variable, we
	 can assume initially that it is UNDEFINED, otherwise we must
	 consider it VARYING.  */
      if (!virtual_operand_p (var)
	  && TREE_CODE (SSA_NAME_VAR (var)) == VAR_DECL)
	val.lattice_val = UNDEFINED;
      else
	{
	  val.lattice_val = VARYING;
	  val.mask = double_int_minus_one;
	}
    }
  else if (is_gimple_assign (stmt))
    {
      tree cst;
      if (gimple_assign_single_p (stmt)
	  && DECL_P (gimple_assign_rhs1 (stmt))
	  && (cst = get_symbol_constant_value (gimple_assign_rhs1 (stmt))))
	{
	  val.lattice_val = CONSTANT;
	  val.value = cst;
	}
      else
	{
	  /* Any other variable defined by an assignment is considered
	     UNDEFINED.  */
	  val.lattice_val = UNDEFINED;
	}
    }
  else if ((is_gimple_call (stmt)
	    && gimple_call_lhs (stmt) != NULL_TREE)
	   || gimple_code (stmt) == GIMPLE_PHI)
    {
      /* A variable defined by a call or a PHI node is considered
	 UNDEFINED.  */
      val.lattice_val = UNDEFINED;
    }
  else
    {
      /* Otherwise, VAR will never take on a constant value.  */
      val.lattice_val = VARYING;
      val.mask = double_int_minus_one;
    }

  return val;
}
/* Get the constant value associated with variable VAR.  */

static inline prop_value_t *
get_value (tree var)
{
  prop_value_t *val;

  if (const_val == NULL
      || SSA_NAME_VERSION (var) >= n_const_val)
    return NULL;

  val = &const_val[SSA_NAME_VERSION (var)];
  if (val->lattice_val == UNINITIALIZED)
    *val = get_default_value (var);

  canonicalize_float_value (val);

  return val;
}
/* Return the constant tree value associated with VAR.  */

static inline tree
get_constant_value (tree var)
{
  prop_value_t *val;
  if (TREE_CODE (var) != SSA_NAME)
    {
      if (is_gimple_min_invariant (var))
	return var;
      return NULL_TREE;
    }
  val = get_value (var);
  if (val
      && val->lattice_val == CONSTANT
      && (TREE_CODE (val->value) != INTEGER_CST
	  || val->mask.is_zero ()))
    return val->value;
  return NULL_TREE;
}
/* Sets the value associated with VAR to VARYING.  */

static inline void
set_value_varying (tree var)
{
  prop_value_t *val = &const_val[SSA_NAME_VERSION (var)];

  val->lattice_val = VARYING;
  val->value = NULL_TREE;
  val->mask = double_int_minus_one;
}
/* For float types, modify the value of VAL to make ccp work correctly
   for non-standard values (-0, NaN):

   If HONOR_SIGNED_ZEROS is false, and VAL = -0, we canonicalize it to 0.
   If HONOR_NANS is false, and VAL is NaN, we canonicalize it to UNDEFINED.
     This is to fix the following problem (see PR 29921): Suppose we have

     x = 0.0 * y

   and we set value of y to NaN.  This causes value of x to be set to NaN.
   When we later determine that y is in fact VARYING, fold uses the fact
   that HONOR_NANS is false, and we try to change the value of x to 0,
   causing an ICE.  With HONOR_NANS being false, the real appearance of
   NaN would cause undefined behavior, though, so claiming that y (and x)
   are UNDEFINED initially is correct.  */

static void
canonicalize_float_value (prop_value_t *val)
{
  enum machine_mode mode;
  tree type;
  REAL_VALUE_TYPE d;

  if (val->lattice_val != CONSTANT
      || TREE_CODE (val->value) != REAL_CST)
    return;

  d = TREE_REAL_CST (val->value);
  type = TREE_TYPE (val->value);
  mode = TYPE_MODE (type);

  if (!HONOR_SIGNED_ZEROS (mode)
      && REAL_VALUE_MINUS_ZERO (d))
    {
      val->value = build_real (type, dconst0);
      return;
    }

  if (!HONOR_NANS (mode)
      && REAL_VALUE_ISNAN (d))
    {
      val->lattice_val = UNDEFINED;
      val->value = NULL;
      return;
    }
}
/* Return whether the lattice transition is valid.  */

static bool
valid_lattice_transition (prop_value_t old_val, prop_value_t new_val)
{
  /* Lattice transitions must always be monotonically increasing in
     value.  */
  if (old_val.lattice_val < new_val.lattice_val)
    return true;

  if (old_val.lattice_val != new_val.lattice_val)
    return false;

  if (!old_val.value && !new_val.value)
    return true;

  /* Now both lattice values are CONSTANT.  */

  /* Allow transitioning from PHI <&x, not executable> == &x
     to PHI <&x, &y> == common alignment.  */
  if (TREE_CODE (old_val.value) != INTEGER_CST
      && TREE_CODE (new_val.value) == INTEGER_CST)
    return true;

  /* Bit-lattices have to agree in the still valid bits.  */
  if (TREE_CODE (old_val.value) == INTEGER_CST
      && TREE_CODE (new_val.value) == INTEGER_CST)
    return tree_to_double_int (old_val.value).and_not (new_val.mask)
	   == tree_to_double_int (new_val.value).and_not (new_val.mask);

  /* Otherwise constant values have to agree.  */
  return operand_equal_p (old_val.value, new_val.value, 0);
}
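
/* Worked example for the INTEGER_CST rule above (added for exposition):
   old_val = 0x14 with mask 0 may transition to new_val = 0x16 with
   mask 0x07, because the still valid bits agree:
   0x14 & ~0x07 == 0x10 == 0x16 & ~0x07.  A new value of 0x24 with the
   same mask would be rejected, since bit 5 was known to be zero.  */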
/* Set the value for variable VAR to NEW_VAL.  Return true if the new
   value is different from VAR's previous value.  */

static bool
set_lattice_value (tree var, prop_value_t new_val)
{
  /* We can deal with old UNINITIALIZED values just fine here.  */
  prop_value_t *old_val = &const_val[SSA_NAME_VERSION (var)];

  canonicalize_float_value (&new_val);

  /* We have to be careful to not go up the bitwise lattice
     represented by the mask.
     ??? This doesn't seem to be the best place to enforce this.  */
  if (new_val.lattice_val == CONSTANT
      && old_val->lattice_val == CONSTANT
      && TREE_CODE (new_val.value) == INTEGER_CST
      && TREE_CODE (old_val->value) == INTEGER_CST)
    {
      double_int diff;
      diff = tree_to_double_int (new_val.value)
	     ^ tree_to_double_int (old_val->value);
      new_val.mask = new_val.mask | old_val->mask | diff;
    }

  gcc_assert (valid_lattice_transition (*old_val, new_val));

  /* If *OLD_VAL and NEW_VAL are the same, return false to inform the
     caller that this was a non-transition.  */
  if (old_val->lattice_val != new_val.lattice_val
      || (new_val.lattice_val == CONSTANT
	  && TREE_CODE (new_val.value) == INTEGER_CST
	  && (TREE_CODE (old_val->value) != INTEGER_CST
	      || new_val.mask != old_val->mask)))
    {
      /* ??? We would like to delay creation of INTEGER_CSTs from
	 partially constants here.  */

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  dump_lattice_value (dump_file, "Lattice value changed to ", new_val);
	  fprintf (dump_file, ".  Adding SSA edges to worklist.\n");
	}

      *old_val = new_val;

      gcc_assert (new_val.lattice_val != UNINITIALIZED);
      return true;
    }

  return false;
}
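
/* Worked example (added for exposition): if VAR was CONSTANT 4 with
   mask 0 and is now proposed as CONSTANT 6 with mask 0, the XOR diff
   is 4 ^ 6 == 2, so the stored value becomes CONSTANT 6 with mask 2 --
   bit 1 is demoted to unknown instead of oscillating between two
   different "constants".  */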
static prop_value_t get_value_for_expr (tree, bool);
static prop_value_t bit_value_binop (enum tree_code, tree, tree, tree);
static void bit_value_binop_1 (enum tree_code, tree, double_int *, double_int *,
			       tree, double_int, double_int,
			       tree, double_int, double_int);
/* Return a double_int that can be used for bitwise simplifications
   from VAL.  */

static double_int
value_to_double_int (prop_value_t val)
{
  if (val.value
      && TREE_CODE (val.value) == INTEGER_CST)
    return tree_to_double_int (val.value);
  else
    return double_int_zero;
}
/* Return the value for the address expression EXPR based on alignment
   information.  */

static prop_value_t
get_value_from_alignment (tree expr)
{
  tree type = TREE_TYPE (expr);
  prop_value_t val;
  unsigned HOST_WIDE_INT bitpos;
  unsigned int align;

  gcc_assert (TREE_CODE (expr) == ADDR_EXPR);

  get_pointer_alignment_1 (expr, &align, &bitpos);
  val.mask = (POINTER_TYPE_P (type) || TYPE_UNSIGNED (type)
	      ? double_int::mask (TYPE_PRECISION (type))
	      : double_int_minus_one)
	     .and_not (double_int::from_uhwi (align / BITS_PER_UNIT - 1));
  val.lattice_val = val.mask.is_minus_one () ? VARYING : CONSTANT;
  if (val.lattice_val == CONSTANT)
    val.value
      = double_int_to_tree (type,
			    double_int::from_uhwi (bitpos / BITS_PER_UNIT));
  else
    val.value = NULL_TREE;

  return val;
}
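
/* Illustrative example (added for exposition): for an ADDR_EXPR known
   to be 16-byte aligned with a byte offset of 4 (align == 128 bits,
   bitpos == 32 bits), the result is CONSTANT with value 4 and
   mask == ~15: the low four bits are known -- the address is congruent
   to 4 modulo 16 -- and everything above them is unknown.  */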
/* Return the value for the tree operand EXPR.  If FOR_BITS_P is true
   return constant bits extracted from alignment information for
   invariant addresses.  */

static prop_value_t
get_value_for_expr (tree expr, bool for_bits_p)
{
  prop_value_t val;

  if (TREE_CODE (expr) == SSA_NAME)
    {
      val = *get_value (expr);
      if (for_bits_p
	  && val.lattice_val == CONSTANT
	  && TREE_CODE (val.value) == ADDR_EXPR)
	val = get_value_from_alignment (val.value);
    }
  else if (is_gimple_min_invariant (expr)
	   && (!for_bits_p || TREE_CODE (expr) != ADDR_EXPR))
    {
      val.lattice_val = CONSTANT;
      val.value = expr;
      val.mask = double_int_zero;
      canonicalize_float_value (&val);
    }
  else if (TREE_CODE (expr) == ADDR_EXPR)
    val = get_value_from_alignment (expr);
  else
    {
      val.lattice_val = VARYING;
      val.mask = double_int_minus_one;
      val.value = NULL_TREE;
    }
  return val;
}
/* Return the likely CCP lattice value for STMT.

   If STMT has no operands, then return CONSTANT.

   Else if undefinedness of operands of STMT cause its value to be
   undefined, then return UNDEFINED.

   Else if any operands of STMT are constants, then return CONSTANT.

   Else return VARYING.  */

static ccp_lattice_t
likely_value (gimple stmt)
{
  bool has_constant_operand, has_undefined_operand, all_undefined_operands;
  tree use;
  ssa_op_iter iter;
  unsigned i;

  enum gimple_code code = gimple_code (stmt);

  /* This function appears to be called only for assignments, calls,
     conditionals, and switches, due to the logic in visit_stmt.  */
  gcc_assert (code == GIMPLE_ASSIGN
	      || code == GIMPLE_CALL
	      || code == GIMPLE_COND
	      || code == GIMPLE_SWITCH);

  /* If the statement has volatile operands, it won't fold to a
     constant value.  */
  if (gimple_has_volatile_ops (stmt))
    return VARYING;

  /* Arrive here for more complex cases.  */
  has_constant_operand = false;
  has_undefined_operand = false;
  all_undefined_operands = true;
  FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
    {
      prop_value_t *val = get_value (use);

      if (val->lattice_val == UNDEFINED)
	has_undefined_operand = true;
      else
	all_undefined_operands = false;

      if (val->lattice_val == CONSTANT)
	has_constant_operand = true;
    }

  /* There may be constants in regular rhs operands.  For calls we
     have to ignore lhs, fndecl and static chain, otherwise only
     the lhs.  */
  for (i = (is_gimple_call (stmt) ? 2 : 0) + gimple_has_lhs (stmt);
       i < gimple_num_ops (stmt); ++i)
    {
      tree op = gimple_op (stmt, i);
      if (!op || TREE_CODE (op) == SSA_NAME)
	continue;
      if (is_gimple_min_invariant (op))
	has_constant_operand = true;
    }

  if (has_constant_operand)
    all_undefined_operands = false;

  if (has_undefined_operand
      && code == GIMPLE_CALL
      && gimple_call_internal_p (stmt))
    switch (gimple_call_internal_fn (stmt))
      {
	/* These 3 builtins use the first argument just as a magic
	   way how to find out a decl uid.  */
      case IFN_GOMP_SIMD_LANE:
      case IFN_GOMP_SIMD_VF:
      case IFN_GOMP_SIMD_LAST_LANE:
	has_undefined_operand = false;
	break;
      default:
	break;
      }

  /* If the operation combines operands like COMPLEX_EXPR make sure to
     not mark the result UNDEFINED if only one part of the result is
     undefined.  */
  if (has_undefined_operand && all_undefined_operands)
    return UNDEFINED;
  else if (code == GIMPLE_ASSIGN && has_undefined_operand)
    {
      switch (gimple_assign_rhs_code (stmt))
	{
	/* Unary operators are handled with all_undefined_operands.  */
	case PLUS_EXPR:
	case MINUS_EXPR:
	case POINTER_PLUS_EXPR:
	  /* Not MIN_EXPR, MAX_EXPR.  One VARYING operand may be selected.
	     Not bitwise operators, one VARYING operand may specify the
	     result completely.  Not logical operators for the same reason.
	     Not COMPLEX_EXPR as one VARYING operand makes the result partly
	     not UNDEFINED.  Not *DIV_EXPR, comparisons and shifts because
	     the undefined operand may be promoted.  */
	  return UNDEFINED;

	case ADDR_EXPR:
	  /* If any part of an address is UNDEFINED, like the index
	     of an ARRAY_EXPR, then treat the result as UNDEFINED.  */
	  return UNDEFINED;

	default:
	  break;
	}
    }
  /* If there was an UNDEFINED operand but the result may be not UNDEFINED
     fall back to CONSTANT.  During iteration UNDEFINED may still drop
     to CONSTANT.  */
  if (has_undefined_operand)
    return CONSTANT;

  /* We do not consider virtual operands here -- load from read-only
     memory may have only VARYING virtual operands, but still be
     constant.  */
  if (has_constant_operand
      || gimple_references_memory_p (stmt))
    return CONSTANT;

  return VARYING;
}
/* Returns true if STMT cannot be constant.  */

static bool
surely_varying_stmt_p (gimple stmt)
{
  /* If the statement has operands that we cannot handle, it cannot be
     constant.  */
  if (gimple_has_volatile_ops (stmt))
    return true;

  /* If it is a call and does not return a value or is not a
     builtin and not an indirect call, it is varying.  */
  if (is_gimple_call (stmt))
    {
      tree fndecl;
      if (!gimple_call_lhs (stmt)
	  || ((fndecl = gimple_call_fndecl (stmt)) != NULL_TREE
	      && !DECL_BUILT_IN (fndecl)))
	return true;
    }

  /* Any other store operation is not interesting.  */
  else if (gimple_vdef (stmt))
    return true;

  /* Anything other than assignments and conditional jumps are not
     interesting for CCP.  */
  if (gimple_code (stmt) != GIMPLE_ASSIGN
      && gimple_code (stmt) != GIMPLE_COND
      && gimple_code (stmt) != GIMPLE_SWITCH
      && gimple_code (stmt) != GIMPLE_CALL)
    return true;

  return false;
}
/* Initialize local data structures for CCP.  */

static void
ccp_initialize (void)
{
  basic_block bb;

  n_const_val = num_ssa_names;
  const_val = XCNEWVEC (prop_value_t, n_const_val);

  /* Initialize simulation flags for PHI nodes and statements.  */
  FOR_EACH_BB (bb)
    {
      gimple_stmt_iterator i;

      for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i))
	{
	  gimple stmt = gsi_stmt (i);
	  bool is_varying;

	  /* If the statement is a control insn, then we do not
	     want to avoid simulating the statement once.  Failure
	     to do so means that those edges will never get added.  */
	  if (stmt_ends_bb_p (stmt))
	    is_varying = false;
	  else
	    is_varying = surely_varying_stmt_p (stmt);

	  if (is_varying)
	    {
	      tree def;
	      ssa_op_iter iter;

	      /* If the statement will not produce a constant, mark
		 all its outputs VARYING.  */
	      FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
		set_value_varying (def);
	    }
	  prop_set_simulate_again (stmt, !is_varying);
	}
    }

  /* Now process PHI nodes.  We never clear the simulate_again flag on
     phi nodes, since we do not know which edges are executable yet,
     except for phi nodes for virtual operands when we do not do store ccp.  */
  FOR_EACH_BB (bb)
    {
      gimple_stmt_iterator i;

      for (i = gsi_start_phis (bb); !gsi_end_p (i); gsi_next (&i))
	{
	  gimple phi = gsi_stmt (i);

	  if (virtual_operand_p (gimple_phi_result (phi)))
	    prop_set_simulate_again (phi, false);
	  else
	    prop_set_simulate_again (phi, true);
	}
    }
}
/* Debug count support.  Reset the values of ssa names
   VARYING when the total number ssa names analyzed is
   beyond the debug count specified.  */

static void
do_dbg_cnt (void)
{
  unsigned i;
  for (i = 0; i < num_ssa_names; i++)
    {
      if (!dbg_cnt (ccp))
	{
	  const_val[i].lattice_val = VARYING;
	  const_val[i].mask = double_int_minus_one;
	  const_val[i].value = NULL_TREE;
	}
    }
}
/* Do final substitution of propagated values, cleanup the flowgraph and
   free allocated storage.

   Return TRUE when something was optimized.  */

static bool
ccp_finalize (void)
{
  bool something_changed;
  unsigned i;

  do_dbg_cnt ();

  /* Derive alignment and misalignment information from partially
     constant pointers in the lattice.  */
  for (i = 1; i < num_ssa_names; ++i)
    {
      tree name = ssa_name (i);
      prop_value_t *val;
      unsigned int tem, align;

      if (!name
	  || !POINTER_TYPE_P (TREE_TYPE (name)))
	continue;

      val = get_value (name);
      if (val->lattice_val != CONSTANT
	  || TREE_CODE (val->value) != INTEGER_CST)
	continue;

      /* Trailing constant bits specify the alignment, trailing value
	 bits the misalignment.  */
      tem = val->mask.low;
      align = (tem & -tem);
      if (align > 1)
	set_ptr_info_alignment (get_ptr_info (name), align,
				TREE_INT_CST_LOW (val->value) & (align - 1));
    }
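
  /* Illustrative example (added for exposition): a pointer whose
     lattice value is 4 with mask.low == 0xfffffff0 has its low four
     bits known, so tem & -tem yields align == 16 and the recorded
     misalignment is 4 & (16 - 1) == 4.  */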
  /* Perform substitutions based on the known constant values.  */
  something_changed = substitute_and_fold (get_constant_value,
					   ccp_fold_stmt, true);

  free (const_val);
  const_val = NULL;
  return something_changed;
}
/* Compute the meet operator between *VAL1 and *VAL2.  Store the result
   in VAL1.

		any M UNDEFINED	  = any
		any M VARYING	  = VARYING
		Ci  M Cj	  = Ci		if (i == j)
		Ci  M Cj	  = VARYING	if (i != j)
   */

static void
ccp_lattice_meet (prop_value_t *val1, prop_value_t *val2)
{
  if (val1->lattice_val == UNDEFINED)
    {
      /* UNDEFINED M any = any   */
      *val1 = *val2;
    }
  else if (val2->lattice_val == UNDEFINED)
    {
      /* any M UNDEFINED = any
	 Nothing to do.  VAL1 already contains the value we want.  */
      ;
    }
  else if (val1->lattice_val == VARYING
	   || val2->lattice_val == VARYING)
    {
      /* any M VARYING = VARYING.  */
      val1->lattice_val = VARYING;
      val1->mask = double_int_minus_one;
      val1->value = NULL_TREE;
    }
  else if (val1->lattice_val == CONSTANT
	   && val2->lattice_val == CONSTANT
	   && TREE_CODE (val1->value) == INTEGER_CST
	   && TREE_CODE (val2->value) == INTEGER_CST)
    {
      /* Ci M Cj = Ci		if (i == j)
	 Ci M Cj = VARYING	if (i != j)

	 For INTEGER_CSTs mask unequal bits.  If no equal bits remain,
	 drop to varying.  */
      val1->mask = val1->mask | val2->mask
		   | (tree_to_double_int (val1->value)
		      ^ tree_to_double_int (val2->value));
      if (val1->mask.is_minus_one ())
	{
	  val1->lattice_val = VARYING;
	  val1->value = NULL_TREE;
	}
    }
  else if (val1->lattice_val == CONSTANT
	   && val2->lattice_val == CONSTANT
	   && simple_cst_equal (val1->value, val2->value) == 1)
    {
      /* Ci M Cj = Ci		if (i == j)
	 Ci M Cj = VARYING	if (i != j)

	 VAL1 already contains the value we want for equivalent values.  */
    }
  else if (val1->lattice_val == CONSTANT
	   && val2->lattice_val == CONSTANT
	   && (TREE_CODE (val1->value) == ADDR_EXPR
	       || TREE_CODE (val2->value) == ADDR_EXPR))
    {
      /* When not equal addresses are involved try meeting for
	 alignment.  */
      prop_value_t tem = *val2;
      if (TREE_CODE (val1->value) == ADDR_EXPR)
	*val1 = get_value_for_expr (val1->value, true);
      if (TREE_CODE (val2->value) == ADDR_EXPR)
	tem = get_value_for_expr (val2->value, true);
      ccp_lattice_meet (val1, &tem);
    }
  else
    {
      /* Any other combination is VARYING.  */
      val1->lattice_val = VARYING;
      val1->mask = double_int_minus_one;
      val1->value = NULL_TREE;
    }
}
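
/* Worked example (added for exposition): meeting CONSTANT 0x12 (mask 0)
   with CONSTANT 0x16 (mask 0) yields mask 0 | 0 | (0x12 ^ 0x16) == 4,
   i.e. CONSTANT with bit 2 unknown.  Both inputs agree on every other
   bit, so partial information survives instead of dropping straight
   to VARYING.  */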
/* Loop through the PHI_NODE's parameters for BLOCK and compare their
   lattice values to determine PHI_NODE's lattice value.  The value of a
   PHI node is determined calling ccp_lattice_meet with all the arguments
   of the PHI node that are incoming via executable edges.  */

static enum ssa_prop_result
ccp_visit_phi_node (gimple phi)
{
  unsigned i;
  prop_value_t *old_val, new_val;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nVisiting PHI node: ");
      print_gimple_stmt (dump_file, phi, 0, dump_flags);
    }

  old_val = get_value (gimple_phi_result (phi));
  switch (old_val->lattice_val)
    {
    case VARYING:
      return SSA_PROP_VARYING;

    case CONSTANT:
      new_val = *old_val;
      break;

    case UNDEFINED:
      new_val.lattice_val = UNDEFINED;
      new_val.value = NULL_TREE;
      break;

    default:
      gcc_unreachable ();
    }

  for (i = 0; i < gimple_phi_num_args (phi); i++)
    {
      /* Compute the meet operator over all the PHI arguments flowing
	 through executable edges.  */
      edge e = gimple_phi_arg_edge (phi, i);

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file,
	      "\n    Argument #%d (%d -> %d %sexecutable)\n",
	      i, e->src->index, e->dest->index,
	      (e->flags & EDGE_EXECUTABLE) ? "" : "not ");
	}

      /* If the incoming edge is executable, compute the meet operator for
	 the existing value of the PHI node and the current PHI argument.  */
      if (e->flags & EDGE_EXECUTABLE)
	{
	  tree arg = gimple_phi_arg (phi, i)->def;
	  prop_value_t arg_val = get_value_for_expr (arg, false);

	  ccp_lattice_meet (&new_val, &arg_val);

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "\t");
	      print_generic_expr (dump_file, arg, dump_flags);
	      dump_lattice_value (dump_file, "\tValue: ", arg_val);
	      fprintf (dump_file, "\n");
	    }

	  if (new_val.lattice_val == VARYING)
	    break;
	}
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      dump_lattice_value (dump_file, "\n    PHI node value: ", new_val);
      fprintf (dump_file, "\n\n");
    }

  /* Make the transition to the new value.  */
  if (set_lattice_value (gimple_phi_result (phi), new_val))
    {
      if (new_val.lattice_val == VARYING)
	return SSA_PROP_VARYING;
      else
	return SSA_PROP_INTERESTING;
    }
  else
    return SSA_PROP_NOT_INTERESTING;
}
/* Return the constant value for OP or OP otherwise.  */

static tree
valueize_op (tree op)
{
  if (TREE_CODE (op) == SSA_NAME)
    {
      tree tem = get_constant_value (op);
      if (tem)
	return tem;
    }
  return op;
}
/* CCP specific front-end to the non-destructive constant folding
   routines.

   Attempt to simplify the RHS of STMT knowing that one or more
   operands are constants.

   If simplification is possible, return the simplified RHS,
   otherwise return the original RHS or NULL_TREE.  */

static tree
ccp_fold (gimple stmt)
{
  location_t loc = gimple_location (stmt);
  switch (gimple_code (stmt))
    {
    case GIMPLE_COND:
      {
	/* Handle comparison operators that can appear in GIMPLE form.  */
	tree op0 = valueize_op (gimple_cond_lhs (stmt));
	tree op1 = valueize_op (gimple_cond_rhs (stmt));
	enum tree_code code = gimple_cond_code (stmt);
	return fold_binary_loc (loc, code, boolean_type_node, op0, op1);
      }

    case GIMPLE_SWITCH:
      {
	/* Return the constant switch index.  */
	return valueize_op (gimple_switch_index (stmt));
      }

    case GIMPLE_ASSIGN:
    case GIMPLE_CALL:
      return gimple_fold_stmt_to_constant_1 (stmt, valueize_op);

    default:
      gcc_unreachable ();
    }
}
/* Apply the operation CODE in type TYPE to the value, mask pair
   RVAL and RMASK representing a value of type RTYPE and set
   the value, mask pair *VAL and *MASK to the result.  */

static void
bit_value_unop_1 (enum tree_code code, tree type,
		  double_int *val, double_int *mask,
		  tree rtype, double_int rval, double_int rmask)
{
  switch (code)
    {
    case BIT_NOT_EXPR:
      *mask = rmask;
      *val = ~rval;
      break;

    case NEGATE_EXPR:
      {
	double_int temv, temm;
	/* Return ~rval + 1.  */
	bit_value_unop_1 (BIT_NOT_EXPR, type, &temv, &temm, type, rval, rmask);
	bit_value_binop_1 (PLUS_EXPR, type, val, mask,
			   type, temv, temm,
			   type, double_int_one, double_int_zero);
	break;
      }

    CASE_CONVERT:
      {
	bool uns;

	/* First extend mask and value according to the original type.  */
	uns = TYPE_UNSIGNED (rtype);
	*mask = rmask.ext (TYPE_PRECISION (rtype), uns);
	*val = rval.ext (TYPE_PRECISION (rtype), uns);

	/* Then extend mask and value according to the target type.  */
	uns = TYPE_UNSIGNED (type);
	*mask = (*mask).ext (TYPE_PRECISION (type), uns);
	*val = (*val).ext (TYPE_PRECISION (type), uns);
	break;
      }

    default:
      *mask = double_int_minus_one;
      break;
    }
}
/* Apply the operation CODE in type TYPE to the value, mask pairs
   R1VAL, R1MASK and R2VAL, R2MASK representing values of type R1TYPE
   and R2TYPE and set the value, mask pair *VAL and *MASK to the result.  */

static void
bit_value_binop_1 (enum tree_code code, tree type,
		   double_int *val, double_int *mask,
		   tree r1type, double_int r1val, double_int r1mask,
		   tree r2type, double_int r2val, double_int r2mask)
{
  bool uns = TYPE_UNSIGNED (type);
  /* Assume we'll get a constant result.  Use an initial varying value,
     we fall back to varying in the end if necessary.  */
  *mask = double_int_minus_one;
  switch (code)
    {
    case BIT_AND_EXPR:
      /* The mask is constant where there is a known not
	 set bit, (m1 | m2) & ((v1 | m1) & (v2 | m2))  */
      *mask = (r1mask | r2mask) & (r1val | r1mask) & (r2val | r2mask);
      *val = r1val & r2val;
      break;
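
    /* Illustrative example (added for exposition): r1 = value 0 with
       mask 0xf0 (low nibble known zero) ANDed with a fully unknown r2
       (mask all ones) gives *mask == 0xf0 and *val == 0 -- r1's known
       zero bits force known zero result bits whatever r2 is.  */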
    case BIT_IOR_EXPR:
      /* The mask is constant where there is a known
	 set bit, (m1 | m2) & ~((v1 & ~m1) | (v2 & ~m2)).  */
      *mask = (r1mask | r2mask)
	      .and_not (r1val.and_not (r1mask) | r2val.and_not (r2mask));
      *val = r1val | r2val;
      break;

    case BIT_XOR_EXPR:
      /* m1 | m2  */
      *mask = r1mask | r2mask;
      *val = r1val ^ r2val;
      break;

    case LROTATE_EXPR:
    case RROTATE_EXPR:
      if (r2mask.is_zero ())
	{
	  HOST_WIDE_INT shift = r2val.low;
	  if (code == RROTATE_EXPR)
	    shift = -shift;
	  *mask = r1mask.lrotate (shift, TYPE_PRECISION (type));
	  *val = r1val.lrotate (shift, TYPE_PRECISION (type));
	}
      break;

    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
      /* ??? We can handle partially known shift counts if we know
	 its sign.  That way we can tell that (x << (y | 8)) & 255
	 is zero.  */
      if (r2mask.is_zero ())
	{
	  HOST_WIDE_INT shift = r2val.low;
	  if (code == RSHIFT_EXPR)
	    shift = -shift;
	  /* We need to know if we are doing a left or a right shift
	     to properly shift in zeros for left shift and unsigned
	     right shifts and the sign bit for signed right shifts.
	     For signed right shifts we shift in varying in case
	     the sign bit was varying.  */
	  if (shift > 0)
	    {
	      *mask = r1mask.llshift (shift, TYPE_PRECISION (type));
	      *val = r1val.llshift (shift, TYPE_PRECISION (type));
	    }
	  else if (shift < 0)
	    {
	      shift = -shift;
	      *mask = r1mask.rshift (shift, TYPE_PRECISION (type), !uns);
	      *val = r1val.rshift (shift, TYPE_PRECISION (type), !uns);
	    }
	  else
	    {
	      *mask = r1mask;
	      *val = r1val;
	    }
	}
      break;
    case PLUS_EXPR:
    case POINTER_PLUS_EXPR:
      {
	double_int lo, hi;
	/* Do the addition with unknown bits set to zero, to give carry-ins of
	   zero wherever possible.  */
	lo = r1val.and_not (r1mask) + r2val.and_not (r2mask);
	lo = lo.ext (TYPE_PRECISION (type), uns);
	/* Do the addition with unknown bits set to one, to give carry-ins of
	   one wherever possible.  */
	hi = (r1val | r1mask) + (r2val | r2mask);
	hi = hi.ext (TYPE_PRECISION (type), uns);
	/* Each bit in the result is known if (a) the corresponding bits in
	   both inputs are known, and (b) the carry-in to that bit position
	   is known.  We can check condition (b) by seeing if we got the same
	   result with minimised carries as with maximised carries.  */
	*mask = r1mask | r2mask | (lo ^ hi);
	*mask = (*mask).ext (TYPE_PRECISION (type), uns);
	/* It shouldn't matter whether we choose lo or hi here.  */
	*val = lo;
	break;
      }
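
    /* Illustrative example (added for exposition): adding value 1
       (mask 0) to value 0 (mask 1) gives lo == 1 and hi == 2, so
       lo ^ hi == 3 and the final mask is 3 -- the unknown low bit can
       carry into bit 1, which therefore becomes unknown as well.  */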
    case MINUS_EXPR:
      {
	double_int temv, temm;
	bit_value_unop_1 (NEGATE_EXPR, r2type, &temv, &temm,
			  r2type, r2val, r2mask);
	bit_value_binop_1 (PLUS_EXPR, type, val, mask,
			   r1type, r1val, r1mask,
			   r2type, temv, temm);
	break;
      }
    case MULT_EXPR:
      {
	/* Just track trailing zeros in both operands and transfer
	   them to the other.  */
	int r1tz = (r1val | r1mask).trailing_zeros ();
	int r2tz = (r2val | r2mask).trailing_zeros ();
	if (r1tz + r2tz >= HOST_BITS_PER_DOUBLE_INT)
	  {
	    *mask = double_int_zero;
	    *val = double_int_zero;
	  }
	else if (r1tz + r2tz > 0)
	  {
	    *mask = ~double_int::mask (r1tz + r2tz);
	    *mask = (*mask).ext (TYPE_PRECISION (type), uns);
	    *val = double_int_zero;
	  }
	break;
      }
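
    /* Illustrative example (added for exposition): multiplying an
       operand with two known trailing zero bits (r1tz == 2) by one
       with a single known trailing zero bit (r2tz == 1) gives a
       product whose low three bits are known zero: *mask == ~7,
       *val == 0.  */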
    case EQ_EXPR:
    case NE_EXPR:
      {
	double_int m = r1mask | r2mask;
	if (r1val.and_not (m) != r2val.and_not (m))
	  {
	    *mask = double_int_zero;
	    *val = ((code == EQ_EXPR) ? double_int_zero : double_int_one);
	  }
	else
	  {
	    /* We know the result of a comparison is always one or zero.  */
	    *mask = double_int_one;
	    *val = double_int_zero;
	  }
	break;
      }
    case GE_EXPR:
    case GT_EXPR:
      {
	double_int tem = r1val;
	r1val = r2val;
	r2val = tem;
	tem = r1mask;
	r1mask = r2mask;
	r2mask = tem;
	code = swap_tree_comparison (code);
      }
      /* Fallthru.  */
    case LT_EXPR:
    case LE_EXPR:
      {
	int minmax, maxmin;

	/* If the most significant bits are not known we know nothing.  */
	if (r1mask.is_negative () || r2mask.is_negative ())
	  break;

	/* For comparisons the signedness is in the comparison operands.  */
	uns = TYPE_UNSIGNED (r1type);

	/* If we know the most significant bits we know the value
	   ranges by means of treating varying bits as zero
	   or one.  Do a cross comparison of the max/min pairs.  */
	maxmin = (r1val | r1mask).cmp (r2val.and_not (r2mask), uns);
	minmax = r1val.and_not (r1mask).cmp (r2val | r2mask, uns);
	if (maxmin < 0)  /* r1 is less than r2.  */
	  {
	    *mask = double_int_zero;
	    *val = double_int_one;
	  }
	else if (minmax > 0)  /* r1 is not less or equal to r2.  */
	  {
	    *mask = double_int_zero;
	    *val = double_int_zero;
	  }
	else if (maxmin == minmax)  /* r1 and r2 are equal.  */
	  {
	    /* This probably should never happen as we'd have
	       folded the thing during fully constant value folding.  */
	    *mask = double_int_zero;
	    *val = (code == LE_EXPR ? double_int_one : double_int_zero);
	  }
	else
	  {
	    /* We know the result of a comparison is always one or zero.  */
	    *mask = double_int_one;
	    *val = double_int_zero;
	  }
	break;
      }

    default:;
    }
}
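
/* Illustrative example (added for exposition, using the routine above):
   with unsigned operands r1 = value 0, mask 0x0f (so r1 <= 15) and
   r2 = value 0x20, mask 0x07 (so 0x20 <= r2 <= 0x27), maxmin compares
   0x0f against 0x20 and is negative, so LT_EXPR folds to constant 1
   even though neither operand is fully known.  */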
/* Return the propagation value when applying the operation CODE to
   the value RHS yielding type TYPE.  */

static prop_value_t
bit_value_unop (enum tree_code code, tree type, tree rhs)
{
  prop_value_t rval = get_value_for_expr (rhs, true);
  double_int value, mask;
  prop_value_t val;

  if (rval.lattice_val == UNDEFINED)
    return rval;

  gcc_assert ((rval.lattice_val == CONSTANT
	       && TREE_CODE (rval.value) == INTEGER_CST)
	      || rval.mask.is_minus_one ());
  bit_value_unop_1 (code, type, &value, &mask,
		    TREE_TYPE (rhs), value_to_double_int (rval), rval.mask);
  if (!mask.is_minus_one ())
    {
      val.lattice_val = CONSTANT;
      val.mask = mask;
      /* ??? Delay building trees here.  */
      val.value = double_int_to_tree (type, value);
    }
  else
    {
      val.lattice_val = VARYING;
      val.value = NULL_TREE;
      val.mask = double_int_minus_one;
    }
  return val;
}
/* Return the propagation value when applying the operation CODE to
   the values RHS1 and RHS2 yielding type TYPE.  */

static prop_value_t
bit_value_binop (enum tree_code code, tree type, tree rhs1, tree rhs2)
{
  prop_value_t r1val = get_value_for_expr (rhs1, true);
  prop_value_t r2val = get_value_for_expr (rhs2, true);
  double_int value, mask;
  prop_value_t val;

  if (r1val.lattice_val == UNDEFINED
      || r2val.lattice_val == UNDEFINED)
    {
      val.lattice_val = VARYING;
      val.value = NULL_TREE;
      val.mask = double_int_minus_one;
      return val;
    }

  gcc_assert ((r1val.lattice_val == CONSTANT
	       && TREE_CODE (r1val.value) == INTEGER_CST)
	      || r1val.mask.is_minus_one ());
  gcc_assert ((r2val.lattice_val == CONSTANT
	       && TREE_CODE (r2val.value) == INTEGER_CST)
	      || r2val.mask.is_minus_one ());
  bit_value_binop_1 (code, type, &value, &mask,
		     TREE_TYPE (rhs1), value_to_double_int (r1val), r1val.mask,
		     TREE_TYPE (rhs2), value_to_double_int (r2val), r2val.mask);
  if (!mask.is_minus_one ())
    {
      val.lattice_val = CONSTANT;
      val.mask = mask;
      /* ??? Delay building trees here.  */
      val.value = double_int_to_tree (type, value);
    }
  else
    {
      val.lattice_val = VARYING;
      val.value = NULL_TREE;
      val.mask = double_int_minus_one;
    }
  return val;
}
/* Return the propagation value when applying __builtin_assume_aligned to
   its arguments.  */

static prop_value_t
bit_value_assume_aligned (gimple stmt)
{
  tree ptr = gimple_call_arg (stmt, 0), align, misalign = NULL_TREE;
  tree type = TREE_TYPE (ptr);
  unsigned HOST_WIDE_INT aligni, misaligni = 0;
  prop_value_t ptrval = get_value_for_expr (ptr, true);
  prop_value_t alignval;
  double_int value, mask;
  prop_value_t val;

  if (ptrval.lattice_val == UNDEFINED)
    return ptrval;

  gcc_assert ((ptrval.lattice_val == CONSTANT
	       && TREE_CODE (ptrval.value) == INTEGER_CST)
	      || ptrval.mask.is_minus_one ());
  align = gimple_call_arg (stmt, 1);
  if (!host_integerp (align, 1))
    return ptrval;
  aligni = tree_low_cst (align, 1);
  if (aligni <= 1
      || (aligni & (aligni - 1)) != 0)
    return ptrval;
  if (gimple_call_num_args (stmt) > 2)
    {
      misalign = gimple_call_arg (stmt, 2);
      if (!host_integerp (misalign, 1))
	return ptrval;
      misaligni = tree_low_cst (misalign, 1);
      if (misaligni >= aligni)
	return ptrval;
    }
  align = build_int_cst_type (type, -aligni);
  alignval = get_value_for_expr (align, true);
  bit_value_binop_1 (BIT_AND_EXPR, type, &value, &mask,
		     type, value_to_double_int (ptrval), ptrval.mask,
		     type, value_to_double_int (alignval), alignval.mask);
  if (!mask.is_minus_one ())
    {
      val.lattice_val = CONSTANT;
      val.mask = mask;
      gcc_assert ((mask.low & (aligni - 1)) == 0);
      gcc_assert ((value.low & (aligni - 1)) == 0);
      value.low |= misaligni;
      /* ??? Delay building trees here.  */
      val.value = double_int_to_tree (type, value);
    }
  else
    {
      val.lattice_val = VARYING;
      val.value = NULL_TREE;
      val.mask = double_int_minus_one;
    }
  return val;
}
/* Evaluate statement STMT.
   Valid only for assignments, calls, conditionals, and switches.  */

static prop_value_t
evaluate_stmt (gimple stmt)
{
  prop_value_t val;
  tree simplified = NULL_TREE;
  ccp_lattice_t likelyvalue = likely_value (stmt);
  bool is_constant = false;
  unsigned int align;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "which is likely ");
      switch (likelyvalue)
	{
	case CONSTANT:
	  fprintf (dump_file, "CONSTANT");
	  break;
	case UNDEFINED:
	  fprintf (dump_file, "UNDEFINED");
	  break;
	case VARYING:
	  fprintf (dump_file, "VARYING");
	  break;
	default:;
	}
      fprintf (dump_file, "\n");
    }

  /* If the statement is likely to have a CONSTANT result, then try
     to fold the statement to determine the constant value.  */
  /* FIXME.  This is the only place that we call ccp_fold.
     Since likely_value never returns CONSTANT for calls, we will
     not attempt to fold them, including builtins that may profit.  */
  if (likelyvalue == CONSTANT)
    {
      fold_defer_overflow_warnings ();
      simplified = ccp_fold (stmt);
      is_constant = simplified && is_gimple_min_invariant (simplified);
      fold_undefer_overflow_warnings (is_constant, stmt, 0);
      if (is_constant)
	{
	  /* The statement produced a constant value.  */
	  val.lattice_val = CONSTANT;
	  val.value = simplified;
	  val.mask = double_int_zero;
	}
    }
  /* If the statement is likely to have a VARYING result, then do not
     bother folding the statement.  */
  else if (likelyvalue == VARYING)
    {
      enum gimple_code code = gimple_code (stmt);
      if (code == GIMPLE_ASSIGN)
	{
	  enum tree_code subcode = gimple_assign_rhs_code (stmt);

	  /* Other cases cannot satisfy is_gimple_min_invariant
	     without folding.  */
	  if (get_gimple_rhs_class (subcode) == GIMPLE_SINGLE_RHS)
	    simplified = gimple_assign_rhs1 (stmt);
	}
      else if (code == GIMPLE_SWITCH)
	simplified = gimple_switch_index (stmt);
      else
	/* These cannot satisfy is_gimple_min_invariant without folding.  */
	gcc_assert (code == GIMPLE_CALL || code == GIMPLE_COND);
      is_constant = simplified && is_gimple_min_invariant (simplified);
      if (is_constant)
	{
	  /* The statement produced a constant value.  */
	  val.lattice_val = CONSTANT;
	  val.value = simplified;
	  val.mask = double_int_zero;
	}
    }

  /* Resort to simplification for bitwise tracking.  */
  if (flag_tree_bit_ccp
      && (likelyvalue == CONSTANT || is_gimple_call (stmt))
      && !is_constant)
    {
      enum gimple_code code = gimple_code (stmt);
      val.lattice_val = VARYING;
      val.value = NULL_TREE;
      val.mask = double_int_minus_one;
      if (code == GIMPLE_ASSIGN)
	{
	  enum tree_code subcode = gimple_assign_rhs_code (stmt);
	  tree rhs1 = gimple_assign_rhs1 (stmt);
	  switch (get_gimple_rhs_class (subcode))
	    {
	    case GIMPLE_SINGLE_RHS:
	      if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
		  || POINTER_TYPE_P (TREE_TYPE (rhs1)))
		val = get_value_for_expr (rhs1, true);
	      break;

	    case GIMPLE_UNARY_RHS:
	      if ((INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
		   || POINTER_TYPE_P (TREE_TYPE (rhs1)))
		  && (INTEGRAL_TYPE_P (gimple_expr_type (stmt))
		      || POINTER_TYPE_P (gimple_expr_type (stmt))))
		val = bit_value_unop (subcode, gimple_expr_type (stmt), rhs1);
	      break;

	    case GIMPLE_BINARY_RHS:
	      if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
		  || POINTER_TYPE_P (TREE_TYPE (rhs1)))
		{
		  tree lhs = gimple_assign_lhs (stmt);
		  tree rhs2 = gimple_assign_rhs2 (stmt);
		  val = bit_value_binop (subcode,
					 TREE_TYPE (lhs), rhs1, rhs2);
		}
	      break;

	    default:;
	    }
	}
      else if (code == GIMPLE_COND)
	{
	  enum tree_code code = gimple_cond_code (stmt);
	  tree rhs1 = gimple_cond_lhs (stmt);
	  tree rhs2 = gimple_cond_rhs (stmt);
	  if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
	      || POINTER_TYPE_P (TREE_TYPE (rhs1)))
	    val = bit_value_binop (code, TREE_TYPE (rhs1), rhs1, rhs2);
	}
      else if (gimple_call_builtin_p (stmt, BUILT_IN_NORMAL))
	{
	  tree fndecl = gimple_call_fndecl (stmt);
	  switch (DECL_FUNCTION_CODE (fndecl))
	    {
	    case BUILT_IN_MALLOC:
	    case BUILT_IN_REALLOC:
	    case BUILT_IN_CALLOC:
	    case BUILT_IN_STRDUP:
	    case BUILT_IN_STRNDUP:
	      val.lattice_val = CONSTANT;
	      val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0);
	      val.mask = double_int::from_shwi
			  (~(((HOST_WIDE_INT) MALLOC_ABI_ALIGNMENT)
			     / BITS_PER_UNIT - 1));
	      break;

	    case BUILT_IN_ALLOCA:
	    case BUILT_IN_ALLOCA_WITH_ALIGN:
	      align = (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_ALLOCA_WITH_ALIGN
		       ? TREE_INT_CST_LOW (gimple_call_arg (stmt, 1))
		       : BIGGEST_ALIGNMENT);
	      val.lattice_val = CONSTANT;
	      val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0);
	      val.mask = double_int::from_shwi (~(((HOST_WIDE_INT) align)
						  / BITS_PER_UNIT - 1));
	      break;

	    /* These builtins return their first argument, unmodified.  */
	    case BUILT_IN_MEMCPY:
	    case BUILT_IN_MEMMOVE:
	    case BUILT_IN_MEMSET:
	    case BUILT_IN_STRCPY:
	    case BUILT_IN_STRNCPY:
	    case BUILT_IN_MEMCPY_CHK:
	    case BUILT_IN_MEMMOVE_CHK:
	    case BUILT_IN_MEMSET_CHK:
	    case BUILT_IN_STRCPY_CHK:
	    case BUILT_IN_STRNCPY_CHK:
	      val = get_value_for_expr (gimple_call_arg (stmt, 0), true);
	      break;

	    case BUILT_IN_ASSUME_ALIGNED:
	      val = bit_value_assume_aligned (stmt);
	      break;

	    default:;
	    }
	}
      is_constant = (val.lattice_val == CONSTANT);
    }

  if (!is_constant)
    {
      /* The statement produced a nonconstant value.  If the statement
	 had UNDEFINED operands, then the result of the statement
	 should be UNDEFINED.  Otherwise, the statement is VARYING.  */
      if (likelyvalue == UNDEFINED)
	{
	  val.lattice_val = likelyvalue;
	  val.mask = double_int_zero;
	}
      else
	{
	  val.lattice_val = VARYING;
	  val.mask = double_int_minus_one;
	}

      val.value = NULL_TREE;
    }

  return val;
}
typedef hash_table <pointer_hash <gimple_statement_d> > gimple_htab;
/* Given a BUILT_IN_STACK_SAVE value SAVED_VAL, insert a clobber of VAR before
   each matching BUILT_IN_STACK_RESTORE.  Mark visited phis in VISITED.  */

static void
insert_clobber_before_stack_restore (tree saved_val, tree var,
				     gimple_htab *visited)
{
  gimple stmt, clobber_stmt;
  tree clobber;
  imm_use_iterator iter;
  gimple_stmt_iterator i;
  gimple *slot;

  FOR_EACH_IMM_USE_STMT (stmt, iter, saved_val)
    if (gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
      {
	clobber = build_constructor (TREE_TYPE (var),
				     NULL);
	TREE_THIS_VOLATILE (clobber) = 1;
	clobber_stmt = gimple_build_assign (var, clobber);

	i = gsi_for_stmt (stmt);
	gsi_insert_before (&i, clobber_stmt, GSI_SAME_STMT);
      }
    else if (gimple_code (stmt) == GIMPLE_PHI)
      {
	if (!visited->is_created ())
	  visited->create (10);

	slot = visited->find_slot (stmt, INSERT);
	if (*slot != NULL)
	  continue;

	*slot = stmt;
	insert_clobber_before_stack_restore (gimple_phi_result (stmt), var,
					     visited);
      }
    else if (gimple_assign_ssa_name_copy_p (stmt))
      insert_clobber_before_stack_restore (gimple_assign_lhs (stmt), var,
					   visited);
    else
      gcc_assert (is_gimple_debug (stmt));
}
/* Advance the iterator to the previous non-debug gimple statement in the same
   or dominating basic block.  */

static inline void
gsi_prev_dom_bb_nondebug (gimple_stmt_iterator *i)
{
  basic_block dom;

  gsi_prev_nondebug (i);
  while (gsi_end_p (*i))
    {
      dom = get_immediate_dominator (CDI_DOMINATORS, i->bb);
      if (dom == NULL || dom == ENTRY_BLOCK_PTR)
	return;

      *i = gsi_last_bb (dom);
    }
}
/* Find a BUILT_IN_STACK_SAVE dominating gsi_stmt (I), and insert
   a clobber of VAR before each matching BUILT_IN_STACK_RESTORE.

   It is possible that BUILT_IN_STACK_SAVE cannot be found in a dominator when
   a previous pass (such as DOM) duplicated it along multiple paths to a BB.
   In that case the function gives up without inserting the clobbers.  */

static void
insert_clobbers_for_var (gimple_stmt_iterator i, tree var)
{
  gimple stmt;
  tree saved_val;
  gimple_htab visited;

  for (; !gsi_end_p (i); gsi_prev_dom_bb_nondebug (&i))
    {
      stmt = gsi_stmt (i);

      if (!gimple_call_builtin_p (stmt, BUILT_IN_STACK_SAVE))
	continue;

      saved_val = gimple_call_lhs (stmt);
      if (saved_val == NULL_TREE)
	continue;

      insert_clobber_before_stack_restore (saved_val, var, &visited);
      break;
    }

  if (visited.is_created ())
    visited.dispose ();
}
/* Detects a __builtin_alloca_with_align with constant size argument.  Declares
   fixed-size array and returns the address, if found, otherwise returns
   NULL_TREE.  */

static tree
fold_builtin_alloca_with_align (gimple stmt)
{
  unsigned HOST_WIDE_INT size, threshold, n_elem;
  tree lhs, arg, block, var, elem_type, array_type;

  /* Get lhs.  */
  lhs = gimple_call_lhs (stmt);
  if (lhs == NULL_TREE)
    return NULL_TREE;

  /* Detect constant argument.  */
  arg = get_constant_value (gimple_call_arg (stmt, 0));
  if (arg == NULL_TREE
      || TREE_CODE (arg) != INTEGER_CST
      || !host_integerp (arg, 1))
    return NULL_TREE;

  size = TREE_INT_CST_LOW (arg);

  /* Heuristic: don't fold large allocas.  */
  threshold = (unsigned HOST_WIDE_INT)PARAM_VALUE (PARAM_LARGE_STACK_FRAME);
  /* In case the alloca is located at function entry, it has the same lifetime
     as a declared array, so we allow a larger size.  */
  block = gimple_block (stmt);
  if (!(cfun->after_inlining
	&& TREE_CODE (BLOCK_SUPERCONTEXT (block)) == FUNCTION_DECL))
    threshold /= 10;
  if (size > threshold)
    return NULL_TREE;

  /* Declare array.  */
  elem_type = build_nonstandard_integer_type (BITS_PER_UNIT, 1);
  n_elem = size * 8 / BITS_PER_UNIT;
  array_type = build_array_type_nelts (elem_type, n_elem);
  var = create_tmp_var (array_type, NULL);
  DECL_ALIGN (var) = TREE_INT_CST_LOW (gimple_call_arg (stmt, 1));
  {
    struct ptr_info_def *pi = SSA_NAME_PTR_INFO (lhs);
    if (pi != NULL && !pi->pt.anything)
      {
	bool singleton_p;
	unsigned uid;
	singleton_p = pt_solution_singleton_p (&pi->pt, &uid);
	gcc_assert (singleton_p);
	SET_DECL_PT_UID (var, uid);
      }
  }

  /* Fold alloca to the address of the array.  */
  return fold_convert (TREE_TYPE (lhs), build_fold_addr_expr (var));
}
/* Fold the stmt at *GSI with CCP specific information that propagating
   and regular folding does not catch.  */

static bool
ccp_fold_stmt (gimple_stmt_iterator *gsi)
{
  gimple stmt = gsi_stmt (*gsi);

  switch (gimple_code (stmt))
    {
    case GIMPLE_COND:
      {
	prop_value_t val;
	/* Statement evaluation will handle type mismatches in constants
	   more gracefully than the final propagation.  This allows us to
	   fold more conditionals here.  */
	val = evaluate_stmt (stmt);
	if (val.lattice_val != CONSTANT
	    || !val.mask.is_zero ())
	  return false;

	if (dump_file)
	  {
	    fprintf (dump_file, "Folding predicate ");
	    print_gimple_expr (dump_file, stmt, 0, 0);
	    fprintf (dump_file, " to ");
	    print_generic_expr (dump_file, val.value, 0);
	    fprintf (dump_file, "\n");
	  }

	if (integer_zerop (val.value))
	  gimple_cond_make_false (stmt);
	else
	  gimple_cond_make_true (stmt);

	return true;
      }

    case GIMPLE_CALL:
      {
	tree lhs = gimple_call_lhs (stmt);
	int flags = gimple_call_flags (stmt);
	tree val;
	tree argt;
	bool changed = false;
	unsigned i;

	/* If the call was folded into a constant make sure it goes
	   away even if we cannot propagate into all uses because of
	   type issues.  */
	if (lhs
	    && TREE_CODE (lhs) == SSA_NAME
	    && (val = get_constant_value (lhs))
	    /* Don't optimize away calls that have side-effects.  */
	    && (flags & (ECF_CONST|ECF_PURE)) != 0
	    && (flags & ECF_LOOPING_CONST_OR_PURE) == 0)
	  {
	    tree new_rhs = unshare_expr (val);
	    bool res;
	    if (!useless_type_conversion_p (TREE_TYPE (lhs),
					    TREE_TYPE (new_rhs)))
	      new_rhs = fold_convert (TREE_TYPE (lhs), new_rhs);
	    res = update_call_from_tree (gsi, new_rhs);
	    gcc_assert (res);
	    return true;
	  }

	/* Internal calls provide no argument types, so the extra laxity
	   for normal calls does not apply.  */
	if (gimple_call_internal_p (stmt))
	  return false;

	/* The heuristic of fold_builtin_alloca_with_align differs before and
	   after inlining, so we don't require the arg to be changed into a
	   constant for folding, but just to be constant.  */
	if (gimple_call_builtin_p (stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
	  {
	    tree new_rhs = fold_builtin_alloca_with_align (stmt);
	    if (new_rhs)
	      {
		bool res = update_call_from_tree (gsi, new_rhs);
		tree var = TREE_OPERAND (TREE_OPERAND (new_rhs, 0),0);
		gcc_assert (res);
		insert_clobbers_for_var (*gsi, var);
		return true;
	      }
	  }

	/* Propagate into the call arguments.  Compared to replace_uses_in
	   this can use the argument slot types for type verification
	   instead of the current argument type.  We also can safely
	   drop qualifiers here as we are dealing with constants anyway.  */
	argt = TYPE_ARG_TYPES (gimple_call_fntype (stmt));
	for (i = 0; i < gimple_call_num_args (stmt) && argt;
	     ++i, argt = TREE_CHAIN (argt))
	  {
	    tree arg = gimple_call_arg (stmt, i);
	    if (TREE_CODE (arg) == SSA_NAME
		&& (val = get_constant_value (arg))
		&& useless_type_conversion_p
		     (TYPE_MAIN_VARIANT (TREE_VALUE (argt)),
		      TYPE_MAIN_VARIANT (TREE_TYPE (val))))
	      {
		gimple_call_set_arg (stmt, i, unshare_expr (val));
		changed = true;
	      }
	  }

	return changed;
      }

    case GIMPLE_ASSIGN:
      {
	tree lhs = gimple_assign_lhs (stmt);
	tree val;

	/* If we have a load that turned out to be constant replace it
	   as we cannot propagate into all uses in all cases.  */
	if (gimple_assign_single_p (stmt)
	    && TREE_CODE (lhs) == SSA_NAME
	    && (val = get_constant_value (lhs)))
	  {
	    tree rhs = unshare_expr (val);
	    if (!useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (rhs)))
	      rhs = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (lhs), rhs);
	    gimple_assign_set_rhs_from_tree (gsi, rhs);
	    return true;
	  }

	return false;
      }

    default:
      return false;
    }
}
/* Visit the assignment statement STMT.  Set the value of its LHS to the
   value computed by the RHS and store LHS in *OUTPUT_P.  If STMT
   creates virtual definitions, set the value of each new name to that
   of the RHS (if we can derive a constant out of the RHS).
   Value-returning call statements also perform an assignment, and
   are handled here.  */

static enum ssa_prop_result
visit_assignment (gimple stmt, tree *output_p)
{
  prop_value_t val;
  enum ssa_prop_result retval;

  tree lhs = gimple_get_lhs (stmt);

  gcc_assert (gimple_code (stmt) != GIMPLE_CALL
	      || gimple_call_lhs (stmt) != NULL_TREE);

  if (gimple_assign_single_p (stmt)
      && gimple_assign_rhs_code (stmt) == SSA_NAME)
    /* For a simple copy operation, we copy the lattice values.  */
    val = *get_value (gimple_assign_rhs1 (stmt));
  else
    /* Evaluate the statement, which could be
       either a GIMPLE_ASSIGN or a GIMPLE_CALL.  */
    val = evaluate_stmt (stmt);

  retval = SSA_PROP_NOT_INTERESTING;

  /* Set the lattice value of the statement's output.  */
  if (TREE_CODE (lhs) == SSA_NAME)
    {
      /* If STMT is an assignment to an SSA_NAME, we only have one
	 value to set.  */
      if (set_lattice_value (lhs, val))
	{
	  *output_p = lhs;
	  if (val.lattice_val == VARYING)
	    retval = SSA_PROP_VARYING;
	  else
	    retval = SSA_PROP_INTERESTING;
	}
    }

  return retval;
}
/* Visit the conditional statement STMT.  Return SSA_PROP_INTERESTING
   if it can determine which edge will be taken.  Otherwise, return
   SSA_PROP_VARYING.  */

static enum ssa_prop_result
visit_cond_stmt (gimple stmt, edge *taken_edge_p)
{
  prop_value_t val;
  basic_block block;

  block = gimple_bb (stmt);
  val = evaluate_stmt (stmt);
  if (val.lattice_val != CONSTANT
      || !val.mask.is_zero ())
    return SSA_PROP_VARYING;

  /* Find which edge out of the conditional block will be taken and add it
     to the worklist.  If no single edge can be determined statically,
     return SSA_PROP_VARYING to feed all the outgoing edges to the
     propagation engine.  */
  *taken_edge_p = find_taken_edge (block, val.value);
  if (*taken_edge_p)
    return SSA_PROP_INTERESTING;
  else
    return SSA_PROP_VARYING;
}
/* Evaluate statement STMT.  If the statement produces an output value and
   its evaluation changes the lattice value of its output, return
   SSA_PROP_INTERESTING and set *OUTPUT_P to the SSA_NAME holding the
   output value.

   If STMT is a conditional branch and we can determine its truth
   value, set *TAKEN_EDGE_P accordingly.  If STMT produces a varying
   value, return SSA_PROP_VARYING.  */

static enum ssa_prop_result
ccp_visit_stmt (gimple stmt, edge *taken_edge_p, tree *output_p)
{
  tree def;
  ssa_op_iter iter;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nVisiting statement:\n");
      print_gimple_stmt (dump_file, stmt, 0, dump_flags);
    }

  switch (gimple_code (stmt))
    {
      case GIMPLE_ASSIGN:
	/* If the statement is an assignment that produces a single
	   output value, evaluate its RHS to see if the lattice value of
	   its output has changed.  */
	return visit_assignment (stmt, output_p);

      case GIMPLE_CALL:
	/* A value-returning call also performs an assignment.  */
	if (gimple_call_lhs (stmt) != NULL_TREE)
	  return visit_assignment (stmt, output_p);
	break;

      case GIMPLE_COND:
      case GIMPLE_SWITCH:
	/* If STMT is a conditional branch, see if we can determine
	   which branch will be taken.  */
	/* FIXME.  It appears that we should be able to optimize
	   computed GOTOs here as well.  */
	return visit_cond_stmt (stmt, taken_edge_p);

      default:
	break;
    }

  /* Any other kind of statement is not interesting for constant
     propagation and, therefore, not worth simulating.  */
  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "No interesting values produced.  Marked VARYING.\n");

  /* Definitions made by statements other than assignments to
     SSA_NAMEs represent unknown modifications to their outputs.
     Mark them VARYING.  */
  FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
    {
      prop_value_t v = { VARYING, NULL_TREE, { -1, (HOST_WIDE_INT) -1 } };
      set_lattice_value (def, v);
    }

  return SSA_PROP_VARYING;
}
/* Main entry point for SSA Conditional Constant Propagation.  */

static unsigned int
do_ssa_ccp (void)
{
  unsigned int todo = 0;
  calculate_dominance_info (CDI_DOMINATORS);
  ccp_initialize ();
  ssa_propagate (ccp_visit_stmt, ccp_visit_phi_node);
  if (ccp_finalize ())
    todo = (TODO_cleanup_cfg | TODO_update_ssa);
  free_dominance_info (CDI_DOMINATORS);
  return todo;
}


static bool
gate_ccp (void)
{
  return flag_tree_ccp != 0;
}
namespace {

const pass_data pass_data_ccp =
{
  GIMPLE_PASS, /* type */
  "ccp", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  true, /* has_gate */
  true, /* has_execute */
  TV_TREE_CCP, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_verify_ssa | TODO_update_address_taken
    | TODO_verify_stmts ), /* todo_flags_finish */
};

class pass_ccp : public gimple_opt_pass
{
public:
  pass_ccp (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_ccp, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_ccp (m_ctxt); }
  bool gate () { return gate_ccp (); }
  unsigned int execute () { return do_ssa_ccp (); }

}; // class pass_ccp

} // anon namespace

gimple_opt_pass *
make_pass_ccp (gcc::context *ctxt)
{
  return new pass_ccp (ctxt);
}
/* Try to optimize out __builtin_stack_restore.  Optimize it out
   if there is another __builtin_stack_restore in the same basic
   block and no calls or ASM_EXPRs are in between, or if this block's
   only outgoing edge is to EXIT_BLOCK and there are no calls or
   ASM_EXPRs after this __builtin_stack_restore.  */

static tree
optimize_stack_restore (gimple_stmt_iterator i)
{
  tree callee;
  gimple stmt;

  basic_block bb = gsi_bb (i);
  gimple call = gsi_stmt (i);

  if (gimple_code (call) != GIMPLE_CALL
      || gimple_call_num_args (call) != 1
      || TREE_CODE (gimple_call_arg (call, 0)) != SSA_NAME
      || !POINTER_TYPE_P (TREE_TYPE (gimple_call_arg (call, 0))))
    return NULL_TREE;

  for (gsi_next (&i); !gsi_end_p (i); gsi_next (&i))
    {
      stmt = gsi_stmt (i);
      if (gimple_code (stmt) == GIMPLE_ASM)
	return NULL_TREE;
      if (gimple_code (stmt) != GIMPLE_CALL)
	continue;

      callee = gimple_call_fndecl (stmt);
      if (!callee
	  || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL
	  /* All regular builtins are ok, just obviously not alloca.  */
	  || DECL_FUNCTION_CODE (callee) == BUILT_IN_ALLOCA
	  || DECL_FUNCTION_CODE (callee) == BUILT_IN_ALLOCA_WITH_ALIGN)
	return NULL_TREE;

      if (DECL_FUNCTION_CODE (callee) == BUILT_IN_STACK_RESTORE)
	goto second_stack_restore;
    }

  if (!gsi_end_p (i))
    return NULL_TREE;

  /* Allow one successor of the exit block, or zero successors.  */
  switch (EDGE_COUNT (bb->succs))
    {
    case 0:
      break;
    case 1:
      if (single_succ_edge (bb)->dest != EXIT_BLOCK_PTR)
	return NULL_TREE;
      break;
    default:
      return NULL_TREE;
    }

 second_stack_restore:

  /* If there's exactly one use, then zap the call to __builtin_stack_save.
     If there are multiple uses, then the last one should remove the call.
     In any case, whether the call to __builtin_stack_save can be removed
     or not is irrelevant to removing the call to __builtin_stack_restore.  */
  if (has_single_use (gimple_call_arg (call, 0)))
    {
      gimple stack_save = SSA_NAME_DEF_STMT (gimple_call_arg (call, 0));
      if (is_gimple_call (stack_save))
	{
	  callee = gimple_call_fndecl (stack_save);
	  if (callee
	      && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
	      && DECL_FUNCTION_CODE (callee) == BUILT_IN_STACK_SAVE)
	    {
	      gimple_stmt_iterator stack_save_gsi;
	      tree rhs;

	      stack_save_gsi = gsi_for_stmt (stack_save);
	      rhs = build_int_cst (TREE_TYPE (gimple_call_arg (call, 0)), 0);
	      update_call_from_tree (&stack_save_gsi, rhs);
	    }
	}
    }

  /* No effect, so the statement will be deleted.  */
  return integer_zero_node;
}
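
/* As a concrete (hypothetical) illustration: after inlining a function
   that used a variable-length array, a block may end up as

     saved_sp_1 = __builtin_stack_save ();
     ...
     __builtin_stack_restore (saved_sp_1);
     __builtin_stack_restore (saved_sp_0);

   With no calls or asms between the two restores, the first one is
   redundant (the second restores an even earlier stack pointer), so
   the routine above lets the caller delete it; and when saved_sp_1 had
   its single use there, the matching __builtin_stack_save is zapped by
   replacing it with a constant-zero RHS.  */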
/* If va_list type is a simple pointer and nothing special is needed,
   optimize __builtin_va_start (&ap, 0) into ap = __builtin_next_arg (0),
   __builtin_va_end (&ap) out as NOP and __builtin_va_copy into a simple
   pointer assignment.  */

static tree
optimize_stdarg_builtin (gimple call)
{
  tree callee, lhs, rhs, cfun_va_list;
  bool va_list_simple_ptr;
  location_t loc = gimple_location (call);

  if (gimple_code (call) != GIMPLE_CALL)
    return NULL_TREE;

  callee = gimple_call_fndecl (call);

  cfun_va_list = targetm.fn_abi_va_list (callee);
  va_list_simple_ptr = POINTER_TYPE_P (cfun_va_list)
		       && (TREE_TYPE (cfun_va_list) == void_type_node
			   || TREE_TYPE (cfun_va_list) == char_type_node);

  switch (DECL_FUNCTION_CODE (callee))
    {
    case BUILT_IN_VA_START:
      if (!va_list_simple_ptr
	  || targetm.expand_builtin_va_start != NULL
	  || !builtin_decl_explicit_p (BUILT_IN_NEXT_ARG))
	return NULL_TREE;

      if (gimple_call_num_args (call) != 2)
	return NULL_TREE;

      lhs = gimple_call_arg (call, 0);
      if (!POINTER_TYPE_P (TREE_TYPE (lhs))
	  || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs)))
	     != TYPE_MAIN_VARIANT (cfun_va_list))
	return NULL_TREE;

      lhs = build_fold_indirect_ref_loc (loc, lhs);
      rhs = build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_NEXT_ARG),
				 1, integer_zero_node);
      rhs = fold_convert_loc (loc, TREE_TYPE (lhs), rhs);
      return build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, rhs);

    case BUILT_IN_VA_COPY:
      if (!va_list_simple_ptr)
	return NULL_TREE;

      if (gimple_call_num_args (call) != 2)
	return NULL_TREE;

      lhs = gimple_call_arg (call, 0);
      if (!POINTER_TYPE_P (TREE_TYPE (lhs))
	  || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs)))
	     != TYPE_MAIN_VARIANT (cfun_va_list))
	return NULL_TREE;

      lhs = build_fold_indirect_ref_loc (loc, lhs);
      rhs = gimple_call_arg (call, 1);
      if (TYPE_MAIN_VARIANT (TREE_TYPE (rhs))
	  != TYPE_MAIN_VARIANT (cfun_va_list))
	return NULL_TREE;

      rhs = fold_convert_loc (loc, TREE_TYPE (lhs), rhs);
      return build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, rhs);

    case BUILT_IN_VA_END:
      /* No effect, so the statement will be deleted.  */
      return integer_zero_node;

    default:
      gcc_unreachable ();
    }
}
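
/* For example, on a target whose va_list is a plain char pointer and
   which has no special va_start expander, the (illustrative) call

     __builtin_va_start (&ap, 0);

   is rewritten by the VA_START case above into the plain assignment

     ap = __builtin_next_arg (0);

   and __builtin_va_end (&ap) simply disappears.  On targets with a
   structured va_list (e.g. an array of structs, as on x86-64) none of
   the cases match and NULL_TREE is returned, leaving the builtin for
   pass_stdarg to handle.  */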
/* Attempt to make the block of __builtin_unreachable I unreachable by changing
   the incoming jumps.  Return true if at least one jump was changed.  */

static bool
optimize_unreachable (gimple_stmt_iterator i)
{
  basic_block bb = gsi_bb (i);
  gimple_stmt_iterator gsi;
  gimple stmt;
  edge_iterator ei;
  edge e;
  bool ret;

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      stmt = gsi_stmt (gsi);

      if (is_gimple_debug (stmt))
	continue;

      if (gimple_code (stmt) == GIMPLE_LABEL)
	{
	  /* Verify we do not need to preserve the label.  */
	  if (FORCED_LABEL (gimple_label_label (stmt)))
	    return false;

	  continue;
	}

      /* Only handle the case that __builtin_unreachable is the first statement
	 in the block.  We rely on DCE to remove stmts without side-effects
	 before __builtin_unreachable.  */
      if (gsi_stmt (gsi) != gsi_stmt (i))
	return false;
    }

  ret = false;
  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      gsi = gsi_last_bb (e->src);
      if (gsi_end_p (gsi))
	continue;

      stmt = gsi_stmt (gsi);
      if (gimple_code (stmt) == GIMPLE_COND)
	{
	  if (e->flags & EDGE_TRUE_VALUE)
	    gimple_cond_make_false (stmt);
	  else if (e->flags & EDGE_FALSE_VALUE)
	    gimple_cond_make_true (stmt);
	  else
	    gcc_unreachable ();
	  update_stmt (stmt);
	}
      else
	{
	  /* Todo: handle other cases, f.i. switch statement.  */
	  continue;
	}

      ret = true;
    }

  return ret;
}
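
/* By way of a made-up example, given

     <bb 2>:
       if (n_1 > 64)
	 goto <bb 3>;		[EDGE_TRUE_VALUE]
       else
	 goto <bb 4>;

     <bb 3>:
       __builtin_unreachable ();

   the condition in bb 2 is rewritten to an always-false one, so bb 3
   loses its incoming edge and a later CFG cleanup removes it.  In
   effect the builtin tells the optimizers that n_1 <= 64 holds.  */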
/* A simple pass that attempts to fold all builtin functions.  This pass
   is run after we've propagated as many constants as we can.  */

static unsigned int
execute_fold_all_builtins (void)
{
  bool cfg_changed = false;
  basic_block bb;
  unsigned int todoflags = 0;

  FOR_EACH_BB (bb)
    {
      gimple_stmt_iterator i;
      for (i = gsi_start_bb (bb); !gsi_end_p (i); )
	{
	  gimple stmt, old_stmt;
	  tree callee, result;
	  enum built_in_function fcode;

	  stmt = gsi_stmt (i);

	  if (gimple_code (stmt) != GIMPLE_CALL)
	    {
	      /* Remove all *ssaname_N ={v} {CLOBBER}; stmts,
		 after the last GIMPLE DSE they aren't needed and might
		 unnecessarily keep the SSA_NAMEs live.  */
	      if (gimple_clobber_p (stmt))
		{
		  tree lhs = gimple_assign_lhs (stmt);
		  if (TREE_CODE (lhs) == MEM_REF
		      && TREE_CODE (TREE_OPERAND (lhs, 0)) == SSA_NAME)
		    {
		      unlink_stmt_vdef (stmt);
		      gsi_remove (&i, true);
		      release_defs (stmt);
		      continue;
		    }
		}
	      gsi_next (&i);
	      continue;
	    }

	  callee = gimple_call_fndecl (stmt);
	  if (!callee || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL)
	    {
	      gsi_next (&i);
	      continue;
	    }

	  fcode = DECL_FUNCTION_CODE (callee);

	  result = gimple_fold_builtin (stmt);

	  if (result)
	    gimple_remove_stmt_histograms (cfun, stmt);

	  if (!result)
	    switch (DECL_FUNCTION_CODE (callee))
	      {
	      case BUILT_IN_CONSTANT_P:
		/* Resolve __builtin_constant_p.  If it hasn't been
		   folded to integer_one_node by now, it's fairly
		   certain that the value simply isn't constant.  */
		result = integer_zero_node;
		break;

	      case BUILT_IN_ASSUME_ALIGNED:
		/* Remove __builtin_assume_aligned.  */
		result = gimple_call_arg (stmt, 0);
		break;

	      case BUILT_IN_STACK_RESTORE:
		result = optimize_stack_restore (i);
		if (result)
		  break;
		gsi_next (&i);
		continue;

	      case BUILT_IN_UNREACHABLE:
		if (optimize_unreachable (i))
		  cfg_changed = true;
		break;

	      case BUILT_IN_VA_START:
	      case BUILT_IN_VA_END:
	      case BUILT_IN_VA_COPY:
		/* These shouldn't be folded before pass_stdarg.  */
		result = optimize_stdarg_builtin (stmt);
		if (result)
		  break;
		/* FALLTHRU */

	      default:
		gsi_next (&i);
		continue;
	      }

	  if (result == NULL_TREE)
	    break;

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Simplified\n  ");
	      print_gimple_stmt (dump_file, stmt, 0, dump_flags);
	    }

	  old_stmt = stmt;
	  if (!update_call_from_tree (&i, result))
	    {
	      gimplify_and_update_call_from_tree (&i, result);
	      todoflags |= TODO_update_address_taken;
	    }

	  stmt = gsi_stmt (i);
	  update_stmt (stmt);

	  if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt)
	      && gimple_purge_dead_eh_edges (bb))
	    cfg_changed = true;

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "to\n  ");
	      print_gimple_stmt (dump_file, stmt, 0, dump_flags);
	      fprintf (dump_file, "\n");
	    }

	  /* Retry the same statement if it changed into another
	     builtin, there might be new opportunities now.  */
	  if (gimple_code (stmt) != GIMPLE_CALL)
	    {
	      gsi_next (&i);
	      continue;
	    }
	  callee = gimple_call_fndecl (stmt);
	  if (!callee
	      || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL
	      || DECL_FUNCTION_CODE (callee) == fcode)
	    gsi_next (&i);
	}
    }

  /* Delete unreachable blocks.  */
  if (cfg_changed)
    todoflags |= TODO_cleanup_cfg;

  return todoflags;
}
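
/* A typical (illustrative) simplification this loop performs: a call

     len_2 = __builtin_strlen ("abc");

   that survived into this pass is folded by gimple_fold_builtin into
   the constant 3, and a leftover __builtin_constant_p (x_1) that was
   never proven constant is resolved to 0 here, since no later pass
   could do better.  The SSA names are hypothetical.  */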
namespace {

const pass_data pass_data_fold_builtins =
{
  GIMPLE_PASS, /* type */
  "fab", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  false, /* has_gate */
  true, /* has_execute */
  TV_NONE, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_verify_ssa | TODO_update_ssa ), /* todo_flags_finish */
};
class pass_fold_builtins : public gimple_opt_pass
{
public:
  pass_fold_builtins (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_fold_builtins, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_fold_builtins (m_ctxt); }
  unsigned int execute () { return execute_fold_all_builtins (); }

}; // class pass_fold_builtins

} // anon namespace
gimple_opt_pass *
make_pass_fold_builtins (gcc::context *ctxt)
{
  return new pass_fold_builtins (ctxt);
}