/* Conditional constant propagation pass for the GNU compiler.
   Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009,
   2010 Free Software Foundation, Inc.
   Adapted from original RTL SSA-CCP by Daniel Berlin <dberlin@dberlin.org>
   Adapted to GIMPLE trees by Diego Novillo <dnovillo@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not, see
<http://www.gnu.org/licenses/>.  */
/* Conditional constant propagation (CCP) is based on the SSA
   propagation engine (tree-ssa-propagate.c).  Constant assignments of
   the form VAR = CST are propagated from the assignments into uses of
   VAR, which in turn may generate new constants.  The simulation uses
   a four level lattice to keep track of constant values associated
   with SSA names.  Given an SSA name V_i, it may take one of the
   following values:

	UNINITIALIZED   ->  the initial state of the value.  This value
			    is replaced with a correct initial value
			    the first time the value is used, so the
			    rest of the pass does not need to care about
			    it.  Using this value simplifies initialization
			    of the pass, and prevents us from needlessly
			    scanning statements that are never reached.

	UNDEFINED	->  V_i is a local variable whose definition
			    has not been processed yet.  Therefore we
			    don't yet know if its value is a constant
			    or not.

	CONSTANT	->  V_i has been found to hold a constant
			    value C.

	VARYING		->  V_i cannot take a constant value, or if it
			    does, it is not possible to determine it
			    at compile time.

   The core of SSA-CCP is in ccp_visit_stmt and ccp_visit_phi_node:

   1- In ccp_visit_stmt, we are interested in assignments whose RHS
      evaluates into a constant and conditional jumps whose predicate
      evaluates into a boolean true or false.  When an assignment of
      the form V_i = CONST is found, V_i's lattice value is set to
      CONSTANT and CONST is associated with it.  This causes the
      propagation engine to add all the SSA edges coming out of the
      assignment into the worklists, so that statements that use V_i
      can be visited.

      If the statement is a conditional with a constant predicate, we
      mark the outgoing edges as executable or not executable
      depending on the predicate's value.  This is then used when
      visiting PHI nodes to know when a PHI argument can be ignored.

   2- In ccp_visit_phi_node, if all the PHI arguments evaluate to the
      same constant C, then the LHS of the PHI is set to C.  This
      evaluation is known as the "meet operation".  Since one of the
      goals of this evaluation is to optimistically return constant
      values as often as possible, it uses two main short cuts:

      - If an argument is flowing in through a non-executable edge, it
	is ignored.  This is useful in cases like this:

			if (PRED)
			  a_9 = 3;
			else
			  a_10 = 100;
			a_11 = PHI (a_9, a_10)

	If PRED is known to always evaluate to false, then we can
	assume that a_11 will always take its value from a_10, meaning
	that instead of considering it VARYING (a_9 and a_10 have
	different values), we can consider it CONSTANT 100.

      - If an argument has an UNDEFINED value, then it does not affect
	the outcome of the meet operation.  If a variable V_i has an
	UNDEFINED value, it means that either its defining statement
	hasn't been visited yet or V_i has no defining statement, in
	which case the original symbol 'V' is being used
	uninitialized.  Since 'V' is a local variable, the compiler
	may assume any initial value for it.
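
	For instance (an illustrative example added here, not taken
	from actual compiler output):

			if (x_5 > 10)
			  y_2 = 3;
			# y_1(D) is 'y' used uninitialized
			z_8 = PHI (y_1(D), y_2)

	The argument y_1(D) is UNDEFINED, so the meet operation may
	optimistically ignore it and give z_8 the value CONSTANT 3.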
   After propagation, every variable V_i that ends up with a lattice
   value of CONSTANT will have the associated constant value in the
   array CONST_VAL[i].VALUE.  That is fed into substitute_and_fold for
   final substitution and folding.

   References:

     Constant propagation with conditional branches,
     Wegman and Zadeck, ACM TOPLAS 13(2):181-210.

     Building an Optimizing Compiler,
     Robert Morgan, Butterworth-Heinemann, 1998, Section 8.9.

     Advanced Compiler Design and Implementation,
     Steven Muchnick, Morgan Kaufmann, 1997, Section 12.6  */
#include "coretypes.h"
#include "basic-block.h"
#include "function.h"
#include "tree-pretty-print.h"
#include "gimple-pretty-print.h"
#include "tree-dump.h"
#include "tree-flow.h"
#include "tree-pass.h"
#include "tree-ssa-propagate.h"
#include "value-prof.h"
#include "langhooks.h"
#include "diagnostic-core.h"
/* Possible lattice values.  */
typedef enum
{
  UNINITIALIZED,
  UNDEFINED,
  CONSTANT,
  VARYING
} ccp_lattice_t;

struct prop_value_d {
    /* Lattice value.  */
    ccp_lattice_t lattice_val;

    /* Propagated value.  */
    tree value;

    /* Mask that applies to the propagated value during CCP.  For
       X with a CONSTANT lattice value X & ~mask == value & ~mask.  */
    double_int mask;
};

typedef struct prop_value_d prop_value_t;
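
/* A worked example of the mask semantics above (illustrative only,
   not part of the original sources): a pointer known to be 16-byte
   aligned but otherwise unknown is tracked as

     lattice_val = CONSTANT
     value       = 0      the bits that are known are known to be zero
     mask        = ~15    set bits are still unknown; only the low four
                          bits are known

   For any runtime value X the invariant X & ~mask == value & ~mask
   holds, i.e. X & 15 == 0, which is exactly the alignment fact.  */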
/* Array of propagated constant values.  After propagation,
   CONST_VAL[I].VALUE holds the constant value for SSA_NAME(I).  If
   the constant is held in an SSA name representing a memory store
   (i.e., a VDEF), CONST_VAL[I].MEM_REF will contain the actual
   memory reference used to store (i.e., the LHS of the assignment
   doing the store).  */
static prop_value_t *const_val;

static void canonicalize_float_value (prop_value_t *);
static bool ccp_fold_stmt (gimple_stmt_iterator *);
static tree fold_ctor_reference (tree type, tree ctor,
				 unsigned HOST_WIDE_INT offset,
				 unsigned HOST_WIDE_INT size);
/* Dump constant propagation value VAL to file OUTF prefixed by PREFIX.  */

static void
dump_lattice_value (FILE *outf, const char *prefix, prop_value_t val)
{
  switch (val.lattice_val)
    {
    case UNINITIALIZED:
      fprintf (outf, "%sUNINITIALIZED", prefix);
      break;
    case UNDEFINED:
      fprintf (outf, "%sUNDEFINED", prefix);
      break;
    case VARYING:
      fprintf (outf, "%sVARYING", prefix);
      break;
    case CONSTANT:
      fprintf (outf, "%sCONSTANT ", prefix);
      if (TREE_CODE (val.value) != INTEGER_CST
	  || double_int_zero_p (val.mask))
	print_generic_expr (outf, val.value, dump_flags);
      else
	{
	  double_int cval = double_int_and_not (tree_to_double_int (val.value),
						val.mask);
	  fprintf (outf, "%sCONSTANT " HOST_WIDE_INT_PRINT_DOUBLE_HEX,
		   prefix, cval.high, cval.low);
	  fprintf (outf, " (" HOST_WIDE_INT_PRINT_DOUBLE_HEX ")",
		   val.mask.high, val.mask.low);
	}
      break;
    default:
      gcc_unreachable ();
    }
}
/* Print lattice value VAL to stderr.  */

void debug_lattice_value (prop_value_t val);

DEBUG_FUNCTION void
debug_lattice_value (prop_value_t val)
{
  dump_lattice_value (stderr, "", val);
  fprintf (stderr, "\n");
}
/* Compute a default value for variable VAR and store it in the
   CONST_VAL array.  The following rules are used to get default
   values:

   1- Global and static variables that are declared constant are
      considered CONSTANT.

   2- Any other value is considered UNDEFINED.  This is useful when
      considering PHI nodes.  PHI arguments that are undefined do not
      change the constant value of the PHI node, which allows for more
      constants to be propagated.

   3- Variables defined by statements other than assignments and PHI
      nodes are considered VARYING.

   4- Initial values of variables that are not GIMPLE registers are
      considered VARYING.  */

static prop_value_t
get_default_value (tree var)
{
  tree sym = SSA_NAME_VAR (var);
  prop_value_t val = { UNINITIALIZED, NULL_TREE, { 0, 0 } };
  gimple stmt;

  stmt = SSA_NAME_DEF_STMT (var);

  if (gimple_nop_p (stmt))
    {
      /* Variables defined by an empty statement are those used
	 before being initialized.  If VAR is a local variable, we
	 can assume initially that it is UNDEFINED, otherwise we must
	 consider it VARYING.  */
      if (is_gimple_reg (sym)
	  && TREE_CODE (sym) == VAR_DECL)
	val.lattice_val = UNDEFINED;
      else
	{
	  val.lattice_val = VARYING;
	  val.mask = double_int_minus_one;
	}
    }
  else if (is_gimple_assign (stmt)
	   /* Value-returning GIMPLE_CALL statements assign to
	      a variable, and are treated similarly to GIMPLE_ASSIGN.  */
	   || (is_gimple_call (stmt)
	       && gimple_call_lhs (stmt) != NULL_TREE)
	   || gimple_code (stmt) == GIMPLE_PHI)
    {
      tree cst;
      if (gimple_assign_single_p (stmt)
	  && DECL_P (gimple_assign_rhs1 (stmt))
	  && (cst = get_symbol_constant_value (gimple_assign_rhs1 (stmt))))
	{
	  val.lattice_val = CONSTANT;
	  val.value = cst;
	}
      else
	/* Any other variable defined by an assignment or a PHI node
	   is considered UNDEFINED.  */
	val.lattice_val = UNDEFINED;
    }
  else
    {
      /* Otherwise, VAR will never take on a constant value.  */
      val.lattice_val = VARYING;
      val.mask = double_int_minus_one;
    }

  return val;
}
/* Get the constant value associated with variable VAR.  */

static inline prop_value_t *
get_value (tree var)
{
  prop_value_t *val;

  if (const_val == NULL)
    return NULL;

  val = &const_val[SSA_NAME_VERSION (var)];
  if (val->lattice_val == UNINITIALIZED)
    *val = get_default_value (var);

  canonicalize_float_value (val);

  return val;
}
/* Return the constant tree value associated with VAR.  */

static inline tree
get_constant_value (tree var)
{
  prop_value_t *val;

  if (TREE_CODE (var) != SSA_NAME)
    {
      if (is_gimple_min_invariant (var))
	return var;
      return NULL_TREE;
    }

  val = get_value (var);
  if (val
      && val->lattice_val == CONSTANT
      && (TREE_CODE (val->value) != INTEGER_CST
	  || double_int_zero_p (val->mask)))
    return val->value;

  return NULL_TREE;
}
/* Sets the value associated with VAR to VARYING.  */

static inline void
set_value_varying (tree var)
{
  prop_value_t *val = &const_val[SSA_NAME_VERSION (var)];

  val->lattice_val = VARYING;
  val->value = NULL_TREE;
  val->mask = double_int_minus_one;
}
/* For float types, modify the value of VAL to make ccp work correctly
   for non-standard values (-0, NaN):

   If HONOR_SIGNED_ZEROS is false, and VAL = -0, we canonicalize it to 0.
   If HONOR_NANS is false, and VAL is NaN, we canonicalize it to UNDEFINED.
   This is to fix the following problem (see PR 29921): Suppose we have

     x = 0.0 * y

   and we set value of y to NaN.  This causes value of x to be set to NaN.
   When we later determine that y is in fact VARYING, fold uses the fact
   that HONOR_NANS is false, and we try to change the value of x to 0,
   causing an ICE.  With HONOR_NANS being false, the real appearance of
   NaN would cause undefined behavior, though, so claiming that y (and x)
   are UNDEFINED initially is correct.  */

static void
canonicalize_float_value (prop_value_t *val)
{
  enum machine_mode mode;
  tree type;
  REAL_VALUE_TYPE d;

  if (val->lattice_val != CONSTANT
      || TREE_CODE (val->value) != REAL_CST)
    return;

  d = TREE_REAL_CST (val->value);
  type = TREE_TYPE (val->value);
  mode = TYPE_MODE (type);

  if (!HONOR_SIGNED_ZEROS (mode)
      && REAL_VALUE_MINUS_ZERO (d))
    {
      val->value = build_real (type, dconst0);
      return;
    }

  if (!HONOR_NANS (mode)
      && REAL_VALUE_ISNAN (d))
    {
      val->lattice_val = UNDEFINED;
      val->value = NULL;
      return;
    }
}
/* Return whether the lattice transition is valid.  */

static bool
valid_lattice_transition (prop_value_t old_val, prop_value_t new_val)
{
  /* Lattice transitions must always be monotonically increasing in
     value.  */
  if (old_val.lattice_val < new_val.lattice_val)
    return true;

  if (old_val.lattice_val != new_val.lattice_val)
    return false;

  if (!old_val.value && !new_val.value)
    return true;

  /* Now both lattice values are CONSTANT.  */

  /* Allow transitioning from &x to &x & ~3.  */
  if (TREE_CODE (old_val.value) != INTEGER_CST
      && TREE_CODE (new_val.value) == INTEGER_CST)
    return true;

  /* Bit-lattices have to agree in the still valid bits.  */
  if (TREE_CODE (old_val.value) == INTEGER_CST
      && TREE_CODE (new_val.value) == INTEGER_CST)
    return double_int_equal_p
	     (double_int_and_not (tree_to_double_int (old_val.value),
				  new_val.mask),
	      double_int_and_not (tree_to_double_int (new_val.value),
				  new_val.mask));

  /* Otherwise constant values have to agree.  */
  return operand_equal_p (old_val.value, new_val.value, 0);
}
/* Set the value for variable VAR to NEW_VAL.  Return true if the new
   value is different from VAR's previous value.  */

static bool
set_lattice_value (tree var, prop_value_t new_val)
{
  /* We can deal with old UNINITIALIZED values just fine here.  */
  prop_value_t *old_val = &const_val[SSA_NAME_VERSION (var)];

  canonicalize_float_value (&new_val);

  /* We have to be careful to not go up the bitwise lattice
     represented by the mask.
     ???  This doesn't seem to be the best place to enforce this.  */
  if (new_val.lattice_val == CONSTANT
      && old_val->lattice_val == CONSTANT
      && TREE_CODE (new_val.value) == INTEGER_CST
      && TREE_CODE (old_val->value) == INTEGER_CST)
    {
      double_int diff;
      diff = double_int_xor (tree_to_double_int (new_val.value),
			     tree_to_double_int (old_val->value));
      new_val.mask = double_int_ior (new_val.mask,
				     double_int_ior (old_val->mask, diff));
    }

  gcc_assert (valid_lattice_transition (*old_val, new_val));

  /* If *OLD_VAL and NEW_VAL are the same, return false to inform the
     caller that this was a non-transition.  */
  if (old_val->lattice_val != new_val.lattice_val
      || (new_val.lattice_val == CONSTANT
	  && TREE_CODE (new_val.value) == INTEGER_CST
	  && (TREE_CODE (old_val->value) != INTEGER_CST
	      || !double_int_equal_p (new_val.mask, old_val->mask))))
    {
      /* ???  We would like to delay creation of INTEGER_CSTs from
	 partially constants here.  */

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  dump_lattice_value (dump_file, "Lattice value changed to ", new_val);
	  fprintf (dump_file, ".  Adding SSA edges to worklist.\n");
	}

      *old_val = new_val;

      gcc_assert (new_val.lattice_val != UNINITIALIZED);
      return true;
    }

  return false;
}
static prop_value_t get_value_for_expr (tree, bool);
static prop_value_t bit_value_binop (enum tree_code, tree, tree, tree);
static void bit_value_binop_1 (enum tree_code, tree, double_int *, double_int *,
			       tree, double_int, double_int,
			       tree, double_int, double_int);
/* Return a double_int that can be used for bitwise simplifications
   from VAL.  */

static double_int
value_to_double_int (prop_value_t val)
{
  if (val.value
      && TREE_CODE (val.value) == INTEGER_CST)
    return tree_to_double_int (val.value);
  else
    return double_int_zero;
}
505 /* Return the value for the address expression EXPR based on alignment
509 get_value_from_alignment (tree expr
)
512 HOST_WIDE_INT bitsize
, bitpos
;
514 enum machine_mode mode
;
517 gcc_assert (TREE_CODE (expr
) == ADDR_EXPR
);
519 base
= get_inner_reference (TREE_OPERAND (expr
, 0),
520 &bitsize
, &bitpos
, &offset
,
521 &mode
, &align
, &align
, false);
522 if (TREE_CODE (base
) == MEM_REF
)
523 val
= bit_value_binop (PLUS_EXPR
, TREE_TYPE (expr
),
524 TREE_OPERAND (base
, 0), TREE_OPERAND (base
, 1));
526 && ((align
= get_object_alignment (base
, BIGGEST_ALIGNMENT
))
529 val
.lattice_val
= CONSTANT
;
530 /* We assume pointers are zero-extended. */
531 val
.mask
= double_int_and_not
532 (double_int_mask (TYPE_PRECISION (TREE_TYPE (expr
))),
533 uhwi_to_double_int (align
/ BITS_PER_UNIT
- 1));
534 val
.value
= build_int_cst (TREE_TYPE (expr
), 0);
538 val
.lattice_val
= VARYING
;
539 val
.mask
= double_int_minus_one
;
540 val
.value
= NULL_TREE
;
544 double_int value
, mask
;
545 bit_value_binop_1 (PLUS_EXPR
, TREE_TYPE (expr
), &value
, &mask
,
546 TREE_TYPE (expr
), value_to_double_int (val
), val
.mask
,
548 shwi_to_double_int (bitpos
/ BITS_PER_UNIT
),
550 val
.lattice_val
= double_int_minus_one_p (mask
) ? VARYING
: CONSTANT
;
552 if (val
.lattice_val
== CONSTANT
)
553 val
.value
= double_int_to_tree (TREE_TYPE (expr
), value
);
555 val
.value
= NULL_TREE
;
557 /* ??? We should handle i * 4 and more complex expressions from
558 the offset, possibly by just expanding get_value_for_expr. */
559 if (offset
!= NULL_TREE
)
561 double_int value
, mask
;
562 prop_value_t oval
= get_value_for_expr (offset
, true);
563 bit_value_binop_1 (PLUS_EXPR
, TREE_TYPE (expr
), &value
, &mask
,
564 TREE_TYPE (expr
), value_to_double_int (val
), val
.mask
,
565 TREE_TYPE (expr
), value_to_double_int (oval
),
568 if (double_int_minus_one_p (mask
))
570 val
.lattice_val
= VARYING
;
571 val
.value
= NULL_TREE
;
575 val
.lattice_val
= CONSTANT
;
576 val
.value
= double_int_to_tree (TREE_TYPE (expr
), value
);
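
  /* Illustrative example (not from the original sources): for
     EXPR == &a[3] where 'a' is a 16-byte aligned array of 4-byte ints,
     get_object_alignment gives align == 128 bits, get_inner_reference
     gives bitpos == 96, and the PLUS_EXPR performed via
     bit_value_binop_1 yields value == 12 and mask == ~15, i.e. the
     address is known to be 12 modulo 16.  */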
583 /* Return the value for the tree operand EXPR. If FOR_BITS_P is true
584 return constant bits extracted from alignment information for
585 invariant addresses. */
588 get_value_for_expr (tree expr
, bool for_bits_p
)
592 if (TREE_CODE (expr
) == SSA_NAME
)
594 val
= *get_value (expr
);
596 && val
.lattice_val
== CONSTANT
597 && TREE_CODE (val
.value
) == ADDR_EXPR
)
598 val
= get_value_from_alignment (val
.value
);
600 else if (is_gimple_min_invariant (expr
)
601 && (!for_bits_p
|| TREE_CODE (expr
) != ADDR_EXPR
))
603 val
.lattice_val
= CONSTANT
;
605 val
.mask
= double_int_zero
;
606 canonicalize_float_value (&val
);
608 else if (TREE_CODE (expr
) == ADDR_EXPR
)
609 val
= get_value_from_alignment (expr
);
612 val
.lattice_val
= VARYING
;
613 val
.mask
= double_int_minus_one
;
614 val
.value
= NULL_TREE
;
619 /* Return the likely CCP lattice value for STMT.
621 If STMT has no operands, then return CONSTANT.
623 Else if undefinedness of operands of STMT cause its value to be
624 undefined, then return UNDEFINED.
626 Else if any operands of STMT are constants, then return CONSTANT.
628 Else return VARYING. */
631 likely_value (gimple stmt
)
633 bool has_constant_operand
, has_undefined_operand
, all_undefined_operands
;
638 enum gimple_code code
= gimple_code (stmt
);
640 /* This function appears to be called only for assignments, calls,
641 conditionals, and switches, due to the logic in visit_stmt. */
642 gcc_assert (code
== GIMPLE_ASSIGN
643 || code
== GIMPLE_CALL
644 || code
== GIMPLE_COND
645 || code
== GIMPLE_SWITCH
);
/* If the statement has volatile operands, it won't fold to a
   constant value.  */
649 if (gimple_has_volatile_ops (stmt
))
652 /* Arrive here for more complex cases. */
653 has_constant_operand
= false;
654 has_undefined_operand
= false;
655 all_undefined_operands
= true;
656 FOR_EACH_SSA_TREE_OPERAND (use
, stmt
, iter
, SSA_OP_USE
)
658 prop_value_t
*val
= get_value (use
);
660 if (val
->lattice_val
== UNDEFINED
)
661 has_undefined_operand
= true;
663 all_undefined_operands
= false;
665 if (val
->lattice_val
== CONSTANT
)
666 has_constant_operand
= true;
/* There may be constants in regular rhs operands.  For calls we
   have to ignore lhs, fndecl and static chain, otherwise only
   the chain can be constant.  */
672 for (i
= (is_gimple_call (stmt
) ? 2 : 0) + gimple_has_lhs (stmt
);
673 i
< gimple_num_ops (stmt
); ++i
)
675 tree op
= gimple_op (stmt
, i
);
676 if (!op
|| TREE_CODE (op
) == SSA_NAME
)
678 if (is_gimple_min_invariant (op
))
679 has_constant_operand
= true;
682 if (has_constant_operand
)
683 all_undefined_operands
= false;
/* If the operation combines operands like COMPLEX_EXPR make sure to
   not mark the result UNDEFINED if only one part of the result is
   undefined.  */
688 if (has_undefined_operand
&& all_undefined_operands
)
690 else if (code
== GIMPLE_ASSIGN
&& has_undefined_operand
)
692 switch (gimple_assign_rhs_code (stmt
))
694 /* Unary operators are handled with all_undefined_operands. */
697 case POINTER_PLUS_EXPR
:
698 /* Not MIN_EXPR, MAX_EXPR. One VARYING operand may be selected.
699 Not bitwise operators, one VARYING operand may specify the
700 result completely. Not logical operators for the same reason.
701 Not COMPLEX_EXPR as one VARYING operand makes the result partly
702 not UNDEFINED. Not *DIV_EXPR, comparisons and shifts because
703 the undefined operand may be promoted. */
/* If there was an UNDEFINED operand but the result may not be UNDEFINED,
   fall back to VARYING even if there were CONSTANT operands.  */
712 if (has_undefined_operand
)
/* We do not consider virtual operands here -- a load from read-only
   memory may have only VARYING virtual operands, but still be
   constant.  */
718 if (has_constant_operand
719 || gimple_references_memory_p (stmt
))
725 /* Returns true if STMT cannot be constant. */
728 surely_varying_stmt_p (gimple stmt
)
/* If the statement has operands that we cannot handle, it cannot be
   constant.  */
732 if (gimple_has_volatile_ops (stmt
))
735 /* If it is a call and does not return a value or is not a
736 builtin and not an indirect call, it is varying. */
737 if (is_gimple_call (stmt
))
740 if (!gimple_call_lhs (stmt
)
741 || ((fndecl
= gimple_call_fndecl (stmt
)) != NULL_TREE
742 && !DECL_BUILT_IN (fndecl
)))
746 /* Any other store operation is not interesting. */
747 else if (gimple_vdef (stmt
))
750 /* Anything other than assignments and conditional jumps are not
751 interesting for CCP. */
752 if (gimple_code (stmt
) != GIMPLE_ASSIGN
753 && gimple_code (stmt
) != GIMPLE_COND
754 && gimple_code (stmt
) != GIMPLE_SWITCH
755 && gimple_code (stmt
) != GIMPLE_CALL
)
761 /* Initialize local data structures for CCP. */
764 ccp_initialize (void)
768 const_val
= XCNEWVEC (prop_value_t
, num_ssa_names
);
770 /* Initialize simulation flags for PHI nodes and statements. */
773 gimple_stmt_iterator i
;
775 for (i
= gsi_start_bb (bb
); !gsi_end_p (i
); gsi_next (&i
))
777 gimple stmt
= gsi_stmt (i
);
/* If the statement is a control insn, we still want to simulate
   it at least once; failing to do so means its outgoing edges
   would never get added.  */
783 if (stmt_ends_bb_p (stmt
))
786 is_varying
= surely_varying_stmt_p (stmt
);
793 /* If the statement will not produce a constant, mark
794 all its outputs VARYING. */
795 FOR_EACH_SSA_TREE_OPERAND (def
, stmt
, iter
, SSA_OP_ALL_DEFS
)
796 set_value_varying (def
);
798 prop_set_simulate_again (stmt
, !is_varying
);
802 /* Now process PHI nodes. We never clear the simulate_again flag on
803 phi nodes, since we do not know which edges are executable yet,
804 except for phi nodes for virtual operands when we do not do store ccp. */
807 gimple_stmt_iterator i
;
809 for (i
= gsi_start_phis (bb
); !gsi_end_p (i
); gsi_next (&i
))
811 gimple phi
= gsi_stmt (i
);
813 if (!is_gimple_reg (gimple_phi_result (phi
)))
814 prop_set_simulate_again (phi
, false);
816 prop_set_simulate_again (phi
, true);
/* Debug count support.  Reset the values of ssa names to VARYING
   when the total number of ssa names analyzed exceeds the debug
   count specified.  */
829 for (i
= 0; i
< num_ssa_names
; i
++)
833 const_val
[i
].lattice_val
= VARYING
;
834 const_val
[i
].mask
= double_int_minus_one
;
835 const_val
[i
].value
= NULL_TREE
;
841 /* Do final substitution of propagated values, cleanup the flowgraph and
842 free allocated storage.
844 Return TRUE when something was optimized. */
849 bool something_changed
;
854 /* Derive alignment and misalignment information from partially
855 constant pointers in the lattice. */
856 for (i
= 1; i
< num_ssa_names
; ++i
)
858 tree name
= ssa_name (i
);
860 struct ptr_info_def
*pi
;
861 unsigned int tem
, align
;
864 || !POINTER_TYPE_P (TREE_TYPE (name
)))
867 val
= get_value (name
);
868 if (val
->lattice_val
!= CONSTANT
869 || TREE_CODE (val
->value
) != INTEGER_CST
)
872 /* Trailing constant bits specify the alignment, trailing value
873 bits the misalignment. */
875 align
= (tem
& -tem
);
879 pi
= get_ptr_info (name
);
881 pi
->misalign
= TREE_INT_CST_LOW (val
->value
) & (align
- 1);
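
      /* Illustrative example (not from the original sources): if the
	 lattice value for NAME is value == 0xc and mask == ~15, then
	 tem == ~15, align == (tem & -tem) == 16 and
	 misalign == 0xc & (16 - 1) == 12, i.e. the pointer is known
	 to sit 12 bytes past a 16-byte boundary.  */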
884 /* Perform substitutions based on the known constant values. */
885 something_changed
= substitute_and_fold (get_constant_value
,
886 ccp_fold_stmt
, true);
return something_changed;
894 /* Compute the meet operator between *VAL1 and *VAL2. Store the result
897 any M UNDEFINED = any
898 any M VARYING = VARYING
899 Ci M Cj = Ci if (i == j)
900 Ci M Cj = VARYING if (i != j)
904 ccp_lattice_meet (prop_value_t
*val1
, prop_value_t
*val2
)
906 if (val1
->lattice_val
== UNDEFINED
)
908 /* UNDEFINED M any = any */
911 else if (val2
->lattice_val
== UNDEFINED
)
913 /* any M UNDEFINED = any
914 Nothing to do. VAL1 already contains the value we want. */
917 else if (val1
->lattice_val
== VARYING
918 || val2
->lattice_val
== VARYING
)
920 /* any M VARYING = VARYING. */
921 val1
->lattice_val
= VARYING
;
922 val1
->mask
= double_int_minus_one
;
923 val1
->value
= NULL_TREE
;
925 else if (val1
->lattice_val
== CONSTANT
926 && val2
->lattice_val
== CONSTANT
927 && TREE_CODE (val1
->value
) == INTEGER_CST
928 && TREE_CODE (val2
->value
) == INTEGER_CST
)
930 /* Ci M Cj = Ci if (i == j)
931 Ci M Cj = VARYING if (i != j)
933 For INTEGER_CSTs mask unequal bits. If no equal bits remain,
936 = double_int_ior (double_int_ior (val1
->mask
,
938 double_int_xor (tree_to_double_int (val1
->value
),
939 tree_to_double_int (val2
->value
)));
940 if (double_int_minus_one_p (val1
->mask
))
942 val1
->lattice_val
= VARYING
;
943 val1
->value
= NULL_TREE
;
946 else if (val1
->lattice_val
== CONSTANT
947 && val2
->lattice_val
== CONSTANT
948 && simple_cst_equal (val1
->value
, val2
->value
) == 1)
950 /* Ci M Cj = Ci if (i == j)
951 Ci M Cj = VARYING if (i != j)
953 VAL1 already contains the value we want for equivalent values. */
955 else if (val1
->lattice_val
== CONSTANT
956 && val2
->lattice_val
== CONSTANT
957 && (TREE_CODE (val1
->value
) == ADDR_EXPR
958 || TREE_CODE (val2
->value
) == ADDR_EXPR
))
/* When not equal addresses are involved try meeting for
   alignment.  */
962 prop_value_t tem
= *val2
;
963 if (TREE_CODE (val1
->value
) == ADDR_EXPR
)
964 *val1
= get_value_for_expr (val1
->value
, true);
965 if (TREE_CODE (val2
->value
) == ADDR_EXPR
)
966 tem
= get_value_for_expr (val2
->value
, true);
967 ccp_lattice_meet (val1
, &tem
);
971 /* Any other combination is VARYING. */
972 val1
->lattice_val
= VARYING
;
973 val1
->mask
= double_int_minus_one
;
974 val1
->value
= NULL_TREE
;
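
/* An illustrative example of the INTEGER_CST meet above (not from the
   original sources): meeting CONSTANT 12 (mask 0) with CONSTANT 8
   (mask 0) gives

     mask  = 0 | 0 | (12 ^ 8) = 4
     value = 12  (equivalently 8; the two agree outside the mask)

   so the result stays CONSTANT with bit 2 unknown instead of dropping
   all the way to VARYING.  */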
/* Loop through the PHI_NODE's parameters for BLOCK and compare their
   lattice values to determine PHI_NODE's lattice value.  The value of a
   PHI node is determined by calling ccp_lattice_meet with all the arguments
   of the PHI node that are incoming via executable edges.  */
984 static enum ssa_prop_result
985 ccp_visit_phi_node (gimple phi
)
988 prop_value_t
*old_val
, new_val
;
990 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
992 fprintf (dump_file
, "\nVisiting PHI node: ");
993 print_gimple_stmt (dump_file
, phi
, 0, dump_flags
);
996 old_val
= get_value (gimple_phi_result (phi
));
997 switch (old_val
->lattice_val
)
1000 return SSA_PROP_VARYING
;
1007 new_val
.lattice_val
= UNDEFINED
;
1008 new_val
.value
= NULL_TREE
;
1015 for (i
= 0; i
< gimple_phi_num_args (phi
); i
++)
1017 /* Compute the meet operator over all the PHI arguments flowing
1018 through executable edges. */
1019 edge e
= gimple_phi_arg_edge (phi
, i
);
1021 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1024 "\n Argument #%d (%d -> %d %sexecutable)\n",
1025 i
, e
->src
->index
, e
->dest
->index
,
1026 (e
->flags
& EDGE_EXECUTABLE
) ? "" : "not ");
/* If the incoming edge is executable, compute the meet operator for
   the existing value of the PHI node and the current PHI argument.  */
1031 if (e
->flags
& EDGE_EXECUTABLE
)
1033 tree arg
= gimple_phi_arg (phi
, i
)->def
;
1034 prop_value_t arg_val
= get_value_for_expr (arg
, false);
1036 ccp_lattice_meet (&new_val
, &arg_val
);
1038 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1040 fprintf (dump_file
, "\t");
1041 print_generic_expr (dump_file
, arg
, dump_flags
);
1042 dump_lattice_value (dump_file
, "\tValue: ", arg_val
);
1043 fprintf (dump_file
, "\n");
1046 if (new_val
.lattice_val
== VARYING
)
1051 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1053 dump_lattice_value (dump_file
, "\n PHI node value: ", new_val
);
1054 fprintf (dump_file
, "\n\n");
1057 /* Make the transition to the new value. */
1058 if (set_lattice_value (gimple_phi_result (phi
), new_val
))
1060 if (new_val
.lattice_val
== VARYING
)
1061 return SSA_PROP_VARYING
;
1063 return SSA_PROP_INTERESTING
;
1066 return SSA_PROP_NOT_INTERESTING
;
1069 /* Return the constant value for OP or OP otherwise. */
1072 valueize_op (tree op
)
1074 if (TREE_CODE (op
) == SSA_NAME
)
1076 tree tem
= get_constant_value (op
);
/* CCP specific front-end to the non-destructive constant folding
   routines.

   Attempt to simplify the RHS of STMT knowing that one or more
   operands are constants.

   If simplification is possible, return the simplified RHS,
   otherwise return the original RHS or NULL_TREE.  */
1093 ccp_fold (gimple stmt
)
1095 location_t loc
= gimple_location (stmt
);
1096 switch (gimple_code (stmt
))
1100 enum tree_code subcode
= gimple_assign_rhs_code (stmt
);
1102 switch (get_gimple_rhs_class (subcode
))
1104 case GIMPLE_SINGLE_RHS
:
1106 tree rhs
= gimple_assign_rhs1 (stmt
);
1107 enum tree_code_class kind
= TREE_CODE_CLASS (subcode
);
1109 if (TREE_CODE (rhs
) == SSA_NAME
)
/* If the RHS is an SSA_NAME, return its known constant value,
   if any.  */
1113 return get_constant_value (rhs
);
1115 /* Handle propagating invariant addresses into address operations.
1116 The folding we do here matches that in tree-ssa-forwprop.c. */
1117 else if (TREE_CODE (rhs
) == ADDR_EXPR
)
1120 base
= &TREE_OPERAND (rhs
, 0);
1121 while (handled_component_p (*base
))
1122 base
= &TREE_OPERAND (*base
, 0);
1123 if (TREE_CODE (*base
) == MEM_REF
1124 && TREE_CODE (TREE_OPERAND (*base
, 0)) == SSA_NAME
)
1126 tree val
= get_constant_value (TREE_OPERAND (*base
, 0));
1128 && TREE_CODE (val
) == ADDR_EXPR
)
1130 tree ret
, save
= *base
;
1132 new_base
= fold_build2 (MEM_REF
, TREE_TYPE (*base
),
1134 TREE_OPERAND (*base
, 1));
1135 /* We need to return a new tree, not modify the IL
1136 or share parts of it. So play some tricks to
1137 avoid manually building it. */
1139 ret
= unshare_expr (rhs
);
1140 recompute_tree_invariant_for_addr_expr (ret
);
1146 else if (TREE_CODE (rhs
) == CONSTRUCTOR
1147 && TREE_CODE (TREE_TYPE (rhs
)) == VECTOR_TYPE
1148 && (CONSTRUCTOR_NELTS (rhs
)
1149 == TYPE_VECTOR_SUBPARTS (TREE_TYPE (rhs
))))
1155 FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (rhs
), i
, val
)
1157 val
= valueize_op (val
);
1158 if (TREE_CODE (val
) == INTEGER_CST
1159 || TREE_CODE (val
) == REAL_CST
1160 || TREE_CODE (val
) == FIXED_CST
)
1161 list
= tree_cons (NULL_TREE
, val
, list
);
1166 return build_vector (TREE_TYPE (rhs
), nreverse (list
));
1169 if (kind
== tcc_reference
)
1171 if ((TREE_CODE (rhs
) == VIEW_CONVERT_EXPR
1172 || TREE_CODE (rhs
) == REALPART_EXPR
1173 || TREE_CODE (rhs
) == IMAGPART_EXPR
)
1174 && TREE_CODE (TREE_OPERAND (rhs
, 0)) == SSA_NAME
)
1176 tree val
= get_constant_value (TREE_OPERAND (rhs
, 0));
1178 return fold_unary_loc (EXPR_LOCATION (rhs
),
1180 TREE_TYPE (rhs
), val
);
1182 else if (TREE_CODE (rhs
) == MEM_REF
1183 && TREE_CODE (TREE_OPERAND (rhs
, 0)) == SSA_NAME
)
1185 tree val
= get_constant_value (TREE_OPERAND (rhs
, 0));
1187 && TREE_CODE (val
) == ADDR_EXPR
)
1189 tree tem
= fold_build2 (MEM_REF
, TREE_TYPE (rhs
),
1191 TREE_OPERAND (rhs
, 1));
1196 return fold_const_aggregate_ref (rhs
);
1198 else if (kind
== tcc_declaration
)
1199 return get_symbol_constant_value (rhs
);
1203 case GIMPLE_UNARY_RHS
:
1205 /* Handle unary operators that can appear in GIMPLE form.
1206 Note that we know the single operand must be a constant,
1207 so this should almost always return a simplified RHS. */
1208 tree lhs
= gimple_assign_lhs (stmt
);
1209 tree op0
= valueize_op (gimple_assign_rhs1 (stmt
));
/* Conversions are useless for CCP purposes if they are
   value-preserving.  Thus the restrictions that
   useless_type_conversion_p places for pointer type conversions
   do not apply here.  Substitution later will only substitute to
   allowed places.  */
1216 if (CONVERT_EXPR_CODE_P (subcode
)
1217 && POINTER_TYPE_P (TREE_TYPE (lhs
))
1218 && POINTER_TYPE_P (TREE_TYPE (op0
)))
1221 /* Try to re-construct array references on-the-fly. */
1222 if (!useless_type_conversion_p (TREE_TYPE (lhs
),
1224 && ((tem
= maybe_fold_offset_to_address
1226 op0
, integer_zero_node
, TREE_TYPE (lhs
)))
1233 fold_unary_ignore_overflow_loc (loc
, subcode
,
1234 gimple_expr_type (stmt
), op0
);
1237 case GIMPLE_BINARY_RHS
:
1239 /* Handle binary operators that can appear in GIMPLE form. */
1240 tree op0
= valueize_op (gimple_assign_rhs1 (stmt
));
1241 tree op1
= valueize_op (gimple_assign_rhs2 (stmt
));
1243 /* Translate &x + CST into an invariant form suitable for
1244 further propagation. */
1245 if (gimple_assign_rhs_code (stmt
) == POINTER_PLUS_EXPR
1246 && TREE_CODE (op0
) == ADDR_EXPR
1247 && TREE_CODE (op1
) == INTEGER_CST
)
1249 tree off
= fold_convert (ptr_type_node
, op1
);
1250 return build_fold_addr_expr
1251 (fold_build2 (MEM_REF
,
1252 TREE_TYPE (TREE_TYPE (op0
)),
1253 unshare_expr (op0
), off
));
1256 return fold_binary_loc (loc
, subcode
,
1257 gimple_expr_type (stmt
), op0
, op1
);
1260 case GIMPLE_TERNARY_RHS
:
1262 /* Handle ternary operators that can appear in GIMPLE form. */
1263 tree op0
= valueize_op (gimple_assign_rhs1 (stmt
));
1264 tree op1
= valueize_op (gimple_assign_rhs2 (stmt
));
1265 tree op2
= valueize_op (gimple_assign_rhs3 (stmt
));
1267 return fold_ternary_loc (loc
, subcode
,
1268 gimple_expr_type (stmt
), op0
, op1
, op2
);
1279 tree fn
= valueize_op (gimple_call_fn (stmt
));
1280 if (TREE_CODE (fn
) == ADDR_EXPR
1281 && TREE_CODE (TREE_OPERAND (fn
, 0)) == FUNCTION_DECL
1282 && DECL_BUILT_IN (TREE_OPERAND (fn
, 0)))
1284 tree
*args
= XALLOCAVEC (tree
, gimple_call_num_args (stmt
));
1287 for (i
= 0; i
< gimple_call_num_args (stmt
); ++i
)
1288 args
[i
] = valueize_op (gimple_call_arg (stmt
, i
));
1289 call
= build_call_array_loc (loc
,
1290 gimple_call_return_type (stmt
),
1291 fn
, gimple_call_num_args (stmt
), args
);
1292 retval
= fold_call_expr (EXPR_LOCATION (call
), call
, false);
1294 /* fold_call_expr wraps the result inside a NOP_EXPR. */
1295 STRIP_NOPS (retval
);
1303 /* Handle comparison operators that can appear in GIMPLE form. */
1304 tree op0
= valueize_op (gimple_cond_lhs (stmt
));
1305 tree op1
= valueize_op (gimple_cond_rhs (stmt
));
1306 enum tree_code code
= gimple_cond_code (stmt
);
1307 return fold_binary_loc (loc
, code
, boolean_type_node
, op0
, op1
);
1312 /* Return the constant switch index. */
1313 return valueize_op (gimple_switch_index (stmt
));
/* See if we can find a constructor defining the value of BASE.
   When we know the constructor with a constant offset (such as
   BASE being array[40] and we do know the constructor of the array),
   then BIT_OFFSET is adjusted accordingly.

   As a special case, return error_mark_node when the constructor
   is not explicitly available, but it is known to be zero,
   such as 'static const int a;'.  */
1330 get_base_constructor (tree base
, HOST_WIDE_INT
*bit_offset
)
1332 HOST_WIDE_INT bit_offset2
, size
, max_size
;
1333 if (TREE_CODE (base
) == MEM_REF
)
1335 if (!integer_zerop (TREE_OPERAND (base
, 1)))
1337 if (!host_integerp (TREE_OPERAND (base
, 1), 0))
1339 *bit_offset
+= (mem_ref_offset (base
).low
1343 base
= get_constant_value (TREE_OPERAND (base
, 0));
1344 if (!base
|| TREE_CODE (base
) != ADDR_EXPR
)
1346 base
= TREE_OPERAND (base
, 0);
1349 /* Get a CONSTRUCTOR. If BASE is a VAR_DECL, get its
1350 DECL_INITIAL. If BASE is a nested reference into another
1351 ARRAY_REF or COMPONENT_REF, make a recursive call to resolve
1352 the inner reference. */
1353 switch (TREE_CODE (base
))
1356 if (!const_value_known_p (base
))
1361 if (!DECL_INITIAL (base
)
1362 && (TREE_STATIC (base
) || DECL_EXTERNAL (base
)))
1363 return error_mark_node
;
1364 return DECL_INITIAL (base
);
1370 base
= get_ref_base_and_extent (base
, &bit_offset2
, &size
, &max_size
);
1371 if (max_size
== -1 || size
!= max_size
)
1373 *bit_offset
+= bit_offset2
;
1374 return get_base_constructor (base
, bit_offset
);
/* CTOR is a STRING_CST.  Fold a reference of type TYPE and size SIZE
   to the memory at bit OFFSET.

   We only do the simple job of folding byte accesses.  */
1393 fold_string_cst_ctor_reference (tree type
, tree ctor
, unsigned HOST_WIDE_INT offset
,
1394 unsigned HOST_WIDE_INT size
)
1396 if (INTEGRAL_TYPE_P (type
)
1397 && (TYPE_MODE (type
)
1398 == TYPE_MODE (TREE_TYPE (TREE_TYPE (ctor
))))
1399 && (GET_MODE_CLASS (TYPE_MODE (TREE_TYPE (TREE_TYPE (ctor
))))
1401 && GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (TREE_TYPE (ctor
)))) == 1
1402 && size
== BITS_PER_UNIT
1403 && !(offset
% BITS_PER_UNIT
))
1405 offset
/= BITS_PER_UNIT
;
1406 if (offset
< (unsigned HOST_WIDE_INT
) TREE_STRING_LENGTH (ctor
))
	return build_int_cst_type (type, (TREE_STRING_POINTER (ctor)
					  [offset]));
      /* Folding
	   const char a[20]="hello";
	   return a[10];

	 might lead to an offset greater than the string length.  In this
	 case we know the value is either initialized to 0 or out of
	 bounds.  Return 0 in both cases.  */
      return build_zero_cst (type);
1421 /* CTOR is CONSTRUCTOR of an array type. Fold reference of type TYPE and size
1422 SIZE to the memory at bit OFFSET. */
1425 fold_array_ctor_reference (tree type
, tree ctor
,
1426 unsigned HOST_WIDE_INT offset
,
1427 unsigned HOST_WIDE_INT size
)
1429 unsigned HOST_WIDE_INT cnt
;
1431 double_int low_bound
, elt_size
;
1432 double_int index
, max_index
;
1433 double_int access_index
;
1434 tree domain_type
= TYPE_DOMAIN (TREE_TYPE (ctor
));
1435 HOST_WIDE_INT inner_offset
;
1437 /* Compute low bound and elt size. */
1438 if (domain_type
&& TYPE_MIN_VALUE (domain_type
))
/* Static constructors for variably sized objects make no sense.  */
1441 gcc_assert (TREE_CODE (TYPE_MIN_VALUE (domain_type
)) == INTEGER_CST
);
1442 low_bound
= tree_to_double_int (TYPE_MIN_VALUE (domain_type
));
1445 low_bound
= double_int_zero
;
/* Static constructors for variably sized objects make no sense.  */
1447 gcc_assert (TREE_CODE(TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (ctor
))))
1450 tree_to_double_int (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (ctor
))));
1453 /* We can handle only constantly sized accesses that are known to not
1454 be larger than size of array element. */
1455 if (!TYPE_SIZE_UNIT (type
)
1456 || TREE_CODE (TYPE_SIZE_UNIT (type
)) != INTEGER_CST
1457 || double_int_cmp (elt_size
,
1458 tree_to_double_int (TYPE_SIZE_UNIT (type
)), 0) < 0)
1461 /* Compute the array index we look for. */
1462 access_index
= double_int_udiv (uhwi_to_double_int (offset
/ BITS_PER_UNIT
),
1463 elt_size
, TRUNC_DIV_EXPR
);
1464 access_index
= double_int_add (access_index
, low_bound
);
1466 /* And offset within the access. */
1467 inner_offset
= offset
% (double_int_to_uhwi (elt_size
) * BITS_PER_UNIT
);
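  /* Illustrative example (not from the original sources): for a
     constructor of 'int a[10]' (low_bound 0, elt_size 4) and an 8-bit
     read at OFFSET == 136 bits, access_index == (136 / 8) / 4 == 4 and
     inner_offset == 136 % 32 == 8, i.e. we look at element a[4],
     8 bits into it.  */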
1469 /* See if the array field is large enough to span whole access. We do not
1470 care to fold accesses spanning multiple array indexes. */
1471 if (inner_offset
+ size
> double_int_to_uhwi (elt_size
) * BITS_PER_UNIT
)
1474 index
= double_int_sub (low_bound
, double_int_one
);
1475 FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (ctor
), cnt
, cfield
, cval
)
/* An array constructor might explicitly set the index, specify a range,
   or leave the index NULL, meaning that it is the next index after the
   previous one.  */
1482 if (TREE_CODE (cfield
) == INTEGER_CST
)
1483 max_index
= index
= tree_to_double_int (cfield
);
1486 gcc_assert (TREE_CODE (cfield
) == RANGE_EXPR
);
1487 index
= tree_to_double_int (TREE_OPERAND (cfield
, 0));
1488 max_index
= tree_to_double_int (TREE_OPERAND (cfield
, 1));
1492 max_index
= index
= double_int_add (index
, double_int_one
);
/* Do we have a match?  */
1495 if (double_int_cmp (access_index
, index
, 1) >= 0
1496 && double_int_cmp (access_index
, max_index
, 1) <= 0)
1497 return fold_ctor_reference (type
, cval
, inner_offset
, size
);
/* When memory is not explicitly mentioned in the constructor,
   it is 0 (or out of range).  */
1501 return build_zero_cst (type
);
1504 /* CTOR is CONSTRUCTOR of an aggregate or vector.
1505 Fold reference of type TYPE and size SIZE to the memory at bit OFFSET. */
1508 fold_nonarray_ctor_reference (tree type
, tree ctor
,
1509 unsigned HOST_WIDE_INT offset
,
1510 unsigned HOST_WIDE_INT size
)
1512 unsigned HOST_WIDE_INT cnt
;
1515 FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (ctor
), cnt
, cfield
,
1518 tree byte_offset
= DECL_FIELD_OFFSET (cfield
);
1519 tree field_offset
= DECL_FIELD_BIT_OFFSET (cfield
);
1520 tree field_size
= DECL_SIZE (cfield
);
1521 double_int bitoffset
;
1522 double_int byte_offset_cst
= tree_to_double_int (byte_offset
);
1523 double_int bits_per_unit_cst
= uhwi_to_double_int (BITS_PER_UNIT
);
1524 double_int bitoffset_end
;
/* Variable sized objects in static constructors make no sense.  */
1527 gcc_assert (TREE_CODE (field_offset
) == INTEGER_CST
1528 && TREE_CODE (byte_offset
) == INTEGER_CST
1529 && TREE_CODE (field_size
) == INTEGER_CST
);
1531 /* Compute bit offset of the field. */
1532 bitoffset
= double_int_add (tree_to_double_int (field_offset
),
1533 double_int_mul (byte_offset_cst
,
1534 bits_per_unit_cst
));
1535 /* Compute bit offset where the field ends. */
1536 bitoffset_end
= double_int_add (bitoffset
,
1537 tree_to_double_int (field_size
));
1539 /* Is OFFSET in the range (BITOFFSET, BITOFFSET_END)? */
1540 if (double_int_cmp (uhwi_to_double_int (offset
), bitoffset
, 0) >= 0
1541 && double_int_cmp (uhwi_to_double_int (offset
),
1542 bitoffset_end
, 0) < 0)
1544 double_int access_end
= double_int_add (uhwi_to_double_int (offset
),
1545 uhwi_to_double_int (size
));
1546 double_int inner_offset
= double_int_sub (uhwi_to_double_int (offset
),
1548 /* We do have overlap. Now see if field is large enough to
1549 cover the access. Give up for accesses spanning multiple
1551 if (double_int_cmp (access_end
, bitoffset_end
, 0) > 0)
1553 return fold_ctor_reference (type
, cval
,
1554 double_int_to_uhwi (inner_offset
), size
);
/* When memory is not explicitly mentioned in the constructor, it is 0.  */
1558 return build_zero_cst (type
);
1561 /* CTOR is value initializing memory, fold reference of type TYPE and size SIZE
1562 to the memory at bit OFFSET. */
1565 fold_ctor_reference (tree type
, tree ctor
, unsigned HOST_WIDE_INT offset
,
1566 unsigned HOST_WIDE_INT size
)
1570 /* We found the field with exact match. */
1571 if (useless_type_conversion_p (type
, TREE_TYPE (ctor
))
1573 return canonicalize_constructor_val (ctor
);
1575 /* We are at the end of walk, see if we can view convert the
1577 if (!AGGREGATE_TYPE_P (TREE_TYPE (ctor
)) && !offset
1578 /* VIEW_CONVERT_EXPR is defined only for matching sizes. */
1579 && operand_equal_p (TYPE_SIZE (type
),
1580 TYPE_SIZE (TREE_TYPE (ctor
)), 0))
1582 ret
= canonicalize_constructor_val (ctor
);
1583 ret
= fold_unary (VIEW_CONVERT_EXPR
, type
, ret
);
1588 if (TREE_CODE (ctor
) == STRING_CST
)
1589 return fold_string_cst_ctor_reference (type
, ctor
, offset
, size
);
1590 if (TREE_CODE (ctor
) == CONSTRUCTOR
)
1593 if (TREE_CODE (TREE_TYPE (ctor
)) == ARRAY_TYPE
)
1594 return fold_array_ctor_reference (type
, ctor
, offset
, size
);
1596 return fold_nonarray_ctor_reference (type
, ctor
, offset
, size
);
1602 /* Return the tree representing the element referenced by T if T is an
1603 ARRAY_REF or COMPONENT_REF into constant aggregates. Return
1604 NULL_TREE otherwise. */
1607 fold_const_aggregate_ref (tree t
)
1609 tree ctor
, idx
, base
;
1610 HOST_WIDE_INT offset
, size
, max_size
;
1613 if (TREE_CODE_CLASS (TREE_CODE (t
)) == tcc_declaration
)
1614 return get_symbol_constant_value (t
);
1616 tem
= fold_read_from_constant_string (t
);
1620 switch (TREE_CODE (t
))
1623 case ARRAY_RANGE_REF
:
1624 /* Constant indexes are handled well by get_base_constructor.
1625 Only special case variable offsets.
1626 FIXME: This code can't handle nested references with variable indexes
1627 (they will be handled only by iteration of ccp). Perhaps we can bring
1628 get_ref_base_and_extent here and make it use get_constant_value. */
1629 if (TREE_CODE (TREE_OPERAND (t
, 1)) == SSA_NAME
1630 && (idx
= get_constant_value (TREE_OPERAND (t
, 1)))
1631 && host_integerp (idx
, 0))
1633 tree low_bound
, unit_size
;
1635 /* If the resulting bit-offset is constant, track it. */
1636 if ((low_bound
= array_ref_low_bound (t
),
1637 host_integerp (low_bound
, 0))
1638 && (unit_size
= array_ref_element_size (t
),
1639 host_integerp (unit_size
, 1)))
1641 offset
= TREE_INT_CST_LOW (idx
);
1642 offset
-= TREE_INT_CST_LOW (low_bound
);
1643 offset
*= TREE_INT_CST_LOW (unit_size
);
1644 offset
*= BITS_PER_UNIT
;
1646 base
= TREE_OPERAND (t
, 0);
1647 ctor
= get_base_constructor (base
, &offset
);
1648 /* Empty constructor. Always fold to 0. */
1649 if (ctor
== error_mark_node
)
1650 return build_zero_cst (TREE_TYPE (t
));
1651 /* Out of bound array access. Value is undefined, but don't fold. */
1654 /* We can not determine ctor. */
1657 return fold_ctor_reference (TREE_TYPE (t
), ctor
, offset
,
1658 TREE_INT_CST_LOW (unit_size
)
1666 case TARGET_MEM_REF
:
1668 base
= get_ref_base_and_extent (t
, &offset
, &size
, &max_size
);
1669 ctor
= get_base_constructor (base
, &offset
);
1671 /* Empty constructor. Always fold to 0. */
1672 if (ctor
== error_mark_node
)
1673 return build_zero_cst (TREE_TYPE (t
));
1674 /* We do not know precise address. */
1675 if (max_size
== -1 || max_size
!= size
)
1677 /* We can not determine ctor. */
1681 /* Out of bound array access. Value is undefined, but don't fold. */
1685 return fold_ctor_reference (TREE_TYPE (t
), ctor
, offset
, size
);
1690 tree c
= fold_const_aggregate_ref (TREE_OPERAND (t
, 0));
1691 if (c
&& TREE_CODE (c
) == COMPLEX_CST
)
1692 return fold_build1_loc (EXPR_LOCATION (t
),
1693 TREE_CODE (t
), TREE_TYPE (t
), c
);
1704 /* Apply the operation CODE in type TYPE to the value, mask pair
1705 RVAL and RMASK representing a value of type RTYPE and set
1706 the value, mask pair *VAL and *MASK to the result. */
1709 bit_value_unop_1 (enum tree_code code
, tree type
,
1710 double_int
*val
, double_int
*mask
,
1711 tree rtype
, double_int rval
, double_int rmask
)
1717 *val
= double_int_not (rval
);
1722 double_int temv
, temm
;
1723 /* Return ~rval + 1. */
1724 bit_value_unop_1 (BIT_NOT_EXPR
, type
, &temv
, &temm
, type
, rval
, rmask
);
1725 bit_value_binop_1 (PLUS_EXPR
, type
, val
, mask
,
1727 type
, double_int_one
, double_int_zero
);
1735 /* First extend mask and value according to the original type. */
1736 uns
= (TREE_CODE (rtype
) == INTEGER_TYPE
&& TYPE_IS_SIZETYPE (rtype
)
1737 ? 0 : TYPE_UNSIGNED (rtype
));
1738 *mask
= double_int_ext (rmask
, TYPE_PRECISION (rtype
), uns
);
1739 *val
= double_int_ext (rval
, TYPE_PRECISION (rtype
), uns
);
1741 /* Then extend mask and value according to the target type. */
1742 uns
= (TREE_CODE (type
) == INTEGER_TYPE
&& TYPE_IS_SIZETYPE (type
)
1743 ? 0 : TYPE_UNSIGNED (type
));
1744 *mask
= double_int_ext (*mask
, TYPE_PRECISION (type
), uns
);
1745 *val
= double_int_ext (*val
, TYPE_PRECISION (type
), uns
);
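      /* Illustrative example (not from the original sources):
	 converting an unsigned char with value == 0x10 and mask == 0x0f
	 to a 32-bit int first re-extends value/mask in the 8-bit
	 unsigned type (unchanged here) and then extends both to the
	 32-bit precision, so the result is value == 0x10, mask == 0x0f:
	 the upper 24 bits are now known to be zero.  */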
1750 *mask
= double_int_minus_one
;
/* Apply the operation CODE in type TYPE to the value, mask pairs
   R1VAL, R1MASK and R2VAL, R2MASK representing values of type R1TYPE
   and R2TYPE and set the value, mask pair *VAL and *MASK to the result.  */
1760 bit_value_binop_1 (enum tree_code code
, tree type
,
1761 double_int
*val
, double_int
*mask
,
1762 tree r1type
, double_int r1val
, double_int r1mask
,
1763 tree r2type
, double_int r2val
, double_int r2mask
)
1765 bool uns
= (TREE_CODE (type
) == INTEGER_TYPE
1766 && TYPE_IS_SIZETYPE (type
) ? 0 : TYPE_UNSIGNED (type
));
1767 /* Assume we'll get a constant result. Use an initial varying value,
1768 we fall back to varying in the end if necessary. */
1769 *mask
= double_int_minus_one
;
1773 /* The mask is constant where there is a known not
1774 set bit, (m1 | m2) & ((v1 | m1) & (v2 | m2)) */
1775 *mask
= double_int_and (double_int_ior (r1mask
, r2mask
),
1776 double_int_and (double_int_ior (r1val
, r1mask
),
1777 double_int_ior (r2val
, r2mask
)));
1778 *val
= double_int_and (r1val
, r2val
);
1782 /* The mask is constant where there is a known
1783 set bit, (m1 | m2) & ~((v1 & ~m1) | (v2 & ~m2)). */
1784 *mask
= double_int_and_not
1785 (double_int_ior (r1mask
, r2mask
),
1786 double_int_ior (double_int_and_not (r1val
, r1mask
),
1787 double_int_and_not (r2val
, r2mask
)));
1788 *val
= double_int_ior (r1val
, r2val
);
1793 *mask
= double_int_ior (r1mask
, r2mask
);
1794 *val
= double_int_xor (r1val
, r2val
);
1799 if (double_int_zero_p (r2mask
))
1801 HOST_WIDE_INT shift
= r2val
.low
;
1802 if (code
== RROTATE_EXPR
)
1804 *mask
= double_int_lrotate (r1mask
, shift
, TYPE_PRECISION (type
));
1805 *val
= double_int_lrotate (r1val
, shift
, TYPE_PRECISION (type
));
/* ???  We can handle partially known shift counts if we know
   its sign.  That way we can tell that (x << (y | 8)) & 255
   is zero.  */
1814 if (double_int_zero_p (r2mask
))
1816 HOST_WIDE_INT shift
= r2val
.low
;
1817 if (code
== RSHIFT_EXPR
)
1819 /* We need to know if we are doing a left or a right shift
1820 to properly shift in zeros for left shift and unsigned
1821 right shifts and the sign bit for signed right shifts.
1822 For signed right shifts we shift in varying in case
1823 the sign bit was varying. */
1826 *mask
= double_int_lshift (r1mask
, shift
,
1827 TYPE_PRECISION (type
), false);
1828 *val
= double_int_lshift (r1val
, shift
,
1829 TYPE_PRECISION (type
), false);
1834 *mask
= double_int_rshift (r1mask
, shift
,
1835 TYPE_PRECISION (type
), !uns
);
1836 *val
= double_int_rshift (r1val
, shift
,
1837 TYPE_PRECISION (type
), !uns
);
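	  /* Illustrative example (not from the original sources): for
	     x << 1 where x has value == 0x01 and mask == 0x06 (x is
	     one of 1, 3, 5 or 7), shifting both words left by one
	     gives value == 0x02 and mask == 0x0c, i.e. the result is
	     known to be xx10 in binary.  */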
1848 case POINTER_PLUS_EXPR
:
1851 /* Do the addition with unknown bits set to zero, to give carry-ins of
1852 zero wherever possible. */
1853 lo
= double_int_add (double_int_and_not (r1val
, r1mask
),
1854 double_int_and_not (r2val
, r2mask
));
1855 lo
= double_int_ext (lo
, TYPE_PRECISION (type
), uns
);
1856 /* Do the addition with unknown bits set to one, to give carry-ins of
1857 one wherever possible. */
1858 hi
= double_int_add (double_int_ior (r1val
, r1mask
),
1859 double_int_ior (r2val
, r2mask
));
1860 hi
= double_int_ext (hi
, TYPE_PRECISION (type
), uns
);
1861 /* Each bit in the result is known if (a) the corresponding bits in
1862 both inputs are known, and (b) the carry-in to that bit position
1863 is known. We can check condition (b) by seeing if we got the same
1864 result with minimised carries as with maximised carries. */
1865 *mask
= double_int_ior (double_int_ior (r1mask
, r2mask
),
1866 double_int_xor (lo
, hi
));
1867 *mask
= double_int_ext (*mask
, TYPE_PRECISION (type
), uns
);
1868 /* It shouldn't matter whether we choose lo or hi here. */
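      /* Illustrative example (not from the original sources): adding
	 r1 with value == 4, mask == 3 (i.e. one of 4..7) to the
	 constant 8 gives lo == 12 and hi == 15, so
	 mask == (3 | 0) | (12 ^ 15) == 3 and value == 12: the sum is
	 known to be 11xx in binary, i.e. in the range 12..15.  */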
1875 double_int temv
, temm
;
1876 bit_value_unop_1 (NEGATE_EXPR
, r2type
, &temv
, &temm
,
1877 r2type
, r2val
, r2mask
);
1878 bit_value_binop_1 (PLUS_EXPR
, type
, val
, mask
,
1879 r1type
, r1val
, r1mask
,
1880 r2type
, temv
, temm
);
1886 /* Just track trailing zeros in both operands and transfer
1887 them to the other. */
1888 int r1tz
= double_int_ctz (double_int_ior (r1val
, r1mask
));
1889 int r2tz
= double_int_ctz (double_int_ior (r2val
, r2mask
));
1890 if (r1tz
+ r2tz
>= HOST_BITS_PER_DOUBLE_INT
)
1892 *mask
= double_int_zero
;
1893 *val
= double_int_zero
;
1895 else if (r1tz
+ r2tz
> 0)
1897 *mask
= double_int_not (double_int_mask (r1tz
+ r2tz
));
1898 *mask
= double_int_ext (*mask
, TYPE_PRECISION (type
), uns
);
1899 *val
= double_int_zero
;
1907 double_int m
= double_int_ior (r1mask
, r2mask
);
1908 if (!double_int_equal_p (double_int_and_not (r1val
, m
),
1909 double_int_and_not (r2val
, m
)))
1911 *mask
= double_int_zero
;
1912 *val
= ((code
== EQ_EXPR
) ? double_int_zero
: double_int_one
);
1916 /* We know the result of a comparison is always one or zero. */
1917 *mask
= double_int_one
;
1918 *val
= double_int_zero
;
1926 double_int tem
= r1val
;
1932 code
= swap_tree_comparison (code
);
1939 /* If the most significant bits are not known we know nothing. */
1940 if (double_int_negative_p (r1mask
) || double_int_negative_p (r2mask
))
/* If we know the most significant bits we know the value
   ranges by means of treating varying bits as zero
   or one.  Do a cross comparison of the max/min pairs.  */
1946 maxmin
= double_int_cmp (double_int_ior (r1val
, r1mask
),
1947 double_int_and_not (r2val
, r2mask
), uns
);
1948 minmax
= double_int_cmp (double_int_and_not (r1val
, r1mask
),
1949 double_int_ior (r2val
, r2mask
), uns
);
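      /* Illustrative example (not from the original sources): for
	 r1 with value == 0, mask == 3 (one of 0..3) and r2 with
	 value == 8, mask == 0 (exactly 8), maxmin compares 3 against 8
	 and is negative, so r1 < r2 holds for every possible value and
	 the LT_EXPR result below is the constant 1 with mask 0.  */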
1950 if (maxmin
< 0) /* r1 is less than r2. */
1952 *mask
= double_int_zero
;
1953 *val
= double_int_one
;
1955 else if (minmax
> 0) /* r1 is not less or equal to r2. */
1957 *mask
= double_int_zero
;
1958 *val
= double_int_zero
;
1960 else if (maxmin
== minmax
) /* r1 and r2 are equal. */
1962 /* This probably should never happen as we'd have
1963 folded the thing during fully constant value folding. */
1964 *mask
= double_int_zero
;
1965 *val
= (code
== LE_EXPR
? double_int_one
: double_int_zero
);
1969 /* We know the result of a comparison is always one or zero. */
1970 *mask
= double_int_one
;
1971 *val
= double_int_zero
;
1980 /* Return the propagation value when applying the operation CODE to
1981 the value RHS yielding type TYPE. */
1984 bit_value_unop (enum tree_code code
, tree type
, tree rhs
)
1986 prop_value_t rval
= get_value_for_expr (rhs
, true);
1987 double_int value
, mask
;
1989 gcc_assert ((rval
.lattice_val
== CONSTANT
1990 && TREE_CODE (rval
.value
) == INTEGER_CST
)
1991 || double_int_minus_one_p (rval
.mask
));
1992 bit_value_unop_1 (code
, type
, &value
, &mask
,
1993 TREE_TYPE (rhs
), value_to_double_int (rval
), rval
.mask
);
1994 if (!double_int_minus_one_p (mask
))
1996 val
.lattice_val
= CONSTANT
;
1998 /* ??? Delay building trees here. */
1999 val
.value
= double_int_to_tree (type
, value
);
2003 val
.lattice_val
= VARYING
;
2004 val
.value
= NULL_TREE
;
2005 val
.mask
= double_int_minus_one
;
2010 /* Return the propagation value when applying the operation CODE to
2011 the values RHS1 and RHS2 yielding type TYPE. */
2014 bit_value_binop (enum tree_code code
, tree type
, tree rhs1
, tree rhs2
)
2016 prop_value_t r1val
= get_value_for_expr (rhs1
, true);
2017 prop_value_t r2val
= get_value_for_expr (rhs2
, true);
2018 double_int value
, mask
;
2020 gcc_assert ((r1val
.lattice_val
== CONSTANT
2021 && TREE_CODE (r1val
.value
) == INTEGER_CST
)
2022 || double_int_minus_one_p (r1val
.mask
));
2023 gcc_assert ((r2val
.lattice_val
== CONSTANT
2024 && TREE_CODE (r2val
.value
) == INTEGER_CST
)
2025 || double_int_minus_one_p (r2val
.mask
));
2026 bit_value_binop_1 (code
, type
, &value
, &mask
,
2027 TREE_TYPE (rhs1
), value_to_double_int (r1val
), r1val
.mask
,
2028 TREE_TYPE (rhs2
), value_to_double_int (r2val
), r2val
.mask
);
2029 if (!double_int_minus_one_p (mask
))
2031 val
.lattice_val
= CONSTANT
;
2033 /* ??? Delay building trees here. */
2034 val
.value
= double_int_to_tree (type
, value
);
2038 val
.lattice_val
= VARYING
;
2039 val
.value
= NULL_TREE
;
2040 val
.mask
= double_int_minus_one
;
2045 /* Evaluate statement STMT.
2046 Valid only for assignments, calls, conditionals, and switches. */
2049 evaluate_stmt (gimple stmt
)
2052 tree simplified
= NULL_TREE
;
2053 ccp_lattice_t likelyvalue
= likely_value (stmt
);
2054 bool is_constant
= false;
2056 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
2058 fprintf (dump_file
, "which is likely ");
2059 switch (likelyvalue
)
2062 fprintf (dump_file
, "CONSTANT");
2065 fprintf (dump_file
, "UNDEFINED");
2068 fprintf (dump_file
, "VARYING");
2072 fprintf (dump_file
, "\n");
2075 /* If the statement is likely to have a CONSTANT result, then try
2076 to fold the statement to determine the constant value. */
2077 /* FIXME. This is the only place that we call ccp_fold.
2078 Since likely_value never returns CONSTANT for calls, we will
2079 not attempt to fold them, including builtins that may profit. */
2080 if (likelyvalue
== CONSTANT
)
2082 fold_defer_overflow_warnings ();
2083 simplified
= ccp_fold (stmt
);
2084 is_constant
= simplified
&& is_gimple_min_invariant (simplified
);
2085 fold_undefer_overflow_warnings (is_constant
, stmt
, 0);
2088 /* The statement produced a constant value. */
2089 val
.lattice_val
= CONSTANT
;
2090 val
.value
= simplified
;
2091 val
.mask
= double_int_zero
;
2094 /* If the statement is likely to have a VARYING result, then do not
2095 bother folding the statement. */
2096 else if (likelyvalue
== VARYING
)
2098 enum gimple_code code
= gimple_code (stmt
);
2099 if (code
== GIMPLE_ASSIGN
)
2101 enum tree_code subcode
= gimple_assign_rhs_code (stmt
);
/* Other cases cannot satisfy is_gimple_min_invariant
   without folding.  */
2105 if (get_gimple_rhs_class (subcode
) == GIMPLE_SINGLE_RHS
)
2106 simplified
= gimple_assign_rhs1 (stmt
);
2108 else if (code
== GIMPLE_SWITCH
)
2109 simplified
= gimple_switch_index (stmt
);
2111 /* These cannot satisfy is_gimple_min_invariant without folding. */
2112 gcc_assert (code
== GIMPLE_CALL
|| code
== GIMPLE_COND
);
2113 is_constant
= simplified
&& is_gimple_min_invariant (simplified
);
2116 /* The statement produced a constant value. */
2117 val
.lattice_val
= CONSTANT
;
2118 val
.value
= simplified
;
2119 val
.mask
= double_int_zero
;
2123 /* Resort to simplification for bitwise tracking. */
2124 if (flag_tree_bit_ccp
2125 && likelyvalue
== CONSTANT
2128 enum gimple_code code
= gimple_code (stmt
);
2130 val
.lattice_val
= VARYING
;
2131 val
.value
= NULL_TREE
;
2132 val
.mask
= double_int_minus_one
;
2133 if (code
== GIMPLE_ASSIGN
)
2135 enum tree_code subcode
= gimple_assign_rhs_code (stmt
);
2136 tree rhs1
= gimple_assign_rhs1 (stmt
);
2137 switch (get_gimple_rhs_class (subcode
))
2139 case GIMPLE_SINGLE_RHS
:
2140 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1
))
2141 || POINTER_TYPE_P (TREE_TYPE (rhs1
)))
2142 val
= get_value_for_expr (rhs1
, true);
2145 case GIMPLE_UNARY_RHS
:
2146 if ((INTEGRAL_TYPE_P (TREE_TYPE (rhs1
))
2147 || POINTER_TYPE_P (TREE_TYPE (rhs1
)))
2148 && (INTEGRAL_TYPE_P (gimple_expr_type (stmt
))
2149 || POINTER_TYPE_P (gimple_expr_type (stmt
))))
2150 val
= bit_value_unop (subcode
, gimple_expr_type (stmt
), rhs1
);
2153 case GIMPLE_BINARY_RHS
:
2154 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1
))
2155 || POINTER_TYPE_P (TREE_TYPE (rhs1
)))
2157 tree rhs2
= gimple_assign_rhs2 (stmt
);
2158 val
= bit_value_binop (subcode
,
2159 TREE_TYPE (rhs1
), rhs1
, rhs2
);
2166 else if (code
== GIMPLE_COND
)
2168 enum tree_code code
= gimple_cond_code (stmt
);
2169 tree rhs1
= gimple_cond_lhs (stmt
);
2170 tree rhs2
= gimple_cond_rhs (stmt
);
2171 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1
))
2172 || POINTER_TYPE_P (TREE_TYPE (rhs1
)))
2173 val
= bit_value_binop (code
, TREE_TYPE (rhs1
), rhs1
, rhs2
);
2175 else if (code
== GIMPLE_CALL
2176 && (fndecl
= gimple_call_fndecl (stmt
))
2177 && DECL_BUILT_IN_CLASS (fndecl
) == BUILT_IN_NORMAL
)
2179 switch (DECL_FUNCTION_CODE (fndecl
))
2181 case BUILT_IN_MALLOC
:
2182 case BUILT_IN_REALLOC
:
2183 case BUILT_IN_CALLOC
:
2184 val
.lattice_val
= CONSTANT
;
2185 val
.value
= build_int_cst (TREE_TYPE (gimple_get_lhs (stmt
)), 0);
2186 val
.mask
= shwi_to_double_int
2187 (~(((HOST_WIDE_INT
) MALLOC_ABI_ALIGNMENT
)
2188 / BITS_PER_UNIT
- 1));
2191 case BUILT_IN_ALLOCA
:
2192 val
.lattice_val
= CONSTANT
;
2193 val
.value
= build_int_cst (TREE_TYPE (gimple_get_lhs (stmt
)), 0);
2194 val
.mask
= shwi_to_double_int
2195 (~(((HOST_WIDE_INT
) BIGGEST_ALIGNMENT
)
2196 / BITS_PER_UNIT
- 1));
2202 is_constant
= (val
.lattice_val
== CONSTANT
);
2207 /* The statement produced a nonconstant value. If the statement
2208 had UNDEFINED operands, then the result of the statement
2209 should be UNDEFINED. Otherwise, the statement is VARYING. */
2210 if (likelyvalue
== UNDEFINED
)
2212 val
.lattice_val
= likelyvalue
;
2213 val
.mask
= double_int_zero
;
2217 val
.lattice_val
= VARYING
;
2218 val
.mask
= double_int_minus_one
;
2221 val
.value
= NULL_TREE
;
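
/* As an example of the bitwise tracking above: for p_1 = __builtin_malloc
   (n_2), and assuming a target where MALLOC_ABI_ALIGNMENT is 128 bits
   (16 bytes), the lattice value of p_1 becomes CONSTANT with value 0 and
   mask ~15, i.e. the four least significant bits of p_1 are known to be
   zero while all remaining bits stay unknown.  */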
/* Fold the stmt at *GSI with CCP specific information that propagating
   and regular folding does not catch.  */

static bool
ccp_fold_stmt (gimple_stmt_iterator *gsi)
{
  gimple stmt = gsi_stmt (*gsi);

  switch (gimple_code (stmt))
    {
    case GIMPLE_COND:
      {
	prop_value_t val;
	/* Statement evaluation will handle type mismatches in constants
	   more gracefully than the final propagation.  This allows us to
	   fold more conditionals here.  */
	val = evaluate_stmt (stmt);
	if (val.lattice_val != CONSTANT
	    || !double_int_zero_p (val.mask))
	  return false;

	if (dump_file)
	  {
	    fprintf (dump_file, "Folding predicate ");
	    print_gimple_expr (dump_file, stmt, 0, 0);
	    fprintf (dump_file, " to ");
	    print_generic_expr (dump_file, val.value, 0);
	    fprintf (dump_file, "\n");
	  }

	if (integer_zerop (val.value))
	  gimple_cond_make_false (stmt);
	else
	  gimple_cond_make_true (stmt);

	return true;
      }

    case GIMPLE_CALL:
      {
	tree lhs = gimple_call_lhs (stmt);
	tree val;
	tree argt;
	tree callee;
	bool changed = false;
	unsigned i;

	/* If the call was folded into a constant make sure it goes
	   away even if we cannot propagate into all uses because of
	   type issues.  */
	if (lhs
	    && TREE_CODE (lhs) == SSA_NAME
	    && (val = get_constant_value (lhs)))
	  {
	    tree new_rhs = unshare_expr (val);
	    bool res;
	    if (!useless_type_conversion_p (TREE_TYPE (lhs),
					    TREE_TYPE (new_rhs)))
	      new_rhs = fold_convert (TREE_TYPE (lhs), new_rhs);
	    res = update_call_from_tree (gsi, new_rhs);
	    gcc_assert (res);
	    return true;
	  }

	/* Propagate into the call arguments.  Compared to replace_uses_in
	   this can use the argument slot types for type verification
	   instead of the current argument type.  We also can safely
	   drop qualifiers here as we are dealing with constants anyway.  */
	argt = TYPE_ARG_TYPES (TREE_TYPE (TREE_TYPE (gimple_call_fn (stmt))));
	for (i = 0; i < gimple_call_num_args (stmt) && argt;
	     ++i, argt = TREE_CHAIN (argt))
	  {
	    tree arg = gimple_call_arg (stmt, i);
	    if (TREE_CODE (arg) == SSA_NAME
		&& (val = get_constant_value (arg))
		&& useless_type_conversion_p
		     (TYPE_MAIN_VARIANT (TREE_VALUE (argt)),
		      TYPE_MAIN_VARIANT (TREE_TYPE (val))))
	      {
		gimple_call_set_arg (stmt, i, unshare_expr (val));
		changed = true;
	      }
	  }

	callee = gimple_call_fn (stmt);
	if (TREE_CODE (callee) == OBJ_TYPE_REF
	    && TREE_CODE (OBJ_TYPE_REF_EXPR (callee)) == SSA_NAME)
	  {
	    tree expr = OBJ_TYPE_REF_EXPR (callee);
	    OBJ_TYPE_REF_EXPR (callee) = valueize_op (expr);
	    if (TREE_CODE (OBJ_TYPE_REF_EXPR (callee)) == ADDR_EXPR)
	      {
		tree t;
		t = gimple_fold_obj_type_ref (callee, NULL_TREE);
		if (t)
		  {
		    gimple_call_set_fn (stmt, t);
		    changed = true;
		  }
	      }
	    else
	      OBJ_TYPE_REF_EXPR (callee) = expr;
	  }

	return changed;
      }

    case GIMPLE_ASSIGN:
      {
	tree lhs = gimple_assign_lhs (stmt);
	tree val;

	/* If we have a load that turned out to be constant replace it
	   as we cannot propagate into all uses in all cases.  */
	if (gimple_assign_single_p (stmt)
	    && TREE_CODE (lhs) == SSA_NAME
	    && (val = get_constant_value (lhs)))
	  {
	    tree rhs = unshare_expr (val);
	    if (!useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (rhs)))
	      rhs = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (lhs), rhs);
	    gimple_assign_set_rhs_from_tree (gsi, rhs);
	    return true;
	  }

	return false;
      }

    default:
      return false;
    }
}
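
/* For example, once CCP has determined that x_3 is the constant 0, a
   predicate such as "if (x_3 != 0)" is rewritten here through
   gimple_cond_make_false so the branch becomes statically false; the
   symmetric case uses gimple_cond_make_true when the condition is known
   to hold.  */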
/* Visit the assignment statement STMT.  Set the value of its LHS to the
   value computed by the RHS and store LHS in *OUTPUT_P.  If STMT
   creates virtual definitions, set the value of each new name to that
   of the RHS (if we can derive a constant out of the RHS).
   Value-returning call statements also perform an assignment, and
   are handled here.  */

static enum ssa_prop_result
visit_assignment (gimple stmt, tree *output_p)
{
  prop_value_t val;
  enum ssa_prop_result retval;

  tree lhs = gimple_get_lhs (stmt);

  gcc_assert (gimple_code (stmt) != GIMPLE_CALL
	      || gimple_call_lhs (stmt) != NULL_TREE);

  if (gimple_assign_single_p (stmt)
      && gimple_assign_rhs_code (stmt) == SSA_NAME)
    /* For a simple copy operation, we copy the lattice values.  */
    val = *get_value (gimple_assign_rhs1 (stmt));
  else
    /* Evaluate the statement, which could be
       either a GIMPLE_ASSIGN or a GIMPLE_CALL.  */
    val = evaluate_stmt (stmt);

  retval = SSA_PROP_NOT_INTERESTING;

  /* Set the lattice value of the statement's output.  */
  if (TREE_CODE (lhs) == SSA_NAME)
    {
      /* If STMT is an assignment to an SSA_NAME, we only have one
	 value to set.  */
      if (set_lattice_value (lhs, val))
	{
	  *output_p = lhs;
	  if (val.lattice_val == VARYING)
	    retval = SSA_PROP_VARYING;
	  else
	    retval = SSA_PROP_INTERESTING;
	}
    }

  return retval;
}
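
/* For instance, when simulation first reaches x_2 = 3 + 4, evaluate_stmt
   yields CONSTANT 7; set_lattice_value records the change from UNDEFINED
   to CONSTANT, so SSA_PROP_INTERESTING is returned and every statement
   using x_2 is queued for re-simulation.  */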
/* Visit the conditional statement STMT.  Return SSA_PROP_INTERESTING
   if it can determine which edge will be taken.  Otherwise, return
   SSA_PROP_VARYING.  */

static enum ssa_prop_result
visit_cond_stmt (gimple stmt, edge *taken_edge_p)
{
  prop_value_t val;
  basic_block block;

  block = gimple_bb (stmt);
  val = evaluate_stmt (stmt);
  if (val.lattice_val != CONSTANT
      || !double_int_zero_p (val.mask))
    return SSA_PROP_VARYING;

  /* Find which edge out of the conditional block will be taken and add it
     to the worklist.  If no single edge can be determined statically,
     return SSA_PROP_VARYING to feed all the outgoing edges to the
     propagation engine.  */
  *taken_edge_p = find_taken_edge (block, val.value);
  if (*taken_edge_p)
    return SSA_PROP_INTERESTING;
  else
    return SSA_PROP_VARYING;
}
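
/* As an example, for "if (i_1 > 10)" where i_1 has been found to be the
   constant 5, the predicate evaluates to a definite false, find_taken_edge
   returns the edge for the else arm, and only that edge is fed back to the
   propagation engine.  */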
/* Evaluate statement STMT.  If the statement produces an output value and
   its evaluation changes the lattice value of its output, return
   SSA_PROP_INTERESTING and set *OUTPUT_P to the SSA_NAME holding the
   output value.

   If STMT is a conditional branch and we can determine its truth
   value, set *TAKEN_EDGE_P accordingly.  If STMT produces a varying
   value, return SSA_PROP_VARYING.  */

static enum ssa_prop_result
ccp_visit_stmt (gimple stmt, edge *taken_edge_p, tree *output_p)
{
  tree def;
  ssa_op_iter iter;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nVisiting statement:\n");
      print_gimple_stmt (dump_file, stmt, 0, dump_flags);
    }

  switch (gimple_code (stmt))
    {
      case GIMPLE_ASSIGN:
	/* If the statement is an assignment that produces a single
	   output value, evaluate its RHS to see if the lattice value of
	   its output has changed.  */
	return visit_assignment (stmt, output_p);

      case GIMPLE_CALL:
	/* A value-returning call also performs an assignment.  */
	if (gimple_call_lhs (stmt) != NULL_TREE)
	  return visit_assignment (stmt, output_p);
	break;

      case GIMPLE_COND:
      case GIMPLE_SWITCH:
	/* If STMT is a conditional branch, see if we can determine
	   which branch will be taken.  */
	/* FIXME.  It appears that we should be able to optimize
	   computed GOTOs here as well.  */
	return visit_cond_stmt (stmt, taken_edge_p);

      default:
	break;
    }

  /* Any other kind of statement is not interesting for constant
     propagation and, therefore, not worth simulating.  */
  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "No interesting values produced.  Marked VARYING.\n");

  /* Definitions made by statements other than assignments to
     SSA_NAMEs represent unknown modifications to their outputs.
     Mark them VARYING.  */
  FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
    {
      prop_value_t v = { VARYING, NULL_TREE, { -1, (HOST_WIDE_INT) -1 } };
      set_lattice_value (def, v);
    }

  return SSA_PROP_VARYING;
}
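
/* A statement like an inline asm that defines an SSA name, e.g.
   __asm__ ("..." : "=r" (x_5)), takes this fallthrough path: it is not
   worth simulating, so x_5 is simply pinned to VARYING.  */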
/* Main entry point for SSA Conditional Constant Propagation.  */

static unsigned int
do_ssa_ccp (void)
{
  ccp_initialize ();
  ssa_propagate (ccp_visit_stmt, ccp_visit_phi_node);
  if (ccp_finalize ())
    return (TODO_cleanup_cfg | TODO_update_ssa | TODO_remove_unused_locals);
  else
    return 0;
}


static bool
gate_ccp (void)
{
  return flag_tree_ccp != 0;
}
struct gimple_opt_pass pass_ccp =
{
 {
  GIMPLE_PASS,
  "ccp",				/* name */
  gate_ccp,				/* gate */
  do_ssa_ccp,				/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_TREE_CCP,				/* tv_id */
  PROP_cfg | PROP_ssa,			/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  TODO_dump_func | TODO_verify_ssa
    | TODO_verify_stmts | TODO_ggc_collect/* todo_flags_finish */
 }
};
/* Try to optimize out __builtin_stack_restore.  Optimize it out
   if there is another __builtin_stack_restore in the same basic
   block and no calls or ASM_EXPRs are in between, or if this block's
   only outgoing edge is to EXIT_BLOCK and there are no calls or
   ASM_EXPRs after this __builtin_stack_restore.  */

static tree
optimize_stack_restore (gimple_stmt_iterator i)
{
  tree callee;
  gimple stmt;

  basic_block bb = gsi_bb (i);
  gimple call = gsi_stmt (i);

  if (gimple_code (call) != GIMPLE_CALL
      || gimple_call_num_args (call) != 1
      || TREE_CODE (gimple_call_arg (call, 0)) != SSA_NAME
      || !POINTER_TYPE_P (TREE_TYPE (gimple_call_arg (call, 0))))
    return NULL_TREE;

  for (gsi_next (&i); !gsi_end_p (i); gsi_next (&i))
    {
      stmt = gsi_stmt (i);
      if (gimple_code (stmt) == GIMPLE_ASM)
	return NULL_TREE;
      if (gimple_code (stmt) != GIMPLE_CALL)
	continue;

      callee = gimple_call_fndecl (stmt);
      if (!callee
	  || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL
	  /* All regular builtins are ok, just obviously not alloca.  */
	  || DECL_FUNCTION_CODE (callee) == BUILT_IN_ALLOCA)
	return NULL_TREE;

      if (DECL_FUNCTION_CODE (callee) == BUILT_IN_STACK_RESTORE)
	goto second_stack_restore;
    }

  if (!gsi_end_p (i))
    return NULL_TREE;

  /* Allow one successor of the exit block, or zero successors.  */
  switch (EDGE_COUNT (bb->succs))
    {
    case 0:
      break;
    case 1:
      if (single_succ_edge (bb)->dest != EXIT_BLOCK_PTR)
	return NULL_TREE;
      break;
    default:
      return NULL_TREE;
    }
 second_stack_restore:

  /* If there's exactly one use, then zap the call to __builtin_stack_save.
     If there are multiple uses, then the last one should remove the call.
     In any case, whether the call to __builtin_stack_save can be removed
     or not is irrelevant to removing the call to __builtin_stack_restore.  */
  if (has_single_use (gimple_call_arg (call, 0)))
    {
      gimple stack_save = SSA_NAME_DEF_STMT (gimple_call_arg (call, 0));
      if (is_gimple_call (stack_save))
	{
	  callee = gimple_call_fndecl (stack_save);
	  if (callee
	      && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
	      && DECL_FUNCTION_CODE (callee) == BUILT_IN_STACK_SAVE)
	    {
	      gimple_stmt_iterator stack_save_gsi;
	      tree rhs;

	      stack_save_gsi = gsi_for_stmt (stack_save);
	      rhs = build_int_cst (TREE_TYPE (gimple_call_arg (call, 0)), 0);
	      update_call_from_tree (&stack_save_gsi, rhs);
	    }
	}
    }

  /* No effect, so the statement will be deleted.  */
  return integer_zero_node;
}
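
/* A typical payoff: for a block containing

     t_1 = __builtin_stack_save ();
     ...                              (no calls or asm in between)
     __builtin_stack_restore (t_1);

   the restore call folds to a no-op, and when t_1 has no other uses the
   paired __builtin_stack_save is rewritten into the trivial assignment
   t_1 = 0, zapping the save as described above.  */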
/* If va_list type is a simple pointer and nothing special is needed,
   optimize __builtin_va_start (&ap, 0) into ap = __builtin_next_arg (0),
   __builtin_va_end (&ap) out as NOP and __builtin_va_copy into a simple
   pointer assignment.  */

static tree
optimize_stdarg_builtin (gimple call)
{
  tree callee, lhs, rhs, cfun_va_list;
  bool va_list_simple_ptr;
  location_t loc = gimple_location (call);

  if (gimple_code (call) != GIMPLE_CALL)
    return NULL_TREE;

  callee = gimple_call_fndecl (call);

  cfun_va_list = targetm.fn_abi_va_list (callee);
  va_list_simple_ptr = POINTER_TYPE_P (cfun_va_list)
		       && (TREE_TYPE (cfun_va_list) == void_type_node
			   || TREE_TYPE (cfun_va_list) == char_type_node);

  switch (DECL_FUNCTION_CODE (callee))
    {
    case BUILT_IN_VA_START:
      if (!va_list_simple_ptr
	  || targetm.expand_builtin_va_start != NULL
	  || built_in_decls[BUILT_IN_NEXT_ARG] == NULL)
	return NULL_TREE;

      if (gimple_call_num_args (call) != 2)
	return NULL_TREE;

      lhs = gimple_call_arg (call, 0);
      if (!POINTER_TYPE_P (TREE_TYPE (lhs))
	  || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs)))
	     != TYPE_MAIN_VARIANT (cfun_va_list))
	return NULL_TREE;

      lhs = build_fold_indirect_ref_loc (loc, lhs);
      rhs = build_call_expr_loc (loc, built_in_decls[BUILT_IN_NEXT_ARG],
				 1, integer_zero_node);
      rhs = fold_convert_loc (loc, TREE_TYPE (lhs), rhs);
      return build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, rhs);

    case BUILT_IN_VA_COPY:
      if (!va_list_simple_ptr)
	return NULL_TREE;

      if (gimple_call_num_args (call) != 2)
	return NULL_TREE;

      lhs = gimple_call_arg (call, 0);
      if (!POINTER_TYPE_P (TREE_TYPE (lhs))
	  || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs)))
	     != TYPE_MAIN_VARIANT (cfun_va_list))
	return NULL_TREE;

      lhs = build_fold_indirect_ref_loc (loc, lhs);
      rhs = gimple_call_arg (call, 1);
      if (TYPE_MAIN_VARIANT (TREE_TYPE (rhs))
	  != TYPE_MAIN_VARIANT (cfun_va_list))
	return NULL_TREE;

      rhs = fold_convert_loc (loc, TREE_TYPE (lhs), rhs);
      return build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, rhs);

    case BUILT_IN_VA_END:
      /* No effect, so the statement will be deleted.  */
      return integer_zero_node;

    default:
      gcc_unreachable ();
    }
}
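
/* Concretely, on a target whose va_list is a plain pointer this turns

     __builtin_va_start (&ap, 0)   into   ap = __builtin_next_arg (0);
     __builtin_va_copy (&d, s)     into   d = s;
     __builtin_va_end (&ap)        into   nothing at all.  */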
/* A simple pass that attempts to fold all builtin functions.  This pass
   is run after we've propagated as many constants as we can.  */

static unsigned int
execute_fold_all_builtins (void)
{
  bool cfg_changed = false;
  basic_block bb;
  unsigned int todoflags = 0;

  FOR_EACH_BB (bb)
    {
      gimple_stmt_iterator i;
      for (i = gsi_start_bb (bb); !gsi_end_p (i); )
	{
	  gimple stmt, old_stmt;
	  tree callee, result;
	  enum built_in_function fcode;

	  stmt = gsi_stmt (i);

	  if (gimple_code (stmt) != GIMPLE_CALL)
	    {
	      gsi_next (&i);
	      continue;
	    }
	  callee = gimple_call_fndecl (stmt);
	  if (!callee || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL)
	    {
	      gsi_next (&i);
	      continue;
	    }
	  fcode = DECL_FUNCTION_CODE (callee);

	  result = gimple_fold_builtin (stmt);

	  if (result)
	    gimple_remove_stmt_histograms (cfun, stmt);

	  if (!result)
	    switch (DECL_FUNCTION_CODE (callee))
	      {
	      case BUILT_IN_CONSTANT_P:
		/* Resolve __builtin_constant_p.  If it hasn't been
		   folded to integer_one_node by now, it's fairly
		   certain that the value simply isn't constant.  */
		result = integer_zero_node;
		break;

	      case BUILT_IN_STACK_RESTORE:
		result = optimize_stack_restore (i);
		if (result)
		  break;
		gsi_next (&i);
		continue;

	      case BUILT_IN_VA_START:
	      case BUILT_IN_VA_END:
	      case BUILT_IN_VA_COPY:
		/* These shouldn't be folded before pass_stdarg.  */
		result = optimize_stdarg_builtin (stmt);
		if (result)
		  break;
		/* FALLTHRU */

	      default:
		gsi_next (&i);
		continue;
	      }

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Simplified\n  ");
	      print_gimple_stmt (dump_file, stmt, 0, dump_flags);
	    }

	  old_stmt = stmt;
	  if (!update_call_from_tree (&i, result))
	    {
	      gimplify_and_update_call_from_tree (&i, result);
	      todoflags |= TODO_update_address_taken;
	    }

	  stmt = gsi_stmt (i);
	  update_stmt (stmt);

	  if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt)
	      && gimple_purge_dead_eh_edges (bb))
	    cfg_changed = true;

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "to\n  ");
	      print_gimple_stmt (dump_file, stmt, 0, dump_flags);
	      fprintf (dump_file, "\n");
	    }

	  /* Retry the same statement if it changed into another
	     builtin, there might be new opportunities now.  */
	  if (gimple_code (stmt) != GIMPLE_CALL)
	    {
	      gsi_next (&i);
	      continue;
	    }
	  callee = gimple_call_fndecl (stmt);
	  if (!callee
	      || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL
	      || DECL_FUNCTION_CODE (callee) == fcode)
	    gsi_next (&i);
	}
    }

  /* Delete unreachable blocks.  */
  if (cfg_changed)
    todoflags |= TODO_cleanup_cfg;

  return todoflags;
}
struct gimple_opt_pass pass_fold_builtins =
{
 {
  GIMPLE_PASS,
  "fab",				/* name */
  NULL,					/* gate */
  execute_fold_all_builtins,		/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_NONE,				/* tv_id */
  PROP_cfg | PROP_ssa,			/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  TODO_dump_func
    | TODO_verify_ssa
    | TODO_update_ssa			/* todo_flags_finish */
 }
};