/* Conditional constant propagation pass for the GNU compiler.
   Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009,
   2010, 2011 Free Software Foundation, Inc.
   Adapted from original RTL SSA-CCP by Daniel Berlin <dberlin@dberlin.org>
   Adapted to GIMPLE trees by Diego Novillo <dnovillo@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* Conditional constant propagation (CCP) is based on the SSA
   propagation engine (tree-ssa-propagate.c).  Constant assignments of
   the form VAR = CST are propagated from the assignments into uses of
   VAR, which in turn may generate new constants.  The simulation uses
   a four level lattice to keep track of constant values associated
   with SSA names.  Given an SSA name V_i, it may take one of the
   following values:

	UNINITIALIZED   ->  the initial state of the value.  This value
			    is replaced with a correct initial value
			    the first time the value is used, so the
			    rest of the pass does not need to care about
			    it.  Using this value simplifies initialization
			    of the pass, and prevents us from needlessly
			    scanning statements that are never reached.

	UNDEFINED	->  V_i is a local variable whose definition
			    has not been processed yet.  Therefore we
			    don't yet know if its value is a constant
			    or not.

	CONSTANT	->  V_i has been found to hold a constant
			    value C.

	VARYING		->  V_i cannot take a constant value, or if it
			    does, it is not possible to determine it
			    at compile time.

   The core of SSA-CCP is in ccp_visit_stmt and ccp_visit_phi_node:

   1- In ccp_visit_stmt, we are interested in assignments whose RHS
      evaluates into a constant and conditional jumps whose predicate
      evaluates into a boolean true or false.  When an assignment of
      the form V_i = CONST is found, V_i's lattice value is set to
      CONSTANT and CONST is associated with it.  This causes the
      propagation engine to add all the SSA edges coming out the
      assignment into the worklists, so that statements that use V_i
      can be visited.

      If the statement is a conditional with a constant predicate, we
      mark the outgoing edges as executable or not executable
      depending on the predicate's value.  This is then used when
      visiting PHI nodes to know when a PHI argument can be ignored.

   2- In ccp_visit_phi_node, if all the PHI arguments evaluate to the
      same constant C, then the LHS of the PHI is set to C.  This
      evaluation is known as the "meet operation".  Since one of the
      goals of this evaluation is to optimistically return constant
      values as often as possible, it uses two main short cuts:

      - If an argument is flowing in through a non-executable edge, it
	is ignored.  This is useful in cases like this:

			if (PRED)
			  a_9 = 3;
			else
			  a_10 = 100;
			a_11 = PHI (a_9, a_10)

	If PRED is known to always evaluate to false, then we can
	assume that a_11 will always take its value from a_10, meaning
	that instead of considering it VARYING (a_9 and a_10 have
	different values), we can consider it CONSTANT 100.

      - If an argument has an UNDEFINED value, then it does not affect
	the outcome of the meet operation.  If a variable V_i has an
	UNDEFINED value, it means that either its defining statement
	hasn't been visited yet or V_i has no defining statement, in
	which case the original symbol 'V' is being used
	uninitialized.  Since 'V' is a local variable, the compiler
	may assume any initial value for it.

   After propagation, every variable V_i that ends up with a lattice
   value of CONSTANT will have the associated constant value in the
   array CONST_VAL[i].VALUE.  That is fed into substitute_and_fold for
   final substitution and folding.

   References:

     Constant propagation with conditional branches,
     Wegman and Zadeck, ACM TOPLAS 13(2):181-210.

     Building an Optimizing Compiler,
     Robert Morgan, Butterworth-Heinemann, 1998, Section 8.9.

     Advanced Compiler Design and Implementation,
     Steven Muchnick, Morgan Kaufmann, 1997, Section 12.6  */
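
/* For illustration (a worked example, not taken from the references
   above): given the GIMPLE sequence

	x_1 = 10;
	y_2 = x_1 + 5;
	if (y_2 > 20)
	  ...

   visiting the first assignment sets x_1 to CONSTANT 10, visiting the
   second assignment then sets y_2 to CONSTANT 15, and the predicate
   y_2 > 20 folds to false, so only the outgoing edge for the else
   branch is marked executable.  */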
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "flags.h"
#include "tm_p.h"
#include "basic-block.h"
#include "output.h"
#include "function.h"
#include "tree-pretty-print.h"
#include "gimple-pretty-print.h"
#include "timevar.h"
#include "tree-dump.h"
#include "tree-flow.h"
#include "tree-pass.h"
#include "tree-ssa-propagate.h"
#include "value-prof.h"
#include "langhooks.h"
#include "target.h"
#include "diagnostic-core.h"
#include "dbgcnt.h"
/* Possible lattice values.  */
typedef enum
{
  UNINITIALIZED,
  UNDEFINED,
  CONSTANT,
  VARYING
} ccp_lattice_t;

struct prop_value_d {
    /* Lattice value.  */
    ccp_lattice_t lattice_val;

    /* Propagated value.  */
    tree value;

    /* Mask that applies to the propagated value during CCP.  For
       X with a CONSTANT lattice value X & ~mask == value & ~mask.  */
    double_int mask;
};

typedef struct prop_value_d prop_value_t;
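
/* For example (illustrative): a CONSTANT lattice value with
   value == 0x8 and mask == 0x7 stands for any X with
   X & ~0x7 == 0x8, i.e. the set { 0x8, ..., 0xF }: the low three
   bits are unknown and every other bit is known.  A mask of zero
   means the value is fully known; a mask of all ones carries no
   information about any bit.  */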
/* Array of propagated constant values.  After propagation,
   CONST_VAL[I].VALUE holds the constant value for SSA_NAME(I).  If
   the constant is held in an SSA name representing a memory store
   (i.e., a VDEF), CONST_VAL[I].MEM_REF will contain the actual
   memory reference used to store (i.e., the LHS of the assignment
   doing the store).  */
static prop_value_t *const_val;
static void canonicalize_float_value (prop_value_t *);
static bool ccp_fold_stmt (gimple_stmt_iterator *);
static tree fold_ctor_reference (tree type, tree ctor,
                                 unsigned HOST_WIDE_INT offset,
                                 unsigned HOST_WIDE_INT size);
/* Dump constant propagation value VAL to file OUTF prefixed by PREFIX.  */

static void
dump_lattice_value (FILE *outf, const char *prefix, prop_value_t val)
{
  switch (val.lattice_val)
    {
    case UNINITIALIZED:
      fprintf (outf, "%sUNINITIALIZED", prefix);
      break;
    case UNDEFINED:
      fprintf (outf, "%sUNDEFINED", prefix);
      break;
    case VARYING:
      fprintf (outf, "%sVARYING", prefix);
      break;
    case CONSTANT:
      if (TREE_CODE (val.value) != INTEGER_CST
          || double_int_zero_p (val.mask))
        {
          fprintf (outf, "%sCONSTANT ", prefix);
          print_generic_expr (outf, val.value, dump_flags);
        }
      else
        {
          double_int cval = double_int_and_not (tree_to_double_int (val.value),
                                                val.mask);
          fprintf (outf, "%sCONSTANT " HOST_WIDE_INT_PRINT_DOUBLE_HEX,
                   prefix, cval.high, cval.low);
          fprintf (outf, " (" HOST_WIDE_INT_PRINT_DOUBLE_HEX ")",
                   val.mask.high, val.mask.low);
        }
      break;
    default:
      gcc_unreachable ();
    }
}
/* Print lattice value VAL to stderr.  */

void debug_lattice_value (prop_value_t val);

DEBUG_FUNCTION void
debug_lattice_value (prop_value_t val)
{
  dump_lattice_value (stderr, "", val);
  fprintf (stderr, "\n");
}
/* Compute a default value for variable VAR and store it in the
   CONST_VAL array.  The following rules are used to get default
   values:

   1- Global and static variables that are declared constant are
      considered CONSTANT.

   2- Any other value is considered UNDEFINED.  This is useful when
      considering PHI nodes.  PHI arguments that are undefined do not
      change the constant value of the PHI node, which allows for more
      constants to be propagated.

   3- Variables defined by statements other than assignments and PHI
      nodes are considered VARYING.

   4- Initial values of variables that are not GIMPLE registers are
      considered VARYING.  */
static prop_value_t
get_default_value (tree var)
{
  tree sym = SSA_NAME_VAR (var);
  prop_value_t val = { UNINITIALIZED, NULL_TREE, { 0, 0 } };
  gimple stmt;

  stmt = SSA_NAME_DEF_STMT (var);

  if (gimple_nop_p (stmt))
    {
      /* Variables defined by an empty statement are those used
         before being initialized.  If VAR is a local variable, we
         can assume initially that it is UNDEFINED, otherwise we must
         consider it VARYING.  */
      if (is_gimple_reg (sym)
          && TREE_CODE (sym) == VAR_DECL)
        val.lattice_val = UNDEFINED;
      else
        {
          val.lattice_val = VARYING;
          val.mask = double_int_minus_one;
        }
    }
  else if (is_gimple_assign (stmt)
           /* Value-returning GIMPLE_CALL statements assign to
              a variable, and are treated similarly to GIMPLE_ASSIGN.  */
           || (is_gimple_call (stmt)
               && gimple_call_lhs (stmt) != NULL_TREE)
           || gimple_code (stmt) == GIMPLE_PHI)
    {
      tree cst;
      if (gimple_assign_single_p (stmt)
          && DECL_P (gimple_assign_rhs1 (stmt))
          && (cst = get_symbol_constant_value (gimple_assign_rhs1 (stmt))))
        {
          val.lattice_val = CONSTANT;
          val.value = cst;
        }
      else
        /* Any other variable defined by an assignment or a PHI node
           is considered UNDEFINED.  */
        val.lattice_val = UNDEFINED;
    }
  else
    {
      /* Otherwise, VAR will never take on a constant value.  */
      val.lattice_val = VARYING;
      val.mask = double_int_minus_one;
    }

  return val;
}
/* Get the constant value associated with variable VAR.  */

static inline prop_value_t *
get_value (tree var)
{
  prop_value_t *val;

  if (const_val == NULL)
    return NULL;

  val = &const_val[SSA_NAME_VERSION (var)];
  if (val->lattice_val == UNINITIALIZED)
    *val = get_default_value (var);

  canonicalize_float_value (val);

  return val;
}
/* Return the constant tree value associated with VAR.  */

static inline tree
get_constant_value (tree var)
{
  prop_value_t *val;
  if (TREE_CODE (var) != SSA_NAME)
    {
      if (is_gimple_min_invariant (var))
        return var;
      return NULL_TREE;
    }
  val = get_value (var);
  if (val
      && val->lattice_val == CONSTANT
      && (TREE_CODE (val->value) != INTEGER_CST
          || double_int_zero_p (val->mask)))
    return val->value;
  return NULL_TREE;
}
/* Sets the value associated with VAR to VARYING.  */

static inline void
set_value_varying (tree var)
{
  prop_value_t *val = &const_val[SSA_NAME_VERSION (var)];

  val->lattice_val = VARYING;
  val->value = NULL_TREE;
  val->mask = double_int_minus_one;
}
/* For float types, modify the value of VAL to make ccp work correctly
   for non-standard values (-0, NaN):

   If HONOR_SIGNED_ZEROS is false, and VAL = -0, we canonicalize it to 0.
   If HONOR_NANS is false, and VAL is NaN, we canonicalize it to UNDEFINED.
     This is to fix the following problem (see PR 29921): Suppose we have

     x = 0.0 * y

   and we set value of y to NaN.  This causes value of x to be set to NaN.
   When we later determine that y is in fact VARYING, fold uses the fact
   that HONOR_NANS is false, and we try to change the value of x to 0,
   causing an ICE.  With HONOR_NANS being false, the real appearance of
   NaN would cause undefined behavior, though, so claiming that y (and x)
   are UNDEFINED initially is correct.  */

static void
canonicalize_float_value (prop_value_t *val)
{
  enum machine_mode mode;
  tree type;
  REAL_VALUE_TYPE d;

  if (val->lattice_val != CONSTANT
      || TREE_CODE (val->value) != REAL_CST)
    return;

  d = TREE_REAL_CST (val->value);
  type = TREE_TYPE (val->value);
  mode = TYPE_MODE (type);

  if (!HONOR_SIGNED_ZEROS (mode)
      && REAL_VALUE_MINUS_ZERO (d))
    {
      val->value = build_real (type, dconst0);
      return;
    }

  if (!HONOR_NANS (mode)
      && REAL_VALUE_ISNAN (d))
    {
      val->lattice_val = UNDEFINED;
      val->value = NULL;
      return;
    }
}
/* Return whether the lattice transition is valid.  */

static bool
valid_lattice_transition (prop_value_t old_val, prop_value_t new_val)
{
  /* Lattice transitions must always be monotonically increasing in
     value.  */
  if (old_val.lattice_val < new_val.lattice_val)
    return true;

  if (old_val.lattice_val != new_val.lattice_val)
    return false;

  if (!old_val.value && !new_val.value)
    return true;

  /* Now both lattice values are CONSTANT.  */

  /* Allow transitioning from &x to &x & ~3.  */
  if (TREE_CODE (old_val.value) != INTEGER_CST
      && TREE_CODE (new_val.value) == INTEGER_CST)
    return true;

  /* Bit-lattices have to agree in the still valid bits.  */
  if (TREE_CODE (old_val.value) == INTEGER_CST
      && TREE_CODE (new_val.value) == INTEGER_CST)
    return double_int_equal_p
             (double_int_and_not (tree_to_double_int (old_val.value),
                                  new_val.mask),
              double_int_and_not (tree_to_double_int (new_val.value),
                                  new_val.mask));

  /* Otherwise constant values have to agree.  */
  return operand_equal_p (old_val.value, new_val.value, 0);
}
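
/* For example (illustrative): UNDEFINED -> CONSTANT and
   CONSTANT -> VARYING increase monotonically and are valid, while
   VARYING -> CONSTANT would move down the lattice and is rejected.
   Between two INTEGER_CST states the still-known bits must agree:
   value 0x4/mask 0x0 may transition to value 0x4/mask 0x3, but not
   to value 0x8/mask 0x3, because bit 3 was already known to be
   zero.  */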
/* Set the value for variable VAR to NEW_VAL.  Return true if the new
   value is different from VAR's previous value.  */

static bool
set_lattice_value (tree var, prop_value_t new_val)
{
  /* We can deal with old UNINITIALIZED values just fine here.  */
  prop_value_t *old_val = &const_val[SSA_NAME_VERSION (var)];

  canonicalize_float_value (&new_val);

  /* We have to be careful to not go up the bitwise lattice
     represented by the mask.
     ??? This doesn't seem to be the best place to enforce this.  */
  if (new_val.lattice_val == CONSTANT
      && old_val->lattice_val == CONSTANT
      && TREE_CODE (new_val.value) == INTEGER_CST
      && TREE_CODE (old_val->value) == INTEGER_CST)
    {
      double_int diff;
      diff = double_int_xor (tree_to_double_int (new_val.value),
                             tree_to_double_int (old_val->value));
      new_val.mask = double_int_ior (new_val.mask,
                                     double_int_ior (old_val->mask, diff));
    }

  gcc_assert (valid_lattice_transition (*old_val, new_val));

  /* If *OLD_VAL and NEW_VAL are the same, return false to inform the
     caller that this was a non-transition.  */
  if (old_val->lattice_val != new_val.lattice_val
      || (new_val.lattice_val == CONSTANT
          && TREE_CODE (new_val.value) == INTEGER_CST
          && (TREE_CODE (old_val->value) != INTEGER_CST
              || !double_int_equal_p (new_val.mask, old_val->mask))))
    {
      /* ??? We would like to delay creation of INTEGER_CSTs from
         partially constants here.  */

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          dump_lattice_value (dump_file, "Lattice value changed to ", new_val);
          fprintf (dump_file, ".  Adding SSA edges to worklist.\n");
        }

      *old_val = new_val;

      gcc_assert (new_val.lattice_val != UNINITIALIZED);
      return true;
    }

  return false;
}
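
/* For example (illustrative): if VAR was CONSTANT 0x3 with an empty
   mask and NEW_VAL is CONSTANT 0x7, the XOR of the two values is
   0x4, so NEW_VAL is weakened to value 0x7 with mask 0x4: bit 2
   becomes unknown rather than flipping, which keeps the transition
   monotonic in the bitwise lattice.  */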
static prop_value_t get_value_for_expr (tree, bool);
static prop_value_t bit_value_binop (enum tree_code, tree, tree, tree);
static void bit_value_binop_1 (enum tree_code, tree, double_int *, double_int *,
                               tree, double_int, double_int,
                               tree, double_int, double_int);
/* Return a double_int that can be used for bitwise simplifications
   from VAL.  */

static double_int
value_to_double_int (prop_value_t val)
{
  if (val.value
      && TREE_CODE (val.value) == INTEGER_CST)
    return tree_to_double_int (val.value);

  return double_int_zero;
}
/* Return the value for the address expression EXPR based on alignment
   information.  */

static prop_value_t
get_value_from_alignment (tree expr)
{
  prop_value_t val;
  HOST_WIDE_INT bitsize, bitpos;
  tree base, offset;
  enum machine_mode mode;
  int align;

  gcc_assert (TREE_CODE (expr) == ADDR_EXPR);

  base = get_inner_reference (TREE_OPERAND (expr, 0),
                              &bitsize, &bitpos, &offset,
                              &mode, &align, &align, false);
  if (TREE_CODE (base) == MEM_REF)
    val = bit_value_binop (PLUS_EXPR, TREE_TYPE (expr),
                           TREE_OPERAND (base, 0), TREE_OPERAND (base, 1));
  else if (base
           /* ??? While function decls have DECL_ALIGN their addresses
              may encode extra information in the lower bits on some
              targets (PR47239).  Simply punt for function decls for now.  */
           && TREE_CODE (base) != FUNCTION_DECL
           && ((align = get_object_alignment (base, BIGGEST_ALIGNMENT))
               > BITS_PER_UNIT))
    {
      val.lattice_val = CONSTANT;
      /* We assume pointers are zero-extended.  */
      val.mask = double_int_and_not
                   (double_int_mask (TYPE_PRECISION (TREE_TYPE (expr))),
                    uhwi_to_double_int (align / BITS_PER_UNIT - 1));
      val.value = build_int_cst (TREE_TYPE (expr), 0);
    }
  else
    {
      val.lattice_val = VARYING;
      val.mask = double_int_minus_one;
      val.value = NULL_TREE;
    }

  if (bitpos != 0)
    {
      double_int value, mask;
      bit_value_binop_1 (PLUS_EXPR, TREE_TYPE (expr), &value, &mask,
                         TREE_TYPE (expr), value_to_double_int (val), val.mask,
                         TREE_TYPE (expr),
                         shwi_to_double_int (bitpos / BITS_PER_UNIT),
                         double_int_zero);
      val.lattice_val = double_int_minus_one_p (mask) ? VARYING : CONSTANT;
      val.mask = mask;
      if (val.lattice_val == CONSTANT)
        val.value = double_int_to_tree (TREE_TYPE (expr), value);
      else
        val.value = NULL_TREE;
    }
  /* ??? We should handle i * 4 and more complex expressions from
     the offset, possibly by just expanding get_value_for_expr.  */
  if (offset != NULL_TREE)
    {
      double_int value, mask;
      prop_value_t oval = get_value_for_expr (offset, true);
      bit_value_binop_1 (PLUS_EXPR, TREE_TYPE (expr), &value, &mask,
                         TREE_TYPE (expr), value_to_double_int (val), val.mask,
                         TREE_TYPE (expr), value_to_double_int (oval),
                         oval.mask);
      val.mask = mask;
      if (double_int_minus_one_p (mask))
        {
          val.lattice_val = VARYING;
          val.value = NULL_TREE;
        }
      else
        {
          val.lattice_val = CONSTANT;
          val.value = double_int_to_tree (TREE_TYPE (expr), value);
        }
    }

  return val;
}
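
/* For example (illustrative): for &a where a is known to be 16-byte
   aligned, the result is value 0 with a mask covering every bit of
   the pointer except the low four; the low four bits are known to be
   zero, everything above is unknown.  If the address also carries a
   constant byte offset of 4, the PLUS_EXPR above turns this into
   value 4 with the same known low bits, i.e. a pointer known to be
   congruent to 4 modulo 16.  */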
/* Return the value for the tree operand EXPR.  If FOR_BITS_P is true
   return constant bits extracted from alignment information for
   invariant addresses.  */

static prop_value_t
get_value_for_expr (tree expr, bool for_bits_p)
{
  prop_value_t val;

  if (TREE_CODE (expr) == SSA_NAME)
    {
      val = *get_value (expr);
      if (for_bits_p
          && val.lattice_val == CONSTANT
          && TREE_CODE (val.value) == ADDR_EXPR)
        val = get_value_from_alignment (val.value);
    }
  else if (is_gimple_min_invariant (expr)
           && (!for_bits_p || TREE_CODE (expr) != ADDR_EXPR))
    {
      val.lattice_val = CONSTANT;
      val.value = expr;
      val.mask = double_int_zero;
      canonicalize_float_value (&val);
    }
  else if (TREE_CODE (expr) == ADDR_EXPR)
    val = get_value_from_alignment (expr);
  else
    {
      val.lattice_val = VARYING;
      val.mask = double_int_minus_one;
      val.value = NULL_TREE;
    }
  return val;
}
/* Return the likely CCP lattice value for STMT.

   If STMT has no operands, then return CONSTANT.

   Else if undefinedness of operands of STMT causes its value to be
   undefined, then return UNDEFINED.

   Else if any operands of STMT are constants, then return CONSTANT.

   Else return VARYING.  */

static ccp_lattice_t
likely_value (gimple stmt)
{
  bool has_constant_operand, has_undefined_operand, all_undefined_operands;
  tree use;
  ssa_op_iter iter;
  unsigned i;

  enum gimple_code code = gimple_code (stmt);

  /* This function appears to be called only for assignments, calls,
     conditionals, and switches, due to the logic in visit_stmt.  */
  gcc_assert (code == GIMPLE_ASSIGN
              || code == GIMPLE_CALL
              || code == GIMPLE_COND
              || code == GIMPLE_SWITCH);

  /* If the statement has volatile operands, it won't fold to a
     constant value.  */
  if (gimple_has_volatile_ops (stmt))
    return VARYING;

  /* Arrive here for more complex cases.  */
  has_constant_operand = false;
  has_undefined_operand = false;
  all_undefined_operands = true;
  FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
    {
      prop_value_t *val = get_value (use);

      if (val->lattice_val == UNDEFINED)
        has_undefined_operand = true;
      else
        all_undefined_operands = false;

      if (val->lattice_val == CONSTANT)
        has_constant_operand = true;
    }

  /* There may be constants in regular rhs operands.  For calls we
     have to ignore lhs, fndecl and static chain, otherwise only
     the lhs.  */
  for (i = (is_gimple_call (stmt) ? 2 : 0) + gimple_has_lhs (stmt);
       i < gimple_num_ops (stmt); ++i)
    {
      tree op = gimple_op (stmt, i);
      if (!op || TREE_CODE (op) == SSA_NAME)
        continue;
      if (is_gimple_min_invariant (op))
        has_constant_operand = true;
    }

  if (has_constant_operand)
    all_undefined_operands = false;

  /* If the operation combines operands like COMPLEX_EXPR make sure to
     not mark the result UNDEFINED if only one part of the result is
     undefined.  */
  if (has_undefined_operand && all_undefined_operands)
    return UNDEFINED;
  else if (code == GIMPLE_ASSIGN && has_undefined_operand)
    {
      switch (gimple_assign_rhs_code (stmt))
        {
        /* Unary operators are handled with all_undefined_operands.  */
        case PLUS_EXPR:
        case MINUS_EXPR:
        case POINTER_PLUS_EXPR:
          /* Not MIN_EXPR, MAX_EXPR.  One VARYING operand may be selected.
             Not bitwise operators, one VARYING operand may specify the
             result completely.  Not logical operators for the same reason.
             Not COMPLEX_EXPR as one VARYING operand makes the result partly
             not UNDEFINED.  Not *DIV_EXPR, comparisons and shifts because
             the undefined operand may be promoted.  */
          return UNDEFINED;

        default:
          ;
        }
    }
  /* If there was an UNDEFINED operand but the result may be not UNDEFINED
     fall back to VARYING even if there were CONSTANT operands.  */
  if (has_undefined_operand)
    return VARYING;

  /* We do not consider virtual operands here -- load from read-only
     memory may have only VARYING virtual operands, but still be
     constant.  */
  if (has_constant_operand
      || gimple_references_memory_p (stmt))
    return CONSTANT;

  return VARYING;
}
/* Returns true if STMT cannot be constant.  */

static bool
surely_varying_stmt_p (gimple stmt)
{
  /* If the statement has operands that we cannot handle, it cannot be
     constant.  */
  if (gimple_has_volatile_ops (stmt))
    return true;

  /* If it is a call and does not return a value or is not a
     builtin and not an indirect call, it is varying.  */
  if (is_gimple_call (stmt))
    {
      tree fndecl;
      if (!gimple_call_lhs (stmt)
          || ((fndecl = gimple_call_fndecl (stmt)) != NULL_TREE
              && !DECL_BUILT_IN (fndecl)))
        return true;
    }

  /* Any other store operation is not interesting.  */
  else if (gimple_vdef (stmt))
    return true;

  /* Anything other than assignments and conditional jumps are not
     interesting for CCP.  */
  if (gimple_code (stmt) != GIMPLE_ASSIGN
      && gimple_code (stmt) != GIMPLE_COND
      && gimple_code (stmt) != GIMPLE_SWITCH
      && gimple_code (stmt) != GIMPLE_CALL)
    return true;

  return false;
}
/* Initialize local data structures for CCP.  */

static void
ccp_initialize (void)
{
  basic_block bb;

  const_val = XCNEWVEC (prop_value_t, num_ssa_names);

  /* Initialize simulation flags for PHI nodes and statements.  */
  FOR_EACH_BB (bb)
    {
      gimple_stmt_iterator i;

      for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i))
        {
          gimple stmt = gsi_stmt (i);
          bool is_varying;

          /* If the statement is a control insn, then we do not
             want to avoid simulating the statement once.  Failure
             to do so means that those edges will never get added.  */
          if (stmt_ends_bb_p (stmt))
            is_varying = false;
          else
            is_varying = surely_varying_stmt_p (stmt);

          if (is_varying)
            {
              tree def;
              ssa_op_iter iter;

              /* If the statement will not produce a constant, mark
                 all its outputs VARYING.  */
              FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
                set_value_varying (def);
            }
          prop_set_simulate_again (stmt, !is_varying);
        }
    }

  /* Now process PHI nodes.  We never clear the simulate_again flag on
     phi nodes, since we do not know which edges are executable yet,
     except for phi nodes for virtual operands when we do not do store ccp.  */
  FOR_EACH_BB (bb)
    {
      gimple_stmt_iterator i;

      for (i = gsi_start_phis (bb); !gsi_end_p (i); gsi_next (&i))
        {
          gimple phi = gsi_stmt (i);

          if (!is_gimple_reg (gimple_phi_result (phi)))
            prop_set_simulate_again (phi, false);
          else
            prop_set_simulate_again (phi, true);
        }
    }
}

/* Debug count support.  Reset the values of ssa names
   VARYING when the total number of ssa names analyzed is
   beyond the debug count specified.  */

static void
do_dbg_cnt (void)
{
  unsigned i;
  for (i = 0; i < num_ssa_names; i++)
    {
      if (!dbg_cnt (ccp))
        {
          const_val[i].lattice_val = VARYING;
          const_val[i].mask = double_int_minus_one;
          const_val[i].value = NULL_TREE;
        }
    }
}
/* Do final substitution of propagated values, cleanup the flowgraph and
   free allocated storage.

   Return TRUE when something was optimized.  */

static bool
ccp_finalize (void)
{
  bool something_changed;
  unsigned i;

  do_dbg_cnt ();

  /* Derive alignment and misalignment information from partially
     constant pointers in the lattice.  */
  for (i = 1; i < num_ssa_names; ++i)
    {
      tree name = ssa_name (i);
      prop_value_t *val;
      struct ptr_info_def *pi;
      unsigned int tem, align;

      if (!name
          || !POINTER_TYPE_P (TREE_TYPE (name)))
        continue;

      val = get_value (name);
      if (val->lattice_val != CONSTANT
          || TREE_CODE (val->value) != INTEGER_CST)
        continue;

      /* Trailing constant bits specify the alignment, trailing value
         bits the misalignment.  */
      tem = val->mask.low;
      align = (tem & -tem);
      if (align == 1)
        continue;

      pi = get_ptr_info (name);
      pi->align = align;
      pi->misalign = TREE_INT_CST_LOW (val->value) & (align - 1);
    }

  /* Perform substitutions based on the known constant values.  */
  something_changed = substitute_and_fold (get_constant_value,
                                           ccp_fold_stmt, true);

  free (const_val);
  const_val = NULL;
  return something_changed;
}
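
/* For example (illustrative): a pointer with lattice value 0x4 and
   mask ~0xf has its four low bits known.  The lowest set bit of the
   mask gives align == 16, and TREE_INT_CST_LOW (val->value) & 15
   gives misalign == 4: the pointer is known to sit 4 bytes past a
   16-byte boundary.  */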
/* Compute the meet operator between *VAL1 and *VAL2.  Store the result
   in VAL1.

	any  M UNDEFINED   = any
	any  M VARYING     = VARYING
	Ci   M Cj          = Ci       if (i == j)
	Ci   M Cj          = VARYING  if (i != j)
   */

static void
ccp_lattice_meet (prop_value_t *val1, prop_value_t *val2)
{
  if (val1->lattice_val == UNDEFINED)
    {
      /* UNDEFINED M any = any   */
      *val1 = *val2;
    }
  else if (val2->lattice_val == UNDEFINED)
    {
      /* any M UNDEFINED = any
         Nothing to do.  VAL1 already contains the value we want.  */
      ;
    }
  else if (val1->lattice_val == VARYING
           || val2->lattice_val == VARYING)
    {
      /* any M VARYING = VARYING.  */
      val1->lattice_val = VARYING;
      val1->mask = double_int_minus_one;
      val1->value = NULL_TREE;
    }
  else if (val1->lattice_val == CONSTANT
           && val2->lattice_val == CONSTANT
           && TREE_CODE (val1->value) == INTEGER_CST
           && TREE_CODE (val2->value) == INTEGER_CST)
    {
      /* Ci M Cj = Ci       if (i == j)
         Ci M Cj = VARYING  if (i != j)

         For INTEGER_CSTs mask unequal bits.  If no equal bits remain,
         drop to varying.  */
      val1->mask
          = double_int_ior (double_int_ior (val1->mask,
                                            val2->mask),
                            double_int_xor (tree_to_double_int (val1->value),
                                            tree_to_double_int (val2->value)));
      if (double_int_minus_one_p (val1->mask))
        {
          val1->lattice_val = VARYING;
          val1->value = NULL_TREE;
        }
    }
  else if (val1->lattice_val == CONSTANT
           && val2->lattice_val == CONSTANT
           && simple_cst_equal (val1->value, val2->value) == 1)
    {
      /* Ci M Cj = Ci       if (i == j)
         Ci M Cj = VARYING  if (i != j)

         VAL1 already contains the value we want for equivalent values.  */
    }
  else if (val1->lattice_val == CONSTANT
           && val2->lattice_val == CONSTANT
           && (TREE_CODE (val1->value) == ADDR_EXPR
               || TREE_CODE (val2->value) == ADDR_EXPR))
    {
      /* When not equal addresses are involved try meeting for
         alignment.  */
      prop_value_t tem = *val2;
      if (TREE_CODE (val1->value) == ADDR_EXPR)
        *val1 = get_value_for_expr (val1->value, true);
      if (TREE_CODE (val2->value) == ADDR_EXPR)
        tem = get_value_for_expr (val2->value, true);
      ccp_lattice_meet (val1, &tem);
    }
  else
    {
      /* Any other combination is VARYING.  */
      val1->lattice_val = VARYING;
      val1->mask = double_int_minus_one;
      val1->value = NULL_TREE;
    }
}
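
/* For example (illustrative): meeting CONSTANT 12 (0b1100) with
   CONSTANT 4 (0b0100), both with empty masks, ORs the XOR of the two
   values into the mask, giving value 12 with mask 8: every bit except
   bit 3 agrees, so the result stays known up to bit 3 instead of
   dropping all the way to VARYING.  */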
/* Loop through the PHI_NODE's parameters for BLOCK and compare their
   lattice values to determine PHI_NODE's lattice value.  The value of a
   PHI node is determined calling ccp_lattice_meet with all the arguments
   of the PHI node that are incoming via executable edges.  */

static enum ssa_prop_result
ccp_visit_phi_node (gimple phi)
{
  unsigned i;
  prop_value_t *old_val, new_val;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nVisiting PHI node: ");
      print_gimple_stmt (dump_file, phi, 0, dump_flags);
    }

  old_val = get_value (gimple_phi_result (phi));
  switch (old_val->lattice_val)
    {
    case VARYING:
      return SSA_PROP_VARYING;

    case CONSTANT:
      break;

    case UNDEFINED:
      break;

    default:
      gcc_unreachable ();
    }

  new_val.lattice_val = UNDEFINED;
  new_val.value = NULL_TREE;

  for (i = 0; i < gimple_phi_num_args (phi); i++)
    {
      /* Compute the meet operator over all the PHI arguments flowing
         through executable edges.  */
      edge e = gimple_phi_arg_edge (phi, i);

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file,
                   "\n    Argument #%d (%d -> %d %sexecutable)\n",
                   i, e->src->index, e->dest->index,
                   (e->flags & EDGE_EXECUTABLE) ? "" : "not ");
        }

      /* If the incoming edge is executable, compute the meet operator for
         the existing value of the PHI node and the current PHI argument.  */
      if (e->flags & EDGE_EXECUTABLE)
        {
          tree arg = gimple_phi_arg (phi, i)->def;
          prop_value_t arg_val = get_value_for_expr (arg, false);

          ccp_lattice_meet (&new_val, &arg_val);

          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "\t");
              print_generic_expr (dump_file, arg, dump_flags);
              dump_lattice_value (dump_file, "\tValue: ", arg_val);
              fprintf (dump_file, "\n");
            }

          if (new_val.lattice_val == VARYING)
            break;
        }
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      dump_lattice_value (dump_file, "\n    PHI node value: ", new_val);
      fprintf (dump_file, "\n\n");
    }

  /* Make the transition to the new value.  */
  if (set_lattice_value (gimple_phi_result (phi), new_val))
    {
      if (new_val.lattice_val == VARYING)
        return SSA_PROP_VARYING;
      else
        return SSA_PROP_INTERESTING;
    }
  else
    return SSA_PROP_NOT_INTERESTING;
}
/* Return the constant value for OP or OP otherwise.  */

static tree
valueize_op (tree op)
{
  if (TREE_CODE (op) == SSA_NAME)
    {
      tree tem = get_constant_value (op);
      if (tem)
        return tem;
    }
  return op;
}
/* CCP specific front-end to the non-destructive constant folding
   routines.

   Attempt to simplify the RHS of STMT knowing that one or more
   operands are constants.

   If simplification is possible, return the simplified RHS,
   otherwise return the original RHS or NULL_TREE.  */

static tree
ccp_fold (gimple stmt)
{
  location_t loc = gimple_location (stmt);
  switch (gimple_code (stmt))
    {
    case GIMPLE_ASSIGN:
      {
        enum tree_code subcode = gimple_assign_rhs_code (stmt);

        switch (get_gimple_rhs_class (subcode))
          {
          case GIMPLE_SINGLE_RHS:
            {
              tree rhs = gimple_assign_rhs1 (stmt);
              enum tree_code_class kind = TREE_CODE_CLASS (subcode);

              if (TREE_CODE (rhs) == SSA_NAME)
                {
                  /* If the RHS is an SSA_NAME, return its known constant value,
                     if any.  */
                  return get_constant_value (rhs);
                }
              /* Handle propagating invariant addresses into address operations.
                 The folding we do here matches that in tree-ssa-forwprop.c.  */
              else if (TREE_CODE (rhs) == ADDR_EXPR)
                {
                  tree *base;
                  base = &TREE_OPERAND (rhs, 0);
                  while (handled_component_p (*base))
                    base = &TREE_OPERAND (*base, 0);
                  if (TREE_CODE (*base) == MEM_REF
                      && TREE_CODE (TREE_OPERAND (*base, 0)) == SSA_NAME)
                    {
                      tree val = get_constant_value (TREE_OPERAND (*base, 0));
                      if (val
                          && TREE_CODE (val) == ADDR_EXPR)
                        {
                          tree ret, save = *base;
                          tree new_base;
                          new_base = fold_build2 (MEM_REF, TREE_TYPE (*base),
                                                  unshare_expr (val),
                                                  TREE_OPERAND (*base, 1));
                          /* We need to return a new tree, not modify the IL
                             or share parts of it.  So play some tricks to
                             avoid manually building it.  */
                          *base = new_base;
                          ret = unshare_expr (rhs);
                          recompute_tree_invariant_for_addr_expr (ret);
                          *base = save;
                          return ret;
                        }
                    }
                }
              else if (TREE_CODE (rhs) == CONSTRUCTOR
                       && TREE_CODE (TREE_TYPE (rhs)) == VECTOR_TYPE
                       && (CONSTRUCTOR_NELTS (rhs)
                           == TYPE_VECTOR_SUBPARTS (TREE_TYPE (rhs))))
                {
                  unsigned i;
                  tree val, list;

                  list = NULL_TREE;
                  FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (rhs), i, val)
                    {
                      val = valueize_op (val);
                      if (TREE_CODE (val) == INTEGER_CST
                          || TREE_CODE (val) == REAL_CST
                          || TREE_CODE (val) == FIXED_CST)
                        list = tree_cons (NULL_TREE, val, list);
                      else
                        return NULL_TREE;
                    }

                  return build_vector (TREE_TYPE (rhs), nreverse (list));
                }

              if (kind == tcc_reference)
                {
                  if ((TREE_CODE (rhs) == VIEW_CONVERT_EXPR
                       || TREE_CODE (rhs) == REALPART_EXPR
                       || TREE_CODE (rhs) == IMAGPART_EXPR)
                      && TREE_CODE (TREE_OPERAND (rhs, 0)) == SSA_NAME)
                    {
                      tree val = get_constant_value (TREE_OPERAND (rhs, 0));
                      if (val)
                        return fold_unary_loc (EXPR_LOCATION (rhs),
                                               TREE_CODE (rhs),
                                               TREE_TYPE (rhs), val);
                    }
                  else if (TREE_CODE (rhs) == MEM_REF
                           && TREE_CODE (TREE_OPERAND (rhs, 0)) == SSA_NAME)
                    {
                      tree val = get_constant_value (TREE_OPERAND (rhs, 0));
                      if (val
                          && TREE_CODE (val) == ADDR_EXPR)
                        {
                          tree tem = fold_build2 (MEM_REF, TREE_TYPE (rhs),
                                                  unshare_expr (val),
                                                  TREE_OPERAND (rhs, 1));
                          if (tem)
                            rhs = tem;
                        }
                    }
                  return fold_const_aggregate_ref (rhs);
                }
              else if (kind == tcc_declaration)
                return get_symbol_constant_value (rhs);
              return rhs;
            }

          case GIMPLE_UNARY_RHS:
            {
              /* Handle unary operators that can appear in GIMPLE form.
                 Note that we know the single operand must be a constant,
                 so this should almost always return a simplified RHS.  */
              tree lhs = gimple_assign_lhs (stmt);
              tree op0 = valueize_op (gimple_assign_rhs1 (stmt));

              /* Conversions are useless for CCP purposes if they are
                 value-preserving.  Thus the restrictions that
                 useless_type_conversion_p places for pointer type conversions
                 do not apply here.  Substitution later will only substitute to
                 allowed places.  */
              if (CONVERT_EXPR_CODE_P (subcode)
                  && POINTER_TYPE_P (TREE_TYPE (lhs))
                  && POINTER_TYPE_P (TREE_TYPE (op0)))
                {
                  tree tem;
                  /* Try to re-construct array references on-the-fly.  */
                  if (!useless_type_conversion_p (TREE_TYPE (lhs),
                                                  TREE_TYPE (op0))
                      && ((tem = maybe_fold_offset_to_address
                                   (loc,
                                    op0, integer_zero_node, TREE_TYPE (lhs)))
                          != NULL_TREE))
                    return tem;
                  return op0;
                }

              return
                fold_unary_ignore_overflow_loc (loc, subcode,
                                                gimple_expr_type (stmt), op0);
            }

          case GIMPLE_BINARY_RHS:
            {
              /* Handle binary operators that can appear in GIMPLE form.  */
              tree op0 = valueize_op (gimple_assign_rhs1 (stmt));
              tree op1 = valueize_op (gimple_assign_rhs2 (stmt));

              /* Translate &x + CST into an invariant form suitable for
                 further propagation.  */
              if (gimple_assign_rhs_code (stmt) == POINTER_PLUS_EXPR
                  && TREE_CODE (op0) == ADDR_EXPR
                  && TREE_CODE (op1) == INTEGER_CST)
                {
                  tree off = fold_convert (ptr_type_node, op1);
                  return build_fold_addr_expr
                           (fold_build2 (MEM_REF,
                                         TREE_TYPE (TREE_TYPE (op0)),
                                         unshare_expr (op0), off));
                }

              return fold_binary_loc (loc, subcode,
                                      gimple_expr_type (stmt), op0, op1);
            }

          case GIMPLE_TERNARY_RHS:
            {
              /* Handle ternary operators that can appear in GIMPLE form.  */
              tree op0 = valueize_op (gimple_assign_rhs1 (stmt));
              tree op1 = valueize_op (gimple_assign_rhs2 (stmt));
              tree op2 = valueize_op (gimple_assign_rhs3 (stmt));

              return fold_ternary_loc (loc, subcode,
                                       gimple_expr_type (stmt), op0, op1, op2);
            }

          default:
            gcc_unreachable ();
          }
      }
      break;

    case GIMPLE_CALL:
      {
        tree fn = valueize_op (gimple_call_fn (stmt));
        if (TREE_CODE (fn) == ADDR_EXPR
            && TREE_CODE (TREE_OPERAND (fn, 0)) == FUNCTION_DECL
            && DECL_BUILT_IN (TREE_OPERAND (fn, 0)))
          {
            tree *args = XALLOCAVEC (tree, gimple_call_num_args (stmt));
            tree call, retval;
            unsigned i;
            for (i = 0; i < gimple_call_num_args (stmt); ++i)
              args[i] = valueize_op (gimple_call_arg (stmt, i));
            call = build_call_array_loc (loc,
                                         gimple_call_return_type (stmt),
                                         fn, gimple_call_num_args (stmt), args);
            retval = fold_call_expr (EXPR_LOCATION (call), call, false);
            if (retval)
              /* fold_call_expr wraps the result inside a NOP_EXPR.  */
              STRIP_NOPS (retval);
            return retval;
          }
        return NULL_TREE;
      }

    case GIMPLE_COND:
      {
        /* Handle comparison operators that can appear in GIMPLE form.  */
        tree op0 = valueize_op (gimple_cond_lhs (stmt));
        tree op1 = valueize_op (gimple_cond_rhs (stmt));
        enum tree_code code = gimple_cond_code (stmt);
        return fold_binary_loc (loc, code, boolean_type_node, op0, op1);
      }

    case GIMPLE_SWITCH:
      {
        /* Return the constant switch index.  */
        return valueize_op (gimple_switch_index (stmt));
      }

    default:
      gcc_unreachable ();
    }
}
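
/* For example (illustrative): if ptr_1 is known to be &a, then for
   ptr_2 = ptr_1 + 4 the POINTER_PLUS_EXPR case above rewrites the RHS
   into the address of a MEM_REF based on &a with constant offset 4,
   an invariant that get_value_for_expr and later substitution can
   propagate further.  */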
/* See if we can find constructor defining value of BASE.
   When we know the constructor with constant offset (such as
   base is array[40] and we do know constructor of array), then
   BIT_OFFSET is adjusted accordingly.

   As a special case, return error_mark_node when constructor
   is not explicitly available, but it is known to be zero
   such as 'static const int a;'.  */

static tree
get_base_constructor (tree base, HOST_WIDE_INT *bit_offset)
{
  HOST_WIDE_INT bit_offset2, size, max_size;
  if (TREE_CODE (base) == MEM_REF)
    {
      if (!integer_zerop (TREE_OPERAND (base, 1)))
        {
          if (!host_integerp (TREE_OPERAND (base, 1), 0))
            return NULL_TREE;
          *bit_offset += (mem_ref_offset (base).low
                          * BITS_PER_UNIT);
        }

      base = get_constant_value (TREE_OPERAND (base, 0));
      if (!base || TREE_CODE (base) != ADDR_EXPR)
        return NULL_TREE;
      base = TREE_OPERAND (base, 0);
    }

  /* Get a CONSTRUCTOR.  If BASE is a VAR_DECL, get its
     DECL_INITIAL.  If BASE is a nested reference into another
     ARRAY_REF or COMPONENT_REF, make a recursive call to resolve
     the inner reference.  */
  switch (TREE_CODE (base))
    {
    case VAR_DECL:
      if (!const_value_known_p (base))
        return NULL_TREE;

      /* Fallthru.  */
    case CONST_DECL:
      if (!DECL_INITIAL (base)
          && (TREE_STATIC (base) || DECL_EXTERNAL (base)))
        return error_mark_node;
      return DECL_INITIAL (base);

    case ARRAY_REF:
    case COMPONENT_REF:
      base = get_ref_base_and_extent (base, &bit_offset2, &size, &max_size);
      if (max_size == -1 || size != max_size)
        return NULL_TREE;
      *bit_offset += bit_offset2;
      return get_base_constructor (base, bit_offset);

    case STRING_CST:
    case CONSTRUCTOR:
      return base;

    default:
      return NULL_TREE;
    }
}
/* CTOR is STRING_CST.  Fold reference of type TYPE and size SIZE
   to the memory at bit OFFSET.

   We only do the simple job of folding byte accesses.  */

static tree
fold_string_cst_ctor_reference (tree type, tree ctor,
                                unsigned HOST_WIDE_INT offset,
                                unsigned HOST_WIDE_INT size)
{
  if (INTEGRAL_TYPE_P (type)
      && (TYPE_MODE (type)
          == TYPE_MODE (TREE_TYPE (TREE_TYPE (ctor))))
      && (GET_MODE_CLASS (TYPE_MODE (TREE_TYPE (TREE_TYPE (ctor))))
          == MODE_INT)
      && GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (TREE_TYPE (ctor)))) == 1
      && size == BITS_PER_UNIT
      && !(offset % BITS_PER_UNIT))
    {
      offset /= BITS_PER_UNIT;
      if (offset < (unsigned HOST_WIDE_INT) TREE_STRING_LENGTH (ctor))
        return build_int_cst_type (type, (TREE_STRING_POINTER (ctor)
                                          [offset]));
      /* Folding
           const char a[20]="hello";
           return a[10];

         might lead to offset greater than string length.  In this case we
         know value is either initialized to 0 or out of bounds.  Return 0
         in both cases.  */
      return build_zero_cst (type);
    }
  return NULL_TREE;
}
/* CTOR is CONSTRUCTOR of an array type.  Fold reference of type TYPE and size
   SIZE to the memory at bit OFFSET.  */

static tree
fold_array_ctor_reference (tree type, tree ctor,
                           unsigned HOST_WIDE_INT offset,
                           unsigned HOST_WIDE_INT size)
{
  unsigned HOST_WIDE_INT cnt;
  tree cfield, cval;
  double_int low_bound, elt_size;
  double_int index, max_index;
  double_int access_index;
  tree domain_type = TYPE_DOMAIN (TREE_TYPE (ctor));
  HOST_WIDE_INT inner_offset;

  /* Compute low bound and elt size.  */
  if (domain_type && TYPE_MIN_VALUE (domain_type))
    {
      /* Static constructors for variably sized objects make no sense.  */
      gcc_assert (TREE_CODE (TYPE_MIN_VALUE (domain_type)) == INTEGER_CST);
      low_bound = tree_to_double_int (TYPE_MIN_VALUE (domain_type));
    }
  else
    low_bound = double_int_zero;
  /* Static constructors for variably sized objects make no sense.  */
  gcc_assert (TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (ctor))))
              == INTEGER_CST);
  elt_size =
    tree_to_double_int (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (ctor))));

  /* We can handle only constantly sized accesses that are known to not
     be larger than size of array element.  */
  if (!TYPE_SIZE_UNIT (type)
      || TREE_CODE (TYPE_SIZE_UNIT (type)) != INTEGER_CST
      || double_int_cmp (elt_size,
                         tree_to_double_int (TYPE_SIZE_UNIT (type)), 0) < 0)
    return NULL_TREE;

  /* Compute the array index we look for.  */
  access_index = double_int_udiv (uhwi_to_double_int (offset / BITS_PER_UNIT),
                                  elt_size, TRUNC_DIV_EXPR);
  access_index = double_int_add (access_index, low_bound);

  /* And offset within the access.  */
  inner_offset = offset % (double_int_to_uhwi (elt_size) * BITS_PER_UNIT);

  /* See if the array field is large enough to span whole access.  We do not
     care to fold accesses spanning multiple array indexes.  */
  if (inner_offset + size > double_int_to_uhwi (elt_size) * BITS_PER_UNIT)
    return NULL_TREE;

  index = double_int_sub (low_bound, double_int_one);
  FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (ctor), cnt, cfield, cval)
    {
      /* Array constructor might explicitly set index, or specify range,
         or leave index NULL meaning that it is next index after previous
         one.  */
      if (cfield)
        {
          if (TREE_CODE (cfield) == INTEGER_CST)
            max_index = index = tree_to_double_int (cfield);
          else
            {
              gcc_assert (TREE_CODE (cfield) == RANGE_EXPR);
              index = tree_to_double_int (TREE_OPERAND (cfield, 0));
              max_index = tree_to_double_int (TREE_OPERAND (cfield, 1));
            }
        }
      else
        max_index = index = double_int_add (index, double_int_one);

      /* Do we have match?  */
      if (double_int_cmp (access_index, index, 1) >= 0
          && double_int_cmp (access_index, max_index, 1) <= 0)
        return fold_ctor_reference (type, cval, inner_offset, size);
    }
  /* When memory is not explicitly mentioned in constructor,
     it is 0 (or out of range).  */
  return build_zero_cst (type);
}
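
/* For example (illustrative): for

     static const int a[4] = { 1, 2, 3, 4 };

   a 32-bit read at bit offset 64 has elt_size == 4 bytes, so
   access_index == (64 / 8) / 4 == 2 (plus the domain's low bound,
   usually 0) and inner_offset == 0, selecting the constructor
   element 3.  */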
/* CTOR is CONSTRUCTOR of an aggregate or vector.
   Fold reference of type TYPE and size SIZE to the memory at bit OFFSET.  */

static tree
fold_nonarray_ctor_reference (tree type, tree ctor,
                              unsigned HOST_WIDE_INT offset,
                              unsigned HOST_WIDE_INT size)
{
  unsigned HOST_WIDE_INT cnt;
  tree cfield, cval;

  FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (ctor), cnt, cfield,
                            cval)
    {
      tree byte_offset = DECL_FIELD_OFFSET (cfield);
      tree field_offset = DECL_FIELD_BIT_OFFSET (cfield);
      tree field_size = DECL_SIZE (cfield);
      double_int bitoffset;
      double_int byte_offset_cst = tree_to_double_int (byte_offset);
      double_int bits_per_unit_cst = uhwi_to_double_int (BITS_PER_UNIT);
      double_int bitoffset_end, access_end;

      /* Variable sized objects in static constructors make no sense,
         but field_size can be NULL for flexible array members.  */
      gcc_assert (TREE_CODE (field_offset) == INTEGER_CST
                  && TREE_CODE (byte_offset) == INTEGER_CST
                  && (field_size != NULL_TREE
                      ? TREE_CODE (field_size) == INTEGER_CST
                      : TREE_CODE (TREE_TYPE (cfield)) == ARRAY_TYPE));

      /* Compute bit offset of the field.  */
      bitoffset = double_int_add (tree_to_double_int (field_offset),
                                  double_int_mul (byte_offset_cst,
                                                  bits_per_unit_cst));
      /* Compute bit offset where the field ends.  */
      if (field_size != NULL_TREE)
        bitoffset_end = double_int_add (bitoffset,
                                        tree_to_double_int (field_size));
      else
        bitoffset_end = double_int_zero;

      access_end = double_int_add (uhwi_to_double_int (offset),
                                   uhwi_to_double_int (size));

      /* Is there any overlap between [OFFSET, OFFSET+SIZE) and
         [BITOFFSET, BITOFFSET_END)?  */
      if (double_int_cmp (access_end, bitoffset, 0) > 0
          && (field_size == NULL_TREE
              || double_int_cmp (uhwi_to_double_int (offset),
                                 bitoffset_end, 0) < 0))
        {
          double_int inner_offset = double_int_sub (uhwi_to_double_int (offset),
                                                    bitoffset);
          /* We do have overlap.  Now see if field is large enough to
             cover the access.  Give up for accesses spanning multiple
             fields.  */
          if (double_int_cmp (access_end, bitoffset_end, 0) > 0)
            return NULL_TREE;
          if (double_int_cmp (uhwi_to_double_int (offset), bitoffset, 0) < 0)
            return NULL_TREE;
          return fold_ctor_reference (type, cval,
                                      double_int_to_uhwi (inner_offset), size);
        }
    }
  /* When memory is not explicitly mentioned in constructor, it is 0.  */
  return build_zero_cst (type);
}
/* CTOR is value initializing memory, fold reference of type TYPE and size SIZE
   to the memory at bit OFFSET.  */

static tree
fold_ctor_reference (tree type, tree ctor, unsigned HOST_WIDE_INT offset,
                     unsigned HOST_WIDE_INT size)
{
  tree ret;

  /* We found the field with exact match.  */
  if (useless_type_conversion_p (type, TREE_TYPE (ctor))
      && !offset)
    return canonicalize_constructor_val (ctor);

  /* We are at the end of walk, see if we can view convert the
     result.  */
  if (!AGGREGATE_TYPE_P (TREE_TYPE (ctor)) && !offset
      /* VIEW_CONVERT_EXPR is defined only for matching sizes.  */
      && operand_equal_p (TYPE_SIZE (type),
                          TYPE_SIZE (TREE_TYPE (ctor)), 0))
    {
      ret = canonicalize_constructor_val (ctor);
      ret = fold_unary (VIEW_CONVERT_EXPR, type, ret);
      if (ret)
        STRIP_NOPS (ret);
      return ret;
    }
  if (TREE_CODE (ctor) == STRING_CST)
    return fold_string_cst_ctor_reference (type, ctor, offset, size);
  if (TREE_CODE (ctor) == CONSTRUCTOR)
    {
      if (TREE_CODE (TREE_TYPE (ctor)) == ARRAY_TYPE)
        return fold_array_ctor_reference (type, ctor, offset, size);
      else
        return fold_nonarray_ctor_reference (type, ctor, offset, size);
    }

  return NULL_TREE;
}
/* Return the tree representing the element referenced by T if T is an
   ARRAY_REF or COMPONENT_REF into constant aggregates.  Return
   NULL_TREE otherwise.  */

tree
fold_const_aggregate_ref (tree t)
{
  tree ctor, idx, base;
  HOST_WIDE_INT offset, size, max_size;
  tree tem;

  if (TREE_THIS_VOLATILE (t))
    return NULL_TREE;

  if (TREE_CODE_CLASS (TREE_CODE (t)) == tcc_declaration)
    return get_symbol_constant_value (t);

  tem = fold_read_from_constant_string (t);
  if (tem)
    return tem;

  switch (TREE_CODE (t))
    {
    case ARRAY_REF:
    case ARRAY_RANGE_REF:
      /* Constant indexes are handled well by get_base_constructor.
         Only special case variable offsets.
         FIXME: This code can't handle nested references with variable indexes
         (they will be handled only by iteration of ccp).  Perhaps we can bring
         get_ref_base_and_extent here and make it use get_constant_value.  */
      if (TREE_CODE (TREE_OPERAND (t, 1)) == SSA_NAME
          && (idx = get_constant_value (TREE_OPERAND (t, 1)))
          && host_integerp (idx, 0))
        {
          tree low_bound, unit_size;

          /* If the resulting bit-offset is constant, track it.  */
          if ((low_bound = array_ref_low_bound (t),
               host_integerp (low_bound, 0))
              && (unit_size = array_ref_element_size (t),
                  host_integerp (unit_size, 1)))
            {
              offset = TREE_INT_CST_LOW (idx);
              offset -= TREE_INT_CST_LOW (low_bound);
              offset *= TREE_INT_CST_LOW (unit_size);
              offset *= BITS_PER_UNIT;

              base = TREE_OPERAND (t, 0);
              ctor = get_base_constructor (base, &offset);
              /* Empty constructor.  Always fold to 0.  */
              if (ctor == error_mark_node)
                return build_zero_cst (TREE_TYPE (t));
              /* Out of bound array access.  Value is undefined, but don't fold.  */
              if (offset < 0)
                return NULL_TREE;
              /* We can not determine ctor.  */
              if (!ctor)
                return NULL_TREE;
              return fold_ctor_reference (TREE_TYPE (t), ctor, offset,
                                          TREE_INT_CST_LOW (unit_size)
                                          * BITS_PER_UNIT);
            }
        }
      /* Fallthru.  */

    case COMPONENT_REF:
    case BIT_FIELD_REF:
    case TARGET_MEM_REF:
    case MEM_REF:
      base = get_ref_base_and_extent (t, &offset, &size, &max_size);
      ctor = get_base_constructor (base, &offset);

      /* Empty constructor.  Always fold to 0.  */
      if (ctor == error_mark_node)
        return build_zero_cst (TREE_TYPE (t));
      /* We do not know precise address.  */
      if (max_size == -1 || max_size != size)
        return NULL_TREE;
      /* We can not determine ctor.  */
      if (!ctor)
        return NULL_TREE;
      /* Out of bound array access.  Value is undefined, but don't fold.  */
      if (offset < 0)
        return NULL_TREE;

      return fold_ctor_reference (TREE_TYPE (t), ctor, offset, size);

    case REALPART_EXPR:
    case IMAGPART_EXPR:
      {
        tree c = fold_const_aggregate_ref (TREE_OPERAND (t, 0));
        if (c && TREE_CODE (c) == COMPLEX_CST)
          return fold_build1_loc (EXPR_LOCATION (t),
                                  TREE_CODE (t), TREE_TYPE (t), c);
        break;
      }

    default:
      break;
    }

  return NULL_TREE;
}
/* Apply the operation CODE in type TYPE to the value, mask pair
   RVAL and RMASK representing a value of type RTYPE and set
   the value, mask pair *VAL and *MASK to the result.  */

static void
bit_value_unop_1 (enum tree_code code, tree type,
                  double_int *val, double_int *mask,
                  tree rtype, double_int rval, double_int rmask)
{
  switch (code)
    {
    case BIT_NOT_EXPR:
      *mask = rmask;
      *val = double_int_not (rval);
      break;

    case NEGATE_EXPR:
      {
        double_int temv, temm;
        /* Return ~rval + 1.  */
        bit_value_unop_1 (BIT_NOT_EXPR, type, &temv, &temm, type, rval, rmask);
        bit_value_binop_1 (PLUS_EXPR, type, val, mask,
                           type, temv, temm,
                           type, double_int_one, double_int_zero);
        break;
      }

    CASE_CONVERT:
      {
        bool uns;

        /* First extend mask and value according to the original type.  */
        uns = (TREE_CODE (rtype) == INTEGER_TYPE && TYPE_IS_SIZETYPE (rtype)
               ? 0 : TYPE_UNSIGNED (rtype));
        *mask = double_int_ext (rmask, TYPE_PRECISION (rtype), uns);
        *val = double_int_ext (rval, TYPE_PRECISION (rtype), uns);

        /* Then extend mask and value according to the target type.  */
        uns = (TREE_CODE (type) == INTEGER_TYPE && TYPE_IS_SIZETYPE (type)
               ? 0 : TYPE_UNSIGNED (type));
        *mask = double_int_ext (*mask, TYPE_PRECISION (type), uns);
        *val = double_int_ext (*val, TYPE_PRECISION (type), uns);
        break;
      }

    default:
      *mask = double_int_minus_one;
      break;
    }
}
/* Apply the operation CODE in type TYPE to the value, mask pairs
   R1VAL, R1MASK and R2VAL, R2MASK representing values of type R1TYPE
   and R2TYPE and set the value, mask pair *VAL and *MASK to the result.  */

static void
bit_value_binop_1 (enum tree_code code, tree type,
                   double_int *val, double_int *mask,
                   tree r1type, double_int r1val, double_int r1mask,
                   tree r2type, double_int r2val, double_int r2mask)
{
  bool uns = (TREE_CODE (type) == INTEGER_TYPE
              && TYPE_IS_SIZETYPE (type) ? 0 : TYPE_UNSIGNED (type));
  /* Assume we'll get a constant result.  Use an initial varying value,
     we fall back to varying in the end if necessary.  */
  *mask = double_int_minus_one;
  switch (code)
    {
    case BIT_AND_EXPR:
      /* The mask is constant where there is a known not
         set bit, (m1 | m2) & ((v1 | m1) & (v2 | m2))  */
      *mask = double_int_and (double_int_ior (r1mask, r2mask),
                              double_int_and (double_int_ior (r1val, r1mask),
                                              double_int_ior (r2val, r2mask)));
      *val = double_int_and (r1val, r2val);
      break;

    case BIT_IOR_EXPR:
      /* The mask is constant where there is a known
         set bit, (m1 | m2) & ~((v1 & ~m1) | (v2 & ~m2)).  */
      *mask = double_int_and_not
                (double_int_ior (r1mask, r2mask),
                 double_int_ior (double_int_and_not (r1val, r1mask),
                                 double_int_and_not (r2val, r2mask)));
      *val = double_int_ior (r1val, r2val);
      break;

    case BIT_XOR_EXPR:
      /* m1 | m2  */
      *mask = double_int_ior (r1mask, r2mask);
      *val = double_int_xor (r1val, r2val);
      break;

    case LROTATE_EXPR:
    case RROTATE_EXPR:
      if (double_int_zero_p (r2mask))
        {
          HOST_WIDE_INT shift = r2val.low;
          if (code == RROTATE_EXPR)
            shift = -shift;
          *mask = double_int_lrotate (r1mask, shift, TYPE_PRECISION (type));
          *val = double_int_lrotate (r1val, shift, TYPE_PRECISION (type));
        }
      break;

    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
      /* ??? We can handle partially known shift counts if we know
         its sign.  That way we can tell that (x << (y | 8)) & 255
         is zero.  */
      if (double_int_zero_p (r2mask))
        {
          HOST_WIDE_INT shift = r2val.low;
          if (code == RSHIFT_EXPR)
            shift = -shift;
          /* We need to know if we are doing a left or a right shift
             to properly shift in zeros for left shift and unsigned
             right shifts and the sign bit for signed right shifts.
             For signed right shifts we shift in varying in case
             the sign bit was varying.  */
          if (shift > 0)
            {
              *mask = double_int_lshift (r1mask, shift,
                                         TYPE_PRECISION (type), false);
              *val = double_int_lshift (r1val, shift,
                                        TYPE_PRECISION (type), false);
            }
          else if (shift < 0)
            {
              /* ??? We can have sizetype related inconsistencies in
                 the IL.  */
              if ((TREE_CODE (r1type) == INTEGER_TYPE
                   && (TYPE_IS_SIZETYPE (r1type)
                       ? 0 : TYPE_UNSIGNED (r1type))) != uns)
                break;

              shift = -shift;
              *mask = double_int_rshift (r1mask, shift,
                                         TYPE_PRECISION (type), !uns);
              *val = double_int_rshift (r1val, shift,
                                        TYPE_PRECISION (type), !uns);
            }
          else
            {
              *mask = r1mask;
              *val = r1val;
            }
        }
      break;

    case PLUS_EXPR:
    case POINTER_PLUS_EXPR:
      {
        double_int lo, hi;
        /* Do the addition with unknown bits set to zero, to give carry-ins of
           zero wherever possible.  */
        lo = double_int_add (double_int_and_not (r1val, r1mask),
                             double_int_and_not (r2val, r2mask));
        lo = double_int_ext (lo, TYPE_PRECISION (type), uns);
        /* Do the addition with unknown bits set to one, to give carry-ins of
           one wherever possible.  */
        hi = double_int_add (double_int_ior (r1val, r1mask),
                             double_int_ior (r2val, r2mask));
        hi = double_int_ext (hi, TYPE_PRECISION (type), uns);
        /* Each bit in the result is known if (a) the corresponding bits in
           both inputs are known, and (b) the carry-in to that bit position
           is known.  We can check condition (b) by seeing if we got the same
           result with minimised carries as with maximised carries.  */
        *mask = double_int_ior (double_int_ior (r1mask, r2mask),
                                double_int_xor (lo, hi));
        *mask = double_int_ext (*mask, TYPE_PRECISION (type), uns);
        /* It shouldn't matter whether we choose lo or hi here.  */
        *val = lo;
        break;
      }

    case MINUS_EXPR:
      {
        double_int temv, temm;
        bit_value_unop_1 (NEGATE_EXPR, r2type, &temv, &temm,
                          r2type, r2val, r2mask);
        bit_value_binop_1 (PLUS_EXPR, type, val, mask,
                           r1type, r1val, r1mask,
                           r2type, temv, temm);
        break;
      }

    case MULT_EXPR:
      {
        /* Just track trailing zeros in both operands and transfer
           them to the other.  */
        int r1tz = double_int_ctz (double_int_ior (r1val, r1mask));
        int r2tz = double_int_ctz (double_int_ior (r2val, r2mask));
        if (r1tz + r2tz >= HOST_BITS_PER_DOUBLE_INT)
          {
            *mask = double_int_zero;
            *val = double_int_zero;
          }
        else if (r1tz + r2tz > 0)
          {
            *mask = double_int_not (double_int_mask (r1tz + r2tz));
            *mask = double_int_ext (*mask, TYPE_PRECISION (type), uns);
            *val = double_int_zero;
          }
        break;
      }

    case EQ_EXPR:
    case NE_EXPR:
      {
        double_int m = double_int_ior (r1mask, r2mask);
        if (!double_int_equal_p (double_int_and_not (r1val, m),
                                 double_int_and_not (r2val, m)))
          {
            *mask = double_int_zero;
            *val = ((code == EQ_EXPR) ? double_int_zero : double_int_one);
          }
        else
          {
            /* We know the result of a comparison is always one or zero.  */
            *mask = double_int_one;
            *val = double_int_zero;
          }
        break;
      }

    case GE_EXPR:
    case GT_EXPR:
      {
        double_int tem = r1val;
        r1val = r2val;
        r2val = tem;
        tem = r1mask;
        r1mask = r2mask;
        r2mask = tem;
        code = swap_tree_comparison (code);
      }
      /* Fallthru.  */
    case LT_EXPR:
    case LE_EXPR:
      {
        int minmax, maxmin;
        /* If the most significant bits are not known we know nothing.  */
        if (double_int_negative_p (r1mask) || double_int_negative_p (r2mask))
          break;

        /* For comparisons the signedness is in the comparison operands.  */
        uns = (TREE_CODE (r1type) == INTEGER_TYPE
               && TYPE_IS_SIZETYPE (r1type) ? 0 : TYPE_UNSIGNED (r1type));
        /* ??? We can have sizetype related inconsistencies in the IL.  */
        if ((TREE_CODE (r2type) == INTEGER_TYPE
             && TYPE_IS_SIZETYPE (r2type) ? 0 : TYPE_UNSIGNED (r2type)) != uns)
          break;

        /* If we know the most significant bits we know the values
           value ranges by means of treating varying bits as zero
           or one.  Do a cross comparison of the max/min pairs.  */
        maxmin = double_int_cmp (double_int_ior (r1val, r1mask),
                                 double_int_and_not (r2val, r2mask), uns);
        minmax = double_int_cmp (double_int_and_not (r1val, r1mask),
                                 double_int_ior (r2val, r2mask), uns);
        if (maxmin < 0)  /* r1 is less than r2.  */
          {
            *mask = double_int_zero;
            *val = double_int_one;
          }
        else if (minmax > 0)  /* r1 is not less or equal to r2.  */
          {
            *mask = double_int_zero;
            *val = double_int_zero;
          }
        else if (maxmin == minmax)  /* r1 and r2 are equal.  */
          {
            /* This probably should never happen as we'd have
               folded the thing during fully constant value folding.  */
            *mask = double_int_zero;
            *val = (code == LE_EXPR ? double_int_one : double_int_zero);
          }
        else
          {
            /* We know the result of a comparison is always one or zero.  */
            *mask = double_int_one;
            *val = double_int_zero;
          }
        break;
      }

    default:;
    }
}
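
/* For example (illustrative): in PLUS_EXPR above, adding a value
   known to be 0 or 1 (value 0, mask 1) to the constant 2 gives
   lo == 0 + 2 == 2 and hi == 1 + 2 == 3, so the result mask is
   1 | (2 ^ 3) == 1 and the value is 2: the sum is known to be 2 or 3
   with only bit 0 unknown.  */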
/* Return the propagation value when applying the operation CODE to
   the value RHS yielding type TYPE.  */

static prop_value_t
bit_value_unop (enum tree_code code, tree type, tree rhs)
{
  prop_value_t rval = get_value_for_expr (rhs, true);
  double_int value, mask;
  prop_value_t val;
  gcc_assert ((rval.lattice_val == CONSTANT
               && TREE_CODE (rval.value) == INTEGER_CST)
              || double_int_minus_one_p (rval.mask));
  bit_value_unop_1 (code, type, &value, &mask,
                    TREE_TYPE (rhs), value_to_double_int (rval), rval.mask);
  if (!double_int_minus_one_p (mask))
    {
      val.lattice_val = CONSTANT;
      val.mask = mask;
      /* ??? Delay building trees here.  */
      val.value = double_int_to_tree (type, value);
    }
  else
    {
      val.lattice_val = VARYING;
      val.value = NULL_TREE;
      val.mask = double_int_minus_one;
    }
  return val;
}
/* Return the propagation value when applying the operation CODE to
   the values RHS1 and RHS2 yielding type TYPE.  */

static prop_value_t
bit_value_binop (enum tree_code code, tree type, tree rhs1, tree rhs2)
{
  prop_value_t r1val = get_value_for_expr (rhs1, true);
  prop_value_t r2val = get_value_for_expr (rhs2, true);
  double_int value, mask;
  prop_value_t val;
  gcc_assert ((r1val.lattice_val == CONSTANT
               && TREE_CODE (r1val.value) == INTEGER_CST)
              || double_int_minus_one_p (r1val.mask));
  gcc_assert ((r2val.lattice_val == CONSTANT
               && TREE_CODE (r2val.value) == INTEGER_CST)
              || double_int_minus_one_p (r2val.mask));
  bit_value_binop_1 (code, type, &value, &mask,
                     TREE_TYPE (rhs1), value_to_double_int (r1val), r1val.mask,
                     TREE_TYPE (rhs2), value_to_double_int (r2val), r2val.mask);
  if (!double_int_minus_one_p (mask))
    {
      val.lattice_val = CONSTANT;
      val.mask = mask;
      /* ??? Delay building trees here.  */
      val.value = double_int_to_tree (type, value);
    }
  else
    {
      val.lattice_val = VARYING;
      val.value = NULL_TREE;
      val.mask = double_int_minus_one;
    }
  return val;
}
2073 /* Evaluate statement STMT.
2074 Valid only for assignments, calls, conditionals, and switches. */
2077 evaluate_stmt (gimple stmt
)
2080 tree simplified
= NULL_TREE
;
2081 ccp_lattice_t likelyvalue
= likely_value (stmt
);
2082 bool is_constant
= false;
2084 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
2086 fprintf (dump_file
, "which is likely ");
2087 switch (likelyvalue
)
2090 fprintf (dump_file
, "CONSTANT");
2093 fprintf (dump_file
, "UNDEFINED");
2096 fprintf (dump_file
, "VARYING");
2100 fprintf (dump_file
, "\n");
2103 /* If the statement is likely to have a CONSTANT result, then try
2104 to fold the statement to determine the constant value. */
2105 /* FIXME. This is the only place that we call ccp_fold.
2106 Since likely_value never returns CONSTANT for calls, we will
2107 not attempt to fold them, including builtins that may profit. */
2108 if (likelyvalue
== CONSTANT
)
2110 fold_defer_overflow_warnings ();
2111 simplified
= ccp_fold (stmt
);
2112 is_constant
= simplified
&& is_gimple_min_invariant (simplified
);
2113 fold_undefer_overflow_warnings (is_constant
, stmt
, 0);
2116 /* The statement produced a constant value. */
2117 val
.lattice_val
= CONSTANT
;
2118 val
.value
= simplified
;
2119 val
.mask
= double_int_zero
;
  /* If the statement is likely to have a VARYING result, then do not
     bother folding the statement.  */
  else if (likelyvalue == VARYING)
    {
      enum gimple_code code = gimple_code (stmt);
      if (code == GIMPLE_ASSIGN)
	{
	  enum tree_code subcode = gimple_assign_rhs_code (stmt);

	  /* Other cases cannot satisfy is_gimple_min_invariant
	     without folding.  */
	  if (get_gimple_rhs_class (subcode) == GIMPLE_SINGLE_RHS)
	    simplified = gimple_assign_rhs1 (stmt);
	}
      else if (code == GIMPLE_SWITCH)
	simplified = gimple_switch_index (stmt);
      else
	/* These cannot satisfy is_gimple_min_invariant without folding.  */
	gcc_assert (code == GIMPLE_CALL || code == GIMPLE_COND);
      is_constant = simplified && is_gimple_min_invariant (simplified);
      if (is_constant)
	{
	  /* The statement produced a constant value.  */
	  val.lattice_val = CONSTANT;
	  val.value = simplified;
	  val.mask = double_int_zero;
	}
    }
  /* Resort to simplification for bitwise tracking.  */
  if (flag_tree_bit_ccp
      && likelyvalue == CONSTANT
      && !is_constant)
    {
      enum gimple_code code = gimple_code (stmt);
      tree fndecl;
      val.lattice_val = VARYING;
      val.value = NULL_TREE;
      val.mask = double_int_minus_one;
      if (code == GIMPLE_ASSIGN)
	{
	  enum tree_code subcode = gimple_assign_rhs_code (stmt);
	  tree rhs1 = gimple_assign_rhs1 (stmt);
	  switch (get_gimple_rhs_class (subcode))
	    {
	    case GIMPLE_SINGLE_RHS:
	      if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
		  || POINTER_TYPE_P (TREE_TYPE (rhs1)))
		val = get_value_for_expr (rhs1, true);
	      break;

	    case GIMPLE_UNARY_RHS:
	      if ((INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
		   || POINTER_TYPE_P (TREE_TYPE (rhs1)))
		  && (INTEGRAL_TYPE_P (gimple_expr_type (stmt))
		      || POINTER_TYPE_P (gimple_expr_type (stmt))))
		val = bit_value_unop (subcode, gimple_expr_type (stmt), rhs1);
	      break;

	    case GIMPLE_BINARY_RHS:
	      if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
		  || POINTER_TYPE_P (TREE_TYPE (rhs1)))
		{
		  tree lhs = gimple_assign_lhs (stmt);
		  tree rhs2 = gimple_assign_rhs2 (stmt);
		  val = bit_value_binop (subcode,
					 TREE_TYPE (lhs), rhs1, rhs2);
		}
	      break;

	    default:;
	    }
	}
      else if (code == GIMPLE_COND)
	{
	  enum tree_code code = gimple_cond_code (stmt);
	  tree rhs1 = gimple_cond_lhs (stmt);
	  tree rhs2 = gimple_cond_rhs (stmt);
	  if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
	      || POINTER_TYPE_P (TREE_TYPE (rhs1)))
	    val = bit_value_binop (code, TREE_TYPE (rhs1), rhs1, rhs2);
	}
      else if (code == GIMPLE_CALL
	       && (fndecl = gimple_call_fndecl (stmt))
	       && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
	{
	  switch (DECL_FUNCTION_CODE (fndecl))
	    {
	    case BUILT_IN_MALLOC:
	    case BUILT_IN_REALLOC:
	    case BUILT_IN_CALLOC:
	      val.lattice_val = CONSTANT;
	      val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0);
	      val.mask = shwi_to_double_int
			   (~(((HOST_WIDE_INT) MALLOC_ABI_ALIGNMENT)
			      / BITS_PER_UNIT - 1));
	      break;

	    case BUILT_IN_ALLOCA:
	      val.lattice_val = CONSTANT;
	      val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0);
	      val.mask = shwi_to_double_int
			   (~(((HOST_WIDE_INT) BIGGEST_ALIGNMENT)
			      / BITS_PER_UNIT - 1));
	      break;

	    default:;
	    }
	}
      is_constant = (val.lattice_val == CONSTANT);
    }
  if (!is_constant)
    {
      /* The statement produced a nonconstant value.  If the statement
	 had UNDEFINED operands, then the result of the statement
	 should be UNDEFINED.  Otherwise, the statement is VARYING.  */
      if (likelyvalue == UNDEFINED)
	{
	  val.lattice_val = likelyvalue;
	  val.mask = double_int_zero;
	}
      else
	{
	  val.lattice_val = VARYING;
	  val.mask = double_int_minus_one;
	}

      val.value = NULL_TREE;
    }

  return val;
}
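
/* Worked example (assumed target parameters, not from the original
   sources): with MALLOC_ABI_ALIGNMENT == 64 and BITS_PER_UNIT == 8,
   the mask computed for a malloc result above is ~(64 / 8 - 1) == ~7.
   Combined with value 0 this records that the low three bits of the
   returned pointer are known to be zero, i.e. the allocation is at
   least 8-byte aligned, while all other bits are unknown.  */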
/* Fold the stmt at *GSI with CCP specific information that propagating
   and regular folding does not catch.  */

static bool
ccp_fold_stmt (gimple_stmt_iterator *gsi)
{
  gimple stmt = gsi_stmt (*gsi);

  switch (gimple_code (stmt))
    {
    case GIMPLE_COND:
      {
	prop_value_t val;
	/* Statement evaluation will handle type mismatches in constants
	   more gracefully than the final propagation.  This allows us to
	   fold more conditionals here.  */
	val = evaluate_stmt (stmt);
	if (val.lattice_val != CONSTANT
	    || !double_int_zero_p (val.mask))
	  return false;

	if (dump_file)
	  {
	    fprintf (dump_file, "Folding predicate ");
	    print_gimple_expr (dump_file, stmt, 0, 0);
	    fprintf (dump_file, " to ");
	    print_generic_expr (dump_file, val.value, 0);
	    fprintf (dump_file, "\n");
	  }

	if (integer_zerop (val.value))
	  gimple_cond_make_false (stmt);
	else
	  gimple_cond_make_true (stmt);

	return true;
      }

    case GIMPLE_CALL:
      {
	tree lhs = gimple_call_lhs (stmt);
	tree val;
	tree argt;
	tree callee;
	bool changed = false;
	unsigned i;

	/* If the call was folded into a constant make sure it goes
	   away even if we cannot propagate into all uses because of
	   type issues.  */
	if (lhs
	    && TREE_CODE (lhs) == SSA_NAME
	    && (val = get_constant_value (lhs)))
	  {
	    tree new_rhs = unshare_expr (val);
	    bool res;
	    if (!useless_type_conversion_p (TREE_TYPE (lhs),
					    TREE_TYPE (new_rhs)))
	      new_rhs = fold_convert (TREE_TYPE (lhs), new_rhs);
	    res = update_call_from_tree (gsi, new_rhs);
	    gcc_assert (res);
	    return true;
	  }

	/* Propagate into the call arguments.  Compared to replace_uses_in
	   this can use the argument slot types for type verification
	   instead of the current argument type.  We also can safely
	   drop qualifiers here as we are dealing with constants anyway.  */
	argt = TYPE_ARG_TYPES (TREE_TYPE (TREE_TYPE (gimple_call_fn (stmt))));
	for (i = 0; i < gimple_call_num_args (stmt) && argt;
	     ++i, argt = TREE_CHAIN (argt))
	  {
	    tree arg = gimple_call_arg (stmt, i);
	    if (TREE_CODE (arg) == SSA_NAME
		&& (val = get_constant_value (arg))
		&& useless_type_conversion_p
		     (TYPE_MAIN_VARIANT (TREE_VALUE (argt)),
		      TYPE_MAIN_VARIANT (TREE_TYPE (val))))
	      {
		gimple_call_set_arg (stmt, i, unshare_expr (val));
		changed = true;
	      }
	  }

	callee = gimple_call_fn (stmt);
	if (TREE_CODE (callee) == OBJ_TYPE_REF
	    && TREE_CODE (OBJ_TYPE_REF_EXPR (callee)) == SSA_NAME)
	  {
	    tree expr = OBJ_TYPE_REF_EXPR (callee);
	    OBJ_TYPE_REF_EXPR (callee) = valueize_op (expr);
	    if (gimple_fold_call (gsi, false))
	      changed = true;
	    else
	      OBJ_TYPE_REF_EXPR (callee) = expr;
	  }

	return changed;
      }

    case GIMPLE_ASSIGN:
      {
	tree lhs = gimple_assign_lhs (stmt);
	tree val;

	/* If we have a load that turned out to be constant replace it
	   as we cannot propagate into all uses in all cases.  */
	if (gimple_assign_single_p (stmt)
	    && TREE_CODE (lhs) == SSA_NAME
	    && (val = get_constant_value (lhs)))
	  {
	    tree rhs = unshare_expr (val);
	    if (!useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (rhs)))
	      rhs = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (lhs), rhs);
	    gimple_assign_set_rhs_from_tree (gsi, rhs);
	    return true;
	  }

	return false;
      }

    default:
      return false;
    }
}
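
/* An illustrative GIMPLE fragment (hypothetical, names made up): if
   evaluate_stmt shows that the predicate of

	if (x_5 != 0)

   is CONSTANT 1 with an all-zero mask, the code above rewrites the
   statement via gimple_cond_make_true so that the false arm becomes
   unreachable and later CFG cleanup can delete it.  */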
/* Visit the assignment statement STMT.  Set the value of its LHS to the
   value computed by the RHS and store LHS in *OUTPUT_P.  If STMT
   creates virtual definitions, set the value of each new name to that
   of the RHS (if we can derive a constant out of the RHS).
   Value-returning call statements also perform an assignment, and
   are handled here.  */

static enum ssa_prop_result
visit_assignment (gimple stmt, tree *output_p)
{
  prop_value_t val;
  enum ssa_prop_result retval;

  tree lhs = gimple_get_lhs (stmt);

  gcc_assert (gimple_code (stmt) != GIMPLE_CALL
	      || gimple_call_lhs (stmt) != NULL_TREE);

  if (gimple_assign_single_p (stmt)
      && gimple_assign_rhs_code (stmt) == SSA_NAME)
    /* For a simple copy operation, we copy the lattice values.  */
    val = *get_value (gimple_assign_rhs1 (stmt));
  else
    /* Evaluate the statement, which could be
       either a GIMPLE_ASSIGN or a GIMPLE_CALL.  */
    val = evaluate_stmt (stmt);

  retval = SSA_PROP_NOT_INTERESTING;

  /* Set the lattice value of the statement's output.  */
  if (TREE_CODE (lhs) == SSA_NAME)
    {
      /* If STMT is an assignment to an SSA_NAME, we only have one
	 value to set.  */
      if (set_lattice_value (lhs, val))
	{
	  *output_p = lhs;
	  if (val.lattice_val == VARYING)
	    retval = SSA_PROP_VARYING;
	  else
	    retval = SSA_PROP_INTERESTING;
	}
    }

  return retval;
}
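
/* An illustrative case (hypothetical, names made up): for the simple
   copy  x_2 = y_3  where y_3 already has lattice value CONSTANT 4,
   the code above copies the lattice value directly, so x_2 becomes
   CONSTANT 4 without re-evaluating the statement.  */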
/* Visit the conditional statement STMT.  Return SSA_PROP_INTERESTING
   if it can determine which edge will be taken.  Otherwise, return
   SSA_PROP_VARYING.  */

static enum ssa_prop_result
visit_cond_stmt (gimple stmt, edge *taken_edge_p)
{
  prop_value_t val;
  basic_block block;

  block = gimple_bb (stmt);
  val = evaluate_stmt (stmt);
  if (val.lattice_val != CONSTANT
      || !double_int_zero_p (val.mask))
    return SSA_PROP_VARYING;

  /* Find which edge out of the conditional block will be taken and add it
     to the worklist.  If no single edge can be determined statically,
     return SSA_PROP_VARYING to feed all the outgoing edges to the
     propagation engine.  */
  *taken_edge_p = find_taken_edge (block, val.value);
  if (*taken_edge_p)
    return SSA_PROP_INTERESTING;
  else
    return SSA_PROP_VARYING;
}
/* Evaluate statement STMT.  If the statement produces an output value and
   its evaluation changes the lattice value of its output, return
   SSA_PROP_INTERESTING and set *OUTPUT_P to the SSA_NAME holding the
   output value.

   If STMT is a conditional branch and we can determine its truth
   value, set *TAKEN_EDGE_P accordingly.  If STMT produces a varying
   value, return SSA_PROP_VARYING.  */

static enum ssa_prop_result
ccp_visit_stmt (gimple stmt, edge *taken_edge_p, tree *output_p)
{
  tree def;
  ssa_op_iter iter;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nVisiting statement:\n");
      print_gimple_stmt (dump_file, stmt, 0, dump_flags);
    }

  switch (gimple_code (stmt))
    {
    case GIMPLE_ASSIGN:
      /* If the statement is an assignment that produces a single
	 output value, evaluate its RHS to see if the lattice value of
	 its output has changed.  */
      return visit_assignment (stmt, output_p);

    case GIMPLE_CALL:
      /* A value-returning call also performs an assignment.  */
      if (gimple_call_lhs (stmt) != NULL_TREE)
	return visit_assignment (stmt, output_p);
      break;

    case GIMPLE_COND:
    case GIMPLE_SWITCH:
      /* If STMT is a conditional branch, see if we can determine
	 which branch will be taken.  */
      /* FIXME.  It appears that we should be able to optimize
	 computed GOTOs here as well.  */
      return visit_cond_stmt (stmt, taken_edge_p);

    default:
      break;
    }

  /* Any other kind of statement is not interesting for constant
     propagation and, therefore, not worth simulating.  */
  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "No interesting values produced.  Marked VARYING.\n");

  /* Definitions made by statements other than assignments to
     SSA_NAMEs represent unknown modifications to their outputs.
     Mark them VARYING.  */
  FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
    {
      prop_value_t v = { VARYING, NULL_TREE, { -1, (HOST_WIDE_INT) -1 } };
      set_lattice_value (def, v);
    }

  return SSA_PROP_VARYING;
}
/* Main entry point for SSA Conditional Constant Propagation.  */

static unsigned int
do_ssa_ccp (void)
{
  ccp_initialize ();
  ssa_propagate (ccp_visit_stmt, ccp_visit_phi_node);
  if (ccp_finalize ())
    return (TODO_cleanup_cfg | TODO_update_ssa | TODO_remove_unused_locals);
  else
    return 0;
}


static bool
gate_ccp (void)
{
  return flag_tree_ccp != 0;
}


struct gimple_opt_pass pass_ccp =
{
 {
  GIMPLE_PASS,
  "ccp",				/* name */
  gate_ccp,				/* gate */
  do_ssa_ccp,				/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_TREE_CCP,				/* tv_id */
  PROP_cfg | PROP_ssa,			/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  TODO_dump_func | TODO_verify_ssa
  | TODO_verify_stmts | TODO_ggc_collect/* todo_flags_finish */
 }
};
/* Try to optimize out __builtin_stack_restore.  Optimize it out
   if there is another __builtin_stack_restore in the same basic
   block and no calls or ASM_EXPRs are in between, or if this block's
   only outgoing edge is to EXIT_BLOCK and there are no calls or
   ASM_EXPRs after this __builtin_stack_restore.  */

static tree
optimize_stack_restore (gimple_stmt_iterator i)
{
  tree callee;
  gimple stmt;

  basic_block bb = gsi_bb (i);
  gimple call = gsi_stmt (i);

  if (gimple_code (call) != GIMPLE_CALL
      || gimple_call_num_args (call) != 1
      || TREE_CODE (gimple_call_arg (call, 0)) != SSA_NAME
      || !POINTER_TYPE_P (TREE_TYPE (gimple_call_arg (call, 0))))
    return NULL_TREE;

  for (gsi_next (&i); !gsi_end_p (i); gsi_next (&i))
    {
      stmt = gsi_stmt (i);
      if (gimple_code (stmt) == GIMPLE_ASM)
	return NULL_TREE;
      if (gimple_code (stmt) != GIMPLE_CALL)
	continue;

      callee = gimple_call_fndecl (stmt);
      if (!callee
	  || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL
	  /* All regular builtins are ok, just obviously not alloca.  */
	  || DECL_FUNCTION_CODE (callee) == BUILT_IN_ALLOCA)
	return NULL_TREE;

      if (DECL_FUNCTION_CODE (callee) == BUILT_IN_STACK_RESTORE)
	goto second_stack_restore;
    }

  if (!gsi_end_p (i))
    return NULL_TREE;

  /* Allow one successor of the exit block, or zero successors.  */
  switch (EDGE_COUNT (bb->succs))
    {
    case 0:
      break;
    case 1:
      if (single_succ_edge (bb)->dest != EXIT_BLOCK_PTR)
	return NULL_TREE;
      break;
    default:
      return NULL_TREE;
    }

 second_stack_restore:

  /* If there's exactly one use, then zap the call to __builtin_stack_save.
     If there are multiple uses, then the last one should remove the call.
     In any case, whether the call to __builtin_stack_save can be removed
     or not is irrelevant to removing the call to __builtin_stack_restore.  */
  if (has_single_use (gimple_call_arg (call, 0)))
    {
      gimple stack_save = SSA_NAME_DEF_STMT (gimple_call_arg (call, 0));
      if (is_gimple_call (stack_save))
	{
	  callee = gimple_call_fndecl (stack_save);
	  if (callee
	      && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
	      && DECL_FUNCTION_CODE (callee) == BUILT_IN_STACK_SAVE)
	    {
	      gimple_stmt_iterator stack_save_gsi;
	      tree rhs;

	      stack_save_gsi = gsi_for_stmt (stack_save);
	      rhs = build_int_cst (TREE_TYPE (gimple_call_arg (call, 0)), 0);
	      update_call_from_tree (&stack_save_gsi, rhs);
	    }
	}
    }

  /* No effect, so the statement will be deleted.  */
  return integer_zero_node;
}
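
/* An illustrative GIMPLE sequence (hypothetical, names made up):

	saved_10 = __builtin_stack_save ();
	...				<- no call or asm in between
	__builtin_stack_restore (saved_10);
	...				<- falls through to EXIT

   Here the restore has no observable effect, so it is replaced by the
   no-op integer_zero_node above; if saved_10 has this single use, the
   matching __builtin_stack_save call is likewise replaced by a
   constant so that both calls can be removed.  */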
/* If va_list type is a simple pointer and nothing special is needed,
   optimize __builtin_va_start (&ap, 0) into ap = __builtin_next_arg (0),
   __builtin_va_end (&ap) out as NOP and __builtin_va_copy into a simple
   pointer assignment.  */

static tree
optimize_stdarg_builtin (gimple call)
{
  tree callee, lhs, rhs, cfun_va_list;
  bool va_list_simple_ptr;
  location_t loc = gimple_location (call);

  if (gimple_code (call) != GIMPLE_CALL)
    return NULL_TREE;

  callee = gimple_call_fndecl (call);

  cfun_va_list = targetm.fn_abi_va_list (callee);
  va_list_simple_ptr = POINTER_TYPE_P (cfun_va_list)
		       && (TREE_TYPE (cfun_va_list) == void_type_node
			   || TREE_TYPE (cfun_va_list) == char_type_node);

  switch (DECL_FUNCTION_CODE (callee))
    {
    case BUILT_IN_VA_START:
      if (!va_list_simple_ptr
	  || targetm.expand_builtin_va_start != NULL
	  || built_in_decls[BUILT_IN_NEXT_ARG] == NULL)
	return NULL_TREE;

      if (gimple_call_num_args (call) != 2)
	return NULL_TREE;

      lhs = gimple_call_arg (call, 0);
      if (!POINTER_TYPE_P (TREE_TYPE (lhs))
	  || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs)))
	     != TYPE_MAIN_VARIANT (cfun_va_list))
	return NULL_TREE;

      lhs = build_fold_indirect_ref_loc (loc, lhs);
      rhs = build_call_expr_loc (loc, built_in_decls[BUILT_IN_NEXT_ARG],
				 1, integer_zero_node);
      rhs = fold_convert_loc (loc, TREE_TYPE (lhs), rhs);
      return build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, rhs);

    case BUILT_IN_VA_COPY:
      if (!va_list_simple_ptr)
	return NULL_TREE;

      if (gimple_call_num_args (call) != 2)
	return NULL_TREE;

      lhs = gimple_call_arg (call, 0);
      if (!POINTER_TYPE_P (TREE_TYPE (lhs))
	  || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs)))
	     != TYPE_MAIN_VARIANT (cfun_va_list))
	return NULL_TREE;

      lhs = build_fold_indirect_ref_loc (loc, lhs);
      rhs = gimple_call_arg (call, 1);
      if (TYPE_MAIN_VARIANT (TREE_TYPE (rhs))
	  != TYPE_MAIN_VARIANT (cfun_va_list))
	return NULL_TREE;

      rhs = fold_convert_loc (loc, TREE_TYPE (lhs), rhs);
      return build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, rhs);

    case BUILT_IN_VA_END:
      /* No effect, so the statement will be deleted.  */
      return integer_zero_node;

    default:
      gcc_unreachable ();
    }
}
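
/* An illustrative transformation (hypothetical source, assuming a
   va_list that is a plain pointer): the BUILT_IN_VA_COPY case above
   turns

	__builtin_va_copy (&dst, src);

   into the plain pointer assignment  dst = src;  and a
   __builtin_va_end (&ap) call is deleted outright, since for such
   targets it expands to nothing.  */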
/* A simple pass that attempts to fold all builtin functions.  This pass
   is run after we've propagated as many constants as we can.  */

static unsigned int
execute_fold_all_builtins (void)
{
  bool cfg_changed = false;
  basic_block bb;
  unsigned int todoflags = 0;

  FOR_EACH_BB (bb)
    {
      gimple_stmt_iterator i;
      for (i = gsi_start_bb (bb); !gsi_end_p (i); )
	{
	  gimple stmt, old_stmt;
	  tree callee, result;
	  enum built_in_function fcode;

	  stmt = gsi_stmt (i);

	  if (gimple_code (stmt) != GIMPLE_CALL)
	    {
	      gsi_next (&i);
	      continue;
	    }
	  callee = gimple_call_fndecl (stmt);
	  if (!callee || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL)
	    {
	      gsi_next (&i);
	      continue;
	    }
	  fcode = DECL_FUNCTION_CODE (callee);

	  result = gimple_fold_builtin (stmt);

	  if (result)
	    gimple_remove_stmt_histograms (cfun, stmt);

	  if (!result)
	    switch (DECL_FUNCTION_CODE (callee))
	      {
	      case BUILT_IN_CONSTANT_P:
		/* Resolve __builtin_constant_p.  If it hasn't been
		   folded to integer_one_node by now, it's fairly
		   certain that the value simply isn't constant.  */
		result = integer_zero_node;
		break;

	      case BUILT_IN_STACK_RESTORE:
		result = optimize_stack_restore (i);
		if (result)
		  break;
		gsi_next (&i);
		continue;

	      case BUILT_IN_VA_START:
	      case BUILT_IN_VA_END:
	      case BUILT_IN_VA_COPY:
		/* These shouldn't be folded before pass_stdarg.  */
		result = optimize_stdarg_builtin (stmt);
		if (result)
		  break;
		/* FALLTHRU */

	      default:
		gsi_next (&i);
		continue;
	      }

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Simplified\n  ");
	      print_gimple_stmt (dump_file, stmt, 0, dump_flags);
	    }

	  old_stmt = stmt;
	  if (!update_call_from_tree (&i, result))
	    {
	      gimplify_and_update_call_from_tree (&i, result);
	      todoflags |= TODO_update_address_taken;
	    }

	  stmt = gsi_stmt (i);

	  if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt)
	      && gimple_purge_dead_eh_edges (bb))
	    cfg_changed = true;

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "to\n  ");
	      print_gimple_stmt (dump_file, stmt, 0, dump_flags);
	      fprintf (dump_file, "\n");
	    }

	  /* Retry the same statement if it changed into another
	     builtin, there might be new opportunities now.  */
	  if (gimple_code (stmt) != GIMPLE_CALL)
	    {
	      gsi_next (&i);
	      continue;
	    }
	  callee = gimple_call_fndecl (stmt);
	  if (!callee
	      || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL
	      || DECL_FUNCTION_CODE (callee) == fcode)
	    gsi_next (&i);
	}
    }

  /* Delete unreachable blocks.  */
  if (cfg_changed)
    todoflags |= TODO_cleanup_cfg;

  return todoflags;
}
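
/* An illustrative case (hypothetical, names made up): a call

	t_4 = __builtin_constant_p (n_2);

   that survives to this pass was not folded to integer_one_node by
   the constant propagators, so the BUILT_IN_CONSTANT_P case above
   resolves it conservatively to  t_4 = 0.  */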
struct gimple_opt_pass pass_fold_builtins =
{
 {
  GIMPLE_PASS,
  "fab",				/* name */
  NULL,					/* gate */
  execute_fold_all_builtins,		/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_NONE,				/* tv_id */
  PROP_cfg | PROP_ssa,			/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  TODO_dump_func
    | TODO_verify_ssa
    | TODO_update_ssa			/* todo_flags_finish */
 }
};