/* Conditional constant propagation pass for the GNU compiler.
   Copyright (C) 2000-2022 Free Software Foundation, Inc.
   Adapted from original RTL SSA-CCP by Daniel Berlin <dberlin@dberlin.org>
   Adapted to GIMPLE trees by Diego Novillo <dnovillo@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* Conditional constant propagation (CCP) is based on the SSA
   propagation engine (tree-ssa-propagate.cc).  Constant assignments of
   the form VAR = CST are propagated from the assignments into uses of
   VAR, which in turn may generate new constants.  The simulation uses
   a four level lattice to keep track of constant values associated
   with SSA names.  Given an SSA name V_i, it may take one of the
   following values:

	UNINITIALIZED   ->  the initial state of the value.  This value
			    is replaced with a correct initial value
			    the first time the value is used, so the
			    rest of the pass does not need to care about
			    it.  Using this value simplifies initialization
			    of the pass, and prevents us from needlessly
			    scanning statements that are never reached.

	UNDEFINED	->  V_i is a local variable whose definition
			    has not been processed yet.  Therefore we
			    don't yet know if its value is a constant
			    or not.

	CONSTANT	->  V_i has been found to hold a constant
			    value C.

	VARYING		->  V_i cannot take a constant value, or if it
			    does, it is not possible to determine it
			    at compile time.

   The core of SSA-CCP is in ccp_visit_stmt and ccp_visit_phi_node:
   1- In ccp_visit_stmt, we are interested in assignments whose RHS
      evaluates into a constant and conditional jumps whose predicate
      evaluates into a boolean true or false.  When an assignment of
      the form V_i = CONST is found, V_i's lattice value is set to
      CONSTANT and CONST is associated with it.  This causes the
      propagation engine to add all the SSA edges coming out of the
      assignment into the worklists, so that statements that use V_i
      can be visited.

      If the statement is a conditional with a constant predicate, we
      mark the outgoing edges as executable or not executable
      depending on the predicate's value.  This is then used when
      visiting PHI nodes to know when a PHI argument can be ignored.
   2- In ccp_visit_phi_node, if all the PHI arguments evaluate to the
      same constant C, then the LHS of the PHI is set to C.  This
      evaluation is known as the "meet operation".  Since one of the
      goals of this evaluation is to optimistically return constant
      values as often as possible, it uses two main short cuts:

      - If an argument is flowing in through a non-executable edge, it
	is ignored.  This is useful in cases like this:

			if (PRED)
			  a_9 = 3;
			else
			  a_10 = 100;
			a_11 = PHI (a_9, a_10)

	If PRED is known to always evaluate to false, then we can
	assume that a_11 will always take its value from a_10, meaning
	that instead of considering it VARYING (a_9 and a_10 have
	different values), we can consider it CONSTANT 100.
      - If an argument has an UNDEFINED value, then it does not affect
	the outcome of the meet operation.  If a variable V_i has an
	UNDEFINED value, it means that either its defining statement
	hasn't been visited yet or V_i has no defining statement, in
	which case the original symbol 'V' is being used
	uninitialized.  Since 'V' is a local variable, the compiler
	may assume any initial value for it.
   After propagation, every variable V_i that ends up with a lattice
   value of CONSTANT will have the associated constant value in the
   array CONST_VAL[i].VALUE.  That is fed into substitute_and_fold for
   final substitution and folding.
   This algorithm uses wide-ints at the max precision of the target.
   This means that, with one uninteresting exception, variables with
   UNSIGNED types never go to VARYING because the bits above the
   precision of the type of the variable are always zero.  The
   uninteresting case is a variable of UNSIGNED type that has the
   maximum precision of the target.  Such variables can go to VARYING,
   but this causes no loss of information since these variables will
   never be extended.

   References:

     Constant propagation with conditional branches,
     Wegman and Zadeck, ACM TOPLAS 13(2):181-210.

     Building an Optimizing Compiler,
     Robert Morgan, Butterworth-Heinemann, 1998, Section 8.9.

     Advanced Compiler Design and Implementation,
     Steven Muchnick, Morgan Kaufmann, 1997, Section 12.6  */
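/* A small end-to-end sketch (illustrative only, not part of the original
   description above): for source like

	int x;
	if (pred)
	  x = 4;
	else
	  x = 4;
	return x + 1;

   both assignments give their SSA names the lattice value CONSTANT 4, the
   PHI node merging them meets 4 with 4 and therefore stays CONSTANT 4, and
   the final substitution folds the returned expression to the constant 5.
   Had the two arms assigned different constants, the meet would have
   dropped the PHI result to VARYING instead.  */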
#include "coretypes.h"
#include "tree-pass.h"
#include "gimple-pretty-print.h"
#include "fold-const.h"
#include "gimple-iterator.h"
#include "gimple-fold.h"
#include "gimplify.h"
#include "tree-cfg.h"
#include "tree-ssa-propagate.h"
#include "builtins.h"
#include "stor-layout.h"
#include "optabs-query.h"
#include "tree-ssa-ccp.h"
#include "tree-dfa.h"
#include "diagnostic-core.h"
#include "stringpool.h"
#include "tree-vector-builder.h"
#include "alloc-pool.h"
#include "symbol-summary.h"
#include "ipa-utils.h"
#include "ipa-prop.h"
#include "internal-fn.h"
/* Possible lattice values.  */
typedef enum
{
  UNINITIALIZED,
  UNDEFINED,
  CONSTANT,
  VARYING
} ccp_lattice_t;

class ccp_prop_value_t {
public:
    /* Lattice value.  */
    ccp_lattice_t lattice_val;

    /* Propagated value.  */
    tree value;

    /* Mask that applies to the propagated value during CCP.  For X
       with a CONSTANT lattice value X & ~mask == value & ~mask.  The
       zero bits in the mask cover constant values.  The ones mean no
       information.  */
    widest_int mask;
};
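/* Worked example of the value/mask encoding (illustrative numbers only):
   value = 0b101000 with mask = 0b000110 describes a CONSTANT lattice entry
   whose bits 1 and 2 are unknown; the run-time value may be any of
   0b101000, 0b101010, 0b101100 or 0b101110.  A mask of zero means the value
   is fully known, while a mask of all ones carries no information.  */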
class ccp_propagate : public ssa_propagation_engine
{
 public:
  enum ssa_prop_result visit_stmt (gimple *, edge *, tree *) final override;
  enum ssa_prop_result visit_phi (gphi *) final override;
};
/* Array of propagated constant values.  After propagation,
   CONST_VAL[I].VALUE holds the constant value for SSA_NAME(I).  If
   the constant is held in an SSA name representing a memory store
   (i.e., a VDEF), CONST_VAL[I].MEM_REF will contain the actual
   memory reference used to store (i.e., the LHS of the assignment
   doing the store).  */
static ccp_prop_value_t *const_val;
static unsigned n_const_val;

static void canonicalize_value (ccp_prop_value_t *);
static void ccp_lattice_meet (ccp_prop_value_t *, ccp_prop_value_t *);
/* Dump constant propagation value VAL to file OUTF prefixed by PREFIX.  */

static void
dump_lattice_value (FILE *outf, const char *prefix, ccp_prop_value_t val)
{
  switch (val.lattice_val)
    {
    case UNINITIALIZED:
      fprintf (outf, "%sUNINITIALIZED", prefix);
      break;
    case UNDEFINED:
      fprintf (outf, "%sUNDEFINED", prefix);
      break;
    case VARYING:
      fprintf (outf, "%sVARYING", prefix);
      break;
    case CONSTANT:
      if (TREE_CODE (val.value) != INTEGER_CST
	  || val.mask == 0)
	{
	  fprintf (outf, "%sCONSTANT ", prefix);
	  print_generic_expr (outf, val.value, dump_flags);
	}
      else
	{
	  widest_int cval = wi::bit_and_not (wi::to_widest (val.value),
					     val.mask);
	  fprintf (outf, "%sCONSTANT ", prefix);
	  print_hex (cval, outf);
	  fprintf (outf, " (");
	  print_hex (val.mask, outf);
	  fprintf (outf, ")");
	}
      break;
    default:
      gcc_unreachable ();
    }
}
/* Print lattice value VAL to stderr.  */

void debug_lattice_value (ccp_prop_value_t val);

DEBUG_FUNCTION void
debug_lattice_value (ccp_prop_value_t val)
{
  dump_lattice_value (stderr, "", val);
  fprintf (stderr, "\n");
}
/* Extend NONZERO_BITS to a full mask, based on sgn.  */

static widest_int
extend_mask (const wide_int &nonzero_bits, signop sgn)
{
  return widest_int::from (nonzero_bits, sgn);
}
/* Compute a default value for variable VAR and store it in the
   CONST_VAL array.  The following rules are used to get default
   values:

   1- Global and static variables that are declared constant are
      considered CONSTANT.

   2- Any other value is considered UNDEFINED.  This is useful when
      considering PHI nodes.  PHI arguments that are undefined do not
      change the constant value of the PHI node, which allows for more
      constants to be propagated.

   3- Variables defined by statements other than assignments and PHI
      nodes are considered VARYING.

   4- Initial values of variables that are not GIMPLE registers are
      considered VARYING.  */
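/* For instance (illustrative only): a load from "static const int n = 42;"
   gets the default value CONSTANT 42 by rule 1, the default definition of an
   uninitialized local gets UNDEFINED by rule 2, and the result of something
   like a GIMPLE_ASM gets VARYING by rule 3.  */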
static ccp_prop_value_t
get_default_value (tree var)
{
  ccp_prop_value_t val = { UNINITIALIZED, NULL_TREE, 0 };
  gimple *stmt;

  stmt = SSA_NAME_DEF_STMT (var);

  if (gimple_nop_p (stmt))
    {
      /* Variables defined by an empty statement are those used
	 before being initialized.  If VAR is a local variable, we
	 can assume initially that it is UNDEFINED, otherwise we must
	 consider it VARYING.  */
      if (!virtual_operand_p (var)
	  && SSA_NAME_VAR (var)
	  && TREE_CODE (SSA_NAME_VAR (var)) == VAR_DECL)
	val.lattice_val = UNDEFINED;
      else
	{
	  val.lattice_val = VARYING;
	  val.mask = -1;
	  if (flag_tree_bit_ccp)
	    {
	      wide_int nonzero_bits = get_nonzero_bits (var);
	      tree value;
	      widest_int mask;
	      if (SSA_NAME_VAR (var)
		  && TREE_CODE (SSA_NAME_VAR (var)) == PARM_DECL
		  && ipcp_get_parm_bits (SSA_NAME_VAR (var), &value, &mask))
		{
		  val.lattice_val = CONSTANT;
		  val.value = value;
		  widest_int ipa_value = wi::to_widest (value);
		  /* Unknown bits from IPA CP must be equal to zero.  */
		  gcc_assert (wi::bit_and (ipa_value, mask) == 0);
		  val.mask = mask;
		  if (nonzero_bits != -1)
		    val.mask &= extend_mask (nonzero_bits,
					     TYPE_SIGN (TREE_TYPE (var)));
		}
	      else if (nonzero_bits != -1)
		{
		  val.lattice_val = CONSTANT;
		  val.value = build_zero_cst (TREE_TYPE (var));
		  val.mask = extend_mask (nonzero_bits,
					  TYPE_SIGN (TREE_TYPE (var)));
		}
	    }
	}
    }
  else if (is_gimple_assign (stmt))
    {
      tree cst;
      if (gimple_assign_single_p (stmt)
	  && DECL_P (gimple_assign_rhs1 (stmt))
	  && (cst = get_symbol_constant_value (gimple_assign_rhs1 (stmt))))
	{
	  val.lattice_val = CONSTANT;
	  val.value = cst;
	}
      else
	{
	  /* Any other variable defined by an assignment is considered
	     UNDEFINED.  */
	  val.lattice_val = UNDEFINED;
	}
    }
  else if ((is_gimple_call (stmt)
	    && gimple_call_lhs (stmt) != NULL_TREE)
	   || gimple_code (stmt) == GIMPLE_PHI)
    {
      /* A variable defined by a call or a PHI node is considered
	 UNDEFINED.  */
      val.lattice_val = UNDEFINED;
    }
  else
    {
      /* Otherwise, VAR will never take on a constant value.  */
      val.lattice_val = VARYING;
      val.mask = -1;
    }

  return val;
}
/* Get the constant value associated with variable VAR.  */

static inline ccp_prop_value_t *
get_value (tree var)
{
  ccp_prop_value_t *val;

  if (const_val == NULL
      || SSA_NAME_VERSION (var) >= n_const_val)
    return NULL;

  val = &const_val[SSA_NAME_VERSION (var)];
  if (val->lattice_val == UNINITIALIZED)
    *val = get_default_value (var);

  canonicalize_value (val);

  return val;
}
/* Return the constant tree value associated with VAR.  */

static inline tree
get_constant_value (tree var)
{
  ccp_prop_value_t *val;
  if (TREE_CODE (var) != SSA_NAME)
    {
      if (is_gimple_min_invariant (var))
	return var;
      return NULL_TREE;
    }
  val = get_value (var);
  if (val
      && val->lattice_val == CONSTANT
      && (TREE_CODE (val->value) != INTEGER_CST
	  || val->mask == 0))
    return val->value;
  return NULL_TREE;
}
/* Sets the value associated with VAR to VARYING.  */

static inline void
set_value_varying (tree var)
{
  ccp_prop_value_t *val = &const_val[SSA_NAME_VERSION (var)];

  val->lattice_val = VARYING;
  val->value = NULL_TREE;
  val->mask = -1;
}
/* For integer constants, make sure to drop TREE_OVERFLOW.  */

static void
canonicalize_value (ccp_prop_value_t *val)
{
  if (val->lattice_val != CONSTANT)
    return;

  if (TREE_OVERFLOW_P (val->value))
    val->value = drop_tree_overflow (val->value);
}
429 /* Return whether the lattice transition is valid. */
432 valid_lattice_transition (ccp_prop_value_t old_val
, ccp_prop_value_t new_val
)
434 /* Lattice transitions must always be monotonically increasing in
436 if (old_val
.lattice_val
< new_val
.lattice_val
)
439 if (old_val
.lattice_val
!= new_val
.lattice_val
)
442 if (!old_val
.value
&& !new_val
.value
)
445 /* Now both lattice values are CONSTANT. */
447 /* Allow arbitrary copy changes as we might look through PHI <a_1, ...>
448 when only a single copy edge is executable. */
449 if (TREE_CODE (old_val
.value
) == SSA_NAME
450 && TREE_CODE (new_val
.value
) == SSA_NAME
)
453 /* Allow transitioning from a constant to a copy. */
454 if (is_gimple_min_invariant (old_val
.value
)
455 && TREE_CODE (new_val
.value
) == SSA_NAME
)
458 /* Allow transitioning from PHI <&x, not executable> == &x
459 to PHI <&x, &y> == common alignment. */
460 if (TREE_CODE (old_val
.value
) != INTEGER_CST
461 && TREE_CODE (new_val
.value
) == INTEGER_CST
)
464 /* Bit-lattices have to agree in the still valid bits. */
465 if (TREE_CODE (old_val
.value
) == INTEGER_CST
466 && TREE_CODE (new_val
.value
) == INTEGER_CST
)
467 return (wi::bit_and_not (wi::to_widest (old_val
.value
), new_val
.mask
)
468 == wi::bit_and_not (wi::to_widest (new_val
.value
), new_val
.mask
));
470 /* Otherwise constant values have to agree. */
471 if (operand_equal_p (old_val
.value
, new_val
.value
, 0))
474 /* At least the kinds and types should agree now. */
475 if (TREE_CODE (old_val
.value
) != TREE_CODE (new_val
.value
)
476 || !types_compatible_p (TREE_TYPE (old_val
.value
),
477 TREE_TYPE (new_val
.value
)))
480 /* For floats and !HONOR_NANS allow transitions from (partial) NaN
482 tree type
= TREE_TYPE (new_val
.value
);
483 if (SCALAR_FLOAT_TYPE_P (type
)
484 && !HONOR_NANS (type
))
486 if (REAL_VALUE_ISNAN (TREE_REAL_CST (old_val
.value
)))
489 else if (VECTOR_FLOAT_TYPE_P (type
)
490 && !HONOR_NANS (type
))
493 = tree_vector_builder::binary_encoded_nelts (old_val
.value
,
495 for (unsigned int i
= 0; i
< count
; ++i
)
496 if (!REAL_VALUE_ISNAN
497 (TREE_REAL_CST (VECTOR_CST_ENCODED_ELT (old_val
.value
, i
)))
498 && !operand_equal_p (VECTOR_CST_ENCODED_ELT (old_val
.value
, i
),
499 VECTOR_CST_ENCODED_ELT (new_val
.value
, i
), 0))
503 else if (COMPLEX_FLOAT_TYPE_P (type
)
504 && !HONOR_NANS (type
))
506 if (!REAL_VALUE_ISNAN (TREE_REAL_CST (TREE_REALPART (old_val
.value
)))
507 && !operand_equal_p (TREE_REALPART (old_val
.value
),
508 TREE_REALPART (new_val
.value
), 0))
510 if (!REAL_VALUE_ISNAN (TREE_REAL_CST (TREE_IMAGPART (old_val
.value
)))
511 && !operand_equal_p (TREE_IMAGPART (old_val
.value
),
512 TREE_IMAGPART (new_val
.value
), 0))
519 /* Set the value for variable VAR to NEW_VAL. Return true if the new
520 value is different from VAR's previous value. */
523 set_lattice_value (tree var
, ccp_prop_value_t
*new_val
)
525 /* We can deal with old UNINITIALIZED values just fine here. */
526 ccp_prop_value_t
*old_val
= &const_val
[SSA_NAME_VERSION (var
)];
528 canonicalize_value (new_val
);
530 /* We have to be careful to not go up the bitwise lattice
531 represented by the mask. Instead of dropping to VARYING
532 use the meet operator to retain a conservative value.
533 Missed optimizations like PR65851 makes this necessary.
534 It also ensures we converge to a stable lattice solution. */
535 if (old_val
->lattice_val
!= UNINITIALIZED
536 /* But avoid using meet for constant -> copy transitions. */
537 && !(old_val
->lattice_val
== CONSTANT
538 && CONSTANT_CLASS_P (old_val
->value
)
539 && new_val
->lattice_val
== CONSTANT
540 && TREE_CODE (new_val
->value
) == SSA_NAME
))
541 ccp_lattice_meet (new_val
, old_val
);
543 gcc_checking_assert (valid_lattice_transition (*old_val
, *new_val
));
545 /* If *OLD_VAL and NEW_VAL are the same, return false to inform the
546 caller that this was a non-transition. */
547 if (old_val
->lattice_val
!= new_val
->lattice_val
548 || (new_val
->lattice_val
== CONSTANT
549 && (TREE_CODE (new_val
->value
) != TREE_CODE (old_val
->value
)
550 || (TREE_CODE (new_val
->value
) == INTEGER_CST
551 && (new_val
->mask
!= old_val
->mask
552 || (wi::bit_and_not (wi::to_widest (old_val
->value
),
554 != wi::bit_and_not (wi::to_widest (new_val
->value
),
556 || (TREE_CODE (new_val
->value
) != INTEGER_CST
557 && !operand_equal_p (new_val
->value
, old_val
->value
, 0)))))
559 /* ??? We would like to delay creation of INTEGER_CSTs from
560 partially constants here. */
562 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
564 dump_lattice_value (dump_file
, "Lattice value changed to ", *new_val
);
565 fprintf (dump_file
, ". Adding SSA edges to worklist.\n");
570 gcc_assert (new_val
->lattice_val
!= UNINITIALIZED
);
577 static ccp_prop_value_t
get_value_for_expr (tree
, bool);
578 static ccp_prop_value_t
bit_value_binop (enum tree_code
, tree
, tree
, tree
);
579 void bit_value_binop (enum tree_code
, signop
, int, widest_int
*, widest_int
*,
580 signop
, int, const widest_int
&, const widest_int
&,
581 signop
, int, const widest_int
&, const widest_int
&);
583 /* Return a widest_int that can be used for bitwise simplifications
587 value_to_wide_int (ccp_prop_value_t val
)
590 && TREE_CODE (val
.value
) == INTEGER_CST
)
591 return wi::to_widest (val
.value
);
/* Return the value for the address expression EXPR based on alignment
   information.  */
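/* Sketch of the intent with made-up numbers: if EXPR is &a and a is known
   to be 16-byte aligned with a misalignment of 4 bytes, then the low four
   bits of the address are known to be 0b0100 while all higher bits are
   unknown, i.e. the result has value 4 and a mask whose low four bits are
   clear and whose remaining bits are set.  */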
static ccp_prop_value_t
get_value_from_alignment (tree expr)
{
602 tree type
= TREE_TYPE (expr
);
603 ccp_prop_value_t val
;
604 unsigned HOST_WIDE_INT bitpos
;
607 gcc_assert (TREE_CODE (expr
) == ADDR_EXPR
);
609 get_pointer_alignment_1 (expr
, &align
, &bitpos
);
610 val
.mask
= wi::bit_and_not
611 (POINTER_TYPE_P (type
) || TYPE_UNSIGNED (type
)
612 ? wi::mask
<widest_int
> (TYPE_PRECISION (type
), false)
614 align
/ BITS_PER_UNIT
- 1);
616 = wi::sext (val
.mask
, TYPE_PRECISION (type
)) == -1 ? VARYING
: CONSTANT
;
617 if (val
.lattice_val
== CONSTANT
)
618 val
.value
= build_int_cstu (type
, bitpos
/ BITS_PER_UNIT
);
620 val
.value
= NULL_TREE
;
625 /* Return the value for the tree operand EXPR. If FOR_BITS_P is true
626 return constant bits extracted from alignment information for
627 invariant addresses. */
629 static ccp_prop_value_t
630 get_value_for_expr (tree expr
, bool for_bits_p
)
632 ccp_prop_value_t val
;
634 if (TREE_CODE (expr
) == SSA_NAME
)
636 ccp_prop_value_t
*val_
= get_value (expr
);
641 val
.lattice_val
= VARYING
;
642 val
.value
= NULL_TREE
;
646 && val
.lattice_val
== CONSTANT
)
648 if (TREE_CODE (val
.value
) == ADDR_EXPR
)
649 val
= get_value_from_alignment (val
.value
);
650 else if (TREE_CODE (val
.value
) != INTEGER_CST
)
652 val
.lattice_val
= VARYING
;
653 val
.value
= NULL_TREE
;
657 /* Fall back to a copy value. */
659 && val
.lattice_val
== VARYING
660 && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (expr
))
662 val
.lattice_val
= CONSTANT
;
667 else if (is_gimple_min_invariant (expr
)
668 && (!for_bits_p
|| TREE_CODE (expr
) == INTEGER_CST
))
670 val
.lattice_val
= CONSTANT
;
673 canonicalize_value (&val
);
675 else if (TREE_CODE (expr
) == ADDR_EXPR
)
676 val
= get_value_from_alignment (expr
);
679 val
.lattice_val
= VARYING
;
681 val
.value
= NULL_TREE
;
684 if (val
.lattice_val
== VARYING
685 && TYPE_UNSIGNED (TREE_TYPE (expr
)))
686 val
.mask
= wi::zext (val
.mask
, TYPE_PRECISION (TREE_TYPE (expr
)));
/* Return the likely CCP lattice value for STMT.

   If STMT has no operands, then return CONSTANT.

   Else if undefinedness of operands of STMT causes its value to be
   undefined, then return UNDEFINED.

   Else if any operands of STMT are constants, then return CONSTANT.

   Else return VARYING.  */
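/* For instance (illustrative only): for "x_3 = y_1 + 4" this returns
   CONSTANT when y_1 is CONSTANT, and also when y_1 is VARYING, because the
   literal 4 is a constant operand and folding is still worth attempting;
   it returns UNDEFINED while y_1 is still UNDEFINED, since an addition with
   an undefined operand is itself treated as undefined.  */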
703 likely_value (gimple
*stmt
)
705 bool has_constant_operand
, has_undefined_operand
, all_undefined_operands
;
706 bool has_nsa_operand
;
711 enum gimple_code code
= gimple_code (stmt
);
713 /* This function appears to be called only for assignments, calls,
714 conditionals, and switches, due to the logic in visit_stmt. */
715 gcc_assert (code
== GIMPLE_ASSIGN
716 || code
== GIMPLE_CALL
717 || code
== GIMPLE_COND
718 || code
== GIMPLE_SWITCH
);
720 /* If the statement has volatile operands, it won't fold to a
722 if (gimple_has_volatile_ops (stmt
))
725 /* Arrive here for more complex cases. */
726 has_constant_operand
= false;
727 has_undefined_operand
= false;
728 all_undefined_operands
= true;
729 has_nsa_operand
= false;
730 FOR_EACH_SSA_TREE_OPERAND (use
, stmt
, iter
, SSA_OP_USE
)
732 ccp_prop_value_t
*val
= get_value (use
);
734 if (val
&& val
->lattice_val
== UNDEFINED
)
735 has_undefined_operand
= true;
737 all_undefined_operands
= false;
739 if (val
&& val
->lattice_val
== CONSTANT
)
740 has_constant_operand
= true;
742 if (SSA_NAME_IS_DEFAULT_DEF (use
)
743 || !prop_simulate_again_p (SSA_NAME_DEF_STMT (use
)))
744 has_nsa_operand
= true;
747 /* There may be constants in regular rhs operands. For calls we
748 have to ignore lhs, fndecl and static chain, otherwise only
750 for (i
= (is_gimple_call (stmt
) ? 2 : 0) + gimple_has_lhs (stmt
);
751 i
< gimple_num_ops (stmt
); ++i
)
753 tree op
= gimple_op (stmt
, i
);
754 if (!op
|| TREE_CODE (op
) == SSA_NAME
)
756 if (is_gimple_min_invariant (op
))
757 has_constant_operand
= true;
760 if (has_constant_operand
)
761 all_undefined_operands
= false;
763 if (has_undefined_operand
764 && code
== GIMPLE_CALL
765 && gimple_call_internal_p (stmt
))
766 switch (gimple_call_internal_fn (stmt
))
	/* These three builtins use the first argument just as a magic
	   way to find out a decl UID.  */
770 case IFN_GOMP_SIMD_LANE
:
771 case IFN_GOMP_SIMD_VF
:
772 case IFN_GOMP_SIMD_LAST_LANE
:
773 has_undefined_operand
= false;
779 /* If the operation combines operands like COMPLEX_EXPR make sure to
780 not mark the result UNDEFINED if only one part of the result is
782 if (has_undefined_operand
&& all_undefined_operands
)
784 else if (code
== GIMPLE_ASSIGN
&& has_undefined_operand
)
786 switch (gimple_assign_rhs_code (stmt
))
788 /* Unary operators are handled with all_undefined_operands. */
791 case POINTER_PLUS_EXPR
:
793 /* Not MIN_EXPR, MAX_EXPR. One VARYING operand may be selected.
794 Not bitwise operators, one VARYING operand may specify the
796 Not logical operators for the same reason, apart from XOR.
797 Not COMPLEX_EXPR as one VARYING operand makes the result partly
798 not UNDEFINED. Not *DIV_EXPR, comparisons and shifts because
799 the undefined operand may be promoted. */
803 /* If any part of an address is UNDEFINED, like the index
804 of an ARRAY_EXPR, then treat the result as UNDEFINED. */
811 /* If there was an UNDEFINED operand but the result may be not UNDEFINED
812 fall back to CONSTANT. During iteration UNDEFINED may still drop
814 if (has_undefined_operand
)
817 /* We do not consider virtual operands here -- load from read-only
818 memory may have only VARYING virtual operands, but still be
819 constant. Also we can combine the stmt with definitions from
820 operands whose definitions are not simulated again. */
821 if (has_constant_operand
823 || gimple_references_memory_p (stmt
))
829 /* Returns true if STMT cannot be constant. */
832 surely_varying_stmt_p (gimple
*stmt
)
834 /* If the statement has operands that we cannot handle, it cannot be
836 if (gimple_has_volatile_ops (stmt
))
839 /* If it is a call and does not return a value or is not a
840 builtin and not an indirect call or a call to function with
841 assume_aligned/alloc_align attribute, it is varying. */
842 if (is_gimple_call (stmt
))
844 tree fndecl
, fntype
= gimple_call_fntype (stmt
);
845 if (!gimple_call_lhs (stmt
)
846 || ((fndecl
= gimple_call_fndecl (stmt
)) != NULL_TREE
847 && !fndecl_built_in_p (fndecl
)
848 && !lookup_attribute ("assume_aligned",
849 TYPE_ATTRIBUTES (fntype
))
850 && !lookup_attribute ("alloc_align",
851 TYPE_ATTRIBUTES (fntype
))))
855 /* Any other store operation is not interesting. */
856 else if (gimple_vdef (stmt
))
859 /* Anything other than assignments and conditional jumps are not
860 interesting for CCP. */
861 if (gimple_code (stmt
) != GIMPLE_ASSIGN
862 && gimple_code (stmt
) != GIMPLE_COND
863 && gimple_code (stmt
) != GIMPLE_SWITCH
864 && gimple_code (stmt
) != GIMPLE_CALL
)
870 /* Initialize local data structures for CCP. */
873 ccp_initialize (void)
877 n_const_val
= num_ssa_names
;
878 const_val
= XCNEWVEC (ccp_prop_value_t
, n_const_val
);
880 /* Initialize simulation flags for PHI nodes and statements. */
881 FOR_EACH_BB_FN (bb
, cfun
)
883 gimple_stmt_iterator i
;
885 for (i
= gsi_start_bb (bb
); !gsi_end_p (i
); gsi_next (&i
))
887 gimple
*stmt
= gsi_stmt (i
);
	  /* If the statement is a control insn, we always want it to be
	     simulated at least once; otherwise its outgoing edges would
	     never get added.  */
893 if (stmt_ends_bb_p (stmt
))
896 is_varying
= surely_varying_stmt_p (stmt
);
903 /* If the statement will not produce a constant, mark
904 all its outputs VARYING. */
905 FOR_EACH_SSA_TREE_OPERAND (def
, stmt
, iter
, SSA_OP_ALL_DEFS
)
906 set_value_varying (def
);
908 prop_set_simulate_again (stmt
, !is_varying
);
912 /* Now process PHI nodes. We never clear the simulate_again flag on
913 phi nodes, since we do not know which edges are executable yet,
914 except for phi nodes for virtual operands when we do not do store ccp. */
915 FOR_EACH_BB_FN (bb
, cfun
)
919 for (i
= gsi_start_phis (bb
); !gsi_end_p (i
); gsi_next (&i
))
921 gphi
*phi
= i
.phi ();
923 if (virtual_operand_p (gimple_phi_result (phi
)))
924 prop_set_simulate_again (phi
, false);
926 prop_set_simulate_again (phi
, true);
931 /* Debug count support. Reset the values of ssa names
932 VARYING when the total number ssa names analyzed is
933 beyond the debug count specified. */
939 for (i
= 0; i
< num_ssa_names
; i
++)
943 const_val
[i
].lattice_val
= VARYING
;
944 const_val
[i
].mask
= -1;
945 const_val
[i
].value
= NULL_TREE
;
951 /* We want to provide our own GET_VALUE and FOLD_STMT virtual methods. */
952 class ccp_folder
: public substitute_and_fold_engine
955 tree
value_of_expr (tree
, gimple
*) final override
;
956 bool fold_stmt (gimple_stmt_iterator
*) final override
;
959 /* This method just wraps GET_CONSTANT_VALUE for now. Over time
960 naked calls to GET_CONSTANT_VALUE should be eliminated in favor
961 of calling member functions. */
964 ccp_folder::value_of_expr (tree op
, gimple
*)
966 return get_constant_value (op
);
969 /* Do final substitution of propagated values, cleanup the flowgraph and
970 free allocated storage. If NONZERO_P, record nonzero bits.
972 Return TRUE when something was optimized. */
975 ccp_finalize (bool nonzero_p
)
977 bool something_changed
;
983 /* Derive alignment and misalignment information from partially
984 constant pointers in the lattice or nonzero bits from partially
985 constant integers. */
986 FOR_EACH_SSA_NAME (i
, name
, cfun
)
988 ccp_prop_value_t
*val
;
989 unsigned int tem
, align
;
991 if (!POINTER_TYPE_P (TREE_TYPE (name
))
992 && (!INTEGRAL_TYPE_P (TREE_TYPE (name
))
993 /* Don't record nonzero bits before IPA to avoid
994 using too much memory. */
998 val
= get_value (name
);
999 if (val
->lattice_val
!= CONSTANT
1000 || TREE_CODE (val
->value
) != INTEGER_CST
1004 if (POINTER_TYPE_P (TREE_TYPE (name
)))
1006 /* Trailing mask bits specify the alignment, trailing value
1007 bits the misalignment. */
1008 tem
= val
->mask
.to_uhwi ();
1009 align
= least_bit_hwi (tem
);
1011 set_ptr_info_alignment (get_ptr_info (name
), align
,
1012 (TREE_INT_CST_LOW (val
->value
)
1017 unsigned int precision
= TYPE_PRECISION (TREE_TYPE (val
->value
));
1018 wide_int nonzero_bits
1019 = (wide_int::from (val
->mask
, precision
, UNSIGNED
)
1020 | wi::to_wide (val
->value
));
1021 nonzero_bits
&= get_nonzero_bits (name
);
1022 set_nonzero_bits (name
, nonzero_bits
);
1026 /* Perform substitutions based on the known constant values. */
1027 class ccp_folder ccp_folder
;
1028 something_changed
= ccp_folder
.substitute_and_fold ();
1032 return something_changed
;
/* Compute the meet operator between *VAL1 and *VAL2.  Store the result
   in VAL1.

		any  M UNDEFINED   = any
		any  M VARYING     = VARYING
		Ci   M Cj	   = Ci		if (i == j)
		Ci   M Cj	   = VARYING	if (i != j)
   */
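/* Worked example for the INTEGER_CST case below (illustrative numbers):
   meeting value 0b1100 / mask 0b0001 with value 0b1110 / mask 0b0000 keeps
   the agreeing bits and widens the mask by the differing bit, giving
   value 0b1100 with mask 0b0011; only if no known bits remained (the
   extended mask becomes all ones) does the result drop to VARYING.  */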
1046 ccp_lattice_meet (ccp_prop_value_t
*val1
, ccp_prop_value_t
*val2
)
1048 if (val1
->lattice_val
== UNDEFINED
1049 /* For UNDEFINED M SSA we can't always SSA because its definition
1050 may not dominate the PHI node. Doing optimistic copy propagation
1051 also causes a lot of gcc.dg/uninit-pred*.c FAILs. */
1052 && (val2
->lattice_val
!= CONSTANT
1053 || TREE_CODE (val2
->value
) != SSA_NAME
))
1055 /* UNDEFINED M any = any */
1058 else if (val2
->lattice_val
== UNDEFINED
1060 && (val1
->lattice_val
!= CONSTANT
1061 || TREE_CODE (val1
->value
) != SSA_NAME
))
1063 /* any M UNDEFINED = any
1064 Nothing to do. VAL1 already contains the value we want. */
1067 else if (val1
->lattice_val
== VARYING
1068 || val2
->lattice_val
== VARYING
)
1070 /* any M VARYING = VARYING. */
1071 val1
->lattice_val
= VARYING
;
1073 val1
->value
= NULL_TREE
;
1075 else if (val1
->lattice_val
== CONSTANT
1076 && val2
->lattice_val
== CONSTANT
1077 && TREE_CODE (val1
->value
) == INTEGER_CST
1078 && TREE_CODE (val2
->value
) == INTEGER_CST
)
1080 /* Ci M Cj = Ci if (i == j)
1081 Ci M Cj = VARYING if (i != j)
1083 For INTEGER_CSTs mask unequal bits. If no equal bits remain,
1085 val1
->mask
= (val1
->mask
| val2
->mask
1086 | (wi::to_widest (val1
->value
)
1087 ^ wi::to_widest (val2
->value
)));
1088 if (wi::sext (val1
->mask
, TYPE_PRECISION (TREE_TYPE (val1
->value
))) == -1)
1090 val1
->lattice_val
= VARYING
;
1091 val1
->value
= NULL_TREE
;
1094 else if (val1
->lattice_val
== CONSTANT
1095 && val2
->lattice_val
== CONSTANT
1096 && operand_equal_p (val1
->value
, val2
->value
, 0))
1098 /* Ci M Cj = Ci if (i == j)
1099 Ci M Cj = VARYING if (i != j)
1101 VAL1 already contains the value we want for equivalent values. */
1103 else if (val1
->lattice_val
== CONSTANT
1104 && val2
->lattice_val
== CONSTANT
1105 && (TREE_CODE (val1
->value
) == ADDR_EXPR
1106 || TREE_CODE (val2
->value
) == ADDR_EXPR
))
1108 /* When not equal addresses are involved try meeting for
1110 ccp_prop_value_t tem
= *val2
;
1111 if (TREE_CODE (val1
->value
) == ADDR_EXPR
)
1112 *val1
= get_value_for_expr (val1
->value
, true);
1113 if (TREE_CODE (val2
->value
) == ADDR_EXPR
)
1114 tem
= get_value_for_expr (val2
->value
, true);
1115 ccp_lattice_meet (val1
, &tem
);
1119 /* Any other combination is VARYING. */
1120 val1
->lattice_val
= VARYING
;
1122 val1
->value
= NULL_TREE
;
1127 /* Loop through the PHI_NODE's parameters for BLOCK and compare their
1128 lattice values to determine PHI_NODE's lattice value. The value of a
1129 PHI node is determined calling ccp_lattice_meet with all the arguments
1130 of the PHI node that are incoming via executable edges. */
1132 enum ssa_prop_result
1133 ccp_propagate::visit_phi (gphi
*phi
)
1136 ccp_prop_value_t new_val
;
1138 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1140 fprintf (dump_file
, "\nVisiting PHI node: ");
1141 print_gimple_stmt (dump_file
, phi
, 0, dump_flags
);
1144 new_val
.lattice_val
= UNDEFINED
;
1145 new_val
.value
= NULL_TREE
;
1149 bool non_exec_edge
= false;
1150 for (i
= 0; i
< gimple_phi_num_args (phi
); i
++)
1152 /* Compute the meet operator over all the PHI arguments flowing
1153 through executable edges. */
1154 edge e
= gimple_phi_arg_edge (phi
, i
);
1156 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1159 "\tArgument #%d (%d -> %d %sexecutable)\n",
1160 i
, e
->src
->index
, e
->dest
->index
,
1161 (e
->flags
& EDGE_EXECUTABLE
) ? "" : "not ");
      /* If the incoming edge is executable, compute the meet operator for
	 the existing value of the PHI node and the current PHI argument.  */
1166 if (e
->flags
& EDGE_EXECUTABLE
)
1168 tree arg
= gimple_phi_arg (phi
, i
)->def
;
1169 ccp_prop_value_t arg_val
= get_value_for_expr (arg
, false);
1177 ccp_lattice_meet (&new_val
, &arg_val
);
1179 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1181 fprintf (dump_file
, "\t");
1182 print_generic_expr (dump_file
, arg
, dump_flags
);
1183 dump_lattice_value (dump_file
, "\tValue: ", arg_val
);
1184 fprintf (dump_file
, "\n");
1187 if (new_val
.lattice_val
== VARYING
)
1191 non_exec_edge
= true;
1194 /* In case there were non-executable edges and the value is a copy
1195 make sure its definition dominates the PHI node. */
1197 && new_val
.lattice_val
== CONSTANT
1198 && TREE_CODE (new_val
.value
) == SSA_NAME
1199 && ! SSA_NAME_IS_DEFAULT_DEF (new_val
.value
)
1200 && ! dominated_by_p (CDI_DOMINATORS
, gimple_bb (phi
),
1201 gimple_bb (SSA_NAME_DEF_STMT (new_val
.value
))))
1203 new_val
.lattice_val
= VARYING
;
1204 new_val
.value
= NULL_TREE
;
1208 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1210 dump_lattice_value (dump_file
, "\n PHI node value: ", new_val
);
1211 fprintf (dump_file
, "\n\n");
1214 /* Make the transition to the new value. */
1215 if (set_lattice_value (gimple_phi_result (phi
), &new_val
))
1217 if (new_val
.lattice_val
== VARYING
)
1218 return SSA_PROP_VARYING
;
1220 return SSA_PROP_INTERESTING
;
1223 return SSA_PROP_NOT_INTERESTING
;
1226 /* Return the constant value for OP or OP otherwise. */
1229 valueize_op (tree op
)
1231 if (TREE_CODE (op
) == SSA_NAME
)
1233 tree tem
= get_constant_value (op
);
1240 /* Return the constant value for OP, but signal to not follow SSA
1241 edges if the definition may be simulated again. */
1244 valueize_op_1 (tree op
)
1246 if (TREE_CODE (op
) == SSA_NAME
)
1248 /* If the definition may be simulated again we cannot follow
1249 this SSA edge as the SSA propagator does not necessarily
1250 re-visit the use. */
1251 gimple
*def_stmt
= SSA_NAME_DEF_STMT (op
);
1252 if (!gimple_nop_p (def_stmt
)
1253 && prop_simulate_again_p (def_stmt
))
1255 tree tem
= get_constant_value (op
);
1262 /* CCP specific front-end to the non-destructive constant folding
1265 Attempt to simplify the RHS of STMT knowing that one or more
1266 operands are constants.
1268 If simplification is possible, return the simplified RHS,
1269 otherwise return the original RHS or NULL_TREE. */
1272 ccp_fold (gimple
*stmt
)
1274 location_t loc
= gimple_location (stmt
);
1275 switch (gimple_code (stmt
))
1279 /* Handle comparison operators that can appear in GIMPLE form. */
1280 tree op0
= valueize_op (gimple_cond_lhs (stmt
));
1281 tree op1
= valueize_op (gimple_cond_rhs (stmt
));
1282 enum tree_code code
= gimple_cond_code (stmt
);
1283 return fold_binary_loc (loc
, code
, boolean_type_node
, op0
, op1
);
1288 /* Return the constant switch index. */
1289 return valueize_op (gimple_switch_index (as_a
<gswitch
*> (stmt
)));
1294 return gimple_fold_stmt_to_constant_1 (stmt
,
1295 valueize_op
, valueize_op_1
);
/* Determine the minimum and maximum values, *MIN and *MAX respectively,
   represented by the mask pair VAL and MASK with signedness SGN and
   precision PRECISION.  */
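/* Small unsigned example (illustrative): for VAL = 0b1000 and MASK = 0b0011
   the known bits give *MIN = 0b1000 = 8 and *MAX = 0b1011 = 11; for a signed
   operand whose mask covers the sign bit the two extremes are additionally
   adjusted as described in the code below.  */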
1307 value_mask_to_min_max (widest_int
*min
, widest_int
*max
,
1308 const widest_int
&val
, const widest_int
&mask
,
1309 signop sgn
, int precision
)
1311 *min
= wi::bit_and_not (val
, mask
);
1313 if (sgn
== SIGNED
&& wi::neg_p (mask
))
1315 widest_int sign_bit
= wi::lshift (1, precision
- 1);
1318 /* MAX is zero extended, and MIN is sign extended. */
1319 *min
= wi::ext (*min
, precision
, sgn
);
1320 *max
= wi::ext (*max
, precision
, sgn
);
/* Apply the operation CODE in type TYPE to the value, mask pair
   RVAL and RMASK representing a value of type RTYPE and set
   the value, mask pair *VAL and *MASK to the result.  */
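/* For instance (illustrative): a BIT_NOT_EXPR of value 0b0101 with mask
   0b0010 keeps the same mask and simply complements the known bits, while
   NEGATE_EXPR is handled below by rewriting -X as ~X + 1 and reusing the
   BIT_NOT_EXPR and PLUS_EXPR handling.  */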
1329 bit_value_unop (enum tree_code code
, signop type_sgn
, int type_precision
,
1330 widest_int
*val
, widest_int
*mask
,
1331 signop rtype_sgn
, int rtype_precision
,
1332 const widest_int
&rval
, const widest_int
&rmask
)
1343 widest_int temv
, temm
;
1344 /* Return ~rval + 1. */
1345 bit_value_unop (BIT_NOT_EXPR
, type_sgn
, type_precision
, &temv
, &temm
,
1346 type_sgn
, type_precision
, rval
, rmask
);
1347 bit_value_binop (PLUS_EXPR
, type_sgn
, type_precision
, val
, mask
,
1348 type_sgn
, type_precision
, temv
, temm
,
1349 type_sgn
, type_precision
, 1, 0);
1355 /* First extend mask and value according to the original type. */
1356 *mask
= wi::ext (rmask
, rtype_precision
, rtype_sgn
);
1357 *val
= wi::ext (rval
, rtype_precision
, rtype_sgn
);
1359 /* Then extend mask and value according to the target type. */
1360 *mask
= wi::ext (*mask
, type_precision
, type_sgn
);
1361 *val
= wi::ext (*val
, type_precision
, type_sgn
);
1367 if (wi::sext (rmask
, rtype_precision
) == -1)
1369 else if (wi::neg_p (rmask
))
1371 /* Result is either rval or -rval. */
1372 widest_int temv
, temm
;
1373 bit_value_unop (NEGATE_EXPR
, rtype_sgn
, rtype_precision
, &temv
,
1374 &temm
, type_sgn
, type_precision
, rval
, rmask
);
1375 temm
|= (rmask
| (rval
^ temv
));
1376 /* Extend the result. */
1377 *mask
= wi::ext (temm
, type_precision
, type_sgn
);
1378 *val
= wi::ext (temv
, type_precision
, type_sgn
);
1380 else if (wi::neg_p (rval
))
1382 bit_value_unop (NEGATE_EXPR
, type_sgn
, type_precision
, val
, mask
,
1383 type_sgn
, type_precision
, rval
, rmask
);
/* Determine the mask pair *VAL and *MASK from multiplying the
   argument mask pair RVAL, RMASK by the unsigned constant C.  */
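/* Illustrative example: multiplying value 0b0110 / mask 0b0001 by the
   constant C = 5 = 0b101 is treated as the sum of the partial products
   (pair << 0) + (pair << 2); every shifted copy contributes its shifted
   mask, and any bit where the minimum and maximum possible sums disagree
   is added to the result mask as well.  */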
1401 bit_value_mult_const (signop sgn
, int width
,
1402 widest_int
*val
, widest_int
*mask
,
1403 const widest_int
&rval
, const widest_int
&rmask
,
1406 widest_int sum_mask
= 0;
1408 /* Ensure rval_lo only contains known bits. */
1409 widest_int rval_lo
= wi::bit_and_not (rval
, rmask
);
1413 /* General case (some bits of multiplicand are known set). */
1414 widest_int sum_val
= 0;
1417 /* Determine the lowest bit set in the multiplier. */
1418 int bitpos
= wi::ctz (c
);
1419 widest_int term_mask
= rmask
<< bitpos
;
1420 widest_int term_val
= rval_lo
<< bitpos
;
1423 widest_int lo
= sum_val
+ term_val
;
1424 widest_int hi
= (sum_val
| sum_mask
) + (term_val
| term_mask
);
1425 sum_mask
|= term_mask
| (lo
^ hi
);
1428 /* Clear this bit in the multiplier. */
1429 c
^= wi::lshift (1, bitpos
);
1431 /* Correctly extend the result value. */
1432 *val
= wi::ext (sum_val
, width
, sgn
);
1436 /* Special case (no bits of multiplicand are known set). */
1439 /* Determine the lowest bit set in the multiplier. */
1440 int bitpos
= wi::ctz (c
);
1441 widest_int term_mask
= rmask
<< bitpos
;
1444 widest_int hi
= sum_mask
+ term_mask
;
1445 sum_mask
|= term_mask
| hi
;
1447 /* Clear this bit in the multiplier. */
1448 c
^= wi::lshift (1, bitpos
);
1453 /* Correctly extend the result mask. */
1454 *mask
= wi::ext (sum_mask
, width
, sgn
);
1457 /* Fill up to MAX values in the BITS array with values representing
1458 each of the non-zero bits in the value X. Returns the number of
1459 bits in X (capped at the maximum value MAX). For example, an X
1460 value 11, places 1, 2 and 8 in BITS and returns the value 3. */
1463 get_individual_bits (widest_int
*bits
, widest_int x
, unsigned int max
)
1465 unsigned int count
= 0;
1466 while (count
< max
&& x
!= 0)
1468 int bitpos
= wi::ctz (x
);
1469 bits
[count
] = wi::lshift (1, bitpos
);
1476 /* Array of 2^N - 1 values representing the bits flipped between
1477 consecutive Gray codes. This is used to efficiently enumerate
1478 all permutations on N bits using XOR. */
1479 static const unsigned char gray_code_bit_flips
[63] = {
1480 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4,
1481 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5,
1482 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4,
1483 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0
/* Apply the operation CODE in type TYPE to the value, mask pairs
   R1VAL, R1MASK and R2VAL, R2MASK representing values of type R1TYPE
   and R2TYPE and set the value, mask pair *VAL and *MASK to the result.  */
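/* Worked BIT_AND_EXPR example (illustrative numbers): for operands
   0b1100 / mask 0b0010 and 0b0101 / mask 0b0000 the result is value 0b0100
   with mask 0b0000.  The unknown bit of the first operand does not make the
   result unknown there, because the second operand is known to be zero in
   that position, and a known zero on either side forces a known zero
   result bit.  */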
1491 bit_value_binop (enum tree_code code
, signop sgn
, int width
,
1492 widest_int
*val
, widest_int
*mask
,
1493 signop r1type_sgn
, int r1type_precision
,
1494 const widest_int
&r1val
, const widest_int
&r1mask
,
1495 signop r2type_sgn
, int r2type_precision ATTRIBUTE_UNUSED
,
1496 const widest_int
&r2val
, const widest_int
&r2mask
)
1498 bool swap_p
= false;
  /* Assume we'll get a constant result.  Use an initial non-varying
     value; we fall back to varying in the end if necessary.  */
1503 /* Ensure that VAL is initialized (to any value). */
1509 /* The mask is constant where there is a known not
1510 set bit, (m1 | m2) & ((v1 | m1) & (v2 | m2)) */
1511 *mask
= (r1mask
| r2mask
) & (r1val
| r1mask
) & (r2val
| r2mask
);
1512 *val
= r1val
& r2val
;
1516 /* The mask is constant where there is a known
1517 set bit, (m1 | m2) & ~((v1 & ~m1) | (v2 & ~m2)). */
1518 *mask
= wi::bit_and_not (r1mask
| r2mask
,
1519 wi::bit_and_not (r1val
, r1mask
)
1520 | wi::bit_and_not (r2val
, r2mask
));
1521 *val
= r1val
| r2val
;
1526 *mask
= r1mask
| r2mask
;
1527 *val
= r1val
^ r2val
;
1534 widest_int shift
= r2val
;
1542 if (wi::neg_p (shift
, r2type_sgn
))
1545 if (code
== RROTATE_EXPR
)
1546 code
= LROTATE_EXPR
;
1548 code
= RROTATE_EXPR
;
1550 if (code
== RROTATE_EXPR
)
1552 *mask
= wi::rrotate (r1mask
, shift
, width
);
1553 *val
= wi::rrotate (r1val
, shift
, width
);
1557 *mask
= wi::lrotate (r1mask
, shift
, width
);
1558 *val
= wi::lrotate (r1val
, shift
, width
);
1562 else if (wi::ltu_p (r2val
| r2mask
, width
)
1563 && wi::popcount (r2mask
) <= 4)
1566 widest_int res_val
, res_mask
;
1567 widest_int tmp_val
, tmp_mask
;
1568 widest_int shift
= wi::bit_and_not (r2val
, r2mask
);
1569 unsigned int bit_count
= get_individual_bits (bits
, r2mask
, 4);
1570 unsigned int count
= (1 << bit_count
) - 1;
1572 /* Initialize result to rotate by smallest value of shift. */
1573 if (code
== RROTATE_EXPR
)
1575 res_mask
= wi::rrotate (r1mask
, shift
, width
);
1576 res_val
= wi::rrotate (r1val
, shift
, width
);
1580 res_mask
= wi::lrotate (r1mask
, shift
, width
);
1581 res_val
= wi::lrotate (r1val
, shift
, width
);
1584 /* Iterate through the remaining values of shift. */
1585 for (unsigned int i
=0; i
<count
; i
++)
1587 shift
^= bits
[gray_code_bit_flips
[i
]];
1588 if (code
== RROTATE_EXPR
)
1590 tmp_mask
= wi::rrotate (r1mask
, shift
, width
);
1591 tmp_val
= wi::rrotate (r1val
, shift
, width
);
1595 tmp_mask
= wi::lrotate (r1mask
, shift
, width
);
1596 tmp_val
= wi::lrotate (r1val
, shift
, width
);
1598 /* Accumulate the result. */
1599 res_mask
|= tmp_mask
| (res_val
^ tmp_val
);
1601 *val
= wi::bit_and_not (res_val
, res_mask
);
      /* ??? We can handle partially known shift counts if we know
	 their sign.  That way we can tell that (x << (y | 8)) & 255
	 is zero.  */
1613 widest_int shift
= r2val
;
1621 if (wi::neg_p (shift
, r2type_sgn
))
1623 if (code
== RSHIFT_EXPR
)
1625 *mask
= wi::rshift (wi::ext (r1mask
, width
, sgn
), shift
, sgn
);
1626 *val
= wi::rshift (wi::ext (r1val
, width
, sgn
), shift
, sgn
);
1630 *mask
= wi::ext (r1mask
<< shift
, width
, sgn
);
1631 *val
= wi::ext (r1val
<< shift
, width
, sgn
);
1635 else if (wi::ltu_p (r2val
| r2mask
, width
))
1637 if (wi::popcount (r2mask
) <= 4)
1640 widest_int arg_val
, arg_mask
;
1641 widest_int res_val
, res_mask
;
1642 widest_int tmp_val
, tmp_mask
;
1643 widest_int shift
= wi::bit_and_not (r2val
, r2mask
);
1644 unsigned int bit_count
= get_individual_bits (bits
, r2mask
, 4);
1645 unsigned int count
= (1 << bit_count
) - 1;
1647 /* Initialize result to shift by smallest value of shift. */
1648 if (code
== RSHIFT_EXPR
)
1650 arg_mask
= wi::ext (r1mask
, width
, sgn
);
1651 arg_val
= wi::ext (r1val
, width
, sgn
);
1652 res_mask
= wi::rshift (arg_mask
, shift
, sgn
);
1653 res_val
= wi::rshift (arg_val
, shift
, sgn
);
1659 res_mask
= arg_mask
<< shift
;
1660 res_val
= arg_val
<< shift
;
1663 /* Iterate through the remaining values of shift. */
1664 for (unsigned int i
=0; i
<count
; i
++)
1666 shift
^= bits
[gray_code_bit_flips
[i
]];
1667 if (code
== RSHIFT_EXPR
)
1669 tmp_mask
= wi::rshift (arg_mask
, shift
, sgn
);
1670 tmp_val
= wi::rshift (arg_val
, shift
, sgn
);
1674 tmp_mask
= arg_mask
<< shift
;
1675 tmp_val
= arg_val
<< shift
;
1677 /* Accumulate the result. */
1678 res_mask
|= tmp_mask
| (res_val
^ tmp_val
);
1680 res_mask
= wi::ext (res_mask
, width
, sgn
);
1681 res_val
= wi::ext (res_val
, width
, sgn
);
1682 *val
= wi::bit_and_not (res_val
, res_mask
);
1685 else if ((r1val
| r1mask
) == 0)
1687 /* Handle shifts of zero to avoid undefined wi::ctz below. */
1691 else if (code
== LSHIFT_EXPR
)
1693 widest_int tmp
= wi::mask
<widest_int
> (width
, false);
1694 tmp
<<= wi::ctz (r1val
| r1mask
);
1695 tmp
<<= wi::bit_and_not (r2val
, r2mask
);
1696 *mask
= wi::ext (tmp
, width
, sgn
);
1699 else if (!wi::neg_p (r1val
| r1mask
, sgn
))
1701 /* Logical right shift, or zero sign bit. */
1702 widest_int arg
= r1val
| r1mask
;
1703 int lzcount
= wi::clz (arg
);
1705 lzcount
-= wi::get_precision (arg
) - width
;
1706 widest_int tmp
= wi::mask
<widest_int
> (width
, false);
1707 tmp
= wi::lrshift (tmp
, lzcount
);
1708 tmp
= wi::lrshift (tmp
, wi::bit_and_not (r2val
, r2mask
));
1709 *mask
= wi::ext (tmp
, width
, sgn
);
1712 else if (!wi::neg_p (r1mask
))
1714 /* Arithmetic right shift with set sign bit. */
1715 widest_int arg
= wi::bit_and_not (r1val
, r1mask
);
1716 int sbcount
= wi::clrsb (arg
);
1717 sbcount
-= wi::get_precision (arg
) - width
;
1718 widest_int tmp
= wi::mask
<widest_int
> (width
, false);
1719 tmp
= wi::lrshift (tmp
, sbcount
);
1720 tmp
= wi::lrshift (tmp
, wi::bit_and_not (r2val
, r2mask
));
1721 *mask
= wi::sext (tmp
, width
);
1722 tmp
= wi::bit_not (tmp
);
1723 *val
= wi::sext (tmp
, width
);
1729 case POINTER_PLUS_EXPR
:
1731 /* Do the addition with unknown bits set to zero, to give carry-ins of
1732 zero wherever possible. */
1733 widest_int lo
= (wi::bit_and_not (r1val
, r1mask
)
1734 + wi::bit_and_not (r2val
, r2mask
));
1735 lo
= wi::ext (lo
, width
, sgn
);
1736 /* Do the addition with unknown bits set to one, to give carry-ins of
1737 one wherever possible. */
1738 widest_int hi
= (r1val
| r1mask
) + (r2val
| r2mask
);
1739 hi
= wi::ext (hi
, width
, sgn
);
1740 /* Each bit in the result is known if (a) the corresponding bits in
1741 both inputs are known, and (b) the carry-in to that bit position
1742 is known. We can check condition (b) by seeing if we got the same
1743 result with minimised carries as with maximised carries. */
1744 *mask
= r1mask
| r2mask
| (lo
^ hi
);
1745 *mask
= wi::ext (*mask
, width
, sgn
);
1746 /* It shouldn't matter whether we choose lo or hi here. */
1752 case POINTER_DIFF_EXPR
:
1754 /* Subtraction is derived from the addition algorithm above. */
1755 widest_int lo
= wi::bit_and_not (r1val
, r1mask
) - (r2val
| r2mask
);
1756 lo
= wi::ext (lo
, width
, sgn
);
1757 widest_int hi
= (r1val
| r1mask
) - wi::bit_and_not (r2val
, r2mask
);
1758 hi
= wi::ext (hi
, width
, sgn
);
1759 *mask
= r1mask
| r2mask
| (lo
^ hi
);
1760 *mask
= wi::ext (*mask
, width
, sgn
);
1767 && !wi::neg_p (r2val
, sgn
)
1768 && (flag_expensive_optimizations
|| wi::popcount (r2val
) < 8))
1769 bit_value_mult_const (sgn
, width
, val
, mask
, r1val
, r1mask
, r2val
);
1770 else if (r1mask
== 0
1771 && !wi::neg_p (r1val
, sgn
)
1772 && (flag_expensive_optimizations
|| wi::popcount (r1val
) < 8))
1773 bit_value_mult_const (sgn
, width
, val
, mask
, r2val
, r2mask
, r1val
);
1776 /* Just track trailing zeros in both operands and transfer
1777 them to the other. */
1778 int r1tz
= wi::ctz (r1val
| r1mask
);
1779 int r2tz
= wi::ctz (r2val
| r2mask
);
1780 if (r1tz
+ r2tz
>= width
)
1785 else if (r1tz
+ r2tz
> 0)
1787 *mask
= wi::ext (wi::mask
<widest_int
> (r1tz
+ r2tz
, true),
1797 widest_int m
= r1mask
| r2mask
;
1798 if (wi::bit_and_not (r1val
, m
) != wi::bit_and_not (r2val
, m
))
1801 *val
= ((code
== EQ_EXPR
) ? 0 : 1);
1805 /* We know the result of a comparison is always one or zero. */
1815 code
= swap_tree_comparison (code
);
1820 widest_int min1
, max1
, min2
, max2
;
1823 const widest_int
&o1val
= swap_p
? r2val
: r1val
;
1824 const widest_int
&o1mask
= swap_p
? r2mask
: r1mask
;
1825 const widest_int
&o2val
= swap_p
? r1val
: r2val
;
1826 const widest_int
&o2mask
= swap_p
? r1mask
: r2mask
;
1828 value_mask_to_min_max (&min1
, &max1
, o1val
, o1mask
,
1829 r1type_sgn
, r1type_precision
);
1830 value_mask_to_min_max (&min2
, &max2
, o2val
, o2mask
,
1831 r1type_sgn
, r1type_precision
);
1833 /* For comparisons the signedness is in the comparison operands. */
1834 /* Do a cross comparison of the max/min pairs. */
1835 maxmin
= wi::cmp (max1
, min2
, r1type_sgn
);
1836 minmax
= wi::cmp (min1
, max2
, r1type_sgn
);
1837 if (maxmin
< (code
== LE_EXPR
? 1: 0)) /* o1 < or <= o2. */
1842 else if (minmax
> (code
== LT_EXPR
? -1 : 0)) /* o1 >= or > o2. */
1847 else if (maxmin
== minmax
) /* o1 and o2 are equal. */
1849 /* This probably should never happen as we'd have
1850 folded the thing during fully constant value folding. */
1852 *val
= (code
== LE_EXPR
? 1 : 0);
1856 /* We know the result of a comparison is always one or zero. */
1866 widest_int min1
, max1
, min2
, max2
;
1868 value_mask_to_min_max (&min1
, &max1
, r1val
, r1mask
, sgn
, width
);
1869 value_mask_to_min_max (&min2
, &max2
, r2val
, r2mask
, sgn
, width
);
1871 if (wi::cmp (max1
, min2
, sgn
) <= 0) /* r1 is less than r2. */
1873 if (code
== MIN_EXPR
)
1884 else if (wi::cmp (min1
, max2
, sgn
) >= 0) /* r2 is less than r1. */
1886 if (code
== MIN_EXPR
)
1899 /* The result is either r1 or r2. */
1900 *mask
= r1mask
| r2mask
| (r1val
^ r2val
);
1906 case TRUNC_MOD_EXPR
:
1908 widest_int r1max
= r1val
| r1mask
;
1909 widest_int r2max
= r2val
| r2mask
;
1911 || (!wi::neg_p (r1max
) && !wi::neg_p (r2max
)))
1913 /* Confirm R2 has some bits set, to avoid division by zero. */
1914 widest_int r2min
= wi::bit_and_not (r2val
, r2mask
);
1917 /* R1 % R2 is R1 if R1 is always less than R2. */
1918 if (wi::ltu_p (r1max
, r2min
))
1925 /* R1 % R2 is always less than the maximum of R2. */
1926 unsigned int lzcount
= wi::clz (r2max
);
1927 unsigned int bits
= wi::get_precision (r2max
) - lzcount
;
1928 if (r2max
== wi::lshift (1, bits
))
1930 *mask
= wi::mask
<widest_int
> (bits
, false);
1938 case TRUNC_DIV_EXPR
:
1940 widest_int r1max
= r1val
| r1mask
;
1941 widest_int r2max
= r2val
| r2mask
;
1942 if (r2mask
== 0 && !wi::neg_p (r1max
))
1944 widest_int shift
= wi::exact_log2 (r2val
);
1947 // Handle division by a power of 2 as an rshift.
1948 bit_value_binop (RSHIFT_EXPR
, sgn
, width
, val
, mask
,
1949 r1type_sgn
, r1type_precision
, r1val
, r1mask
,
1950 r2type_sgn
, r2type_precision
, shift
, r2mask
);
1955 || (!wi::neg_p (r1max
) && !wi::neg_p (r2max
)))
1957 /* Confirm R2 has some bits set, to avoid division by zero. */
1958 widest_int r2min
= wi::bit_and_not (r2val
, r2mask
);
1961 /* R1 / R2 is zero if R1 is always less than R2. */
1962 if (wi::ltu_p (r1max
, r2min
))
1969 widest_int upper
= wi::udiv_trunc (r1max
, r2min
);
1970 unsigned int lzcount
= wi::clz (upper
);
1971 unsigned int bits
= wi::get_precision (upper
) - lzcount
;
1972 *mask
= wi::mask
<widest_int
> (bits
, false);
1984 /* Return the propagation value when applying the operation CODE to
1985 the value RHS yielding type TYPE. */
1987 static ccp_prop_value_t
1988 bit_value_unop (enum tree_code code
, tree type
, tree rhs
)
1990 ccp_prop_value_t rval
= get_value_for_expr (rhs
, true);
1991 widest_int value
, mask
;
1992 ccp_prop_value_t val
;
1994 if (rval
.lattice_val
== UNDEFINED
)
1997 gcc_assert ((rval
.lattice_val
== CONSTANT
1998 && TREE_CODE (rval
.value
) == INTEGER_CST
)
1999 || wi::sext (rval
.mask
, TYPE_PRECISION (TREE_TYPE (rhs
))) == -1);
2000 bit_value_unop (code
, TYPE_SIGN (type
), TYPE_PRECISION (type
), &value
, &mask
,
2001 TYPE_SIGN (TREE_TYPE (rhs
)), TYPE_PRECISION (TREE_TYPE (rhs
)),
2002 value_to_wide_int (rval
), rval
.mask
);
2003 if (wi::sext (mask
, TYPE_PRECISION (type
)) != -1)
2005 val
.lattice_val
= CONSTANT
;
2007 /* ??? Delay building trees here. */
2008 val
.value
= wide_int_to_tree (type
, value
);
2012 val
.lattice_val
= VARYING
;
2013 val
.value
= NULL_TREE
;
2019 /* Return the propagation value when applying the operation CODE to
2020 the values RHS1 and RHS2 yielding type TYPE. */
2022 static ccp_prop_value_t
2023 bit_value_binop (enum tree_code code
, tree type
, tree rhs1
, tree rhs2
)
2025 ccp_prop_value_t r1val
= get_value_for_expr (rhs1
, true);
2026 ccp_prop_value_t r2val
= get_value_for_expr (rhs2
, true);
2027 widest_int value
, mask
;
2028 ccp_prop_value_t val
;
2030 if (r1val
.lattice_val
== UNDEFINED
2031 || r2val
.lattice_val
== UNDEFINED
)
2033 val
.lattice_val
= VARYING
;
2034 val
.value
= NULL_TREE
;
2039 gcc_assert ((r1val
.lattice_val
== CONSTANT
2040 && TREE_CODE (r1val
.value
) == INTEGER_CST
)
2041 || wi::sext (r1val
.mask
,
2042 TYPE_PRECISION (TREE_TYPE (rhs1
))) == -1);
2043 gcc_assert ((r2val
.lattice_val
== CONSTANT
2044 && TREE_CODE (r2val
.value
) == INTEGER_CST
)
2045 || wi::sext (r2val
.mask
,
2046 TYPE_PRECISION (TREE_TYPE (rhs2
))) == -1);
2047 bit_value_binop (code
, TYPE_SIGN (type
), TYPE_PRECISION (type
), &value
, &mask
,
2048 TYPE_SIGN (TREE_TYPE (rhs1
)), TYPE_PRECISION (TREE_TYPE (rhs1
)),
2049 value_to_wide_int (r1val
), r1val
.mask
,
2050 TYPE_SIGN (TREE_TYPE (rhs2
)), TYPE_PRECISION (TREE_TYPE (rhs2
)),
2051 value_to_wide_int (r2val
), r2val
.mask
);
2053 /* (x * x) & 2 == 0. */
2054 if (code
== MULT_EXPR
&& rhs1
== rhs2
&& TYPE_PRECISION (type
) > 1)
2057 if (wi::sext (mask
, TYPE_PRECISION (type
)) != -1)
2058 value
= wi::bit_and_not (value
, m
);
2061 mask
= wi::bit_and_not (mask
, m
);
2064 if (wi::sext (mask
, TYPE_PRECISION (type
)) != -1)
2066 val
.lattice_val
= CONSTANT
;
2068 /* ??? Delay building trees here. */
2069 val
.value
= wide_int_to_tree (type
, value
);
2073 val
.lattice_val
= VARYING
;
2074 val
.value
= NULL_TREE
;
/* Return the propagation value for __builtin_assume_aligned
   and functions with assume_aligned or alloc_aligned attribute.
   For __builtin_assume_aligned, ATTR is NULL_TREE,
   for assume_aligned attribute ATTR is non-NULL and ALLOC_ALIGNED
   is false, for alloc_aligned attribute ATTR is non-NULL and
   ALLOC_ALIGNED is true.  */
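/* For example (illustrative only): for
   p_2 = __builtin_assume_aligned (p_1, 16, 4) the alignment/misalignment
   pair makes the low four bits of p_2 known to be 4, while the remaining
   bits simply inherit whatever is already known about p_1.  */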
2087 static ccp_prop_value_t
2088 bit_value_assume_aligned (gimple
*stmt
, tree attr
, ccp_prop_value_t ptrval
,
2091 tree align
, misalign
= NULL_TREE
, type
;
2092 unsigned HOST_WIDE_INT aligni
, misaligni
= 0;
2093 ccp_prop_value_t alignval
;
2094 widest_int value
, mask
;
2095 ccp_prop_value_t val
;
2097 if (attr
== NULL_TREE
)
2099 tree ptr
= gimple_call_arg (stmt
, 0);
2100 type
= TREE_TYPE (ptr
);
2101 ptrval
= get_value_for_expr (ptr
, true);
2105 tree lhs
= gimple_call_lhs (stmt
);
2106 type
= TREE_TYPE (lhs
);
2109 if (ptrval
.lattice_val
== UNDEFINED
)
2111 gcc_assert ((ptrval
.lattice_val
== CONSTANT
2112 && TREE_CODE (ptrval
.value
) == INTEGER_CST
)
2113 || wi::sext (ptrval
.mask
, TYPE_PRECISION (type
)) == -1);
2114 if (attr
== NULL_TREE
)
2116 /* Get aligni and misaligni from __builtin_assume_aligned. */
2117 align
= gimple_call_arg (stmt
, 1);
2118 if (!tree_fits_uhwi_p (align
))
2120 aligni
= tree_to_uhwi (align
);
2121 if (gimple_call_num_args (stmt
) > 2)
2123 misalign
= gimple_call_arg (stmt
, 2);
2124 if (!tree_fits_uhwi_p (misalign
))
2126 misaligni
= tree_to_uhwi (misalign
);
2131 /* Get aligni and misaligni from assume_aligned or
2132 alloc_align attributes. */
2133 if (TREE_VALUE (attr
) == NULL_TREE
)
2135 attr
= TREE_VALUE (attr
);
2136 align
= TREE_VALUE (attr
);
2137 if (!tree_fits_uhwi_p (align
))
2139 aligni
= tree_to_uhwi (align
);
2142 if (aligni
== 0 || aligni
> gimple_call_num_args (stmt
))
2144 align
= gimple_call_arg (stmt
, aligni
- 1);
2145 if (!tree_fits_uhwi_p (align
))
2147 aligni
= tree_to_uhwi (align
);
2149 else if (TREE_CHAIN (attr
) && TREE_VALUE (TREE_CHAIN (attr
)))
2151 misalign
= TREE_VALUE (TREE_CHAIN (attr
));
2152 if (!tree_fits_uhwi_p (misalign
))
2154 misaligni
= tree_to_uhwi (misalign
);
2157 if (aligni
<= 1 || (aligni
& (aligni
- 1)) != 0 || misaligni
>= aligni
)
2160 align
= build_int_cst_type (type
, -aligni
);
2161 alignval
= get_value_for_expr (align
, true);
2162 bit_value_binop (BIT_AND_EXPR
, TYPE_SIGN (type
), TYPE_PRECISION (type
), &value
, &mask
,
2163 TYPE_SIGN (type
), TYPE_PRECISION (type
), value_to_wide_int (ptrval
), ptrval
.mask
,
2164 TYPE_SIGN (type
), TYPE_PRECISION (type
), value_to_wide_int (alignval
), alignval
.mask
);
2166 if (wi::sext (mask
, TYPE_PRECISION (type
)) != -1)
2168 val
.lattice_val
= CONSTANT
;
2170 gcc_assert ((mask
.to_uhwi () & (aligni
- 1)) == 0);
2171 gcc_assert ((value
.to_uhwi () & (aligni
- 1)) == 0);
2173 /* ??? Delay building trees here. */
2174 val
.value
= wide_int_to_tree (type
, value
);
2178 val
.lattice_val
= VARYING
;
2179 val
.value
= NULL_TREE
;
2185 /* Evaluate statement STMT.
2186 Valid only for assignments, calls, conditionals, and switches. */
2188 static ccp_prop_value_t
2189 evaluate_stmt (gimple
*stmt
)
2191 ccp_prop_value_t val
;
2192 tree simplified
= NULL_TREE
;
2193 ccp_lattice_t likelyvalue
= likely_value (stmt
);
2194 bool is_constant
= false;
2196 bool ignore_return_flags
= false;
2198 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
2200 fprintf (dump_file
, "which is likely ");
2201 switch (likelyvalue
)
2204 fprintf (dump_file
, "CONSTANT");
2207 fprintf (dump_file
, "UNDEFINED");
2210 fprintf (dump_file
, "VARYING");
2214 fprintf (dump_file
, "\n");
2217 /* If the statement is likely to have a CONSTANT result, then try
2218 to fold the statement to determine the constant value. */
2219 /* FIXME. This is the only place that we call ccp_fold.
2220 Since likely_value never returns CONSTANT for calls, we will
2221 not attempt to fold them, including builtins that may profit. */
2222 if (likelyvalue
== CONSTANT
)
2224 fold_defer_overflow_warnings ();
2225 simplified
= ccp_fold (stmt
);
2227 && TREE_CODE (simplified
) == SSA_NAME
)
2229 /* We may not use values of something that may be simulated again,
2230 see valueize_op_1. */
2231 if (SSA_NAME_IS_DEFAULT_DEF (simplified
)
2232 || ! prop_simulate_again_p (SSA_NAME_DEF_STMT (simplified
)))
2234 ccp_prop_value_t
*val
= get_value (simplified
);
2235 if (val
&& val
->lattice_val
!= VARYING
)
2237 fold_undefer_overflow_warnings (true, stmt
, 0);
2242 /* We may also not place a non-valueized copy in the lattice
2243 as that might become stale if we never re-visit this stmt. */
2244 simplified
= NULL_TREE
;
2246 is_constant
= simplified
&& is_gimple_min_invariant (simplified
);
2247 fold_undefer_overflow_warnings (is_constant
, stmt
, 0);
2250 /* The statement produced a constant value. */
2251 val
.lattice_val
= CONSTANT
;
2252 val
.value
= simplified
;
2257 /* If the statement is likely to have a VARYING result, then do not
2258 bother folding the statement. */
2259 else if (likelyvalue
== VARYING
)
2261 enum gimple_code code
= gimple_code (stmt
);
2262 if (code
== GIMPLE_ASSIGN
)
2264 enum tree_code subcode
= gimple_assign_rhs_code (stmt
);
2266 /* Other cases cannot satisfy is_gimple_min_invariant
2268 if (get_gimple_rhs_class (subcode
) == GIMPLE_SINGLE_RHS
)
2269 simplified
= gimple_assign_rhs1 (stmt
);
2271 else if (code
== GIMPLE_SWITCH
)
2272 simplified
= gimple_switch_index (as_a
<gswitch
*> (stmt
));
2274 /* These cannot satisfy is_gimple_min_invariant without folding. */
2275 gcc_assert (code
== GIMPLE_CALL
|| code
== GIMPLE_COND
);
2276 is_constant
= simplified
&& is_gimple_min_invariant (simplified
);
2279 /* The statement produced a constant value. */
2280 val
.lattice_val
= CONSTANT
;
2281 val
.value
= simplified
;
2285 /* If the statement result is likely UNDEFINED, make it so. */
2286 else if (likelyvalue
== UNDEFINED
)
2288 val
.lattice_val
= UNDEFINED
;
2289 val
.value
= NULL_TREE
;
2294 /* Resort to simplification for bitwise tracking. */
2295 if (flag_tree_bit_ccp
2296 && (likelyvalue
== CONSTANT
|| is_gimple_call (stmt
)
2297 || (gimple_assign_single_p (stmt
)
2298 && gimple_assign_rhs_code (stmt
) == ADDR_EXPR
))
2301 enum gimple_code code
= gimple_code (stmt
);
2302 val
.lattice_val
= VARYING
;
2303 val
.value
= NULL_TREE
;
2305 if (code
== GIMPLE_ASSIGN
)
2307 enum tree_code subcode
= gimple_assign_rhs_code (stmt
);
2308 tree rhs1
= gimple_assign_rhs1 (stmt
);
2309 tree lhs
= gimple_assign_lhs (stmt
);
2310 if ((INTEGRAL_TYPE_P (TREE_TYPE (lhs
))
2311 || POINTER_TYPE_P (TREE_TYPE (lhs
)))
2312 && (INTEGRAL_TYPE_P (TREE_TYPE (rhs1
))
2313 || POINTER_TYPE_P (TREE_TYPE (rhs1
))))
2314 switch (get_gimple_rhs_class (subcode
))
2316 case GIMPLE_SINGLE_RHS
:
2317 val
= get_value_for_expr (rhs1
, true);
2320 case GIMPLE_UNARY_RHS
:
2321 val
= bit_value_unop (subcode
, TREE_TYPE (lhs
), rhs1
);
2324 case GIMPLE_BINARY_RHS
:
2325 val
= bit_value_binop (subcode
, TREE_TYPE (lhs
), rhs1
,
2326 gimple_assign_rhs2 (stmt
));
2332 else if (code
== GIMPLE_COND
)
2334 enum tree_code code
= gimple_cond_code (stmt
);
2335 tree rhs1
= gimple_cond_lhs (stmt
);
2336 tree rhs2
= gimple_cond_rhs (stmt
);
2337 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1
))
2338 || POINTER_TYPE_P (TREE_TYPE (rhs1
)))
2339 val
= bit_value_binop (code
, TREE_TYPE (rhs1
), rhs1
, rhs2
);
2341 else if (gimple_call_builtin_p (stmt
, BUILT_IN_NORMAL
))
2343 tree fndecl
= gimple_call_fndecl (stmt
);
2344 switch (DECL_FUNCTION_CODE (fndecl
))
2346 case BUILT_IN_MALLOC
:
2347 case BUILT_IN_REALLOC
:
2348 case BUILT_IN_CALLOC
:
2349 case BUILT_IN_STRDUP
:
2350 case BUILT_IN_STRNDUP
:
2351 val
.lattice_val
= CONSTANT
;
2352 val
.value
= build_int_cst (TREE_TYPE (gimple_get_lhs (stmt
)), 0);
2353 val
.mask
= ~((HOST_WIDE_INT
) MALLOC_ABI_ALIGNMENT
2354 / BITS_PER_UNIT
- 1);
2357 CASE_BUILT_IN_ALLOCA
:
2358 align
= (DECL_FUNCTION_CODE (fndecl
) == BUILT_IN_ALLOCA
2360 : TREE_INT_CST_LOW (gimple_call_arg (stmt
, 1)));
2361 val
.lattice_val
= CONSTANT
;
2362 val
.value
= build_int_cst (TREE_TYPE (gimple_get_lhs (stmt
)), 0);
2363 val
.mask
= ~((HOST_WIDE_INT
) align
/ BITS_PER_UNIT
- 1);
2366 case BUILT_IN_ASSUME_ALIGNED
:
2367 val
= bit_value_assume_aligned (stmt
, NULL_TREE
, val
, false);
2368 ignore_return_flags
= true;
2371 case BUILT_IN_ALIGNED_ALLOC
:
2372 case BUILT_IN_GOMP_ALLOC
:
2374 tree align
= get_constant_value (gimple_call_arg (stmt
, 0));
2376 && tree_fits_uhwi_p (align
))
2378 unsigned HOST_WIDE_INT aligni
= tree_to_uhwi (align
);
2380 /* align must be power-of-two */
2381 && (aligni
& (aligni
- 1)) == 0)
2383 val
.lattice_val
= CONSTANT
;
2384 val
.value
= build_int_cst (ptr_type_node
, 0);
2391 case BUILT_IN_BSWAP16
:
2392 case BUILT_IN_BSWAP32
:
2393 case BUILT_IN_BSWAP64
:
2394 case BUILT_IN_BSWAP128
:
2395 val
= get_value_for_expr (gimple_call_arg (stmt
, 0), true);
2396 if (val
.lattice_val
== UNDEFINED
)
2398 else if (val
.lattice_val
== CONSTANT
2400 && TREE_CODE (val
.value
) == INTEGER_CST
)
2402 tree type
= TREE_TYPE (gimple_call_lhs (stmt
));
2403 int prec
= TYPE_PRECISION (type
);
2404 wide_int wval
= wi::to_wide (val
.value
);
2406 = wide_int_to_tree (type
,
2407 wide_int::from (wval
, prec
,
2408 UNSIGNED
).bswap ());
2410 = widest_int::from (wide_int::from (val
.mask
, prec
,
2413 if (wi::sext (val
.mask
, prec
) != -1)
2416 val
.lattice_val
= VARYING
;
2417 val
.value
= NULL_TREE
;
2424 if (is_gimple_call (stmt
) && gimple_call_lhs (stmt
))
2426 tree fntype
= gimple_call_fntype (stmt
);
2429 tree attrs
= lookup_attribute ("assume_aligned",
2430 TYPE_ATTRIBUTES (fntype
));
2432 val
= bit_value_assume_aligned (stmt
, attrs
, val
, false);
2433 attrs
= lookup_attribute ("alloc_align",
2434 TYPE_ATTRIBUTES (fntype
));
2436 val
= bit_value_assume_aligned (stmt
, attrs
, val
, true);
2438 int flags
= ignore_return_flags
2439 ? 0 : gimple_call_return_flags (as_a
<gcall
*> (stmt
));
2440 if (flags
& ERF_RETURNS_ARG
2441 && (flags
& ERF_RETURN_ARG_MASK
) < gimple_call_num_args (stmt
))
2443 val
= get_value_for_expr
2444 (gimple_call_arg (stmt
,
2445 flags
& ERF_RETURN_ARG_MASK
), true);
2448 is_constant
= (val
.lattice_val
== CONSTANT
);
2451 if (flag_tree_bit_ccp
2452 && ((is_constant
&& TREE_CODE (val
.value
) == INTEGER_CST
)
2454 && gimple_get_lhs (stmt
)
2455 && TREE_CODE (gimple_get_lhs (stmt
)) == SSA_NAME
)
2457 tree lhs
= gimple_get_lhs (stmt
);
2458 wide_int nonzero_bits
= get_nonzero_bits (lhs
);
2459 if (nonzero_bits
!= -1)
2463 val
.lattice_val
= CONSTANT
;
2464 val
.value
= build_zero_cst (TREE_TYPE (lhs
));
2465 val
.mask
= extend_mask (nonzero_bits
, TYPE_SIGN (TREE_TYPE (lhs
)));
2470 if (wi::bit_and_not (wi::to_wide (val
.value
), nonzero_bits
) != 0)
2471 val
.value
= wide_int_to_tree (TREE_TYPE (lhs
),
2473 & wi::to_wide (val
.value
));
2474 if (nonzero_bits
== 0)
2477 val
.mask
= val
.mask
& extend_mask (nonzero_bits
,
2478 TYPE_SIGN (TREE_TYPE (lhs
)));
2483 /* The statement produced a nonconstant value. */
2486 /* The statement produced a copy. */
2487 if (simplified
&& TREE_CODE (simplified
) == SSA_NAME
2488 && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (simplified
))
2490 val
.lattice_val
= CONSTANT
;
2491 val
.value
= simplified
;
2494 /* The statement is VARYING. */
2497 val
.lattice_val
= VARYING
;
2498 val
.value
= NULL_TREE
;
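
/* Illustrative note (not part of the pass): for a call like

     p_3 = malloc (n_2);

   evaluate_stmt above cannot prove a constant pointer, but it still
   records CONSTANT zero with a mask whose low bits (as implied by
   MALLOC_ABI_ALIGNMENT) are clear, i.e. it remembers that the low bits
   of p_3 are zero even though the pointer itself is unknown.  */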
2506 typedef hash_table
<nofree_ptr_hash
<gimple
> > gimple_htab
;
2508 /* Given a BUILT_IN_STACK_SAVE value SAVED_VAL, insert a clobber of VAR before
2509 each matching BUILT_IN_STACK_RESTORE. Mark visited phis in VISITED. */
2512 insert_clobber_before_stack_restore (tree saved_val
, tree var
,
2513 gimple_htab
**visited
)
2516 gassign
*clobber_stmt
;
2518 imm_use_iterator iter
;
2519 gimple_stmt_iterator i
;
2522 FOR_EACH_IMM_USE_STMT (stmt
, iter
, saved_val
)
2523 if (gimple_call_builtin_p (stmt
, BUILT_IN_STACK_RESTORE
))
2525 clobber
= build_clobber (TREE_TYPE (var
), CLOBBER_EOL
);
2526 clobber_stmt
= gimple_build_assign (var
, clobber
);
2528 i
= gsi_for_stmt (stmt
);
2529 gsi_insert_before (&i
, clobber_stmt
, GSI_SAME_STMT
);
2531 else if (gimple_code (stmt
) == GIMPLE_PHI
)
2534 *visited
= new gimple_htab (10);
2536 slot
= (*visited
)->find_slot (stmt
, INSERT
);
2541 insert_clobber_before_stack_restore (gimple_phi_result (stmt
), var
,
2544 else if (gimple_assign_ssa_name_copy_p (stmt
))
2545 insert_clobber_before_stack_restore (gimple_assign_lhs (stmt
), var
,
2549 /* Advance the iterator to the previous non-debug gimple statement in the same
2550 or dominating basic block. */
2553 gsi_prev_dom_bb_nondebug (gimple_stmt_iterator
*i
)
2557 gsi_prev_nondebug (i
);
2558 while (gsi_end_p (*i
))
2560 dom
= get_immediate_dominator (CDI_DOMINATORS
, gsi_bb (*i
));
2561 if (dom
== NULL
|| dom
== ENTRY_BLOCK_PTR_FOR_FN (cfun
))
2564 *i
= gsi_last_bb (dom
);
2568 /* Find a BUILT_IN_STACK_SAVE dominating gsi_stmt (I), and insert
2569 a clobber of VAR before each matching BUILT_IN_STACK_RESTORE.
2571 It is possible that BUILT_IN_STACK_SAVE cannot be found in a dominator when
2572 a previous pass (such as DOM) duplicated it along multiple paths to a BB.
2573 In that case the function gives up without inserting the clobbers. */
2576 insert_clobbers_for_var (gimple_stmt_iterator i
, tree var
)
2580 gimple_htab
*visited
= NULL
;
2582 for (; !gsi_end_p (i
); gsi_prev_dom_bb_nondebug (&i
))
2584 stmt
= gsi_stmt (i
);
2586 if (!gimple_call_builtin_p (stmt
, BUILT_IN_STACK_SAVE
))
2589 saved_val
= gimple_call_lhs (stmt
);
2590 if (saved_val
== NULL_TREE
)
2593 insert_clobber_before_stack_restore (saved_val
, var
, &visited
);
/* Detects a __builtin_alloca_with_align with constant size argument.  Declares
   fixed-size array and returns the address, if found, otherwise returns
   NULL_TREE.  */

static tree
fold_builtin_alloca_with_align (gimple *stmt)
{
  unsigned HOST_WIDE_INT size, threshold, n_elem;
  tree lhs, arg, block, var, elem_type, array_type;

  /* Get lhs.  */
  lhs = gimple_call_lhs (stmt);
  if (lhs == NULL_TREE)
    return NULL_TREE;

  /* Detect constant argument.  */
  arg = get_constant_value (gimple_call_arg (stmt, 0));
  if (arg == NULL_TREE
      || TREE_CODE (arg) != INTEGER_CST
      || !tree_fits_uhwi_p (arg))
    return NULL_TREE;

  size = tree_to_uhwi (arg);

  /* Heuristic: don't fold large allocas.  */
  threshold = (unsigned HOST_WIDE_INT) param_large_stack_frame;
  /* In case the alloca is located at function entry, it has the same lifetime
     as a declared array, so we allow a larger size.  */
  block = gimple_block (stmt);
  if (!(cfun->after_inlining
        && block
        && TREE_CODE (BLOCK_SUPERCONTEXT (block)) == FUNCTION_DECL))
    threshold /= 10;
  if (size > threshold)
    return NULL_TREE;

  /* We have to be able to move points-to info.  We used to assert
     that we can but IPA PTA might end up with two UIDs here
     as it might need to handle more than one instance being
     live at the same time.  Instead of trying to detect this case
     (using the first UID would be OK) just give up for now.  */
  struct ptr_info_def *pi = SSA_NAME_PTR_INFO (lhs);
  unsigned uid = 0;
  if (pi != NULL
      && !pt_solution_singleton_or_null_p (&pi->pt, &uid))
    return NULL_TREE;

  /* Declare array.  */
  elem_type = build_nonstandard_integer_type (BITS_PER_UNIT, 1);
  n_elem = size * 8 / BITS_PER_UNIT;
  array_type = build_array_type_nelts (elem_type, n_elem);

  if (tree ssa_name = SSA_NAME_IDENTIFIER (lhs))
    {
      /* Give the temporary a name derived from the name of the VLA
         declaration so it can be referenced in diagnostics.  */
      const char *name = IDENTIFIER_POINTER (ssa_name);
      var = create_tmp_var (array_type, name);
    }
  else
    var = create_tmp_var (array_type);

  if (gimple *lhsdef = SSA_NAME_DEF_STMT (lhs))
    {
      /* Set the temporary's location to that of the VLA declaration
         so it can be pointed to in diagnostics.  */
      location_t loc = gimple_location (lhsdef);
      DECL_SOURCE_LOCATION (var) = loc;
    }
  SET_DECL_ALIGN (var, TREE_INT_CST_LOW (gimple_call_arg (stmt, 1)));
  if (uid != 0)
    SET_DECL_PT_UID (var, uid);

  /* Fold alloca to the address of the array.  */
  return fold_convert (TREE_TYPE (lhs), build_fold_addr_expr (var));
}
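
/* Illustrative sketch (not part of the pass): once CCP has shown the
   size argument to be constant, a call like

     p_5 = __builtin_alloca_with_align (16, 64);

   is replaced by the address of a fixed-size temporary, conceptually

     unsigned char tmp[16];    -- DECL_ALIGN taken from the 2nd argument
     p_5 = &tmp;

   The name "tmp" is invented for this comment; the pass creates an
   anonymous temporary (or one named after the VLA) via create_tmp_var,
   and insert_clobbers_for_var then adds clobbers of it before any
   matching __builtin_stack_restore.  */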
2679 /* Fold the stmt at *GSI with CCP specific information that propagating
2680 and regular folding does not catch. */
2683 ccp_folder::fold_stmt (gimple_stmt_iterator
*gsi
)
2685 gimple
*stmt
= gsi_stmt (*gsi
);
2687 switch (gimple_code (stmt
))
2691 gcond
*cond_stmt
= as_a
<gcond
*> (stmt
);
2692 ccp_prop_value_t val
;
2693 /* Statement evaluation will handle type mismatches in constants
2694 more gracefully than the final propagation. This allows us to
2695 fold more conditionals here. */
2696 val
= evaluate_stmt (stmt
);
2697 if (val
.lattice_val
!= CONSTANT
2703 fprintf (dump_file
, "Folding predicate ");
2704 print_gimple_expr (dump_file
, stmt
, 0);
2705 fprintf (dump_file
, " to ");
2706 print_generic_expr (dump_file
, val
.value
);
2707 fprintf (dump_file
, "\n");
2710 if (integer_zerop (val
.value
))
2711 gimple_cond_make_false (cond_stmt
);
2713 gimple_cond_make_true (cond_stmt
);
2720 tree lhs
= gimple_call_lhs (stmt
);
2721 int flags
= gimple_call_flags (stmt
);
2724 bool changed
= false;
2727 /* If the call was folded into a constant make sure it goes
2728 away even if we cannot propagate into all uses because of
2731 && TREE_CODE (lhs
) == SSA_NAME
2732 && (val
= get_constant_value (lhs
))
2733 /* Don't optimize away calls that have side-effects. */
2734 && (flags
& (ECF_CONST
|ECF_PURE
)) != 0
2735 && (flags
& ECF_LOOPING_CONST_OR_PURE
) == 0)
2737 tree new_rhs
= unshare_expr (val
);
2738 if (!useless_type_conversion_p (TREE_TYPE (lhs
),
2739 TREE_TYPE (new_rhs
)))
2740 new_rhs
= fold_convert (TREE_TYPE (lhs
), new_rhs
);
2741 gimplify_and_update_call_from_tree (gsi
, new_rhs
);
2745 /* Internal calls provide no argument types, so the extra laxity
2746 for normal calls does not apply. */
2747 if (gimple_call_internal_p (stmt
))
2750 /* The heuristic of fold_builtin_alloca_with_align differs before and
2751 after inlining, so we don't require the arg to be changed into a
2752 constant for folding, but just to be constant. */
2753 if (gimple_call_builtin_p (stmt
, BUILT_IN_ALLOCA_WITH_ALIGN
)
2754 || gimple_call_builtin_p (stmt
, BUILT_IN_ALLOCA_WITH_ALIGN_AND_MAX
))
2756 tree new_rhs
= fold_builtin_alloca_with_align (stmt
);
2759 gimplify_and_update_call_from_tree (gsi
, new_rhs
);
2760 tree var
= TREE_OPERAND (TREE_OPERAND (new_rhs
, 0),0);
2761 insert_clobbers_for_var (*gsi
, var
);
2766 /* If there's no extra info from an assume_aligned call,
2767 drop it so it doesn't act as otherwise useless dataflow
2769 if (gimple_call_builtin_p (stmt
, BUILT_IN_ASSUME_ALIGNED
))
2771 tree ptr
= gimple_call_arg (stmt
, 0);
2772 ccp_prop_value_t ptrval
= get_value_for_expr (ptr
, true);
2773 if (ptrval
.lattice_val
== CONSTANT
2774 && TREE_CODE (ptrval
.value
) == INTEGER_CST
2775 && ptrval
.mask
!= 0)
2777 ccp_prop_value_t val
2778 = bit_value_assume_aligned (stmt
, NULL_TREE
, ptrval
, false);
2779 unsigned int ptralign
= least_bit_hwi (ptrval
.mask
.to_uhwi ());
2780 unsigned int align
= least_bit_hwi (val
.mask
.to_uhwi ());
2781 if (ptralign
== align
2782 && ((TREE_INT_CST_LOW (ptrval
.value
) & (align
- 1))
2783 == (TREE_INT_CST_LOW (val
.value
) & (align
- 1))))
2785 replace_call_with_value (gsi
, ptr
);
2791 /* Propagate into the call arguments. Compared to replace_uses_in
2792 this can use the argument slot types for type verification
2793 instead of the current argument type. We also can safely
2794 drop qualifiers here as we are dealing with constants anyway. */
2795 argt
= TYPE_ARG_TYPES (gimple_call_fntype (stmt
));
2796 for (i
= 0; i
< gimple_call_num_args (stmt
) && argt
;
2797 ++i
, argt
= TREE_CHAIN (argt
))
2799 tree arg
= gimple_call_arg (stmt
, i
);
2800 if (TREE_CODE (arg
) == SSA_NAME
2801 && (val
= get_constant_value (arg
))
2802 && useless_type_conversion_p
2803 (TYPE_MAIN_VARIANT (TREE_VALUE (argt
)),
2804 TYPE_MAIN_VARIANT (TREE_TYPE (val
))))
2806 gimple_call_set_arg (stmt
, i
, unshare_expr (val
));
2816 tree lhs
= gimple_assign_lhs (stmt
);
2819 /* If we have a load that turned out to be constant replace it
2820 as we cannot propagate into all uses in all cases. */
2821 if (gimple_assign_single_p (stmt
)
2822 && TREE_CODE (lhs
) == SSA_NAME
2823 && (val
= get_constant_value (lhs
)))
2825 tree rhs
= unshare_expr (val
);
2826 if (!useless_type_conversion_p (TREE_TYPE (lhs
), TREE_TYPE (rhs
)))
2827 rhs
= fold_build1 (VIEW_CONVERT_EXPR
, TREE_TYPE (lhs
), rhs
);
2828 gimple_assign_set_rhs_from_tree (gsi
, rhs
);
/* Visit the assignment statement STMT.  Set the value of its LHS to the
   value computed by the RHS and store LHS in *OUTPUT_P.  If STMT
   creates virtual definitions, set the value of each new name to that
   of the RHS (if we can derive a constant out of the RHS).
   Value-returning call statements also perform an assignment, and
   are handled here.  */

static enum ssa_prop_result
visit_assignment (gimple *stmt, tree *output_p)
{
  ccp_prop_value_t val;
  enum ssa_prop_result retval = SSA_PROP_NOT_INTERESTING;

  tree lhs = gimple_get_lhs (stmt);
  if (TREE_CODE (lhs) == SSA_NAME)
    {
      /* Evaluate the statement, which could be
         either a GIMPLE_ASSIGN or a GIMPLE_CALL.  */
      val = evaluate_stmt (stmt);

      /* If STMT is an assignment to an SSA_NAME, we only have one
         value to set.  */
      if (set_lattice_value (lhs, &val))
        {
          *output_p = lhs;
          if (val.lattice_val == VARYING)
            retval = SSA_PROP_VARYING;
          else
            retval = SSA_PROP_INTERESTING;
        }
    }

  return retval;
}


/* Visit the conditional statement STMT.  Return SSA_PROP_INTERESTING
   if it can determine which edge will be taken.  Otherwise, return
   SSA_PROP_VARYING.  */

static enum ssa_prop_result
visit_cond_stmt (gimple *stmt, edge *taken_edge_p)
{
  ccp_prop_value_t val;
  basic_block block;

  block = gimple_bb (stmt);
  val = evaluate_stmt (stmt);
  if (val.lattice_val != CONSTANT
      || val.mask != 0)
    return SSA_PROP_VARYING;

  /* Find which edge out of the conditional block will be taken and add it
     to the worklist.  If no single edge can be determined statically,
     return SSA_PROP_VARYING to feed all the outgoing edges to the
     propagation engine.  */
  *taken_edge_p = find_taken_edge (block, val.value);
  if (*taken_edge_p)
    return SSA_PROP_INTERESTING;
  else
    return SSA_PROP_VARYING;
}


/* Evaluate statement STMT.  If the statement produces an output value and
   its evaluation changes the lattice value of its output, return
   SSA_PROP_INTERESTING and set *OUTPUT_P to the SSA_NAME holding the
   output value.

   If STMT is a conditional branch and we can determine its truth
   value, set *TAKEN_EDGE_P accordingly.  If STMT produces a varying
   value, return SSA_PROP_VARYING.  */

enum ssa_prop_result
ccp_propagate::visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p)
{
  tree def;
  ssa_op_iter iter;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nVisiting statement:\n");
      print_gimple_stmt (dump_file, stmt, 0, dump_flags);
    }

  switch (gimple_code (stmt))
    {
      case GIMPLE_ASSIGN:
        /* If the statement is an assignment that produces a single
           output value, evaluate its RHS to see if the lattice value of
           its output has changed.  */
        return visit_assignment (stmt, output_p);

      case GIMPLE_CALL:
        /* A value-returning call also performs an assignment.  */
        if (gimple_call_lhs (stmt) != NULL_TREE)
          return visit_assignment (stmt, output_p);
        break;

      case GIMPLE_COND:
      case GIMPLE_SWITCH:
        /* If STMT is a conditional branch, see if we can determine
           which branch will be taken.  */
        /* FIXME.  It appears that we should be able to optimize
           computed GOTOs here as well.  */
        return visit_cond_stmt (stmt, taken_edge_p);

      default:
        break;
    }

  /* Any other kind of statement is not interesting for constant
     propagation and, therefore, not worth simulating.  */
  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "No interesting values produced.  Marked VARYING.\n");

  /* Definitions made by statements other than assignments to
     SSA_NAMEs represent unknown modifications to their outputs.
     Mark them VARYING.  */
  FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
    set_value_varying (def);

  return SSA_PROP_VARYING;
}
/* Main entry point for SSA Conditional Constant Propagation.  If NONZERO_P,
   record nonzero bits.  */

static unsigned int
do_ssa_ccp (bool nonzero_p)
{
  unsigned int todo = 0;
  calculate_dominance_info (CDI_DOMINATORS);

  ccp_initialize ();
  class ccp_propagate ccp_propagate;
  ccp_propagate.ssa_propagate ();
  if (ccp_finalize (nonzero_p || flag_ipa_bit_cp))
    {
      todo = (TODO_cleanup_cfg | TODO_update_ssa);

      /* ccp_finalize does not preserve loop-closed ssa.  */
      loops_state_clear (LOOP_CLOSED_SSA);
    }

  free_dominance_info (CDI_DOMINATORS);
  return todo;
}


namespace {

const pass_data pass_data_ccp =
{
  GIMPLE_PASS, /* type */
  "ccp", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_CCP, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_address_taken, /* todo_flags_finish */
};

class pass_ccp : public gimple_opt_pass
{
public:
  pass_ccp (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_ccp, ctxt), nonzero_p (false)
  {}

  /* opt_pass methods: */
  opt_pass * clone () final override { return new pass_ccp (m_ctxt); }
  void set_pass_param (unsigned int n, bool param) final override
    {
      gcc_assert (n == 0);
      nonzero_p = param;
    }
  bool gate (function *) final override { return flag_tree_ccp != 0; }
  unsigned int execute (function *) final override
    {
      return do_ssa_ccp (nonzero_p);
    }

private:
  /* Determines whether the pass instance records nonzero bits.  */
  bool nonzero_p;
}; // class pass_ccp

} // anon namespace

gimple_opt_pass *
make_pass_ccp (gcc::context *ctxt)
{
  return new pass_ccp (ctxt);
}
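
/* Illustrative note (not an excerpt from passes.def): the pass is cloned
   several times in the pass pipeline, and the single parameter set via
   set_pass_param above selects whether nonzero bits are recorded,
   along the lines of

     NEXT_PASS (pass_ccp, false);   -- early instances, nonzero_p == false
     NEXT_PASS (pass_ccp, true);    -- late instance, records nonzero bits

   Only a late instance passes true, so known-zero bits are committed to
   the SSA_NAME range info once the IL has stabilized.  */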
/* Try to optimize out __builtin_stack_restore.  Optimize it out
   if there is another __builtin_stack_restore in the same basic
   block and no calls or ASM_EXPRs are in between, or if this block's
   only outgoing edge is to EXIT_BLOCK and there are no calls or
   ASM_EXPRs after this __builtin_stack_restore.  */

static tree
optimize_stack_restore (gimple_stmt_iterator i)
{
  tree callee;
  gimple *stmt;

  basic_block bb = gsi_bb (i);
  gimple *call = gsi_stmt (i);

  if (gimple_code (call) != GIMPLE_CALL
      || gimple_call_num_args (call) != 1
      || TREE_CODE (gimple_call_arg (call, 0)) != SSA_NAME
      || !POINTER_TYPE_P (TREE_TYPE (gimple_call_arg (call, 0))))
    return NULL_TREE;

  for (gsi_next (&i); !gsi_end_p (i); gsi_next (&i))
    {
      stmt = gsi_stmt (i);
      if (gimple_code (stmt) == GIMPLE_ASM)
        return NULL_TREE;
      if (gimple_code (stmt) != GIMPLE_CALL)
        continue;

      callee = gimple_call_fndecl (stmt);
      if (!callee
          || !fndecl_built_in_p (callee, BUILT_IN_NORMAL)
          /* All regular builtins are ok, just obviously not alloca.  */
          || ALLOCA_FUNCTION_CODE_P (DECL_FUNCTION_CODE (callee)))
        return NULL_TREE;

      if (fndecl_built_in_p (callee, BUILT_IN_STACK_RESTORE))
        goto second_stack_restore;
    }

  if (!gsi_end_p (i))
    return NULL_TREE;

  /* Allow one successor of the exit block, or zero successors.  */
  switch (EDGE_COUNT (bb->succs))
    {
    case 0:
      break;
    case 1:
      if (single_succ_edge (bb)->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
        return NULL_TREE;
      break;
    default:
      return NULL_TREE;
    }
 second_stack_restore:

  /* If there's exactly one use, then zap the call to __builtin_stack_save.
     If there are multiple uses, then the last one should remove the call.
     In any case, whether the call to __builtin_stack_save can be removed
     or not is irrelevant to removing the call to __builtin_stack_restore.  */
  if (has_single_use (gimple_call_arg (call, 0)))
    {
      gimple *stack_save = SSA_NAME_DEF_STMT (gimple_call_arg (call, 0));
      if (is_gimple_call (stack_save))
        {
          callee = gimple_call_fndecl (stack_save);
          if (callee && fndecl_built_in_p (callee, BUILT_IN_STACK_SAVE))
            {
              gimple_stmt_iterator stack_save_gsi;
              tree rhs;

              stack_save_gsi = gsi_for_stmt (stack_save);
              rhs = build_int_cst (TREE_TYPE (gimple_call_arg (call, 0)), 0);
              replace_call_with_value (&stack_save_gsi, rhs);
            }
        }
    }

  /* No effect, so the statement will be deleted.  */
  return integer_zero_node;
}
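
/* Illustrative example (not part of the pass): for

     void f (int n)
     {
       {
         char vla[n];
         use (vla);
       }
       return;
     }

   gimplification wraps the VLA block in __builtin_stack_save /
   __builtin_stack_restore.  Here the restore ends up in a block whose
   only successor is the exit block with no calls or asms after it, so
   optimize_stack_restore deletes it, and because the saved pointer then
   has a single use the paired __builtin_stack_save is replaced by a
   zero constant as well.  */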
/* If va_list type is a simple pointer and nothing special is needed,
   optimize __builtin_va_start (&ap, 0) into ap = __builtin_next_arg (0),
   __builtin_va_end (&ap) out as NOP and __builtin_va_copy into a simple
   pointer assignment.  */

static tree
optimize_stdarg_builtin (gimple *call)
{
  tree callee, lhs, rhs, cfun_va_list;
  bool va_list_simple_ptr;
  location_t loc = gimple_location (call);

  callee = gimple_call_fndecl (call);

  cfun_va_list = targetm.fn_abi_va_list (callee);
  va_list_simple_ptr = POINTER_TYPE_P (cfun_va_list)
                       && (TREE_TYPE (cfun_va_list) == void_type_node
                           || TREE_TYPE (cfun_va_list) == char_type_node);

  switch (DECL_FUNCTION_CODE (callee))
    {
    case BUILT_IN_VA_START:
      if (!va_list_simple_ptr
          || targetm.expand_builtin_va_start != NULL
          || !builtin_decl_explicit_p (BUILT_IN_NEXT_ARG))
        return NULL_TREE;

      if (gimple_call_num_args (call) != 2)
        return NULL_TREE;

      lhs = gimple_call_arg (call, 0);
      if (!POINTER_TYPE_P (TREE_TYPE (lhs))
          || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs)))
             != TYPE_MAIN_VARIANT (cfun_va_list))
        return NULL_TREE;

      lhs = build_fold_indirect_ref_loc (loc, lhs);
      rhs = build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_NEXT_ARG),
                                 1, integer_zero_node);
      rhs = fold_convert_loc (loc, TREE_TYPE (lhs), rhs);
      return build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, rhs);

    case BUILT_IN_VA_COPY:
      if (!va_list_simple_ptr)
        return NULL_TREE;

      if (gimple_call_num_args (call) != 2)
        return NULL_TREE;

      lhs = gimple_call_arg (call, 0);
      if (!POINTER_TYPE_P (TREE_TYPE (lhs))
          || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs)))
             != TYPE_MAIN_VARIANT (cfun_va_list))
        return NULL_TREE;

      lhs = build_fold_indirect_ref_loc (loc, lhs);
      rhs = gimple_call_arg (call, 1);
      if (TYPE_MAIN_VARIANT (TREE_TYPE (rhs))
          != TYPE_MAIN_VARIANT (cfun_va_list))
        return NULL_TREE;

      rhs = fold_convert_loc (loc, TREE_TYPE (lhs), rhs);
      return build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, rhs);

    case BUILT_IN_VA_END:
      /* No effect, so the statement will be deleted.  */
      return integer_zero_node;

    default:
      gcc_unreachable ();
    }
}
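
/* Illustrative example (not part of the pass): on targets where va_list
   is a plain character or void pointer, a sequence like

     va_list ap;
     va_start (ap, last);   ...   va_end (ap);

   is rewritten by optimize_stdarg_builtin into roughly

     ap = __builtin_next_arg (0);   ...   (the va_end call is removed)

   and va_copy (dst, src) becomes the plain pointer assignment
   dst = src.  */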
/* Attempt to make the block of __builtin_unreachable I unreachable by changing
   the incoming jumps.  Return true if at least one jump was changed.  */

static bool
optimize_unreachable (gimple_stmt_iterator i)
{
  basic_block bb = gsi_bb (i);
  gimple_stmt_iterator gsi;
  gimple *stmt;
  edge_iterator ei;
  edge e;
  bool ret;

  if (flag_sanitize & SANITIZE_UNREACHABLE)
    return false;

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      stmt = gsi_stmt (gsi);

      if (is_gimple_debug (stmt))
        continue;

      if (glabel *label_stmt = dyn_cast <glabel *> (stmt))
        {
          /* Verify we do not need to preserve the label.  */
          if (FORCED_LABEL (gimple_label_label (label_stmt)))
            return false;

          continue;
        }

      /* Only handle the case that __builtin_unreachable is the first statement
         in the block.  We rely on DCE to remove stmts without side-effects
         before __builtin_unreachable.  */
      if (gsi_stmt (gsi) != gsi_stmt (i))
        return false;
      break;
    }

  ret = false;
  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      gsi = gsi_last_bb (e->src);
      if (gsi_end_p (gsi))
        continue;

      stmt = gsi_stmt (gsi);
      if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
        {
          if (e->flags & EDGE_TRUE_VALUE)
            gimple_cond_make_false (cond_stmt);
          else if (e->flags & EDGE_FALSE_VALUE)
            gimple_cond_make_true (cond_stmt);
          else
            continue;
          update_stmt (cond_stmt);
        }
      else
        {
          /* Todo: handle other cases.  Note that unreachable switch case
             statements have already been removed.  */
          continue;
        }

      ret = true;
    }

  return ret;
}
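
/* Illustrative example (not part of the pass): given

     if (x_1 > 10)
       goto <bb 3>;
     else
       goto <bb 4>;
     ...
     <bb 4>:
     __builtin_unreachable ();

   optimize_unreachable rewrites the condition feeding <bb 4> so that
   the edge into the unreachable block can never be taken (here the
   branch is forced onto its other arm), after which CFG cleanup removes
   the dead block.  */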
3268 _1 = __atomic_fetch_or_* (ptr_6, 1, _3);
3272 _1 = __atomic_fetch_or_* (ptr_6, 1, _3);
3276 _1 = __atomic_fetch_and_* (ptr_6, ~1, _3);
3280 _1 = __atomic_fetch_and_* (ptr_6, ~1, _3);
3284 USE_STMT is the gimplt statement which uses the return value of
3285 __atomic_fetch_or_*. LHS is the return value of __atomic_fetch_or_*.
3286 MASK is the mask passed to __atomic_fetch_or_*.
3290 convert_atomic_bit_not (enum internal_fn fn
, gimple
*use_stmt
,
3291 tree lhs
, tree mask
)
3294 if (fn
== IFN_ATOMIC_BIT_TEST_AND_RESET
)
3296 /* MASK must be ~1. */
3297 if (!operand_equal_p (build_int_cst (TREE_TYPE (lhs
),
3298 ~HOST_WIDE_INT_1
), mask
, 0))
3300 and_mask
= build_int_cst (TREE_TYPE (lhs
), 1);
3304 /* MASK must be 1. */
3305 if (!operand_equal_p (build_int_cst (TREE_TYPE (lhs
), 1), mask
, 0))
3310 tree use_lhs
= gimple_assign_lhs (use_stmt
);
3312 use_operand_p use_p
;
3313 gimple
*use_not_stmt
;
3315 if (!single_imm_use (use_lhs
, &use_p
, &use_not_stmt
)
3316 || !is_gimple_assign (use_not_stmt
))
3319 if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (use_not_stmt
)))
3322 tree use_not_lhs
= gimple_assign_lhs (use_not_stmt
);
3323 if (TREE_CODE (TREE_TYPE (use_not_lhs
)) != BOOLEAN_TYPE
)
3326 gimple_stmt_iterator gsi
;
3327 gsi
= gsi_for_stmt (use_stmt
);
3328 gsi_remove (&gsi
, true);
3329 tree var
= make_ssa_name (TREE_TYPE (lhs
));
3330 use_stmt
= gimple_build_assign (var
, BIT_AND_EXPR
, lhs
, and_mask
);
3331 gsi
= gsi_for_stmt (use_not_stmt
);
3332 gsi_insert_before (&gsi
, use_stmt
, GSI_NEW_STMT
);
3333 lhs
= gimple_assign_lhs (use_not_stmt
);
3334 gimple
*g
= gimple_build_assign (lhs
, EQ_EXPR
, var
,
3335 build_zero_cst (TREE_TYPE (mask
)));
3336 gsi_insert_after (&gsi
, g
, GSI_NEW_STMT
);
3337 gsi
= gsi_for_stmt (use_not_stmt
);
3338 gsi_remove (&gsi
, true);
3342 /* match.pd function to match atomic_bit_test_and pattern which
3344 _1 = __atomic_fetch_or_4 (&v, 1, 0);
3348 extern bool gimple_nop_atomic_bit_test_and_p (tree
, tree
*,
3350 extern bool gimple_nop_convert (tree
, tree
*, tree (*) (tree
));
3353 mask_2 = 1 << cnt_1;
3354 _4 = __atomic_fetch_or_* (ptr_6, mask_2, _3);
3357 _4 = .ATOMIC_BIT_TEST_AND_SET (ptr_6, cnt_1, 0, _3);
3359 If _5 is only used in _5 != 0 or _5 == 0 comparisons, 1
3360 is passed instead of 0, and the builtin just returns a zero
3361 or 1 value instead of the actual bit.
3362 Similarly for __sync_fetch_and_or_* (without the ", _3" part
3363 in there), and/or if mask_2 is a power of 2 constant.
3364 Similarly for xor instead of or, use ATOMIC_BIT_TEST_AND_COMPLEMENT
3365 in that case. And similarly for and instead of or, except that
3366 the second argument to the builtin needs to be one's complement
3367 of the mask instead of mask. */
3370 optimize_atomic_bit_test_and (gimple_stmt_iterator
*gsip
,
3371 enum internal_fn fn
, bool has_model_arg
,
3374 gimple
*call
= gsi_stmt (*gsip
);
3375 tree lhs
= gimple_call_lhs (call
);
3376 use_operand_p use_p
;
3381 if (!flag_inline_atomics
3383 || !gimple_call_builtin_p (call
, BUILT_IN_NORMAL
)
3385 || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs
)
3386 || !single_imm_use (lhs
, &use_p
, &use_stmt
)
3387 || !is_gimple_assign (use_stmt
)
3388 || !gimple_vdef (call
))
3393 case IFN_ATOMIC_BIT_TEST_AND_SET
:
3394 optab
= atomic_bit_test_and_set_optab
;
3396 case IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT
:
3397 optab
= atomic_bit_test_and_complement_optab
;
3399 case IFN_ATOMIC_BIT_TEST_AND_RESET
:
3400 optab
= atomic_bit_test_and_reset_optab
;
3408 mask
= gimple_call_arg (call
, 1);
3409 tree_code rhs_code
= gimple_assign_rhs_code (use_stmt
);
3410 if (rhs_code
!= BIT_AND_EXPR
)
3412 if (rhs_code
!= NOP_EXPR
&& rhs_code
!= BIT_NOT_EXPR
)
3415 tree use_lhs
= gimple_assign_lhs (use_stmt
);
3416 if (TREE_CODE (use_lhs
) == SSA_NAME
3417 && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use_lhs
))
3420 tree use_rhs
= gimple_assign_rhs1 (use_stmt
);
3424 if (optab_handler (optab
, TYPE_MODE (TREE_TYPE (lhs
)))
3425 == CODE_FOR_nothing
)
3429 gimple_stmt_iterator gsi
;
3433 if (rhs_code
== BIT_NOT_EXPR
)
3435 g
= convert_atomic_bit_not (fn
, use_stmt
, lhs
, mask
);
3441 else if (TREE_CODE (TREE_TYPE (use_lhs
)) == BOOLEAN_TYPE
)
3444 if (fn
== IFN_ATOMIC_BIT_TEST_AND_RESET
)
3446 /* MASK must be ~1. */
3447 if (!operand_equal_p (build_int_cst (TREE_TYPE (lhs
),
3453 _1 = __atomic_fetch_and_* (ptr_6, ~1, _3);
3456 _1 = __atomic_fetch_and_* (ptr_6, ~1, _3);
3460 and_mask
= build_int_cst (TREE_TYPE (lhs
), 1);
3464 and_mask
= build_int_cst (TREE_TYPE (lhs
), 1);
3465 if (!operand_equal_p (and_mask
, mask
, 0))
3469 _1 = __atomic_fetch_or_* (ptr_6, 1, _3);
3472 _1 = __atomic_fetch_or_* (ptr_6, 1, _3);
3477 var
= make_ssa_name (TREE_TYPE (use_rhs
));
3478 replace_uses_by (use_rhs
, var
);
3479 g
= gimple_build_assign (var
, BIT_AND_EXPR
, use_rhs
,
3481 gsi
= gsi_for_stmt (use_stmt
);
3482 gsi_insert_before (&gsi
, g
, GSI_NEW_STMT
);
3486 else if (TYPE_PRECISION (TREE_TYPE (use_lhs
))
3487 <= TYPE_PRECISION (TREE_TYPE (use_rhs
)))
3489 gimple
*use_nop_stmt
;
3490 if (!single_imm_use (use_lhs
, &use_p
, &use_nop_stmt
)
3491 || (!is_gimple_assign (use_nop_stmt
)
3492 && gimple_code (use_nop_stmt
) != GIMPLE_COND
))
3499 tree use_nop_lhs
= nullptr;
3500 rhs_code
= ERROR_MARK
;
3501 if (is_gimple_assign (use_nop_stmt
))
3503 use_nop_lhs
= gimple_assign_lhs (use_nop_stmt
);
3504 rhs_code
= gimple_assign_rhs_code (use_nop_stmt
);
3506 if (!use_nop_lhs
|| rhs_code
!= BIT_AND_EXPR
)
3512 && TREE_CODE (use_nop_lhs
) == SSA_NAME
3513 && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use_nop_lhs
))
3515 if (use_nop_lhs
&& rhs_code
== BIT_NOT_EXPR
)
3520 g
= convert_atomic_bit_not (fn
, use_nop_stmt
, lhs
,
3525 _1 = __atomic_fetch_or_4 (ptr_6, 1, _3);
3530 _1 = __atomic_fetch_or_4 (ptr_6, ~1, _3);
3534 _1 = __atomic_fetch_and_4 (ptr_6, ~1, _3);
3539 _1 = __atomic_fetch_and_4 (ptr_6, 1, _3);
3543 gsi
= gsi_for_stmt (use_stmt
);
3544 gsi_remove (&gsi
, true);
3550 tree cmp_rhs1
, cmp_rhs2
;
3556 if (TREE_CODE (TREE_TYPE (use_nop_lhs
))
3559 cmp_rhs1
= gimple_assign_rhs1 (use_nop_stmt
);
3560 cmp_rhs2
= gimple_assign_rhs2 (use_nop_stmt
);
3567 rhs_code
= gimple_cond_code (use_nop_stmt
);
3568 cmp_rhs1
= gimple_cond_lhs (use_nop_stmt
);
3569 cmp_rhs2
= gimple_cond_rhs (use_nop_stmt
);
3571 if (rhs_code
!= GE_EXPR
&& rhs_code
!= LT_EXPR
)
3573 if (use_lhs
!= cmp_rhs1
)
3575 if (!integer_zerop (cmp_rhs2
))
3580 unsigned HOST_WIDE_INT bytes
3581 = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (use_rhs
)));
3582 ibit
= bytes
* BITS_PER_UNIT
- 1;
3583 unsigned HOST_WIDE_INT highest
3584 = HOST_WIDE_INT_1U
<< ibit
;
3586 if (fn
== IFN_ATOMIC_BIT_TEST_AND_RESET
)
3588 /* Get the signed maximum of the USE_RHS type. */
3589 and_mask
= build_int_cst (TREE_TYPE (use_rhs
),
3591 if (!operand_equal_p (and_mask
, mask
, 0))
3595 _1 = __atomic_fetch_and_4 (ptr_6, 0x7fffffff, _3);
3596 _5 = (signed int) _1;
3597 _4 = _5 < 0 or _5 >= 0;
3599 _1 = __atomic_fetch_and_4 (ptr_6, 0x7fffffff, _3);
3600 _6 = _1 & 0x80000000;
3601 _4 = _6 != 0 or _6 == 0;
3603 _1 = __atomic_fetch_and_4 (ptr_6, 0x7fffffff, _3);
3604 _5 = (signed int) _1;
3605 if (_5 < 0 or _5 >= 0)
3607 _1 = __atomic_fetch_and_4 (ptr_6, 0x7fffffff, _3);
3608 _6 = _1 & 0x80000000;
3609 if (_6 != 0 or _6 == 0)
3611 and_mask
= build_int_cst (TREE_TYPE (use_rhs
),
3616 /* Get the signed minimum of the USE_RHS type. */
3617 and_mask
= build_int_cst (TREE_TYPE (use_rhs
),
3619 if (!operand_equal_p (and_mask
, mask
, 0))
3623 _1 = __atomic_fetch_or_4 (ptr_6, 0x80000000, _3);
3624 _5 = (signed int) _1;
3625 _4 = _5 < 0 or _5 >= 0;
3627 _1 = __atomic_fetch_or_4 (ptr_6, 0x80000000, _3);
3628 _6 = _1 & 0x80000000;
3629 _4 = _6 != 0 or _6 == 0;
3631 _1 = __atomic_fetch_or_4 (ptr_6, 0x80000000, _3);
3632 _5 = (signed int) _1;
3633 if (_5 < 0 or _5 >= 0)
3635 _1 = __atomic_fetch_or_4 (ptr_6, 0x80000000, _3);
3636 _6 = _1 & 0x80000000;
3637 if (_6 != 0 or _6 == 0)
3640 var
= make_ssa_name (TREE_TYPE (use_rhs
));
3641 gsi
= gsi_for_stmt (use_stmt
);
3642 gsi_remove (&gsi
, true);
3643 g
= gimple_build_assign (var
, BIT_AND_EXPR
, use_rhs
,
3645 gsi
= gsi_for_stmt (use_nop_stmt
);
3646 gsi_insert_before (&gsi
, g
, GSI_NEW_STMT
);
3648 rhs_code
= rhs_code
== GE_EXPR
? EQ_EXPR
: NE_EXPR
;
3649 tree const_zero
= build_zero_cst (TREE_TYPE (use_rhs
));
3651 g
= gimple_build_assign (use_nop_lhs
, rhs_code
,
3654 g
= gimple_build_cond (rhs_code
, var
, const_zero
,
3656 gsi_insert_after (&gsi
, g
, GSI_NEW_STMT
);
3657 gsi
= gsi_for_stmt (use_nop_stmt
);
3658 gsi_remove (&gsi
, true);
3665 if (!gimple_nop_atomic_bit_test_and_p (use_nop_lhs
,
3667 || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (match_op
[2])
3668 || !single_imm_use (match_op
[2], &use_p
, &g
)
3669 || !is_gimple_assign (g
))
3672 if (TREE_CODE (match_op
[1]) == INTEGER_CST
)
3674 ibit
= tree_log2 (match_op
[1]);
3675 gcc_assert (ibit
>= 0);
3679 g
= SSA_NAME_DEF_STMT (match_op
[1]);
3680 gcc_assert (is_gimple_assign (g
));
3681 bit
= gimple_assign_rhs2 (g
);
3684 _1 = __atomic_fetch_or_4 (ptr_6, mask, _3);
3688 _1 = __atomic_fetch_or_4 (ptr_6, mask, _3);
3693 _2 = (unsigned int) _1;
3694 _3 = __atomic_fetch_and_4 (ptr_6, _2, 0);
3698 _1 = __atomic_fetch_and_* (ptr_6, ~mask_7, _3);
3703 _1 = __atomic_fetch_and_4 (ptr_6, ~mask, _3);
3704 _2 = (short int) _1;
3707 _1 = __atomic_fetch_and_4 (ptr_6, ~mask, _3);
3709 _5 = (short int) _8;
3711 gimple_seq stmts
= NULL
;
3712 match_op
[1] = gimple_convert (&stmts
,
3713 TREE_TYPE (use_rhs
),
3715 var
= gimple_build (&stmts
, BIT_AND_EXPR
,
3716 TREE_TYPE (use_rhs
), use_rhs
, match_op
[1]);
3717 gsi
= gsi_for_stmt (use_stmt
);
3718 gsi_remove (&gsi
, true);
3719 release_defs (use_stmt
);
3720 use_stmt
= gimple_seq_last_stmt (stmts
);
3721 gsi
= gsi_for_stmt (use_nop_stmt
);
3722 gsi_insert_seq_before (&gsi
, stmts
, GSI_SAME_STMT
);
3723 gimple_assign_set_rhs_with_ops (&gsi
, CONVERT_EXPR
, var
);
3724 update_stmt (use_nop_stmt
);
3734 bit
= build_int_cst (TREE_TYPE (lhs
), ibit
);
3737 else if (optab_handler (optab
, TYPE_MODE (TREE_TYPE (lhs
)))
3738 == CODE_FOR_nothing
)
3741 tree use_lhs
= gimple_assign_lhs (use_stmt
);
3747 if (TREE_CODE (mask
) == INTEGER_CST
)
3749 if (fn
== IFN_ATOMIC_BIT_TEST_AND_RESET
)
3750 mask
= const_unop (BIT_NOT_EXPR
, TREE_TYPE (mask
), mask
);
3751 mask
= fold_convert (TREE_TYPE (lhs
), mask
);
3752 int ibit
= tree_log2 (mask
);
3755 bit
= build_int_cst (TREE_TYPE (lhs
), ibit
);
3757 else if (TREE_CODE (mask
) == SSA_NAME
)
3759 gimple
*g
= SSA_NAME_DEF_STMT (mask
);
3761 if (gimple_nop_convert (mask
, &match_op
, NULL
))
3764 if (TREE_CODE (mask
) != SSA_NAME
)
3766 g
= SSA_NAME_DEF_STMT (mask
);
3768 if (!is_gimple_assign (g
))
3771 if (fn
== IFN_ATOMIC_BIT_TEST_AND_RESET
)
3773 if (gimple_assign_rhs_code (g
) != BIT_NOT_EXPR
)
3775 mask
= gimple_assign_rhs1 (g
);
3776 if (TREE_CODE (mask
) != SSA_NAME
)
3778 g
= SSA_NAME_DEF_STMT (mask
);
3781 if (!is_gimple_assign (g
)
3782 || gimple_assign_rhs_code (g
) != LSHIFT_EXPR
3783 || !integer_onep (gimple_assign_rhs1 (g
)))
3785 bit
= gimple_assign_rhs2 (g
);
3791 if (gimple_assign_rhs1 (use_stmt
) == lhs
)
3792 cmp_mask
= gimple_assign_rhs2 (use_stmt
);
3794 cmp_mask
= gimple_assign_rhs1 (use_stmt
);
3797 if (gimple_nop_convert (cmp_mask
, &match_op
, NULL
))
3798 cmp_mask
= match_op
;
3800 if (!operand_equal_p (cmp_mask
, mask
, 0))
3804 bool use_bool
= true;
3805 bool has_debug_uses
= false;
3806 imm_use_iterator iter
;
3809 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use_lhs
))
3811 FOR_EACH_IMM_USE_STMT (g
, iter
, use_lhs
)
3813 enum tree_code code
= ERROR_MARK
;
3814 tree op0
= NULL_TREE
, op1
= NULL_TREE
;
3815 if (is_gimple_debug (g
))
3817 has_debug_uses
= true;
3820 else if (is_gimple_assign (g
))
3821 switch (gimple_assign_rhs_code (g
))
3824 op1
= gimple_assign_rhs1 (g
);
3825 code
= TREE_CODE (op1
);
3826 if (TREE_CODE_CLASS (code
) != tcc_comparison
)
3828 op0
= TREE_OPERAND (op1
, 0);
3829 op1
= TREE_OPERAND (op1
, 1);
3833 code
= gimple_assign_rhs_code (g
);
3834 op0
= gimple_assign_rhs1 (g
);
3835 op1
= gimple_assign_rhs2 (g
);
3840 else if (gimple_code (g
) == GIMPLE_COND
)
3842 code
= gimple_cond_code (g
);
3843 op0
= gimple_cond_lhs (g
);
3844 op1
= gimple_cond_rhs (g
);
3847 if ((code
== EQ_EXPR
|| code
== NE_EXPR
)
3849 && integer_zerop (op1
))
3851 use_operand_p use_p
;
3853 FOR_EACH_IMM_USE_ON_STMT (use_p
, iter
)
3863 tree new_lhs
= make_ssa_name (TREE_TYPE (lhs
));
3864 tree flag
= build_int_cst (TREE_TYPE (lhs
), use_bool
);
3866 g
= gimple_build_call_internal (fn
, 5, gimple_call_arg (call
, 0),
3867 bit
, flag
, gimple_call_arg (call
, 2),
3868 gimple_call_fn (call
));
3870 g
= gimple_build_call_internal (fn
, 4, gimple_call_arg (call
, 0),
3871 bit
, flag
, gimple_call_fn (call
));
3872 gimple_call_set_lhs (g
, new_lhs
);
3873 gimple_set_location (g
, gimple_location (call
));
3874 gimple_move_vops (g
, call
);
3875 bool throws
= stmt_can_throw_internal (cfun
, call
);
3876 gimple_call_set_nothrow (as_a
<gcall
*> (g
),
3877 gimple_call_nothrow_p (as_a
<gcall
*> (call
)));
3878 gimple_stmt_iterator gsi
= *gsip
;
3879 gsi_insert_after (&gsi
, g
, GSI_NEW_STMT
);
3883 maybe_clean_or_replace_eh_stmt (call
, g
);
3884 if (after
|| (use_bool
&& has_debug_uses
))
3885 e
= find_fallthru_edge (gsi_bb (gsi
)->succs
);
3889 /* The internal function returns the value of the specified bit
3890 before the atomic operation. If we are interested in the value
3891 of the specified bit after the atomic operation (makes only sense
3892 for xor, otherwise the bit content is compile time known),
3893 we need to invert the bit. */
3894 tree mask_convert
= mask
;
3895 gimple_seq stmts
= NULL
;
3897 mask_convert
= gimple_convert (&stmts
, TREE_TYPE (lhs
), mask
);
3898 new_lhs
= gimple_build (&stmts
, BIT_XOR_EXPR
, TREE_TYPE (lhs
), new_lhs
,
3899 use_bool
? build_int_cst (TREE_TYPE (lhs
), 1)
3903 gsi_insert_seq_on_edge_immediate (e
, stmts
);
3904 gsi
= gsi_for_stmt (gimple_seq_last (stmts
));
3907 gsi_insert_seq_after (&gsi
, stmts
, GSI_NEW_STMT
);
3909 if (use_bool
&& has_debug_uses
)
3911 tree temp
= NULL_TREE
;
3912 if (!throws
|| after
|| single_pred_p (e
->dest
))
3914 temp
= build_debug_expr_decl (TREE_TYPE (lhs
));
3915 tree t
= build2 (LSHIFT_EXPR
, TREE_TYPE (lhs
), new_lhs
, bit
);
3916 g
= gimple_build_debug_bind (temp
, t
, g
);
3917 if (throws
&& !after
)
3919 gsi
= gsi_after_labels (e
->dest
);
3920 gsi_insert_before (&gsi
, g
, GSI_SAME_STMT
);
3923 gsi_insert_after (&gsi
, g
, GSI_NEW_STMT
);
3925 FOR_EACH_IMM_USE_STMT (g
, iter
, use_lhs
)
3926 if (is_gimple_debug (g
))
3928 use_operand_p use_p
;
3929 if (temp
== NULL_TREE
)
3930 gimple_debug_bind_reset_value (g
);
3932 FOR_EACH_IMM_USE_ON_STMT (use_p
, iter
)
3933 SET_USE (use_p
, temp
);
3937 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_lhs
)
3938 = SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use_lhs
);
3939 replace_uses_by (use_lhs
, new_lhs
);
3940 gsi
= gsi_for_stmt (use_stmt
);
3941 gsi_remove (&gsi
, true);
3942 release_defs (use_stmt
);
3943 gsi_remove (gsip
, true);
3944 release_ssa_name (lhs
);
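
/* Illustrative example (not part of the pass): when the target provides
   atomic_bit_test_and_set, a sequence like

     _1 = __atomic_fetch_or_4 (&v, 4, __ATOMIC_SEQ_CST);
     _2 = _1 & 4;
     if (_2 != 0) ...

   is rewritten by optimize_atomic_bit_test_and into roughly

     _3 = .ATOMIC_BIT_TEST_AND_SET (&v, 2, 1, __ATOMIC_SEQ_CST);
     if (_3 != 0) ...

   i.e. only the tested bit (bit number 2 here) is materialized, which
   typically maps to a single bit-test-and-set style instruction.  */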
3949 _4 = __atomic_add_fetch_* (ptr_6, arg_2, _3);
3952 _4 = .ATOMIC_ADD_FETCH_CMP_0 (EQ_EXPR, ptr_6, arg_2, _3);
3954 Similarly for __sync_add_and_fetch_* (without the ", _3" part
3958 optimize_atomic_op_fetch_cmp_0 (gimple_stmt_iterator
*gsip
,
3959 enum internal_fn fn
, bool has_model_arg
)
3961 gimple
*call
= gsi_stmt (*gsip
);
3962 tree lhs
= gimple_call_lhs (call
);
3963 use_operand_p use_p
;
3966 if (!flag_inline_atomics
3968 || !gimple_call_builtin_p (call
, BUILT_IN_NORMAL
)
3970 || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs
)
3971 || !single_imm_use (lhs
, &use_p
, &use_stmt
)
3972 || !gimple_vdef (call
))
3978 case IFN_ATOMIC_ADD_FETCH_CMP_0
:
3979 optab
= atomic_add_fetch_cmp_0_optab
;
3981 case IFN_ATOMIC_SUB_FETCH_CMP_0
:
3982 optab
= atomic_sub_fetch_cmp_0_optab
;
3984 case IFN_ATOMIC_AND_FETCH_CMP_0
:
3985 optab
= atomic_and_fetch_cmp_0_optab
;
3987 case IFN_ATOMIC_OR_FETCH_CMP_0
:
3988 optab
= atomic_or_fetch_cmp_0_optab
;
3990 case IFN_ATOMIC_XOR_FETCH_CMP_0
:
3991 optab
= atomic_xor_fetch_cmp_0_optab
;
3997 if (optab_handler (optab
, TYPE_MODE (TREE_TYPE (lhs
)))
3998 == CODE_FOR_nothing
)
4002 if (gimple_assign_cast_p (use_stmt
))
4004 use_lhs
= gimple_assign_lhs (use_stmt
);
4005 if (!tree_nop_conversion_p (TREE_TYPE (use_lhs
), TREE_TYPE (lhs
))
4006 || (!INTEGRAL_TYPE_P (TREE_TYPE (use_lhs
))
4007 && !POINTER_TYPE_P (TREE_TYPE (use_lhs
)))
4008 || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use_lhs
)
4009 || !single_imm_use (use_lhs
, &use_p
, &use_stmt
))
4012 enum tree_code code
= ERROR_MARK
;
4013 tree op0
= NULL_TREE
, op1
= NULL_TREE
;
4014 if (is_gimple_assign (use_stmt
))
4015 switch (gimple_assign_rhs_code (use_stmt
))
4018 op1
= gimple_assign_rhs1 (use_stmt
);
4019 code
= TREE_CODE (op1
);
4020 if (TREE_CODE_CLASS (code
) == tcc_comparison
)
4022 op0
= TREE_OPERAND (op1
, 0);
4023 op1
= TREE_OPERAND (op1
, 1);
4027 code
= gimple_assign_rhs_code (use_stmt
);
4028 if (TREE_CODE_CLASS (code
) == tcc_comparison
)
4030 op0
= gimple_assign_rhs1 (use_stmt
);
4031 op1
= gimple_assign_rhs2 (use_stmt
);
4035 else if (gimple_code (use_stmt
) == GIMPLE_COND
)
4037 code
= gimple_cond_code (use_stmt
);
4038 op0
= gimple_cond_lhs (use_stmt
);
4039 op1
= gimple_cond_rhs (use_stmt
);
4048 if (!INTEGRAL_TYPE_P (TREE_TYPE (use_lhs
))
4049 || TREE_CODE (TREE_TYPE (use_lhs
)) == BOOLEAN_TYPE
4050 || TYPE_UNSIGNED (TREE_TYPE (use_lhs
)))
4055 if (op0
== use_lhs
&& integer_zerop (op1
))
4065 /* Use special encoding of the operation. We want to also
4066 encode the mode in the first argument and for neither EQ_EXPR
4067 etc. nor EQ etc. we can rely it will fit into QImode. */
4068 case EQ_EXPR
: encoded
= ATOMIC_OP_FETCH_CMP_0_EQ
; break;
4069 case NE_EXPR
: encoded
= ATOMIC_OP_FETCH_CMP_0_NE
; break;
4070 case LT_EXPR
: encoded
= ATOMIC_OP_FETCH_CMP_0_LT
; break;
4071 case LE_EXPR
: encoded
= ATOMIC_OP_FETCH_CMP_0_LE
; break;
4072 case GT_EXPR
: encoded
= ATOMIC_OP_FETCH_CMP_0_GT
; break;
4073 case GE_EXPR
: encoded
= ATOMIC_OP_FETCH_CMP_0_GE
; break;
4074 default: gcc_unreachable ();
4077 tree new_lhs
= make_ssa_name (boolean_type_node
);
4079 tree flag
= build_int_cst (TREE_TYPE (lhs
), encoded
);
4081 g
= gimple_build_call_internal (fn
, 5, flag
,
4082 gimple_call_arg (call
, 0),
4083 gimple_call_arg (call
, 1),
4084 gimple_call_arg (call
, 2),
4085 gimple_call_fn (call
));
4087 g
= gimple_build_call_internal (fn
, 4, flag
,
4088 gimple_call_arg (call
, 0),
4089 gimple_call_arg (call
, 1),
4090 gimple_call_fn (call
));
4091 gimple_call_set_lhs (g
, new_lhs
);
4092 gimple_set_location (g
, gimple_location (call
));
4093 gimple_move_vops (g
, call
);
4094 bool throws
= stmt_can_throw_internal (cfun
, call
);
4095 gimple_call_set_nothrow (as_a
<gcall
*> (g
),
4096 gimple_call_nothrow_p (as_a
<gcall
*> (call
)));
4097 gimple_stmt_iterator gsi
= *gsip
;
4098 gsi_insert_after (&gsi
, g
, GSI_SAME_STMT
);
4100 maybe_clean_or_replace_eh_stmt (call
, g
);
4101 if (is_gimple_assign (use_stmt
))
4102 switch (gimple_assign_rhs_code (use_stmt
))
4105 gimple_assign_set_rhs1 (use_stmt
, new_lhs
);
4108 gsi
= gsi_for_stmt (use_stmt
);
4109 if (tree ulhs
= gimple_assign_lhs (use_stmt
))
4110 if (useless_type_conversion_p (TREE_TYPE (ulhs
),
4113 gimple_assign_set_rhs_with_ops (&gsi
, SSA_NAME
, new_lhs
);
4116 gimple_assign_set_rhs_with_ops (&gsi
, NOP_EXPR
, new_lhs
);
4119 else if (gimple_code (use_stmt
) == GIMPLE_COND
)
4121 gcond
*use_cond
= as_a
<gcond
*> (use_stmt
);
4122 gimple_cond_set_code (use_cond
, NE_EXPR
);
4123 gimple_cond_set_lhs (use_cond
, new_lhs
);
4124 gimple_cond_set_rhs (use_cond
, boolean_false_node
);
4127 update_stmt (use_stmt
);
4130 gsi
= gsi_for_stmt (SSA_NAME_DEF_STMT (use_lhs
));
4131 gsi_remove (&gsi
, true);
4132 release_ssa_name (use_lhs
);
4134 gsi_remove (gsip
, true);
4135 release_ssa_name (lhs
);
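
/* Illustrative example (not part of the pass): when the target provides
   atomic_add_fetch_cmp_0, a sequence such as

     _1 = __atomic_add_fetch_4 (&v, 1, __ATOMIC_SEQ_CST);
     if (_1 == 0) ...

   is rewritten by optimize_atomic_op_fetch_cmp_0 into roughly

     _2 = .ATOMIC_ADD_FETCH_CMP_0 (ATOMIC_OP_FETCH_CMP_0_EQ, &v, 1,
                                   __ATOMIC_SEQ_CST);
     if (_2 != 0) ...

   so the result of the addition never needs to be materialized; the
   comparison outcome can come straight from the flags set by the
   atomic instruction.  */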
/* Optimize
     a = {};
     b = a;
   into
     a = {};
     b = {};
   Similarly for memset (&a, ..., sizeof (a)); instead of a = {};
   and/or memcpy (&b, &a, sizeof (a)); instead of b = a;  */

static void
optimize_memcpy (gimple_stmt_iterator *gsip, tree dest, tree src, tree len)
{
  gimple *stmt = gsi_stmt (*gsip);
  if (gimple_has_volatile_ops (stmt))
    return;

  tree vuse = gimple_vuse (stmt);
  if (vuse == NULL)
    return;

  gimple *defstmt = SSA_NAME_DEF_STMT (vuse);
  tree src2 = NULL_TREE, len2 = NULL_TREE;
  poly_int64 offset, offset2;
  tree val = integer_zero_node;
  if (gimple_store_p (defstmt)
      && gimple_assign_single_p (defstmt)
      && TREE_CODE (gimple_assign_rhs1 (defstmt)) == CONSTRUCTOR
      && !gimple_clobber_p (defstmt))
    src2 = gimple_assign_lhs (defstmt);
  else if (gimple_call_builtin_p (defstmt, BUILT_IN_MEMSET)
           && TREE_CODE (gimple_call_arg (defstmt, 0)) == ADDR_EXPR
           && TREE_CODE (gimple_call_arg (defstmt, 1)) == INTEGER_CST)
    {
      src2 = TREE_OPERAND (gimple_call_arg (defstmt, 0), 0);
      len2 = gimple_call_arg (defstmt, 2);
      val = gimple_call_arg (defstmt, 1);
      /* For non-0 val, we'd have to transform stmt from assignment
         into memset (only if dest is addressable).  */
      if (!integer_zerop (val) && is_gimple_assign (stmt))
        src2 = NULL_TREE;
    }

  if (src2 == NULL_TREE)
    return;

  if (len == NULL_TREE)
    len = (TREE_CODE (src) == COMPONENT_REF
           ? DECL_SIZE_UNIT (TREE_OPERAND (src, 1))
           : TYPE_SIZE_UNIT (TREE_TYPE (src)));
  if (len2 == NULL_TREE)
    len2 = (TREE_CODE (src2) == COMPONENT_REF
            ? DECL_SIZE_UNIT (TREE_OPERAND (src2, 1))
            : TYPE_SIZE_UNIT (TREE_TYPE (src2)));
  if (len == NULL_TREE
      || !poly_int_tree_p (len)
      || len2 == NULL_TREE
      || !poly_int_tree_p (len2))
    return;

  src = get_addr_base_and_unit_offset (src, &offset);
  src2 = get_addr_base_and_unit_offset (src2, &offset2);
  if (src == NULL_TREE
      || src2 == NULL_TREE
      || maybe_lt (offset, offset2))
    return;

  if (!operand_equal_p (src, src2, 0))
    return;

  /* [ src + offset2, src + offset2 + len2 - 1 ] is set to val.
     Make sure that
     [ src + offset, src + offset + len - 1 ] is a subset of that.  */
  if (maybe_gt (wi::to_poly_offset (len) + (offset - offset2),
                wi::to_poly_offset (len2)))
    return;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Simplified\n  ");
      print_gimple_stmt (dump_file, stmt, 0, dump_flags);
      fprintf (dump_file, "after previous\n  ");
      print_gimple_stmt (dump_file, defstmt, 0, dump_flags);
    }

  /* For simplicity, don't change the kind of the stmt,
     turn dest = src; into dest = {}; and memcpy (&dest, &src, len);
     into memset (&dest, val, len);
     In theory we could change dest = src into memset if dest
     is addressable (maybe beneficial if val is not 0), or
     memcpy (&dest, &src, len) into dest = {} if len is the size
     of dest, dest isn't volatile.  */
  if (is_gimple_assign (stmt))
    {
      tree ctor = build_constructor (TREE_TYPE (dest), NULL);
      gimple_assign_set_rhs_from_tree (gsip, ctor);
      update_stmt (stmt);
    }
  else /* If stmt is memcpy, transform it into memset.  */
    {
      gcall *call = as_a <gcall *> (stmt);
      tree fndecl = builtin_decl_implicit (BUILT_IN_MEMSET);
      gimple_call_set_fndecl (call, fndecl);
      gimple_call_set_fntype (call, TREE_TYPE (fndecl));
      gimple_call_set_arg (call, 1, val);
      update_stmt (stmt);
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "into\n  ");
      print_gimple_stmt (dump_file, stmt, 0, dump_flags);
    }
}
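
/* Illustrative example (not part of the pass): for

     struct S a, b;
     memset (&a, 0, sizeof (a));
     memcpy (&b, &a, sizeof (a));

   optimize_memcpy sees that the copied bytes were just set to zero and
   turns the memcpy into memset (&b, 0, sizeof (a)); a plain aggregate
   copy b = a; following a = {}; is handled the same way and becomes
   b = {};.  */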
/* A simple pass that attempts to fold all builtin functions.  This pass
   is run after we've propagated as many constants as we can.  */

const pass_data pass_data_fold_builtins =
{
  GIMPLE_PASS, /* type */
  "fab", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_fold_builtins : public gimple_opt_pass
{
public:
  pass_fold_builtins (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_fold_builtins, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () final override { return new pass_fold_builtins (m_ctxt); }
  unsigned int execute (function *) final override;

}; // class pass_fold_builtins
4285 pass_fold_builtins::execute (function
*fun
)
4287 bool cfg_changed
= false;
4289 unsigned int todoflags
= 0;
4291 FOR_EACH_BB_FN (bb
, fun
)
4293 gimple_stmt_iterator i
;
4294 for (i
= gsi_start_bb (bb
); !gsi_end_p (i
); )
4296 gimple
*stmt
, *old_stmt
;
4298 enum built_in_function fcode
;
4300 stmt
= gsi_stmt (i
);
4302 if (gimple_code (stmt
) != GIMPLE_CALL
)
4304 /* Remove all *ssaname_N ={v} {CLOBBER}; stmts,
4305 after the last GIMPLE DSE they aren't needed and might
4306 unnecessarily keep the SSA_NAMEs live. */
4307 if (gimple_clobber_p (stmt
))
4309 tree lhs
= gimple_assign_lhs (stmt
);
4310 if (TREE_CODE (lhs
) == MEM_REF
4311 && TREE_CODE (TREE_OPERAND (lhs
, 0)) == SSA_NAME
)
4313 unlink_stmt_vdef (stmt
);
4314 gsi_remove (&i
, true);
4315 release_defs (stmt
);
4319 else if (gimple_assign_load_p (stmt
) && gimple_store_p (stmt
))
4320 optimize_memcpy (&i
, gimple_assign_lhs (stmt
),
4321 gimple_assign_rhs1 (stmt
), NULL_TREE
);
4326 callee
= gimple_call_fndecl (stmt
);
4328 && gimple_call_internal_p (stmt
, IFN_ASSUME
))
4330 gsi_remove (&i
, true);
4333 if (!callee
|| !fndecl_built_in_p (callee
, BUILT_IN_NORMAL
))
4339 fcode
= DECL_FUNCTION_CODE (callee
);
4344 tree result
= NULL_TREE
;
4345 switch (DECL_FUNCTION_CODE (callee
))
4347 case BUILT_IN_CONSTANT_P
:
4348 /* Resolve __builtin_constant_p. If it hasn't been
4349 folded to integer_one_node by now, it's fairly
4350 certain that the value simply isn't constant. */
4351 result
= integer_zero_node
;
4354 case BUILT_IN_ASSUME_ALIGNED
:
4355 /* Remove __builtin_assume_aligned. */
4356 result
= gimple_call_arg (stmt
, 0);
4359 case BUILT_IN_STACK_RESTORE
:
4360 result
= optimize_stack_restore (i
);
4366 case BUILT_IN_UNREACHABLE
:
4367 if (optimize_unreachable (i
))
4371 case BUILT_IN_ATOMIC_ADD_FETCH_1
:
4372 case BUILT_IN_ATOMIC_ADD_FETCH_2
:
4373 case BUILT_IN_ATOMIC_ADD_FETCH_4
:
4374 case BUILT_IN_ATOMIC_ADD_FETCH_8
:
4375 case BUILT_IN_ATOMIC_ADD_FETCH_16
:
4376 optimize_atomic_op_fetch_cmp_0 (&i
,
4377 IFN_ATOMIC_ADD_FETCH_CMP_0
,
4380 case BUILT_IN_SYNC_ADD_AND_FETCH_1
:
4381 case BUILT_IN_SYNC_ADD_AND_FETCH_2
:
4382 case BUILT_IN_SYNC_ADD_AND_FETCH_4
:
4383 case BUILT_IN_SYNC_ADD_AND_FETCH_8
:
4384 case BUILT_IN_SYNC_ADD_AND_FETCH_16
:
4385 optimize_atomic_op_fetch_cmp_0 (&i
,
4386 IFN_ATOMIC_ADD_FETCH_CMP_0
,
4390 case BUILT_IN_ATOMIC_SUB_FETCH_1
:
4391 case BUILT_IN_ATOMIC_SUB_FETCH_2
:
4392 case BUILT_IN_ATOMIC_SUB_FETCH_4
:
4393 case BUILT_IN_ATOMIC_SUB_FETCH_8
:
4394 case BUILT_IN_ATOMIC_SUB_FETCH_16
:
4395 optimize_atomic_op_fetch_cmp_0 (&i
,
4396 IFN_ATOMIC_SUB_FETCH_CMP_0
,
4399 case BUILT_IN_SYNC_SUB_AND_FETCH_1
:
4400 case BUILT_IN_SYNC_SUB_AND_FETCH_2
:
4401 case BUILT_IN_SYNC_SUB_AND_FETCH_4
:
4402 case BUILT_IN_SYNC_SUB_AND_FETCH_8
:
4403 case BUILT_IN_SYNC_SUB_AND_FETCH_16
:
4404 optimize_atomic_op_fetch_cmp_0 (&i
,
4405 IFN_ATOMIC_SUB_FETCH_CMP_0
,
4409 case BUILT_IN_ATOMIC_FETCH_OR_1
:
4410 case BUILT_IN_ATOMIC_FETCH_OR_2
:
4411 case BUILT_IN_ATOMIC_FETCH_OR_4
:
4412 case BUILT_IN_ATOMIC_FETCH_OR_8
:
4413 case BUILT_IN_ATOMIC_FETCH_OR_16
:
4414 optimize_atomic_bit_test_and (&i
,
4415 IFN_ATOMIC_BIT_TEST_AND_SET
,
4418 case BUILT_IN_SYNC_FETCH_AND_OR_1
:
4419 case BUILT_IN_SYNC_FETCH_AND_OR_2
:
4420 case BUILT_IN_SYNC_FETCH_AND_OR_4
:
4421 case BUILT_IN_SYNC_FETCH_AND_OR_8
:
4422 case BUILT_IN_SYNC_FETCH_AND_OR_16
:
4423 optimize_atomic_bit_test_and (&i
,
4424 IFN_ATOMIC_BIT_TEST_AND_SET
,
4428 case BUILT_IN_ATOMIC_FETCH_XOR_1
:
4429 case BUILT_IN_ATOMIC_FETCH_XOR_2
:
4430 case BUILT_IN_ATOMIC_FETCH_XOR_4
:
4431 case BUILT_IN_ATOMIC_FETCH_XOR_8
:
4432 case BUILT_IN_ATOMIC_FETCH_XOR_16
:
4433 optimize_atomic_bit_test_and
4434 (&i
, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT
, true, false);
4436 case BUILT_IN_SYNC_FETCH_AND_XOR_1
:
4437 case BUILT_IN_SYNC_FETCH_AND_XOR_2
:
4438 case BUILT_IN_SYNC_FETCH_AND_XOR_4
:
4439 case BUILT_IN_SYNC_FETCH_AND_XOR_8
:
4440 case BUILT_IN_SYNC_FETCH_AND_XOR_16
:
4441 optimize_atomic_bit_test_and
4442 (&i
, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT
, false, false);
4445 case BUILT_IN_ATOMIC_XOR_FETCH_1
:
4446 case BUILT_IN_ATOMIC_XOR_FETCH_2
:
4447 case BUILT_IN_ATOMIC_XOR_FETCH_4
:
4448 case BUILT_IN_ATOMIC_XOR_FETCH_8
:
4449 case BUILT_IN_ATOMIC_XOR_FETCH_16
:
4450 if (optimize_atomic_bit_test_and
4451 (&i
, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT
, true, true))
4453 optimize_atomic_op_fetch_cmp_0 (&i
,
4454 IFN_ATOMIC_XOR_FETCH_CMP_0
,
4457 case BUILT_IN_SYNC_XOR_AND_FETCH_1
:
4458 case BUILT_IN_SYNC_XOR_AND_FETCH_2
:
4459 case BUILT_IN_SYNC_XOR_AND_FETCH_4
:
4460 case BUILT_IN_SYNC_XOR_AND_FETCH_8
:
4461 case BUILT_IN_SYNC_XOR_AND_FETCH_16
:
4462 if (optimize_atomic_bit_test_and
4463 (&i
, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT
, false, true))
4465 optimize_atomic_op_fetch_cmp_0 (&i
,
4466 IFN_ATOMIC_XOR_FETCH_CMP_0
,
                case BUILT_IN_ATOMIC_FETCH_AND_1:
                case BUILT_IN_ATOMIC_FETCH_AND_2:
                case BUILT_IN_ATOMIC_FETCH_AND_4:
                case BUILT_IN_ATOMIC_FETCH_AND_8:
                case BUILT_IN_ATOMIC_FETCH_AND_16:
                  optimize_atomic_bit_test_and
                    (&i, IFN_ATOMIC_BIT_TEST_AND_RESET, true, false);
                  break;
                case BUILT_IN_SYNC_FETCH_AND_AND_1:
                case BUILT_IN_SYNC_FETCH_AND_AND_2:
                case BUILT_IN_SYNC_FETCH_AND_AND_4:
                case BUILT_IN_SYNC_FETCH_AND_AND_8:
                case BUILT_IN_SYNC_FETCH_AND_AND_16:
                  optimize_atomic_bit_test_and
                    (&i, IFN_ATOMIC_BIT_TEST_AND_RESET, false, false);
                  break;

                case BUILT_IN_ATOMIC_AND_FETCH_1:
                case BUILT_IN_ATOMIC_AND_FETCH_2:
                case BUILT_IN_ATOMIC_AND_FETCH_4:
                case BUILT_IN_ATOMIC_AND_FETCH_8:
                case BUILT_IN_ATOMIC_AND_FETCH_16:
                  optimize_atomic_op_fetch_cmp_0
                    (&i, IFN_ATOMIC_AND_FETCH_CMP_0, true);
                  break;
                case BUILT_IN_SYNC_AND_AND_FETCH_1:
                case BUILT_IN_SYNC_AND_AND_FETCH_2:
                case BUILT_IN_SYNC_AND_AND_FETCH_4:
                case BUILT_IN_SYNC_AND_AND_FETCH_8:
                case BUILT_IN_SYNC_AND_AND_FETCH_16:
                  optimize_atomic_op_fetch_cmp_0
                    (&i, IFN_ATOMIC_AND_FETCH_CMP_0, false);
                  break;

                case BUILT_IN_ATOMIC_OR_FETCH_1:
                case BUILT_IN_ATOMIC_OR_FETCH_2:
                case BUILT_IN_ATOMIC_OR_FETCH_4:
                case BUILT_IN_ATOMIC_OR_FETCH_8:
                case BUILT_IN_ATOMIC_OR_FETCH_16:
                  optimize_atomic_op_fetch_cmp_0
                    (&i, IFN_ATOMIC_OR_FETCH_CMP_0, true);
                  break;
                case BUILT_IN_SYNC_OR_AND_FETCH_1:
                case BUILT_IN_SYNC_OR_AND_FETCH_2:
                case BUILT_IN_SYNC_OR_AND_FETCH_4:
                case BUILT_IN_SYNC_OR_AND_FETCH_8:
                case BUILT_IN_SYNC_OR_AND_FETCH_16:
                  optimize_atomic_op_fetch_cmp_0
                    (&i, IFN_ATOMIC_OR_FETCH_CMP_0, false);
                  break;

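                /* Only memcpy calls whose source and destination are
                   directly addressable objects and whose length is a
                   compile-time constant are considered here;
                   optimize_memcpy then tries to simplify the copy, for
                   instance by forwarding a value already known to have
                   been stored into the source.  */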
                case BUILT_IN_MEMCPY:
                  if (gimple_call_builtin_p (stmt, BUILT_IN_NORMAL)
                      && TREE_CODE (gimple_call_arg (stmt, 0)) == ADDR_EXPR
                      && TREE_CODE (gimple_call_arg (stmt, 1)) == ADDR_EXPR
                      && TREE_CODE (gimple_call_arg (stmt, 2)) == INTEGER_CST)
                    {
                      tree dest = TREE_OPERAND (gimple_call_arg (stmt, 0), 0);
                      tree src = TREE_OPERAND (gimple_call_arg (stmt, 1), 0);
                      tree len = gimple_call_arg (stmt, 2);
                      optimize_memcpy (&i, dest, src, len);
                    }
                  break;

                case BUILT_IN_VA_START:
                case BUILT_IN_VA_END:
                case BUILT_IN_VA_COPY:
                  /* These shouldn't be folded before pass_stdarg.  */
                  result = optimize_stdarg_builtin (stmt);
                  break;

                default:;
                }

              if (!result)
                {
                  gsi_next (&i);
                  continue;
                }

              gimplify_and_update_call_from_tree (&i, result);
            }

          todoflags |= TODO_update_address_taken;

          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "Simplified\n ");
              print_gimple_stmt (dump_file, stmt, 0, dump_flags);
            }

          old_stmt = stmt;
          stmt = gsi_stmt (i);
          update_stmt (stmt);

          if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt)
              && gimple_purge_dead_eh_edges (bb))
            cfg_changed = true;

          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "to\n ");
              print_gimple_stmt (dump_file, stmt, 0, dump_flags);
              fprintf (dump_file, "\n");
            }

          /* Retry the same statement if it changed into another
             builtin, there might be new opportunities now.  */
          if (gimple_code (stmt) != GIMPLE_CALL)
            {
              gsi_next (&i);
              continue;
            }
          callee = gimple_call_fndecl (stmt);
          if (!callee
              || !fndecl_built_in_p (callee, fcode))
            gsi_next (&i);
        }
    }

  /* Delete unreachable blocks.  */
  if (cfg_changed)
    todoflags |= TODO_cleanup_cfg;

  return todoflags;
}

} // anon namespace

gimple_opt_pass *
make_pass_fold_builtins (gcc::context *ctxt)
{
  return new pass_fold_builtins (ctxt);
}

/* A simple pass that emits some warnings post IPA.  */

namespace {

const pass_data pass_data_post_ipa_warn =
{
  GIMPLE_PASS, /* type */
  "post_ipa_warn", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

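/* As I read the implementation below, this pass walks every call statement
   once IPA transformations have settled the callees and issues -Wnonnull
   diagnostics for pointer arguments that are literal nulls but correspond
   to parameters declared nonnull (including the implicit this pointer of
   C++ non-static member functions).  It only runs when -Wnonnull is
   enabled; see the gate below.  */
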
class pass_post_ipa_warn : public gimple_opt_pass
{
public:
  pass_post_ipa_warn (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_post_ipa_warn, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () final override { return new pass_post_ipa_warn (m_ctxt); }
  bool gate (function *) final override { return warn_nonnull != 0; }
  unsigned int execute (function *) final override;

}; // class pass_post_ipa_warn

unsigned int
pass_post_ipa_warn::execute (function *fun)
{
  basic_block bb;

  FOR_EACH_BB_FN (bb, fun)
    {
      gimple_stmt_iterator gsi;
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
        {
          gimple *stmt = gsi_stmt (gsi);
          if (!is_gimple_call (stmt) || warning_suppressed_p (stmt, OPT_Wnonnull))
            continue;

          tree fntype = gimple_call_fntype (stmt);
          bitmap nonnullargs = get_nonnull_args (fntype);
          if (!nonnullargs)
            continue;

          tree fndecl = gimple_call_fndecl (stmt);
          const bool closure = fndecl && DECL_LAMBDA_FUNCTION_P (fndecl);

          for (unsigned i = 0; i < gimple_call_num_args (stmt); i++)
            {
              tree arg = gimple_call_arg (stmt, i);
              if (TREE_CODE (TREE_TYPE (arg)) != POINTER_TYPE)
                continue;
              if (!integer_zerop (arg))
                continue;
              if (i == 0 && closure)
                /* Avoid warning for the first argument to lambda functions.  */
                continue;
              if (!bitmap_empty_p (nonnullargs)
                  && !bitmap_bit_p (nonnullargs, i))
                continue;

              /* In C++ non-static member functions argument 0 refers
                 to the implicit this pointer.  Use the same one-based
                 numbering for ordinary arguments.  */
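              /* For example (numbering as I understand it): for a free
                 function void f (void *p) a null P is diagnosed as
                 "argument 1", whereas for a non-static member function the
                 implicit this pointer is argument 0 and gets the dedicated
                 wording below.  */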
              unsigned argno = TREE_CODE (fntype) == METHOD_TYPE ? i : i + 1;
              location_t loc = (EXPR_HAS_LOCATION (arg)
                                ? EXPR_LOCATION (arg)
                                : gimple_location (stmt));
              auto_diagnostic_group d;
              if (argno == 0)
                {
                  if (warning_at (loc, OPT_Wnonnull,
                                  "%qs pointer is null", "this")
                      && fndecl)
                    inform (DECL_SOURCE_LOCATION (fndecl),
                            "in a call to non-static member function %qD",
                            fndecl);
                  continue;
                }

              if (!warning_at (loc, OPT_Wnonnull,
                               "argument %u null where non-null "
                               "expected", argno))
                continue;

              tree fndecl = gimple_call_fndecl (stmt);
              if (fndecl && DECL_IS_UNDECLARED_BUILTIN (fndecl))
                inform (loc, "in a call to built-in function %qD",
                        fndecl);
              else if (fndecl)
                inform (DECL_SOURCE_LOCATION (fndecl),
                        "in a call to function %qD declared %qs",
                        fndecl, "nonnull");
            }
          BITMAP_FREE (nonnullargs);
        }
    }
  return 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_post_ipa_warn (gcc::context *ctxt)
{
  return new pass_post_ipa_warn (ctxt);
}