/* Forward propagation of expressions for single use variables.
   Copyright (C) 2004-2023 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "tree-pass.h"
#include "optabs-query.h"
#include "gimple-pretty-print.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "gimple-iterator.h"
#include "gimple-fold.h"
#include "gimplify-me.h"
#include "tree-ssa-propagate.h"
#include "tree-ssa-dom.h"
#include "tree-ssa-strlen.h"
#include "tree-cfgcleanup.h"
#include "optabs-tree.h"
#include "tree-vector-builder.h"
#include "vec-perm-indices.h"
#include "internal-fn.h"
#include "gimple-range.h"
/* This pass propagates the RHS of assignment statements into use
   sites of the LHS of the assignment.  It's basically a specialized
   form of tree combination.  It is hoped all of this can disappear
   when we have a generalized tree combiner.

   One class of common cases we handle is forward propagating a single use
   variable into a COND_EXPR.

     bb0:
       x = a COND b;
       if (x) goto ... else goto ...

   Will be transformed into:

     bb0:
       if (a COND b) goto ... else goto ...

   Similarly for the tests (x == 0), (x != 0), (x == 1) and (x != 1).

   Or (assuming c1 and c2 are constants):

     bb0:
       x = a + c1;
       if (x EQ/NEQ c2) goto ... else goto ...

   Will be transformed into:

     bb0:
       if (a EQ/NEQ (c2 - c1)) goto ... else goto ...

   Similarly for x = a - c1.

   Or

     bb0:
       x = !a
       if (x) goto ... else goto ...

   Will be transformed into:

     bb0:
       if (a == 0) goto ... else goto ...

   Similarly for the tests (x == 0), (x != 0), (x == 1) and (x != 1).
   For these cases, we propagate A into all, possibly more than one,
   COND_EXPRs that use X.

   Or

     bb0:
       x = (typecast) a
       if (x) goto ... else goto ...

   Will be transformed into:

     bb0:
       if (a != 0) goto ... else goto ...

   (Assuming a is an integral type and x is a boolean or x is an
    integral and a is a boolean.)

   Similarly for the tests (x == 0), (x != 0), (x == 1) and (x != 1).
   For these cases, we propagate A into all, possibly more than one,
   COND_EXPRs that use X.

   In addition to eliminating the variable and the statement which assigns
   a value to the variable, we may be able to later thread the jump without
   adding insane complexity in the dominator optimizer.

   Also note these transformations can cascade.  We handle this by having
   a worklist of COND_EXPR statements to examine.  As we make a change to
   a statement, we put it back on the worklist to examine on the next
   iteration of the main loop.

   A second class of propagation opportunities arises for ADDR_EXPR
   nodes.

     ptr = &x->y->z;
     res = *ptr;

   Will get turned into

     res = x->y->z;

   Or
     ptr = (type1*)&type2var;
     res = *ptr

   Will get turned into (if type1 and type2 are the same size
   and neither have volatile on them):
     res = VIEW_CONVERT_EXPR<type1>(type2var)

   Or

     ptr = &x[0];
     ptr2 = ptr + <constant>;

   Will get turned into

     ptr2 = &x[constant/elementsize];

   Or

     ptr = &x[0];
     offset = index * element_size;
     offset_p = (pointer) offset;
     ptr2 = ptr + offset_p

   Will get turned into:

     ptr2 = &x[index];

   Or
     ssa = (int) decl
     res = ssa & 1

   Provided that decl has known alignment >= 2, will get turned into

     res = 0;

   We also propagate casts into SWITCH_EXPR and COND_EXPR conditions to
   allow us to remove the cast and {NOT_EXPR,NEG_EXPR} into a subsequent
   {NOT_EXPR,NEG_EXPR}.

   This will (of course) be extended as other needs arise.  */
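/* Example for illustration only (hypothetical source, not taken from this
   file): at the C level the first COND_EXPR case corresponds to

     int f (int a, int b)
     {
       int x = a < b;        // x = a COND b;  x has a single use below
       if (x)                // propagated to:  if (a < b) ...
         return 1;
       return 0;
     }

   and the constant pointer adjustment case to

     static int x[8];
     int *ptr = &x[0];
     int *ptr2 = ptr + 2;    // recovered as:  ptr2 = &x[2];
*/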
static bool forward_propagate_addr_expr (tree, tree, bool);

/* Set to true if we delete dead edges during the optimization.  */
static bool cfg_changed;

static tree rhs_to_tree (tree type, gimple *stmt);

static bitmap to_purge;

/* Const-and-copy lattice.  */
static vec<tree> lattice;
/* Set the lattice entry for NAME to VAL.  */

static void
fwprop_set_lattice_val (tree name, tree val)
{
  if (TREE_CODE (name) == SSA_NAME)
    {
      if (SSA_NAME_VERSION (name) >= lattice.length ())
        {
          lattice.reserve (num_ssa_names - lattice.length ());
          lattice.quick_grow_cleared (num_ssa_names);
        }
      lattice[SSA_NAME_VERSION (name)] = val;
    }
}
/* Invalidate the lattice entry for NAME, done when releasing SSA names.  */

static void
fwprop_invalidate_lattice (tree name)
{
  if (name
      && TREE_CODE (name) == SSA_NAME
      && SSA_NAME_VERSION (name) < lattice.length ())
    lattice[SSA_NAME_VERSION (name)] = NULL_TREE;
}
/* Get the statement we can propagate from into NAME skipping
   trivial copies.  Returns the statement which defines the
   propagation source or NULL if there is no such one.
   If SINGLE_USE_ONLY is set considers only sources which have
   a single use chain up to NAME.  If SINGLE_USE_P is non-null,
   it is set to whether the chain to NAME is a single use chain
   or not.  SINGLE_USE_P is not written to if SINGLE_USE_ONLY is set.  */

static gimple *
get_prop_source_stmt (tree name, bool single_use_only, bool *single_use_p)
{
  bool single_use = true;

  do {
    gimple *def_stmt = SSA_NAME_DEF_STMT (name);

    if (!has_single_use (name))
      {
        single_use = false;
        if (single_use_only)
          return NULL;
      }

    /* If name is defined by a PHI node or is the default def, bail out.  */
    if (!is_gimple_assign (def_stmt))
      return NULL;

    /* If def_stmt is a simple copy, continue looking.  */
    if (gimple_assign_rhs_code (def_stmt) == SSA_NAME)
      name = gimple_assign_rhs1 (def_stmt);
    else
      {
        if (!single_use_only && single_use_p)
          *single_use_p = single_use;

        return def_stmt;
      }
  } while (1);
}
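/* Example for illustration only (hypothetical source): in

     _Bool f (int x, int y)
     {
       _Bool a = x < y;
       _Bool b = a;        // trivial copy
       return b;
     }

   get_prop_source_stmt called on the use of b skips the trivial copy
   b = a and returns the statement computing x < y as the propagation
   source, reporting whether the whole chain is single-use.  */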
/* Checks if the destination ssa name in DEF_STMT can be used as
   propagation source.  Returns true if so, otherwise false.  */

static bool
can_propagate_from (gimple *def_stmt)
{
  gcc_assert (is_gimple_assign (def_stmt));

  /* If the rhs has side-effects we cannot propagate from it.  */
  if (gimple_has_volatile_ops (def_stmt))
    return false;

  /* If the rhs is a load we cannot propagate from it.  */
  if (TREE_CODE_CLASS (gimple_assign_rhs_code (def_stmt)) == tcc_reference
      || TREE_CODE_CLASS (gimple_assign_rhs_code (def_stmt)) == tcc_declaration)
    return false;

  /* Constants can be always propagated.  */
  if (gimple_assign_single_p (def_stmt)
      && is_gimple_min_invariant (gimple_assign_rhs1 (def_stmt)))
    return true;

  /* We cannot propagate ssa names that occur in abnormal phi nodes.  */
  if (stmt_references_abnormal_ssa_name (def_stmt))
    return false;

  /* If the definition is a conversion of a pointer to a function type,
     then we cannot apply optimizations as some targets require
     function pointers to be canonicalized and in this case this
     optimization could eliminate a necessary canonicalization.  */
  if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))
    {
      tree rhs = gimple_assign_rhs1 (def_stmt);
      if (POINTER_TYPE_P (TREE_TYPE (rhs))
          && TREE_CODE (TREE_TYPE (TREE_TYPE (rhs))) == FUNCTION_TYPE)
        return false;
    }

  return true;
}
/* Remove a chain of dead statements starting at the definition of
   NAME.  The chain is linked via the first operand of the defining statements.
   If NAME was replaced in its only use then this function can be used
   to clean up dead stmts.  The function handles already released SSA
   names gracefully.
   Returns true if cleanup-cfg has to run.  */

static bool
remove_prop_source_from_use (tree name)
{
  gimple_stmt_iterator gsi;
  gimple *stmt;
  bool cfg_changed = false;

  do {
    basic_block bb;

    if (SSA_NAME_IN_FREE_LIST (name)
        || SSA_NAME_IS_DEFAULT_DEF (name)
        || !has_zero_uses (name))
      return cfg_changed;

    stmt = SSA_NAME_DEF_STMT (name);
    if (gimple_code (stmt) == GIMPLE_PHI
        || gimple_has_side_effects (stmt))
      return cfg_changed;

    bb = gimple_bb (stmt);
    gsi = gsi_for_stmt (stmt);
    unlink_stmt_vdef (stmt);
    if (gsi_remove (&gsi, true))
      bitmap_set_bit (to_purge, bb->index);
    fwprop_invalidate_lattice (gimple_get_lhs (stmt));
    release_defs (stmt);

    name = is_gimple_assign (stmt) ? gimple_assign_rhs1 (stmt) : NULL_TREE;
  } while (name && TREE_CODE (name) == SSA_NAME);

  return cfg_changed;
}
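/* Example for illustration only (hypothetical GIMPLE): once the condition in

     tmp_1 = a_2 + 1;
     x_3 = tmp_1 != 0;
     if (x_3 != 0) ...

   has been folded into the GIMPLE_COND, x_3 and then tmp_1 are left with
   zero uses; remove_prop_source_from_use (x_3) removes both definitions by
   following the first operand of each defining statement.  */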
/* Return the rhs of a gassign *STMT in a form of a single tree,
   converted to type TYPE.

   This should disappear, but is needed so we can combine expressions and use
   the fold() interfaces.  Long term, we need to develop folding and combine
   routines that deal with gimple exclusively.  */

static tree
rhs_to_tree (tree type, gimple *stmt)
{
  location_t loc = gimple_location (stmt);
  enum tree_code code = gimple_assign_rhs_code (stmt);
  switch (get_gimple_rhs_class (code))
    {
    case GIMPLE_TERNARY_RHS:
      return fold_build3_loc (loc, code, type, gimple_assign_rhs1 (stmt),
                              gimple_assign_rhs2 (stmt),
                              gimple_assign_rhs3 (stmt));
    case GIMPLE_BINARY_RHS:
      return fold_build2_loc (loc, code, type, gimple_assign_rhs1 (stmt),
                              gimple_assign_rhs2 (stmt));
    case GIMPLE_UNARY_RHS:
      return build1 (code, type, gimple_assign_rhs1 (stmt));
    case GIMPLE_SINGLE_RHS:
      return gimple_assign_rhs1 (stmt);
    default:
      gcc_unreachable ();
    }
}
/* Combine OP0 CODE OP1 in the context of a COND_EXPR.  Returns
   the folded result in a form suitable for COND_EXPR_COND or
   NULL_TREE, if there is no suitable simplified form.  If
   INVARIANT_ONLY is true only gimple_min_invariant results are
   considered simplified.  */

static tree
combine_cond_expr_cond (gimple *stmt, enum tree_code code, tree type,
                        tree op0, tree op1, bool invariant_only)
{
  tree t;

  gcc_assert (TREE_CODE_CLASS (code) == tcc_comparison);

  fold_defer_overflow_warnings ();
  t = fold_binary_loc (gimple_location (stmt), code, type, op0, op1);
  if (!t)
    {
      fold_undefer_overflow_warnings (false, NULL, 0);
      return NULL_TREE;
    }

  /* Require that we got a boolean type out if we put one in.  */
  gcc_assert (TREE_CODE (TREE_TYPE (t)) == TREE_CODE (type));

  /* Canonicalize the combined condition for use in a COND_EXPR.  */
  t = canonicalize_cond_expr_cond (t);

  /* Bail out if we required an invariant but didn't get one.  */
  if (!t || (invariant_only && !is_gimple_min_invariant (t)))
    {
      fold_undefer_overflow_warnings (false, NULL, 0);
      return NULL_TREE;
    }

  bool nowarn = warning_suppressed_p (stmt, OPT_Wstrict_overflow);
  fold_undefer_overflow_warnings (!nowarn, stmt, 0);

  return t;
}
/* Combine the comparison OP0 CODE OP1 at LOC with the defining statements
   of its operands.  Return a new comparison tree or NULL_TREE if there
   were no simplifying combines.  */

static tree
forward_propagate_into_comparison_1 (gimple *stmt,
                                     enum tree_code code, tree type,
                                     tree op0, tree op1)
{
  tree tmp = NULL_TREE;
  tree rhs0 = NULL_TREE, rhs1 = NULL_TREE;
  bool single_use0_p = false, single_use1_p = false;

  /* For comparisons use the first operand, that is likely to
     simplify comparisons against constants.  */
  if (TREE_CODE (op0) == SSA_NAME)
    {
      gimple *def_stmt = get_prop_source_stmt (op0, false, &single_use0_p);
      if (def_stmt && can_propagate_from (def_stmt))
        {
          enum tree_code def_code = gimple_assign_rhs_code (def_stmt);
          bool invariant_only_p = !single_use0_p;

          rhs0 = rhs_to_tree (TREE_TYPE (op1), def_stmt);

          /* Always combine comparisons or conversions from booleans.  */
          if (TREE_CODE (op1) == INTEGER_CST
              && ((CONVERT_EXPR_CODE_P (def_code)
                   && TREE_CODE (TREE_TYPE (TREE_OPERAND (rhs0, 0)))
                      == BOOLEAN_TYPE)
                  || TREE_CODE_CLASS (def_code) == tcc_comparison))
            invariant_only_p = false;

          tmp = combine_cond_expr_cond (stmt, code, type,
                                        rhs0, op1, invariant_only_p);
          if (tmp)
            return tmp;
        }
    }

  /* If that wasn't successful, try the second operand.  */
  if (TREE_CODE (op1) == SSA_NAME)
    {
      gimple *def_stmt = get_prop_source_stmt (op1, false, &single_use1_p);
      if (def_stmt && can_propagate_from (def_stmt))
        {
          rhs1 = rhs_to_tree (TREE_TYPE (op0), def_stmt);
          tmp = combine_cond_expr_cond (stmt, code, type,
                                        op0, rhs1, !single_use1_p);
          if (tmp)
            return tmp;
        }
    }

  /* If that wasn't successful either, try both operands.  */
  if (rhs0 != NULL_TREE
      && rhs1 != NULL_TREE)
    tmp = combine_cond_expr_cond (stmt, code, type,
                                  rhs0, rhs1,
                                  !(single_use0_p && single_use1_p));

  return tmp;
}
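/* Example for illustration only (hypothetical GIMPLE): for

     a_1 = x_2 < y_3;
     b_4 = a_1 != 0;

   combining the comparison b_4 with the defining statement of a_1 (itself
   a comparison) folds the condition down to x_2 < y_3, which the callers
   then substitute for the original comparison.  */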
/* Propagate from the ssa name definition statements of the assignment
   from a comparison at *GSI into the conditional if that simplifies it.
   Returns 1 if the stmt was modified and 2 if the CFG needs cleanup,
   otherwise returns 0.  */

static int
forward_propagate_into_comparison (gimple_stmt_iterator *gsi)
{
  gimple *stmt = gsi_stmt (*gsi);
  tree tmp;
  bool cfg_changed = false;
  tree type = TREE_TYPE (gimple_assign_lhs (stmt));
  tree rhs1 = gimple_assign_rhs1 (stmt);
  tree rhs2 = gimple_assign_rhs2 (stmt);

  /* Combine the comparison with defining statements.  */
  tmp = forward_propagate_into_comparison_1 (stmt,
                                             gimple_assign_rhs_code (stmt),
                                             type, rhs1, rhs2);
  if (tmp && useless_type_conversion_p (type, TREE_TYPE (tmp)))
    {
      gimple_assign_set_rhs_from_tree (gsi, tmp);
      fold_stmt (gsi);
      update_stmt (gsi_stmt (*gsi));

      if (TREE_CODE (rhs1) == SSA_NAME)
        cfg_changed |= remove_prop_source_from_use (rhs1);
      if (TREE_CODE (rhs2) == SSA_NAME)
        cfg_changed |= remove_prop_source_from_use (rhs2);
      return cfg_changed ? 2 : 1;
    }

  return 0;
}
/* Propagate from the ssa name definition statements of COND_EXPR
   in GIMPLE_COND statement STMT into the conditional if that simplifies it.
   Returns zero if no statement was changed, one if there were
   changes and two if cfg_cleanup needs to run.  */

static int
forward_propagate_into_gimple_cond (gcond *stmt)
{
  tree tmp;
  enum tree_code code = gimple_cond_code (stmt);
  bool cfg_changed = false;
  tree rhs1 = gimple_cond_lhs (stmt);
  tree rhs2 = gimple_cond_rhs (stmt);

  /* We can do tree combining on SSA_NAME and comparison expressions.  */
  if (TREE_CODE_CLASS (gimple_cond_code (stmt)) != tcc_comparison)
    return 0;

  tmp = forward_propagate_into_comparison_1 (stmt, code,
                                             boolean_type_node,
                                             rhs1, rhs2);
  if (tmp
      && is_gimple_condexpr_for_cond (tmp))
    {
      if (dump_file)
        {
          fprintf (dump_file, "  Replaced '");
          print_gimple_expr (dump_file, stmt, 0);
          fprintf (dump_file, "' with '");
          print_generic_expr (dump_file, tmp);
          fprintf (dump_file, "'\n");
        }

      gimple_cond_set_condition_from_tree (stmt, unshare_expr (tmp));
      update_stmt (stmt);

      if (TREE_CODE (rhs1) == SSA_NAME)
        cfg_changed |= remove_prop_source_from_use (rhs1);
      if (TREE_CODE (rhs2) == SSA_NAME)
        cfg_changed |= remove_prop_source_from_use (rhs2);
      return (cfg_changed || is_gimple_min_invariant (tmp)) ? 2 : 1;
    }

  /* Canonicalize _Bool == 0 and _Bool != 1 to _Bool != 0 by swapping edges.  */
  if ((TREE_CODE (TREE_TYPE (rhs1)) == BOOLEAN_TYPE
       || (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
           && TYPE_PRECISION (TREE_TYPE (rhs1)) == 1))
      && ((code == EQ_EXPR
           && integer_zerop (rhs2))
          || (code == NE_EXPR
              && integer_onep (rhs2))))
    {
      basic_block bb = gimple_bb (stmt);
      gimple_cond_set_code (stmt, NE_EXPR);
      gimple_cond_set_rhs (stmt, build_zero_cst (TREE_TYPE (rhs1)));
      EDGE_SUCC (bb, 0)->flags ^= (EDGE_TRUE_VALUE|EDGE_FALSE_VALUE);
      EDGE_SUCC (bb, 1)->flags ^= (EDGE_TRUE_VALUE|EDGE_FALSE_VALUE);
      return 1;
    }

  return 0;
}
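/* Example for illustration only (hypothetical GIMPLE): the _Bool
   canonicalization above rewrites

     if (b_1 == 0) goto bb_2; else goto bb_3;

   into the equivalent

     if (b_1 != 0) goto bb_3; else goto bb_2;

   by switching the code to NE_EXPR against zero and toggling the
   EDGE_TRUE_VALUE/EDGE_FALSE_VALUE flags on both successor edges.  */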
/* We've just substituted an ADDR_EXPR into stmt.  Update all the
   relevant data structures to match.  */

static void
tidy_after_forward_propagate_addr (gimple *stmt)
{
  /* We may have turned a trapping insn into a non-trapping insn.  */
  if (maybe_clean_or_replace_eh_stmt (stmt, stmt))
    bitmap_set_bit (to_purge, gimple_bb (stmt)->index);

  if (TREE_CODE (gimple_assign_rhs1 (stmt)) == ADDR_EXPR)
    recompute_tree_invariant_for_addr_expr (gimple_assign_rhs1 (stmt));
}
/* NAME is a SSA_NAME representing DEF_RHS which is of the form
   ADDR_EXPR <whatever>.

   Try to forward propagate the ADDR_EXPR into the use USE_STMT.
   Often this will allow for removal of an ADDR_EXPR and INDIRECT_REF
   node or for recovery of array indexing from pointer arithmetic.

   Return true if the propagation was successful (the propagation can
   be not totally successful, yet things may have been changed).  */
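/* Example for illustration only (hypothetical GIMPLE): with

     ptr_1 = &a.b.c;
     x_2 = *ptr_1;

   propagating the ADDR_EXPR into the dereference yields x_2 = a.b.c, and
   with

     ptr_1 = &a[0];
     ptr_2 = ptr_1 p+ 8;

   the pointer arithmetic is recovered as ptr_2 = &a[2] when the element
   size is 4 bytes.  */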
601 forward_propagate_addr_expr_1 (tree name
, tree def_rhs
,
602 gimple_stmt_iterator
*use_stmt_gsi
,
605 tree lhs
, rhs
, rhs2
, array_ref
;
606 gimple
*use_stmt
= gsi_stmt (*use_stmt_gsi
);
607 enum tree_code rhs_code
;
610 gcc_assert (TREE_CODE (def_rhs
) == ADDR_EXPR
);
612 lhs
= gimple_assign_lhs (use_stmt
);
613 rhs_code
= gimple_assign_rhs_code (use_stmt
);
614 rhs
= gimple_assign_rhs1 (use_stmt
);
616 /* Do not perform copy-propagation but recurse through copy chains. */
617 if (TREE_CODE (lhs
) == SSA_NAME
618 && rhs_code
== SSA_NAME
)
619 return forward_propagate_addr_expr (lhs
, def_rhs
, single_use_p
);
621 /* The use statement could be a conversion. Recurse to the uses of the
622 lhs as copyprop does not copy through pointer to integer to pointer
623 conversions and FRE does not catch all cases either.
624 Treat the case of a single-use name and
625 a conversion to def_rhs type separate, though. */
626 if (TREE_CODE (lhs
) == SSA_NAME
627 && CONVERT_EXPR_CODE_P (rhs_code
))
629 /* If there is a point in a conversion chain where the types match
630 so we can remove a conversion re-materialize the address here
633 && useless_type_conversion_p (TREE_TYPE (lhs
), TREE_TYPE (def_rhs
)))
635 gimple_assign_set_rhs1 (use_stmt
, unshare_expr (def_rhs
));
636 gimple_assign_set_rhs_code (use_stmt
, TREE_CODE (def_rhs
));
640 /* Else recurse if the conversion preserves the address value. */
641 if ((INTEGRAL_TYPE_P (TREE_TYPE (lhs
))
642 || POINTER_TYPE_P (TREE_TYPE (lhs
)))
643 && (TYPE_PRECISION (TREE_TYPE (lhs
))
644 >= TYPE_PRECISION (TREE_TYPE (def_rhs
))))
645 return forward_propagate_addr_expr (lhs
, def_rhs
, single_use_p
);
650 /* If this isn't a conversion chain from this on we only can propagate
651 into compatible pointer contexts. */
652 if (!types_compatible_p (TREE_TYPE (name
), TREE_TYPE (def_rhs
)))
655 /* Propagate through constant pointer adjustments. */
656 if (TREE_CODE (lhs
) == SSA_NAME
657 && rhs_code
== POINTER_PLUS_EXPR
659 && TREE_CODE (gimple_assign_rhs2 (use_stmt
)) == INTEGER_CST
)
662 /* As we come here with non-invariant addresses in def_rhs we need
663 to make sure we can build a valid constant offsetted address
664 for further propagation. Simply rely on fold building that
665 and check after the fact. */
666 new_def_rhs
= fold_build2 (MEM_REF
, TREE_TYPE (TREE_TYPE (rhs
)),
668 fold_convert (ptr_type_node
,
669 gimple_assign_rhs2 (use_stmt
)));
670 if (TREE_CODE (new_def_rhs
) == MEM_REF
671 && !is_gimple_mem_ref_addr (TREE_OPERAND (new_def_rhs
, 0)))
673 new_def_rhs
= build1 (ADDR_EXPR
, TREE_TYPE (rhs
), new_def_rhs
);
675 /* Recurse. If we could propagate into all uses of lhs do not
676 bother to replace into the current use but just pretend we did. */
677 if (forward_propagate_addr_expr (lhs
, new_def_rhs
, single_use_p
))
680 if (useless_type_conversion_p (TREE_TYPE (lhs
),
681 TREE_TYPE (new_def_rhs
)))
682 gimple_assign_set_rhs_with_ops (use_stmt_gsi
, TREE_CODE (new_def_rhs
),
684 else if (is_gimple_min_invariant (new_def_rhs
))
685 gimple_assign_set_rhs_with_ops (use_stmt_gsi
, NOP_EXPR
, new_def_rhs
);
688 gcc_assert (gsi_stmt (*use_stmt_gsi
) == use_stmt
);
689 update_stmt (use_stmt
);
693 /* Now strip away any outer COMPONENT_REF/ARRAY_REF nodes from the LHS.
694 ADDR_EXPR will not appear on the LHS. */
695 tree
*lhsp
= gimple_assign_lhs_ptr (use_stmt
);
696 while (handled_component_p (*lhsp
))
697 lhsp
= &TREE_OPERAND (*lhsp
, 0);
700 /* Now see if the LHS node is a MEM_REF using NAME. If so,
701 propagate the ADDR_EXPR into the use of NAME and fold the result. */
702 if (TREE_CODE (lhs
) == MEM_REF
703 && TREE_OPERAND (lhs
, 0) == name
)
706 poly_int64 def_rhs_offset
;
707 /* If the address is invariant we can always fold it. */
708 if ((def_rhs_base
= get_addr_base_and_unit_offset (TREE_OPERAND (def_rhs
, 0),
711 poly_offset_int off
= mem_ref_offset (lhs
);
713 off
+= def_rhs_offset
;
714 if (TREE_CODE (def_rhs_base
) == MEM_REF
)
716 off
+= mem_ref_offset (def_rhs_base
);
717 new_ptr
= TREE_OPERAND (def_rhs_base
, 0);
720 new_ptr
= build_fold_addr_expr (def_rhs_base
);
721 TREE_OPERAND (lhs
, 0) = new_ptr
;
722 TREE_OPERAND (lhs
, 1)
723 = wide_int_to_tree (TREE_TYPE (TREE_OPERAND (lhs
, 1)), off
);
724 tidy_after_forward_propagate_addr (use_stmt
);
725 /* Continue propagating into the RHS if this was not the only use. */
729 /* If the LHS is a plain dereference and the value type is the same as
730 that of the pointed-to type of the address we can put the
731 dereferenced address on the LHS preserving the original alias-type. */
732 else if (integer_zerop (TREE_OPERAND (lhs
, 1))
733 && ((gimple_assign_lhs (use_stmt
) == lhs
734 && useless_type_conversion_p
735 (TREE_TYPE (TREE_OPERAND (def_rhs
, 0)),
736 TREE_TYPE (gimple_assign_rhs1 (use_stmt
))))
737 || types_compatible_p (TREE_TYPE (lhs
),
738 TREE_TYPE (TREE_OPERAND (def_rhs
, 0))))
739 /* Don't forward anything into clobber stmts if it would result
740 in the lhs no longer being a MEM_REF. */
741 && (!gimple_clobber_p (use_stmt
)
742 || TREE_CODE (TREE_OPERAND (def_rhs
, 0)) == MEM_REF
))
744 tree
*def_rhs_basep
= &TREE_OPERAND (def_rhs
, 0);
745 tree new_offset
, new_base
, saved
, new_lhs
;
746 while (handled_component_p (*def_rhs_basep
))
747 def_rhs_basep
= &TREE_OPERAND (*def_rhs_basep
, 0);
748 saved
= *def_rhs_basep
;
749 if (TREE_CODE (*def_rhs_basep
) == MEM_REF
)
751 new_base
= TREE_OPERAND (*def_rhs_basep
, 0);
752 new_offset
= fold_convert (TREE_TYPE (TREE_OPERAND (lhs
, 1)),
753 TREE_OPERAND (*def_rhs_basep
, 1));
757 new_base
= build_fold_addr_expr (*def_rhs_basep
);
758 new_offset
= TREE_OPERAND (lhs
, 1);
760 *def_rhs_basep
= build2 (MEM_REF
, TREE_TYPE (*def_rhs_basep
),
761 new_base
, new_offset
);
762 TREE_THIS_VOLATILE (*def_rhs_basep
) = TREE_THIS_VOLATILE (lhs
);
763 TREE_SIDE_EFFECTS (*def_rhs_basep
) = TREE_SIDE_EFFECTS (lhs
);
764 TREE_THIS_NOTRAP (*def_rhs_basep
) = TREE_THIS_NOTRAP (lhs
);
765 new_lhs
= unshare_expr (TREE_OPERAND (def_rhs
, 0));
767 TREE_THIS_VOLATILE (new_lhs
) = TREE_THIS_VOLATILE (lhs
);
768 TREE_SIDE_EFFECTS (new_lhs
) = TREE_SIDE_EFFECTS (lhs
);
769 *def_rhs_basep
= saved
;
770 tidy_after_forward_propagate_addr (use_stmt
);
771 /* Continue propagating into the RHS if this was not the
777 /* We can have a struct assignment dereferencing our name twice.
778 Note that we didn't propagate into the lhs to not falsely
779 claim we did when propagating into the rhs. */
783 /* Strip away any outer COMPONENT_REF, ARRAY_REF or ADDR_EXPR
784 nodes from the RHS. */
785 tree
*rhsp
= gimple_assign_rhs1_ptr (use_stmt
);
786 if (TREE_CODE (*rhsp
) == ADDR_EXPR
)
787 rhsp
= &TREE_OPERAND (*rhsp
, 0);
788 while (handled_component_p (*rhsp
))
789 rhsp
= &TREE_OPERAND (*rhsp
, 0);
792 /* Now see if the RHS node is a MEM_REF using NAME. If so,
793 propagate the ADDR_EXPR into the use of NAME and fold the result. */
794 if (TREE_CODE (rhs
) == MEM_REF
795 && TREE_OPERAND (rhs
, 0) == name
)
798 poly_int64 def_rhs_offset
;
799 if ((def_rhs_base
= get_addr_base_and_unit_offset (TREE_OPERAND (def_rhs
, 0),
802 poly_offset_int off
= mem_ref_offset (rhs
);
804 off
+= def_rhs_offset
;
805 if (TREE_CODE (def_rhs_base
) == MEM_REF
)
807 off
+= mem_ref_offset (def_rhs_base
);
808 new_ptr
= TREE_OPERAND (def_rhs_base
, 0);
811 new_ptr
= build_fold_addr_expr (def_rhs_base
);
812 TREE_OPERAND (rhs
, 0) = new_ptr
;
813 TREE_OPERAND (rhs
, 1)
814 = wide_int_to_tree (TREE_TYPE (TREE_OPERAND (rhs
, 1)), off
);
815 fold_stmt_inplace (use_stmt_gsi
);
816 tidy_after_forward_propagate_addr (use_stmt
);
819 /* If the RHS is a plain dereference and the value type is the same as
820 that of the pointed-to type of the address we can put the
821 dereferenced address on the RHS preserving the original alias-type. */
822 else if (integer_zerop (TREE_OPERAND (rhs
, 1))
823 && ((gimple_assign_rhs1 (use_stmt
) == rhs
824 && useless_type_conversion_p
825 (TREE_TYPE (gimple_assign_lhs (use_stmt
)),
826 TREE_TYPE (TREE_OPERAND (def_rhs
, 0))))
827 || types_compatible_p (TREE_TYPE (rhs
),
828 TREE_TYPE (TREE_OPERAND (def_rhs
, 0)))))
830 tree
*def_rhs_basep
= &TREE_OPERAND (def_rhs
, 0);
831 tree new_offset
, new_base
, saved
, new_rhs
;
832 while (handled_component_p (*def_rhs_basep
))
833 def_rhs_basep
= &TREE_OPERAND (*def_rhs_basep
, 0);
834 saved
= *def_rhs_basep
;
835 if (TREE_CODE (*def_rhs_basep
) == MEM_REF
)
837 new_base
= TREE_OPERAND (*def_rhs_basep
, 0);
838 new_offset
= fold_convert (TREE_TYPE (TREE_OPERAND (rhs
, 1)),
839 TREE_OPERAND (*def_rhs_basep
, 1));
843 new_base
= build_fold_addr_expr (*def_rhs_basep
);
844 new_offset
= TREE_OPERAND (rhs
, 1);
846 *def_rhs_basep
= build2 (MEM_REF
, TREE_TYPE (*def_rhs_basep
),
847 new_base
, new_offset
);
848 TREE_THIS_VOLATILE (*def_rhs_basep
) = TREE_THIS_VOLATILE (rhs
);
849 TREE_SIDE_EFFECTS (*def_rhs_basep
) = TREE_SIDE_EFFECTS (rhs
);
850 TREE_THIS_NOTRAP (*def_rhs_basep
) = TREE_THIS_NOTRAP (rhs
);
851 new_rhs
= unshare_expr (TREE_OPERAND (def_rhs
, 0));
853 TREE_THIS_VOLATILE (new_rhs
) = TREE_THIS_VOLATILE (rhs
);
854 TREE_SIDE_EFFECTS (new_rhs
) = TREE_SIDE_EFFECTS (rhs
);
855 *def_rhs_basep
= saved
;
856 fold_stmt_inplace (use_stmt_gsi
);
857 tidy_after_forward_propagate_addr (use_stmt
);
862 /* If the use of the ADDR_EXPR is not a POINTER_PLUS_EXPR, there
864 if (gimple_assign_rhs_code (use_stmt
) != POINTER_PLUS_EXPR
865 || gimple_assign_rhs1 (use_stmt
) != name
)
868 /* The remaining cases are all for turning pointer arithmetic into
869 array indexing. They only apply when we have the address of
870 element zero in an array. If that is not the case then there
872 array_ref
= TREE_OPERAND (def_rhs
, 0);
873 if ((TREE_CODE (array_ref
) != ARRAY_REF
874 || TREE_CODE (TREE_TYPE (TREE_OPERAND (array_ref
, 0))) != ARRAY_TYPE
875 || TREE_CODE (TREE_OPERAND (array_ref
, 1)) != INTEGER_CST
)
876 && TREE_CODE (TREE_TYPE (array_ref
)) != ARRAY_TYPE
)
879 rhs2
= gimple_assign_rhs2 (use_stmt
);
880 /* Optimize &x[C1] p+ C2 to &x p+ C3 with C3 = C1 * element_size + C2. */
881 if (TREE_CODE (rhs2
) == INTEGER_CST
)
883 tree new_rhs
= build1_loc (gimple_location (use_stmt
),
884 ADDR_EXPR
, TREE_TYPE (def_rhs
),
885 fold_build2 (MEM_REF
,
886 TREE_TYPE (TREE_TYPE (def_rhs
)),
887 unshare_expr (def_rhs
),
888 fold_convert (ptr_type_node
,
890 gimple_assign_set_rhs_from_tree (use_stmt_gsi
, new_rhs
);
891 use_stmt
= gsi_stmt (*use_stmt_gsi
);
892 update_stmt (use_stmt
);
893 tidy_after_forward_propagate_addr (use_stmt
);
/* STMT is a statement of the form SSA_NAME = ADDR_EXPR <whatever>.

   Try to forward propagate the ADDR_EXPR into all uses of the SSA_NAME.
   Often this will allow for removal of an ADDR_EXPR and INDIRECT_REF
   node or for recovery of array indexing from pointer arithmetic.

   PARENT_SINGLE_USE_P tells if, when in a recursive invocation, NAME was
   the single use in the previous invocation.  Pass true when calling
   this as toplevel.

   Returns true, if all uses have been propagated into.  */
913 forward_propagate_addr_expr (tree name
, tree rhs
, bool parent_single_use_p
)
915 imm_use_iterator iter
;
918 bool single_use_p
= parent_single_use_p
&& has_single_use (name
);
920 FOR_EACH_IMM_USE_STMT (use_stmt
, iter
, name
)
925 /* If the use is not in a simple assignment statement, then
926 there is nothing we can do. */
927 if (!is_gimple_assign (use_stmt
))
929 if (!is_gimple_debug (use_stmt
))
934 gimple_stmt_iterator gsi
= gsi_for_stmt (use_stmt
);
935 result
= forward_propagate_addr_expr_1 (name
, rhs
, &gsi
,
937 /* If the use has moved to a different statement adjust
938 the update machinery for the old statement too. */
939 if (use_stmt
!= gsi_stmt (gsi
))
941 update_stmt (use_stmt
);
942 use_stmt
= gsi_stmt (gsi
);
944 update_stmt (use_stmt
);
947 /* Remove intermediate now unused copy and conversion chains. */
948 use_rhs
= gimple_assign_rhs1 (use_stmt
);
950 && TREE_CODE (gimple_assign_lhs (use_stmt
)) == SSA_NAME
951 && TREE_CODE (use_rhs
) == SSA_NAME
952 && has_zero_uses (gimple_assign_lhs (use_stmt
)))
954 gimple_stmt_iterator gsi
= gsi_for_stmt (use_stmt
);
955 fwprop_invalidate_lattice (gimple_get_lhs (use_stmt
));
956 release_defs (use_stmt
);
957 gsi_remove (&gsi
, true);
961 return all
&& has_zero_uses (name
);
/* Helper function for simplify_gimple_switch.  Remove case labels that
   have values outside the range of the new type.  */
969 simplify_gimple_switch_label_vec (gswitch
*stmt
, tree index_type
)
971 unsigned int branch_num
= gimple_switch_num_labels (stmt
);
972 auto_vec
<tree
> labels (branch_num
);
975 /* Collect the existing case labels in a VEC, and preprocess it as if
976 we are gimplifying a GENERIC SWITCH_EXPR. */
977 for (i
= 1; i
< branch_num
; i
++)
978 labels
.quick_push (gimple_switch_label (stmt
, i
));
979 preprocess_case_label_vec_for_gimple (labels
, index_type
, NULL
);
981 /* If any labels were removed, replace the existing case labels
982 in the GIMPLE_SWITCH statement with the correct ones.
983 Note that the type updates were done in-place on the case labels,
984 so we only have to replace the case labels in the GIMPLE_SWITCH
985 if the number of labels changed. */
986 len
= labels
.length ();
987 if (len
< branch_num
- 1)
989 bitmap target_blocks
;
993 /* Corner case: *all* case labels have been removed as being
994 out-of-range for INDEX_TYPE. Push one label and let the
995 CFG cleanups deal with this further. */
1000 label
= CASE_LABEL (gimple_switch_default_label (stmt
));
1001 elt
= build_case_label (build_int_cst (index_type
, 0), NULL
, label
);
1002 labels
.quick_push (elt
);
1006 for (i
= 0; i
< labels
.length (); i
++)
1007 gimple_switch_set_label (stmt
, i
+ 1, labels
[i
]);
1008 for (i
++ ; i
< branch_num
; i
++)
1009 gimple_switch_set_label (stmt
, i
, NULL_TREE
);
1010 gimple_switch_set_num_labels (stmt
, len
+ 1);
1012 /* Cleanup any edges that are now dead. */
1013 target_blocks
= BITMAP_ALLOC (NULL
);
1014 for (i
= 0; i
< gimple_switch_num_labels (stmt
); i
++)
1016 tree elt
= gimple_switch_label (stmt
, i
);
1017 basic_block target
= label_to_block (cfun
, CASE_LABEL (elt
));
1018 bitmap_set_bit (target_blocks
, target
->index
);
1020 for (ei
= ei_start (gimple_bb (stmt
)->succs
); (e
= ei_safe_edge (ei
)); )
1022 if (! bitmap_bit_p (target_blocks
, e
->dest
->index
))
1026 free_dominance_info (CDI_DOMINATORS
);
1031 BITMAP_FREE (target_blocks
);
/* STMT is a SWITCH_EXPR for which we attempt to find equivalent forms of
   the condition which we may be able to optimize better.  */
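/* Example for illustration only (hypothetical source): for

     short s;
     switch ((int) s)
       {
       case 1: ...
       case 200: ...
       }

   every case value fits in the type of s, so the widening cast can be
   dropped and the switch index replaced by s itself; case labels that no
   longer fit the narrower type are removed by
   simplify_gimple_switch_label_vec.  */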
1039 simplify_gimple_switch (gswitch
*stmt
)
1041 /* The optimization that we really care about is removing unnecessary
1042 casts. That will let us do much better in propagating the inferred
1043 constant at the switch target. */
1044 tree cond
= gimple_switch_index (stmt
);
1045 if (TREE_CODE (cond
) == SSA_NAME
)
1047 gimple
*def_stmt
= SSA_NAME_DEF_STMT (cond
);
1048 if (gimple_assign_cast_p (def_stmt
))
1050 tree def
= gimple_assign_rhs1 (def_stmt
);
1051 if (TREE_CODE (def
) != SSA_NAME
)
1054 /* If we have an extension or sign-change that preserves the
1055 values we check against then we can copy the source value into
1057 tree ti
= TREE_TYPE (def
);
1058 if (INTEGRAL_TYPE_P (ti
)
1059 && TYPE_PRECISION (ti
) <= TYPE_PRECISION (TREE_TYPE (cond
)))
1061 size_t n
= gimple_switch_num_labels (stmt
);
1062 tree min
= NULL_TREE
, max
= NULL_TREE
;
1065 min
= CASE_LOW (gimple_switch_label (stmt
, 1));
1066 if (CASE_HIGH (gimple_switch_label (stmt
, n
- 1)))
1067 max
= CASE_HIGH (gimple_switch_label (stmt
, n
- 1));
1069 max
= CASE_LOW (gimple_switch_label (stmt
, n
- 1));
1071 if ((!min
|| int_fits_type_p (min
, ti
))
1072 && (!max
|| int_fits_type_p (max
, ti
)))
1074 gimple_switch_set_index (stmt
, def
);
1075 simplify_gimple_switch_label_vec (stmt
, ti
);
/* For pointers p2 and p1 return p2 - p1 if the
   difference is known and constant, otherwise return NULL.  */
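/* Example for illustration only (hypothetical source): given

     char buf[16];
     char *p1 = &buf[2];
     char *p2 = &buf[10];

   constant_pointer_difference (p1, p2) walks both address computations,
   finds the common base &buf and returns the constant 8; if no common
   expression is found within CPD_ITERATIONS steps it returns NULL.  */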
1090 constant_pointer_difference (tree p1
, tree p2
)
1093 #define CPD_ITERATIONS 5
1094 tree exps
[2][CPD_ITERATIONS
];
1095 tree offs
[2][CPD_ITERATIONS
];
1098 for (i
= 0; i
< 2; i
++)
1100 tree p
= i
? p1
: p2
;
1101 tree off
= size_zero_node
;
1103 enum tree_code code
;
1105 /* For each of p1 and p2 we need to iterate at least
1106 twice, to handle ADDR_EXPR directly in p1/p2,
1107 SSA_NAME with ADDR_EXPR or POINTER_PLUS_EXPR etc.
1108 on definition's stmt RHS. Iterate a few extra times. */
1112 if (!POINTER_TYPE_P (TREE_TYPE (p
)))
1114 if (TREE_CODE (p
) == ADDR_EXPR
)
1116 tree q
= TREE_OPERAND (p
, 0);
1118 tree base
= get_addr_base_and_unit_offset (q
, &offset
);
1122 if (maybe_ne (offset
, 0))
1123 off
= size_binop (PLUS_EXPR
, off
, size_int (offset
));
1125 if (TREE_CODE (q
) == MEM_REF
1126 && TREE_CODE (TREE_OPERAND (q
, 0)) == SSA_NAME
)
1128 p
= TREE_OPERAND (q
, 0);
1129 off
= size_binop (PLUS_EXPR
, off
,
1130 wide_int_to_tree (sizetype
,
1131 mem_ref_offset (q
)));
1140 if (TREE_CODE (p
) != SSA_NAME
)
1144 if (j
== CPD_ITERATIONS
)
1146 stmt
= SSA_NAME_DEF_STMT (p
);
1147 if (!is_gimple_assign (stmt
) || gimple_assign_lhs (stmt
) != p
)
1149 code
= gimple_assign_rhs_code (stmt
);
1150 if (code
== POINTER_PLUS_EXPR
)
1152 if (TREE_CODE (gimple_assign_rhs2 (stmt
)) != INTEGER_CST
)
1154 off
= size_binop (PLUS_EXPR
, off
, gimple_assign_rhs2 (stmt
));
1155 p
= gimple_assign_rhs1 (stmt
);
1157 else if (code
== ADDR_EXPR
|| CONVERT_EXPR_CODE_P (code
))
1158 p
= gimple_assign_rhs1 (stmt
);
1166 for (i
= 0; i
< cnt
[0]; i
++)
1167 for (j
= 0; j
< cnt
[1]; j
++)
1168 if (exps
[0][i
] == exps
[1][j
])
1169 return size_binop (MINUS_EXPR
, offs
[0][i
], offs
[1][j
]);
/* *GSI_P is a GIMPLE_CALL to a builtin function.
   Optimize
   memcpy (p, "abcd", 4);
   memset (p + 4, ' ', 3);
   into
   memcpy (p, "abcd   ", 7);
   call if the latter can be stored by pieces during expansion.

   Optimize
   memchr ("abcd", a, 4) == 0;
   or
   memchr ("abcd", a, 4) != 0;
   to
   (a == 'a' || a == 'b' || a == 'c' || a == 'd') == 0
   or
   (a == 'a' || a == 'b' || a == 'c' || a == 'd') != 0

   Also canonicalize __atomic_fetch_op (p, x, y) op x
   to __atomic_op_fetch (p, x, y) or
   __atomic_op_fetch (p, x, y) iop x
   to __atomic_fetch_op (p, x, y) when possible (also __sync).  */
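/* Example for illustration only (hypothetical source): the memcpy/memset
   merge above turns

     void fill (char *p)
     {
       __builtin_memcpy (p, "abcd", 4);
       __builtin_memset (p + 4, ' ', 3);
     }

   into a single __builtin_memcpy (p, "abcd   ", 7) when the combined
   literal can be stored by pieces, and the atomic canonicalization turns
   __atomic_fetch_add (&v, x, 0) + x into __atomic_add_fetch (&v, x, 0)
   when the intermediate result has no other uses.  */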
1197 simplify_builtin_call (gimple_stmt_iterator
*gsi_p
, tree callee2
)
1199 gimple
*stmt1
, *stmt2
= gsi_stmt (*gsi_p
);
1200 enum built_in_function other_atomic
= END_BUILTINS
;
1201 enum tree_code atomic_op
= ERROR_MARK
;
1202 tree vuse
= gimple_vuse (stmt2
);
1205 stmt1
= SSA_NAME_DEF_STMT (vuse
);
1209 switch (DECL_FUNCTION_CODE (callee2
))
1211 case BUILT_IN_MEMCHR
:
1212 if (gimple_call_num_args (stmt2
) == 3
1213 && (res
= gimple_call_lhs (stmt2
)) != nullptr
1214 && use_in_zero_equality (res
) != nullptr
1216 && BITS_PER_UNIT
== 8)
1218 tree ptr
= gimple_call_arg (stmt2
, 0);
1219 if (TREE_CODE (ptr
) != ADDR_EXPR
1220 || TREE_CODE (TREE_OPERAND (ptr
, 0)) != STRING_CST
)
1222 unsigned HOST_WIDE_INT slen
1223 = TREE_STRING_LENGTH (TREE_OPERAND (ptr
, 0));
1224 /* It must be a non-empty string constant. */
1227 /* For -Os, only simplify strings with a single character. */
1228 if (!optimize_bb_for_speed_p (gimple_bb (stmt2
))
1231 tree size
= gimple_call_arg (stmt2
, 2);
1232 /* Size must be a constant which is <= UNITS_PER_WORD and
1233 <= the string length. */
1234 if (TREE_CODE (size
) != INTEGER_CST
|| integer_zerop (size
))
1237 if (!tree_fits_uhwi_p (size
))
1240 unsigned HOST_WIDE_INT sz
= tree_to_uhwi (size
);
1241 if (sz
> UNITS_PER_WORD
|| sz
>= slen
)
1244 tree ch
= gimple_call_arg (stmt2
, 1);
1245 location_t loc
= gimple_location (stmt2
);
1246 if (!useless_type_conversion_p (char_type_node
,
1248 ch
= fold_convert_loc (loc
, char_type_node
, ch
);
1249 const char *p
= TREE_STRING_POINTER (TREE_OPERAND (ptr
, 0));
1250 unsigned int isize
= sz
;
1251 tree
*op
= XALLOCAVEC (tree
, isize
);
1252 for (unsigned int i
= 0; i
< isize
; i
++)
1254 op
[i
] = build_int_cst (char_type_node
, p
[i
]);
1255 op
[i
] = fold_build2_loc (loc
, EQ_EXPR
, boolean_type_node
,
1258 for (unsigned int i
= isize
- 1; i
>= 1; i
--)
1259 op
[i
- 1] = fold_convert_loc (loc
, boolean_type_node
,
1260 fold_build2_loc (loc
,
1265 res
= fold_convert_loc (loc
, TREE_TYPE (res
), op
[0]);
1266 gimplify_and_update_call_from_tree (gsi_p
, res
);
1271 case BUILT_IN_MEMSET
:
1272 if (gimple_call_num_args (stmt2
) != 3
1273 || gimple_call_lhs (stmt2
)
1275 || BITS_PER_UNIT
!= 8)
1280 tree ptr1
, src1
, str1
, off1
, len1
, lhs1
;
1281 tree ptr2
= gimple_call_arg (stmt2
, 0);
1282 tree val2
= gimple_call_arg (stmt2
, 1);
1283 tree len2
= gimple_call_arg (stmt2
, 2);
1284 tree diff
, vdef
, new_str_cst
;
1286 unsigned int ptr1_align
;
1287 unsigned HOST_WIDE_INT src_len
;
1289 use_operand_p use_p
;
1291 if (!tree_fits_shwi_p (val2
)
1292 || !tree_fits_uhwi_p (len2
)
1293 || compare_tree_int (len2
, 1024) == 1)
1295 if (is_gimple_call (stmt1
))
1297 /* If first stmt is a call, it needs to be memcpy
1298 or mempcpy, with string literal as second argument and
1300 callee1
= gimple_call_fndecl (stmt1
);
1301 if (callee1
== NULL_TREE
1302 || !fndecl_built_in_p (callee1
, BUILT_IN_NORMAL
)
1303 || gimple_call_num_args (stmt1
) != 3)
1305 if (DECL_FUNCTION_CODE (callee1
) != BUILT_IN_MEMCPY
1306 && DECL_FUNCTION_CODE (callee1
) != BUILT_IN_MEMPCPY
)
1308 ptr1
= gimple_call_arg (stmt1
, 0);
1309 src1
= gimple_call_arg (stmt1
, 1);
1310 len1
= gimple_call_arg (stmt1
, 2);
1311 lhs1
= gimple_call_lhs (stmt1
);
1312 if (!tree_fits_uhwi_p (len1
))
1314 str1
= string_constant (src1
, &off1
, NULL
, NULL
);
1315 if (str1
== NULL_TREE
)
1317 if (!tree_fits_uhwi_p (off1
)
1318 || compare_tree_int (off1
, TREE_STRING_LENGTH (str1
) - 1) > 0
1319 || compare_tree_int (len1
, TREE_STRING_LENGTH (str1
)
1320 - tree_to_uhwi (off1
)) > 0
1321 || TREE_CODE (TREE_TYPE (str1
)) != ARRAY_TYPE
1322 || TYPE_MODE (TREE_TYPE (TREE_TYPE (str1
)))
1323 != TYPE_MODE (char_type_node
))
1326 else if (gimple_assign_single_p (stmt1
))
1328 /* Otherwise look for length 1 memcpy optimized into
1330 ptr1
= gimple_assign_lhs (stmt1
);
1331 src1
= gimple_assign_rhs1 (stmt1
);
1332 if (TREE_CODE (ptr1
) != MEM_REF
1333 || TYPE_MODE (TREE_TYPE (ptr1
)) != TYPE_MODE (char_type_node
)
1334 || !tree_fits_shwi_p (src1
))
1336 ptr1
= build_fold_addr_expr (ptr1
);
1337 STRIP_USELESS_TYPE_CONVERSION (ptr1
);
1338 callee1
= NULL_TREE
;
1339 len1
= size_one_node
;
1341 off1
= size_zero_node
;
1347 diff
= constant_pointer_difference (ptr1
, ptr2
);
1348 if (diff
== NULL
&& lhs1
!= NULL
)
1350 diff
= constant_pointer_difference (lhs1
, ptr2
);
1351 if (DECL_FUNCTION_CODE (callee1
) == BUILT_IN_MEMPCPY
1353 diff
= size_binop (PLUS_EXPR
, diff
,
1354 fold_convert (sizetype
, len1
));
1356 /* If the difference between the second and first destination pointer
1357 is not constant, or is bigger than memcpy length, bail out. */
1359 || !tree_fits_uhwi_p (diff
)
1360 || tree_int_cst_lt (len1
, diff
)
1361 || compare_tree_int (diff
, 1024) == 1)
1364 /* Use maximum of difference plus memset length and memcpy length
1365 as the new memcpy length, if it is too big, bail out. */
1366 src_len
= tree_to_uhwi (diff
);
1367 src_len
+= tree_to_uhwi (len2
);
1368 if (src_len
< tree_to_uhwi (len1
))
1369 src_len
= tree_to_uhwi (len1
);
1373 /* If mempcpy value is used elsewhere, bail out, as mempcpy
1374 with bigger length will return different result. */
1375 if (lhs1
!= NULL_TREE
1376 && DECL_FUNCTION_CODE (callee1
) == BUILT_IN_MEMPCPY
1377 && (TREE_CODE (lhs1
) != SSA_NAME
1378 || !single_imm_use (lhs1
, &use_p
, &use_stmt
)
1379 || use_stmt
!= stmt2
))
1382 /* If anything reads memory in between memcpy and memset
1383 call, the modified memcpy call might change it. */
1384 vdef
= gimple_vdef (stmt1
);
1386 && (!single_imm_use (vdef
, &use_p
, &use_stmt
)
1387 || use_stmt
!= stmt2
))
1390 ptr1_align
= get_pointer_alignment (ptr1
);
1391 /* Construct the new source string literal. */
1392 src_buf
= XALLOCAVEC (char, src_len
+ 1);
1395 TREE_STRING_POINTER (str1
) + tree_to_uhwi (off1
),
1396 tree_to_uhwi (len1
));
1398 src_buf
[0] = tree_to_shwi (src1
);
1399 memset (src_buf
+ tree_to_uhwi (diff
),
1400 tree_to_shwi (val2
), tree_to_uhwi (len2
));
1401 src_buf
[src_len
] = '\0';
1402 /* Neither builtin_strncpy_read_str nor builtin_memcpy_read_str
1403 handle embedded '\0's. */
1404 if (strlen (src_buf
) != src_len
)
1406 rtl_profile_for_bb (gimple_bb (stmt2
));
1407 /* If the new memcpy wouldn't be emitted by storing the literal
1408 by pieces, this optimization might enlarge .rodata too much,
1409 as commonly used string literals couldn't be shared any
1411 if (!can_store_by_pieces (src_len
,
1412 builtin_strncpy_read_str
,
1413 src_buf
, ptr1_align
, false))
1416 new_str_cst
= build_string_literal (src_len
, src_buf
);
1419 /* If STMT1 is a mem{,p}cpy call, adjust it and remove
1421 if (lhs1
&& DECL_FUNCTION_CODE (callee1
) == BUILT_IN_MEMPCPY
)
1422 gimple_call_set_lhs (stmt1
, NULL_TREE
);
1423 gimple_call_set_arg (stmt1
, 1, new_str_cst
);
1424 gimple_call_set_arg (stmt1
, 2,
1425 build_int_cst (TREE_TYPE (len1
), src_len
));
1426 update_stmt (stmt1
);
1427 unlink_stmt_vdef (stmt2
);
1428 gsi_replace (gsi_p
, gimple_build_nop (), false);
1429 fwprop_invalidate_lattice (gimple_get_lhs (stmt2
));
1430 release_defs (stmt2
);
1431 if (lhs1
&& DECL_FUNCTION_CODE (callee1
) == BUILT_IN_MEMPCPY
)
1433 fwprop_invalidate_lattice (lhs1
);
1434 release_ssa_name (lhs1
);
1440 /* Otherwise, if STMT1 is length 1 memcpy optimized into
1441 assignment, remove STMT1 and change memset call into
1443 gimple_stmt_iterator gsi
= gsi_for_stmt (stmt1
);
1445 if (!is_gimple_val (ptr1
))
1446 ptr1
= force_gimple_operand_gsi (gsi_p
, ptr1
, true, NULL_TREE
,
1447 true, GSI_SAME_STMT
);
1448 tree fndecl
= builtin_decl_explicit (BUILT_IN_MEMCPY
);
1449 gimple_call_set_fndecl (stmt2
, fndecl
);
1450 gimple_call_set_fntype (as_a
<gcall
*> (stmt2
),
1451 TREE_TYPE (fndecl
));
1452 gimple_call_set_arg (stmt2
, 0, ptr1
);
1453 gimple_call_set_arg (stmt2
, 1, new_str_cst
);
1454 gimple_call_set_arg (stmt2
, 2,
1455 build_int_cst (TREE_TYPE (len2
), src_len
));
1456 unlink_stmt_vdef (stmt1
);
1457 gsi_remove (&gsi
, true);
1458 fwprop_invalidate_lattice (gimple_get_lhs (stmt1
));
1459 release_defs (stmt1
);
1460 update_stmt (stmt2
);
1466 #define CASE_ATOMIC(NAME, OTHER, OP) \
1467 case BUILT_IN_##NAME##_1: \
1468 case BUILT_IN_##NAME##_2: \
1469 case BUILT_IN_##NAME##_4: \
1470 case BUILT_IN_##NAME##_8: \
1471 case BUILT_IN_##NAME##_16: \
1474 = (enum built_in_function) (BUILT_IN_##OTHER##_1 \
1475 + (DECL_FUNCTION_CODE (callee2) \
1476 - BUILT_IN_##NAME##_1)); \
1477 goto handle_atomic_fetch_op;
1479 CASE_ATOMIC (ATOMIC_FETCH_ADD
, ATOMIC_ADD_FETCH
, PLUS_EXPR
)
1480 CASE_ATOMIC (ATOMIC_FETCH_SUB
, ATOMIC_SUB_FETCH
, MINUS_EXPR
)
1481 CASE_ATOMIC (ATOMIC_FETCH_AND
, ATOMIC_AND_FETCH
, BIT_AND_EXPR
)
1482 CASE_ATOMIC (ATOMIC_FETCH_XOR
, ATOMIC_XOR_FETCH
, BIT_XOR_EXPR
)
1483 CASE_ATOMIC (ATOMIC_FETCH_OR
, ATOMIC_OR_FETCH
, BIT_IOR_EXPR
)
1485 CASE_ATOMIC (SYNC_FETCH_AND_ADD
, SYNC_ADD_AND_FETCH
, PLUS_EXPR
)
1486 CASE_ATOMIC (SYNC_FETCH_AND_SUB
, SYNC_SUB_AND_FETCH
, MINUS_EXPR
)
1487 CASE_ATOMIC (SYNC_FETCH_AND_AND
, SYNC_AND_AND_FETCH
, BIT_AND_EXPR
)
1488 CASE_ATOMIC (SYNC_FETCH_AND_XOR
, SYNC_XOR_AND_FETCH
, BIT_XOR_EXPR
)
1489 CASE_ATOMIC (SYNC_FETCH_AND_OR
, SYNC_OR_AND_FETCH
, BIT_IOR_EXPR
)
1491 CASE_ATOMIC (ATOMIC_ADD_FETCH
, ATOMIC_FETCH_ADD
, MINUS_EXPR
)
1492 CASE_ATOMIC (ATOMIC_SUB_FETCH
, ATOMIC_FETCH_SUB
, PLUS_EXPR
)
1493 CASE_ATOMIC (ATOMIC_XOR_FETCH
, ATOMIC_FETCH_XOR
, BIT_XOR_EXPR
)
1495 CASE_ATOMIC (SYNC_ADD_AND_FETCH
, SYNC_FETCH_AND_ADD
, MINUS_EXPR
)
1496 CASE_ATOMIC (SYNC_SUB_AND_FETCH
, SYNC_FETCH_AND_SUB
, PLUS_EXPR
)
1497 CASE_ATOMIC (SYNC_XOR_AND_FETCH
, SYNC_FETCH_AND_XOR
, BIT_XOR_EXPR
)
1501 handle_atomic_fetch_op
:
1502 if (gimple_call_num_args (stmt2
) >= 2 && gimple_call_lhs (stmt2
))
1504 tree lhs2
= gimple_call_lhs (stmt2
), lhsc
= lhs2
;
1505 tree arg
= gimple_call_arg (stmt2
, 1);
1506 gimple
*use_stmt
, *cast_stmt
= NULL
;
1507 use_operand_p use_p
;
1508 tree ndecl
= builtin_decl_explicit (other_atomic
);
1510 if (ndecl
== NULL_TREE
|| !single_imm_use (lhs2
, &use_p
, &use_stmt
))
1513 if (gimple_assign_cast_p (use_stmt
))
1515 cast_stmt
= use_stmt
;
1516 lhsc
= gimple_assign_lhs (cast_stmt
);
1517 if (lhsc
== NULL_TREE
1518 || !INTEGRAL_TYPE_P (TREE_TYPE (lhsc
))
1519 || (TYPE_PRECISION (TREE_TYPE (lhsc
))
1520 != TYPE_PRECISION (TREE_TYPE (lhs2
)))
1521 || !single_imm_use (lhsc
, &use_p
, &use_stmt
))
1523 use_stmt
= cast_stmt
;
1530 tree oarg
= NULL_TREE
;
1531 enum tree_code ccode
= ERROR_MARK
;
1532 tree crhs1
= NULL_TREE
, crhs2
= NULL_TREE
;
1533 if (is_gimple_assign (use_stmt
)
1534 && gimple_assign_rhs_code (use_stmt
) == atomic_op
)
1536 if (gimple_assign_rhs1 (use_stmt
) == lhsc
)
1537 oarg
= gimple_assign_rhs2 (use_stmt
);
1538 else if (atomic_op
!= MINUS_EXPR
)
1539 oarg
= gimple_assign_rhs1 (use_stmt
);
1541 else if (atomic_op
== MINUS_EXPR
1542 && is_gimple_assign (use_stmt
)
1543 && gimple_assign_rhs_code (use_stmt
) == PLUS_EXPR
1544 && TREE_CODE (arg
) == INTEGER_CST
1545 && (TREE_CODE (gimple_assign_rhs2 (use_stmt
))
1548 tree a
= fold_convert (TREE_TYPE (lhs2
), arg
);
1549 tree o
= fold_convert (TREE_TYPE (lhs2
),
1550 gimple_assign_rhs2 (use_stmt
));
1551 if (wi::to_wide (a
) == wi::neg (wi::to_wide (o
)))
1554 else if (atomic_op
== BIT_AND_EXPR
|| atomic_op
== BIT_IOR_EXPR
)
1556 else if (gimple_code (use_stmt
) == GIMPLE_COND
)
1558 ccode
= gimple_cond_code (use_stmt
);
1559 crhs1
= gimple_cond_lhs (use_stmt
);
1560 crhs2
= gimple_cond_rhs (use_stmt
);
1562 else if (is_gimple_assign (use_stmt
))
1564 if (gimple_assign_rhs_class (use_stmt
) == GIMPLE_BINARY_RHS
)
1566 ccode
= gimple_assign_rhs_code (use_stmt
);
1567 crhs1
= gimple_assign_rhs1 (use_stmt
);
1568 crhs2
= gimple_assign_rhs2 (use_stmt
);
1570 else if (gimple_assign_rhs_code (use_stmt
) == COND_EXPR
)
1572 tree cond
= gimple_assign_rhs1 (use_stmt
);
1573 if (COMPARISON_CLASS_P (cond
))
1575 ccode
= TREE_CODE (cond
);
1576 crhs1
= TREE_OPERAND (cond
, 0);
1577 crhs2
= TREE_OPERAND (cond
, 1);
1581 if (ccode
== EQ_EXPR
|| ccode
== NE_EXPR
)
1583 /* Deal with x - y == 0 or x ^ y == 0
1584 being optimized into x == y and x + cst == 0
1589 else if (crhs2
== lhsc
)
1591 if (o
&& atomic_op
!= PLUS_EXPR
)
1594 && TREE_CODE (o
) == INTEGER_CST
1595 && TREE_CODE (arg
) == INTEGER_CST
)
1597 tree a
= fold_convert (TREE_TYPE (lhs2
), arg
);
1598 o
= fold_convert (TREE_TYPE (lhs2
), o
);
1599 if (wi::to_wide (a
) == wi::neg (wi::to_wide (o
)))
1605 if (operand_equal_p (arg
, oarg
, 0))
1607 else if (TREE_CODE (arg
) == SSA_NAME
1608 && TREE_CODE (oarg
) == SSA_NAME
)
1611 if (gimple_assign_cast_p (SSA_NAME_DEF_STMT (oarg
)))
1613 gimple
*g
= SSA_NAME_DEF_STMT (oarg
);
1614 oarg2
= gimple_assign_rhs1 (g
);
1615 if (TREE_CODE (oarg2
) != SSA_NAME
1616 || !INTEGRAL_TYPE_P (TREE_TYPE (oarg2
))
1617 || (TYPE_PRECISION (TREE_TYPE (oarg2
))
1618 != TYPE_PRECISION (TREE_TYPE (oarg
))))
1621 if (gimple_assign_cast_p (SSA_NAME_DEF_STMT (arg
)))
1623 gimple
*g
= SSA_NAME_DEF_STMT (arg
);
1624 tree rhs1
= gimple_assign_rhs1 (g
);
1626 x.0_1 = (long unsigned int) x_4(D);
1627 _2 = __atomic_fetch_add_8 (&vlong, x.0_1, 0);
1629 _7 = x_4(D) + _3; */
1630 if (rhs1
== oarg
|| rhs1
== oarg2
)
1633 x.18_1 = (short unsigned int) x_5(D);
1635 _3 = __atomic_fetch_xor_2 (&vshort, _2, 0);
1636 _4 = (short int) _3;
1638 This happens only for char/short. */
1639 else if (TREE_CODE (rhs1
) == SSA_NAME
1640 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1
))
1641 && (TYPE_PRECISION (TREE_TYPE (rhs1
))
1642 == TYPE_PRECISION (TREE_TYPE (lhs2
))))
1644 g
= SSA_NAME_DEF_STMT (rhs1
);
1645 if (gimple_assign_cast_p (g
)
1646 && (gimple_assign_rhs1 (g
) == oarg
1647 || gimple_assign_rhs1 (g
) == oarg2
))
1651 if (!ok
&& arg
== oarg2
)
1653 _1 = __sync_fetch_and_add_4 (&v, x_5(D));
1655 x.0_3 = (int) x_5(D);
1663 tree new_lhs
= make_ssa_name (TREE_TYPE (lhs2
));
1664 gimple_call_set_lhs (stmt2
, new_lhs
);
1665 gimple_call_set_fndecl (stmt2
, ndecl
);
1666 gimple_stmt_iterator gsi
= gsi_for_stmt (use_stmt
);
1667 if (ccode
== ERROR_MARK
)
1668 gimple_assign_set_rhs_with_ops (&gsi
, cast_stmt
1669 ? NOP_EXPR
: SSA_NAME
,
1674 crhs2
= build_zero_cst (TREE_TYPE (lhs2
));
1675 if (gimple_code (use_stmt
) == GIMPLE_COND
)
1677 gcond
*cond_stmt
= as_a
<gcond
*> (use_stmt
);
1678 gimple_cond_set_lhs (cond_stmt
, crhs1
);
1679 gimple_cond_set_rhs (cond_stmt
, crhs2
);
1681 else if (gimple_assign_rhs_class (use_stmt
)
1682 == GIMPLE_BINARY_RHS
)
1684 gimple_assign_set_rhs1 (use_stmt
, crhs1
);
1685 gimple_assign_set_rhs2 (use_stmt
, crhs2
);
1689 gcc_checking_assert (gimple_assign_rhs_code (use_stmt
)
1691 tree cond
= build2 (ccode
, boolean_type_node
,
1693 gimple_assign_set_rhs1 (use_stmt
, cond
);
1696 update_stmt (use_stmt
);
1697 if (atomic_op
!= BIT_AND_EXPR
1698 && atomic_op
!= BIT_IOR_EXPR
1699 && !stmt_ends_bb_p (stmt2
))
1701 /* For the benefit of debug stmts, emit stmt(s) to set
1702 lhs2 to the value it had from the new builtin.
1703 E.g. if it was previously:
1704 lhs2 = __atomic_fetch_add_8 (ptr, arg, 0);
1706 new_lhs = __atomic_add_fetch_8 (ptr, arg, 0);
1707 lhs2 = new_lhs - arg;
1708 We also keep cast_stmt if any in the IL for
1710 These stmts will be DCEd later and proper debug info
1712 This is only possible for reversible operations
1713 (+/-/^) and without -fnon-call-exceptions. */
1714 gsi
= gsi_for_stmt (stmt2
);
1715 tree type
= TREE_TYPE (lhs2
);
1716 if (TREE_CODE (arg
) == INTEGER_CST
)
1717 arg
= fold_convert (type
, arg
);
1718 else if (!useless_type_conversion_p (type
, TREE_TYPE (arg
)))
1720 tree narg
= make_ssa_name (type
);
1721 gimple
*g
= gimple_build_assign (narg
, NOP_EXPR
, arg
);
1722 gsi_insert_after (&gsi
, g
, GSI_NEW_STMT
);
1725 enum tree_code rcode
;
1728 case PLUS_EXPR
: rcode
= MINUS_EXPR
; break;
1729 case MINUS_EXPR
: rcode
= PLUS_EXPR
; break;
1730 case BIT_XOR_EXPR
: rcode
= atomic_op
; break;
1731 default: gcc_unreachable ();
1733 gimple
*g
= gimple_build_assign (lhs2
, rcode
, new_lhs
, arg
);
1734 gsi_insert_after (&gsi
, g
, GSI_NEW_STMT
);
1735 update_stmt (stmt2
);
1740 lhs2 = __atomic_fetch_or_8 (ptr, arg, 0);
1741 after we change it to
1742 new_lhs = __atomic_or_fetch_8 (ptr, arg, 0);
1743 there is no way to find out the lhs2 value (i.e.
1744 what the atomic memory contained before the operation),
1745 values of some bits are lost. We have checked earlier
1746 that we don't have any non-debug users except for what
1747 we are already changing, so we need to reset the
1748 debug stmts and remove the cast_stmt if any. */
1749 imm_use_iterator iter
;
1750 FOR_EACH_IMM_USE_STMT (use_stmt
, iter
, lhs2
)
1751 if (use_stmt
!= cast_stmt
)
1753 gcc_assert (is_gimple_debug (use_stmt
));
1754 gimple_debug_bind_reset_value (use_stmt
);
1755 update_stmt (use_stmt
);
1759 gsi
= gsi_for_stmt (cast_stmt
);
1760 gsi_remove (&gsi
, true);
1762 update_stmt (stmt2
);
1763 release_ssa_name (lhs2
);
/* Given a ssa_name in NAME, see if it was defined by an assignment and,
   if so, set CODE to the code on the rhs, ARG1 to the first operand on
   the rhs and ARG2 to the second operand on the rhs.  */
1780 defcodefor_name (tree name
, enum tree_code
*code
, tree
*arg1
, tree
*arg2
)
1783 enum tree_code code1
;
1787 enum gimple_rhs_class grhs_class
;
1789 code1
= TREE_CODE (name
);
1793 grhs_class
= get_gimple_rhs_class (code1
);
1795 if (code1
== SSA_NAME
)
1797 def
= SSA_NAME_DEF_STMT (name
);
1799 if (def
&& is_gimple_assign (def
)
1800 && can_propagate_from (def
))
1802 code1
= gimple_assign_rhs_code (def
);
1803 arg11
= gimple_assign_rhs1 (def
);
1804 arg21
= gimple_assign_rhs2 (def
);
1805 arg31
= gimple_assign_rhs3 (def
);
1808 else if (grhs_class
!= GIMPLE_SINGLE_RHS
)
/* Recognize rotation patterns.  Return true if a transformation
   applied, otherwise return false.

   We are looking for X with unsigned type T with bitsize B, OP being
   +, | or ^, some type T2 wider than T.  For:
   (X << CNT1) OP (X >> CNT2)                          iff CNT1 + CNT2 == B
   ((T) ((T2) X << CNT1)) OP ((T) ((T2) X >> CNT2))    iff CNT1 + CNT2 == B

   transform these into:
   X r<< CNT1

   Or for:
   (X << Y) OP (X >> (B - Y))
   (X << (int) Y) OP (X >> (int) (B - Y))
   ((T) ((T2) X << Y)) OP ((T) ((T2) X >> (B - Y)))
   ((T) ((T2) X << (int) Y)) OP ((T) ((T2) X >> (int) (B - Y)))
   (X << Y) | (X >> ((-Y) & (B - 1)))
   (X << (int) Y) | (X >> (int) ((-Y) & (B - 1)))
   ((T) ((T2) X << Y)) | ((T) ((T2) X >> ((-Y) & (B - 1))))
   ((T) ((T2) X << (int) Y)) | ((T) ((T2) X >> (int) ((-Y) & (B - 1))))

   transform these into (last 2 only if ranger can prove Y < B):
   X r<< Y
   or
   X r<< (Y & (B - 1))
   The latter for the forms with T2 wider than T if ranger can't prove Y < B.

   Or for:
   (X << (Y & (B - 1))) | (X >> ((-Y) & (B - 1)))
   (X << (int) (Y & (B - 1))) | (X >> (int) ((-Y) & (B - 1)))
   ((T) ((T2) X << (Y & (B - 1)))) | ((T) ((T2) X >> ((-Y) & (B - 1))))
   ((T) ((T2) X << (int) (Y & (B - 1)))) \
    | ((T) ((T2) X >> (int) ((-Y) & (B - 1))))

   transform these into:
   X r<< (Y & (B - 1))

   Note, in the patterns with T2 type, the type of OP operands
   might be even a signed type, but should have precision B.
   Expressions with & (B - 1) should be recognized only if B is
   a power of 2.  */
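/* Example for illustration only (hypothetical source): the classic safe
   rotate idiom matched by the (-Y) & (B - 1) patterns above is

     unsigned int
     rotl32 (unsigned int x, unsigned int y)
     {
       return (x << (y & 31)) | (x >> ((-y) & 31));
     }

   which is rewritten into a single rotate, x r<< (y & 31), and normally
   expands to one hardware rotate instruction on targets that provide
   one.  */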
1864 simplify_rotate (gimple_stmt_iterator
*gsi
)
1866 gimple
*stmt
= gsi_stmt (*gsi
);
1867 tree arg
[2], rtype
, rotcnt
= NULL_TREE
;
1868 tree def_arg1
[2], def_arg2
[2];
  enum tree_code def_code[2];
  bool swapped_p = false;
  gimple *def_arg_stmt[2] = { NULL, NULL };
  bool add_masking = false;

  arg[0] = gimple_assign_rhs1 (stmt);
  arg[1] = gimple_assign_rhs2 (stmt);
  rtype = TREE_TYPE (arg[0]);

  /* Only create rotates in complete modes.  Other cases are not
     expanded properly.  */
  if (!INTEGRAL_TYPE_P (rtype)
      || !type_has_mode_precision_p (rtype))
    return false;

  for (i = 0; i < 2; i++)
    {
      defcodefor_name (arg[i], &def_code[i], &def_arg1[i], &def_arg2[i]);
      if (TREE_CODE (arg[i]) == SSA_NAME)
	def_arg_stmt[i] = SSA_NAME_DEF_STMT (arg[i]);
    }

  /* Look through narrowing (or same precision) conversions.  */
  if (CONVERT_EXPR_CODE_P (def_code[0])
      && CONVERT_EXPR_CODE_P (def_code[1])
      && INTEGRAL_TYPE_P (TREE_TYPE (def_arg1[0]))
      && INTEGRAL_TYPE_P (TREE_TYPE (def_arg1[1]))
      && TYPE_PRECISION (TREE_TYPE (def_arg1[0]))
	 == TYPE_PRECISION (TREE_TYPE (def_arg1[1]))
      && TYPE_PRECISION (TREE_TYPE (def_arg1[0])) >= TYPE_PRECISION (rtype)
      && has_single_use (arg[0])
      && has_single_use (arg[1]))
    {
      wider_prec = TYPE_PRECISION (TREE_TYPE (def_arg1[0]));
      for (i = 0; i < 2; i++)
	{
	  arg[i] = def_arg1[i];
	  defcodefor_name (arg[i], &def_code[i], &def_arg1[i], &def_arg2[i]);
	  if (TREE_CODE (arg[i]) == SSA_NAME)
	    def_arg_stmt[i] = SSA_NAME_DEF_STMT (arg[i]);
	}
    }
  else
    {
      /* Handle signed rotate; the RSHIFT_EXPR has to be done
	 in unsigned type but LSHIFT_EXPR could be signed.  */
      i = (def_code[0] == LSHIFT_EXPR || def_code[0] == RSHIFT_EXPR);
      if (CONVERT_EXPR_CODE_P (def_code[i])
	  && (def_code[1 - i] == LSHIFT_EXPR || def_code[1 - i] == RSHIFT_EXPR)
	  && INTEGRAL_TYPE_P (TREE_TYPE (def_arg1[i]))
	  && TYPE_PRECISION (rtype) == TYPE_PRECISION (TREE_TYPE (def_arg1[i]))
	  && has_single_use (arg[i]))
	{
	  arg[i] = def_arg1[i];
	  defcodefor_name (arg[i], &def_code[i], &def_arg1[i], &def_arg2[i]);
	  if (TREE_CODE (arg[i]) == SSA_NAME)
	    def_arg_stmt[i] = SSA_NAME_DEF_STMT (arg[i]);
	}
    }

  /* One operand has to be LSHIFT_EXPR and one RSHIFT_EXPR.  */
  for (i = 0; i < 2; i++)
    if (def_code[i] != LSHIFT_EXPR && def_code[i] != RSHIFT_EXPR)
      return false;
    else if (!has_single_use (arg[i]))
      return false;
  if (def_code[0] == def_code[1])
    return false;

  /* If we've looked through narrowing conversions before, look through
     widening conversions from unsigned type with the same precision
     as rtype.  */
  if (TYPE_PRECISION (TREE_TYPE (def_arg1[0])) != TYPE_PRECISION (rtype))
    for (i = 0; i < 2; i++)
      {
	tree tem;
	enum tree_code code;
	defcodefor_name (def_arg1[i], &code, &tem, NULL);
	if (!CONVERT_EXPR_CODE_P (code)
	    || !INTEGRAL_TYPE_P (TREE_TYPE (tem))
	    || TYPE_PRECISION (TREE_TYPE (tem)) != TYPE_PRECISION (rtype))
	  return false;
	def_arg1[i] = tem;
      }
  /* Both shifts have to use the same first operand.  */
  if (!operand_equal_for_phi_arg_p (def_arg1[0], def_arg1[1])
      || !types_compatible_p (TREE_TYPE (def_arg1[0]),
			      TREE_TYPE (def_arg1[1])))
    {
      if ((TYPE_PRECISION (TREE_TYPE (def_arg1[0]))
	   != TYPE_PRECISION (TREE_TYPE (def_arg1[1])))
	  || (TYPE_UNSIGNED (TREE_TYPE (def_arg1[0]))
	      == TYPE_UNSIGNED (TREE_TYPE (def_arg1[1]))))
	return false;

      /* Handle signed rotate; the RSHIFT_EXPR has to be done
	 in unsigned type but LSHIFT_EXPR could be signed.  */
      i = def_code[0] != RSHIFT_EXPR;
      if (!TYPE_UNSIGNED (TREE_TYPE (def_arg1[i])))
	return false;

      tree tem;
      enum tree_code code;
      defcodefor_name (def_arg1[i], &code, &tem, NULL);
      if (!CONVERT_EXPR_CODE_P (code)
	  || !INTEGRAL_TYPE_P (TREE_TYPE (tem))
	  || TYPE_PRECISION (TREE_TYPE (tem)) != TYPE_PRECISION (rtype))
	return false;
      def_arg1[i] = tem;
      if (!operand_equal_for_phi_arg_p (def_arg1[0], def_arg1[1])
	  || !types_compatible_p (TREE_TYPE (def_arg1[0]),
				  TREE_TYPE (def_arg1[1])))
	return false;
    }
  else if (!TYPE_UNSIGNED (TREE_TYPE (def_arg1[0])))
    return false;

  /* CNT1 + CNT2 == B case above.  */
  if (tree_fits_uhwi_p (def_arg2[0])
      && tree_fits_uhwi_p (def_arg2[1])
      && tree_to_uhwi (def_arg2[0])
	 + tree_to_uhwi (def_arg2[1]) == TYPE_PRECISION (rtype))
    rotcnt = def_arg2[0];
  else if (TREE_CODE (def_arg2[0]) != SSA_NAME
	   || TREE_CODE (def_arg2[1]) != SSA_NAME)
    return false;
  else
    {
      tree cdef_arg1[2], cdef_arg2[2], def_arg2_alt[2];
      enum tree_code cdef_code[2];
      gimple *def_arg_alt_stmt[2] = { NULL, NULL };
      int check_range = 0;
      gimple *check_range_stmt = NULL;
      /* Look through conversion of the shift count argument.
	 The C/C++ FE cast any shift count argument to integer_type_node.
	 The only problem might be if the shift count type maximum value
	 is equal or smaller than number of bits in rtype.  */
      for (i = 0; i < 2; i++)
	{
	  def_arg2_alt[i] = def_arg2[i];
	  defcodefor_name (def_arg2[i], &cdef_code[i],
			   &cdef_arg1[i], &cdef_arg2[i]);
	  if (CONVERT_EXPR_CODE_P (cdef_code[i])
	      && INTEGRAL_TYPE_P (TREE_TYPE (cdef_arg1[i]))
	      && TYPE_PRECISION (TREE_TYPE (cdef_arg1[i]))
		 > floor_log2 (TYPE_PRECISION (rtype))
	      && type_has_mode_precision_p (TREE_TYPE (cdef_arg1[i])))
	    {
	      def_arg2_alt[i] = cdef_arg1[i];
	      if (TREE_CODE (def_arg2[i]) == SSA_NAME)
		def_arg_alt_stmt[i] = SSA_NAME_DEF_STMT (def_arg2[i]);
	      defcodefor_name (def_arg2_alt[i], &cdef_code[i],
			       &cdef_arg1[i], &cdef_arg2[i]);
	    }
	  else
	    def_arg_alt_stmt[i] = def_arg_stmt[i];
	}
      for (i = 0; i < 2; i++)
	/* Check for one shift count being Y and the other B - Y,
	   with optional casts.  */
	if (cdef_code[i] == MINUS_EXPR
	    && tree_fits_shwi_p (cdef_arg1[i])
	    && tree_to_shwi (cdef_arg1[i]) == TYPE_PRECISION (rtype)
	    && TREE_CODE (cdef_arg2[i]) == SSA_NAME)
	  {
	    tree tem;
	    enum tree_code code;

	    if (cdef_arg2[i] == def_arg2[1 - i]
		|| cdef_arg2[i] == def_arg2_alt[1 - i])
	      {
		rotcnt = cdef_arg2[i];
		check_range = -1;
		if (cdef_arg2[i] == def_arg2[1 - i])
		  check_range_stmt = def_arg_stmt[1 - i];
		else
		  check_range_stmt = def_arg_alt_stmt[1 - i];
		break;
	      }
	    defcodefor_name (cdef_arg2[i], &code, &tem, NULL);
	    if (CONVERT_EXPR_CODE_P (code)
		&& INTEGRAL_TYPE_P (TREE_TYPE (tem))
		&& TYPE_PRECISION (TREE_TYPE (tem))
		   > floor_log2 (TYPE_PRECISION (rtype))
		&& type_has_mode_precision_p (TREE_TYPE (tem))
		&& (tem == def_arg2[1 - i]
		    || tem == def_arg2_alt[1 - i]))
	      {
		rotcnt = tem;
		check_range = -1;
		if (tem == def_arg2[1 - i])
		  check_range_stmt = def_arg_stmt[1 - i];
		else
		  check_range_stmt = def_arg_alt_stmt[1 - i];
		break;
	      }
	  }
	/* The above sequence isn't safe for Y being 0,
	   because then one of the shifts triggers undefined behavior.
	   This alternative is safe even for rotation count of 0.
	   One shift count is Y and the other (-Y) & (B - 1).
	   Or one shift count is Y & (B - 1) and the other (-Y) & (B - 1).  */
	else if (cdef_code[i] == BIT_AND_EXPR
		 && pow2p_hwi (TYPE_PRECISION (rtype))
		 && tree_fits_shwi_p (cdef_arg2[i])
		 && tree_to_shwi (cdef_arg2[i])
		    == TYPE_PRECISION (rtype) - 1
		 && TREE_CODE (cdef_arg1[i]) == SSA_NAME
		 && gimple_assign_rhs_code (stmt) == BIT_IOR_EXPR)
	  {
	    tree tem;
	    enum tree_code code;

	    defcodefor_name (cdef_arg1[i], &code, &tem, NULL);
	    if (CONVERT_EXPR_CODE_P (code)
		&& INTEGRAL_TYPE_P (TREE_TYPE (tem))
		&& TYPE_PRECISION (TREE_TYPE (tem))
		   > floor_log2 (TYPE_PRECISION (rtype))
		&& type_has_mode_precision_p (TREE_TYPE (tem)))
	      defcodefor_name (tem, &code, &tem, NULL);

	    if (code == NEGATE_EXPR)
	      {
		if (tem == def_arg2[1 - i] || tem == def_arg2_alt[1 - i])
		  {
		    rotcnt = tem;
		    check_range = 1;
		    if (tem == def_arg2[1 - i])
		      check_range_stmt = def_arg_stmt[1 - i];
		    else
		      check_range_stmt = def_arg_alt_stmt[1 - i];
		    break;
		  }
		tree tem2;
		defcodefor_name (tem, &code, &tem2, NULL);
		if (CONVERT_EXPR_CODE_P (code)
		    && INTEGRAL_TYPE_P (TREE_TYPE (tem2))
		    && TYPE_PRECISION (TREE_TYPE (tem2))
		       > floor_log2 (TYPE_PRECISION (rtype))
		    && type_has_mode_precision_p (TREE_TYPE (tem2)))
		  {
		    if (tem2 == def_arg2[1 - i]
			|| tem2 == def_arg2_alt[1 - i])
		      {
			rotcnt = tem2;
			check_range = 1;
			if (tem2 == def_arg2[1 - i])
			  check_range_stmt = def_arg_stmt[1 - i];
			else
			  check_range_stmt = def_arg_alt_stmt[1 - i];
			break;
		      }
		  }
		else
		  tem2 = NULL_TREE;

		if (cdef_code[1 - i] == BIT_AND_EXPR
		    && tree_fits_shwi_p (cdef_arg2[1 - i])
		    && tree_to_shwi (cdef_arg2[1 - i])
		       == TYPE_PRECISION (rtype) - 1
		    && TREE_CODE (cdef_arg1[1 - i]) == SSA_NAME)
		  {
		    if (tem == cdef_arg1[1 - i]
			|| tem2 == cdef_arg1[1 - i])
		      {
			rotcnt = def_arg2[1 - i];
			break;
		      }
		    tree tem3;
		    defcodefor_name (cdef_arg1[1 - i], &code, &tem3, NULL);
		    if (CONVERT_EXPR_CODE_P (code)
			&& INTEGRAL_TYPE_P (TREE_TYPE (tem3))
			&& TYPE_PRECISION (TREE_TYPE (tem3))
			   > floor_log2 (TYPE_PRECISION (rtype))
			&& type_has_mode_precision_p (TREE_TYPE (tem3)))
		      {
			if (tem == tem3 || tem2 == tem3)
			  {
			    rotcnt = def_arg2[1 - i];
			    break;
			  }
		      }
		  }
	      }
	  }
      if (check_range && wider_prec > TYPE_PRECISION (rtype))
	{
	  if (TREE_CODE (rotcnt) != SSA_NAME)
	    return false;
	  int_range_max r;
	  range_query *q = get_range_query (cfun);
	  if (q == get_global_range_query ())
	    q = enable_ranger (cfun);
	  if (!q->range_of_expr (r, rotcnt, check_range_stmt))
	    {
	      if (check_range > 0)
		return false;
	      r.set_varying (TREE_TYPE (rotcnt));
	    }
	  int prec = TYPE_PRECISION (TREE_TYPE (rotcnt));
	  signop sign = TYPE_SIGN (TREE_TYPE (rotcnt));
	  wide_int min = wide_int::from (TYPE_PRECISION (rtype), prec, sign);
	  wide_int max = wide_int::from (wider_prec - 1, prec, sign);
	  if (check_range < 0)
	    max = min;
	  int_range<1> r2 (TREE_TYPE (rotcnt), min, max);
	  r.intersect (r2);
	  if (!r.undefined_p ())
	    {
	      if (check_range > 0)
		{
		  int_range_max r3;
		  for (int i = TYPE_PRECISION (rtype) + 1; i < wider_prec;
		       i += TYPE_PRECISION (rtype))
		    {
		      int j = i + TYPE_PRECISION (rtype) - 2;
		      min = wide_int::from (i, prec, sign);
		      max = wide_int::from (MIN (j, wider_prec - 1),
					    prec, sign);
		      int_range<1> r4 (TREE_TYPE (rotcnt), min, max);
		      r3.union_ (r4);
		    }
		  r.intersect (r3);
		  if (!r.undefined_p ())
		    return false;
		}
	      add_masking = true;
	    }
	}
      if (rotcnt == NULL_TREE)
	return false;
      swapped_p = i != 1;
    }

  if (!useless_type_conversion_p (TREE_TYPE (def_arg2[0]),
				  TREE_TYPE (rotcnt)))
    {
      g = gimple_build_assign (make_ssa_name (TREE_TYPE (def_arg2[0])),
			       NOP_EXPR, rotcnt);
      gsi_insert_before (gsi, g, GSI_SAME_STMT);
      rotcnt = gimple_assign_lhs (g);
    }
  if (add_masking)
    {
      g = gimple_build_assign (make_ssa_name (TREE_TYPE (rotcnt)),
			       BIT_AND_EXPR, rotcnt,
			       build_int_cst (TREE_TYPE (rotcnt),
					      TYPE_PRECISION (rtype) - 1));
      gsi_insert_before (gsi, g, GSI_SAME_STMT);
      rotcnt = gimple_assign_lhs (g);
    }
  lhs = gimple_assign_lhs (stmt);
  if (!useless_type_conversion_p (rtype, TREE_TYPE (def_arg1[0])))
    lhs = make_ssa_name (TREE_TYPE (def_arg1[0]));
  g = gimple_build_assign (lhs,
			   ((def_code[0] == LSHIFT_EXPR) ^ swapped_p)
			   ? LROTATE_EXPR : RROTATE_EXPR, def_arg1[0], rotcnt);
  if (!useless_type_conversion_p (rtype, TREE_TYPE (def_arg1[0])))
    {
      gsi_insert_before (gsi, g, GSI_SAME_STMT);
      g = gimple_build_assign (gimple_assign_lhs (stmt), NOP_EXPR, lhs);
    }
  gsi_replace (gsi, g, false);
  return true;
}

/* Check whether an array contains a valid ctz table.  */
static bool
check_ctz_array (tree ctor, unsigned HOST_WIDE_INT mulc,
		 HOST_WIDE_INT &zero_val, unsigned shift, unsigned bits)
{
  tree elt, idx;
  unsigned HOST_WIDE_INT i, mask;
  unsigned matched = 0;

  mask = ((HOST_WIDE_INT_1U << (bits - shift)) - 1) << shift;

  zero_val = 0;

  FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (ctor), i, idx, elt)
    {
      if (TREE_CODE (idx) != INTEGER_CST || TREE_CODE (elt) != INTEGER_CST)
	return false;
      if (i > bits * 2)
	return false;

      unsigned HOST_WIDE_INT index = tree_to_shwi (idx);
      HOST_WIDE_INT val = tree_to_shwi (elt);

      if (index == 0)
	{
	  zero_val = val;
	  matched++;
	}

      if (val >= 0 && val < bits && (((mulc << val) & mask) >> shift) == index)
	matched++;

      if (matched > bits)
	return true;
    }

  return false;
}

/* Check whether a string contains a valid ctz table.  */
static bool
check_ctz_string (tree string, unsigned HOST_WIDE_INT mulc,
		  HOST_WIDE_INT &zero_val, unsigned shift, unsigned bits)
{
  unsigned HOST_WIDE_INT len = TREE_STRING_LENGTH (string);
  unsigned HOST_WIDE_INT mask;
  unsigned matched = 0;
  const unsigned char *p = (const unsigned char *) TREE_STRING_POINTER (string);

  if (len < bits || len > bits * 2)
    return false;

  mask = ((HOST_WIDE_INT_1U << (bits - shift)) - 1) << shift;

  zero_val = p[0];

  for (unsigned i = 0; i < len; i++)
    if (p[i] < bits && (((mulc << p[i]) & mask) >> shift) == i)
      matched++;

  return matched == bits;
}

/* Recognize count trailing zeroes idiom.
   The canonical form is array[((x & -x) * C) >> SHIFT] where C is a magic
   constant which when multiplied by a power of 2 creates a unique value
   in the top 5 or 6 bits.  This is then indexed into a table which maps it
   to the number of trailing zeroes.  Array[0] is returned so the caller can
   emit an appropriate sequence depending on whether ctz (0) is defined on
   the target.  */
static bool
optimize_count_trailing_zeroes (tree array_ref, tree x, tree mulc,
				tree tshift, HOST_WIDE_INT &zero_val)
{
  tree type = TREE_TYPE (array_ref);
  tree array = TREE_OPERAND (array_ref, 0);

  gcc_assert (TREE_CODE (mulc) == INTEGER_CST);
  gcc_assert (TREE_CODE (tshift) == INTEGER_CST);

  tree input_type = TREE_TYPE (x);
  unsigned input_bits = tree_to_shwi (TYPE_SIZE (input_type));

  /* Check the array element type is not wider than 32 bits and the input is
     an unsigned 32-bit or 64-bit type.  */
  if (TYPE_PRECISION (type) > 32 || !TYPE_UNSIGNED (input_type))
    return false;
  if (input_bits != 32 && input_bits != 64)
    return false;

  if (!direct_internal_fn_supported_p (IFN_CTZ, input_type, OPTIMIZE_FOR_BOTH))
    return false;

  /* Check the lower bound of the array is zero.  */
  tree low = array_ref_low_bound (array_ref);
  if (!low || !integer_zerop (low))
    return false;

  unsigned shiftval = tree_to_shwi (tshift);

  /* Check the shift extracts the top 5..7 bits.  */
  if (shiftval < input_bits - 7 || shiftval > input_bits - 5)
    return false;

  tree ctor = ctor_for_folding (array);
  if (!ctor)
    return false;

  unsigned HOST_WIDE_INT val = tree_to_uhwi (mulc);

  if (TREE_CODE (ctor) == CONSTRUCTOR)
    return check_ctz_array (ctor, val, zero_val, shiftval, input_bits);

  if (TREE_CODE (ctor) == STRING_CST
      && TYPE_PRECISION (type) == CHAR_TYPE_SIZE)
    return check_ctz_string (ctor, val, zero_val, shiftval, input_bits);

  return false;
}

/* Match.pd function to match the ctz expression.  */
extern bool gimple_ctz_table_index (tree, tree *, tree (*)(tree));

static bool
simplify_count_trailing_zeroes (gimple_stmt_iterator *gsi)
{
  gimple *stmt = gsi_stmt (*gsi);
  tree array_ref = gimple_assign_rhs1 (stmt);
  tree res_ops[3];
  HOST_WIDE_INT zero_val;

  gcc_checking_assert (TREE_CODE (array_ref) == ARRAY_REF);

  if (!gimple_ctz_table_index (TREE_OPERAND (array_ref, 1), &res_ops[0], NULL))
    return false;

  if (optimize_count_trailing_zeroes (array_ref, res_ops[0],
				      res_ops[1], res_ops[2], zero_val))
    {
      tree type = TREE_TYPE (res_ops[0]);
      HOST_WIDE_INT ctz_val = 0;
      HOST_WIDE_INT type_size = tree_to_shwi (TYPE_SIZE (type));
      bool zero_ok
	= CTZ_DEFINED_VALUE_AT_ZERO (SCALAR_INT_TYPE_MODE (type), ctz_val) == 2;

      /* If the input value can't be zero, don't special case ctz (0).  */
      if (tree_expr_nonzero_p (res_ops[0]))
	{
	  zero_ok = true;
	  zero_val = 0;
	  ctz_val = 0;
	}

      /* Skip if there is no value defined at zero, or if we can't easily
	 return the correct value for zero.  */
      if (!zero_ok)
	return false;
      if (zero_val != ctz_val && !(zero_val == 0 && ctz_val == type_size))
	return false;

      gimple_seq seq = NULL;
      gimple *g;
      gcall *call = gimple_build_call_internal (IFN_CTZ, 1, res_ops[0]);
      gimple_set_location (call, gimple_location (stmt));
      gimple_set_lhs (call, make_ssa_name (integer_type_node));
      gimple_seq_add_stmt (&seq, call);

      tree prev_lhs = gimple_call_lhs (call);

      /* Emit ctz (x) & 31 if ctz (0) is 32 but we need to return 0.  */
      if (zero_val == 0 && ctz_val == type_size)
	{
	  g = gimple_build_assign (make_ssa_name (integer_type_node),
				   BIT_AND_EXPR, prev_lhs,
				   build_int_cst (integer_type_node,
						  type_size - 1));
	  gimple_set_location (g, gimple_location (stmt));
	  gimple_seq_add_stmt (&seq, g);
	  prev_lhs = gimple_assign_lhs (g);
	}

      g = gimple_build_assign (gimple_assign_lhs (stmt), NOP_EXPR, prev_lhs);
      gimple_seq_add_stmt (&seq, g);
      gsi_replace_with_seq (gsi, seq, true);
      return true;
    }

  return false;
}

/* Combine an element access with a shuffle.  Returns true if there were
   any changes made, else it returns false.  */

static bool
simplify_bitfield_ref (gimple_stmt_iterator *gsi)
{
  gimple *stmt = gsi_stmt (*gsi);
  gimple *def_stmt;
  tree op, op0, op1;
  tree elem_type, type;
  tree p, m, tem;
  unsigned HOST_WIDE_INT nelts, idx;
  poly_uint64 size, elem_size;
  enum tree_code code;

  op = gimple_assign_rhs1 (stmt);
  gcc_checking_assert (TREE_CODE (op) == BIT_FIELD_REF);

  op0 = TREE_OPERAND (op, 0);
  if (TREE_CODE (op0) != SSA_NAME
      || TREE_CODE (TREE_TYPE (op0)) != VECTOR_TYPE)
    return false;

  def_stmt = get_prop_source_stmt (op0, false, NULL);
  if (!def_stmt || !can_propagate_from (def_stmt))
    return false;

  op1 = TREE_OPERAND (op, 1);
  code = gimple_assign_rhs_code (def_stmt);
  elem_type = TREE_TYPE (TREE_TYPE (op0));
  type = TREE_TYPE (op);
  /* Also handle vector type.
     .i.e.
     _7 = VEC_PERM_EXPR <_1, _1, { 2, 3, 2, 3 }>;
     _11 = BIT_FIELD_REF <_7, 64, 0>;

     to

     _11 = BIT_FIELD_REF <_1, 64, 64>.  */

  size = tree_to_poly_uint64 (TYPE_SIZE (type));
  if (maybe_ne (bit_field_size (op), size))
    return false;

  elem_size = tree_to_poly_uint64 (TYPE_SIZE (elem_type));
  if (code != VEC_PERM_EXPR
      || !constant_multiple_p (bit_field_offset (op), elem_size, &idx))
    return false;

  m = gimple_assign_rhs3 (def_stmt);
  if (TREE_CODE (m) != VECTOR_CST
      || !VECTOR_CST_NELTS (m).is_constant (&nelts))
    return false;

  /* One element.  */
  if (known_eq (size, elem_size))
    idx = TREE_INT_CST_LOW (VECTOR_CST_ELT (m, idx)) % (2 * nelts);
  else
    {
      unsigned HOST_WIDE_INT nelts_op;
      if (!constant_multiple_p (size, elem_size, &nelts_op)
	  || !pow2p_hwi (nelts_op))
	return false;
      /* Clamp vec_perm_expr index.  */
      unsigned start = TREE_INT_CST_LOW (vector_cst_elt (m, idx)) % (2 * nelts);
      unsigned end = TREE_INT_CST_LOW (vector_cst_elt (m, idx + nelts_op - 1))
		     % (2 * nelts);
      /* Be in the same vector.  */
      if ((start < nelts) != (end < nelts))
	return false;
      for (unsigned HOST_WIDE_INT i = 1; i != nelts_op; i++)
	{
	  /* Continuous area.  */
	  if (TREE_INT_CST_LOW (vector_cst_elt (m, idx + i)) % (2 * nelts) - 1
	      != TREE_INT_CST_LOW (vector_cst_elt (m, idx + i - 1))
		 % (2 * nelts))
	    return false;
	}
      /* Alignment not worse than before.  */
      if (start % nelts_op)
	return false;
      idx = start;
    }

  if (idx < nelts)
    p = gimple_assign_rhs1 (def_stmt);
  else
    {
      p = gimple_assign_rhs2 (def_stmt);
      idx -= nelts;
    }

  tem = build3 (BIT_FIELD_REF, TREE_TYPE (op),
		p, op1, bitsize_int (idx * elem_size));
  gimple_assign_set_rhs1 (stmt, tem);
  fold_stmt (gsi);
  update_stmt (gsi_stmt (*gsi));
  return true;
}

/* Determine whether applying the 2 permutations (mask1 then mask2)
   gives back one of the input.  */

static int
is_combined_permutation_identity (tree mask1, tree mask2)
{
  tree mask;
  unsigned HOST_WIDE_INT nelts, i, j;
  bool maybe_identity1 = true;
  bool maybe_identity2 = true;

  gcc_checking_assert (TREE_CODE (mask1) == VECTOR_CST
		       && TREE_CODE (mask2) == VECTOR_CST);
  mask = fold_ternary (VEC_PERM_EXPR, TREE_TYPE (mask1), mask1, mask1, mask2);
  if (mask == NULL_TREE || TREE_CODE (mask) != VECTOR_CST)
    return 0;

  if (!VECTOR_CST_NELTS (mask).is_constant (&nelts))
    return 0;
  for (i = 0; i < nelts; i++)
    {
      tree val = VECTOR_CST_ELT (mask, i);
      gcc_assert (TREE_CODE (val) == INTEGER_CST);
      j = TREE_INT_CST_LOW (val) & (2 * nelts - 1);
      if (j == i)
	maybe_identity2 = false;
      else if (j == i + nelts)
	maybe_identity1 = false;
      else
	return 0;
    }
  return maybe_identity1 ? 1 : maybe_identity2 ? 2 : 0;
}

/* Combine a shuffle with its arguments.  Returns 1 if there were any
   changes made, 2 if cfg-cleanup needs to run.  Else it returns 0.  */

static int
simplify_permutation (gimple_stmt_iterator *gsi)
{
  gimple *stmt = gsi_stmt (*gsi);
  gimple *def_stmt = NULL;
  tree op0, op1, op2, op3, arg0, arg1;
  enum tree_code code, code2 = ERROR_MARK;
  bool single_use_op0 = false;

  gcc_checking_assert (gimple_assign_rhs_code (stmt) == VEC_PERM_EXPR);

  op0 = gimple_assign_rhs1 (stmt);
  op1 = gimple_assign_rhs2 (stmt);
  op2 = gimple_assign_rhs3 (stmt);

  if (TREE_CODE (op2) != VECTOR_CST)
    return 0;

  if (TREE_CODE (op0) == VECTOR_CST)
    {
      code = VECTOR_CST;
      arg0 = op0;
    }
  else if (TREE_CODE (op0) == SSA_NAME)
    {
      def_stmt = get_prop_source_stmt (op0, false, &single_use_op0);
      if (!def_stmt)
	return 0;
      code = gimple_assign_rhs_code (def_stmt);
      if (code == VIEW_CONVERT_EXPR)
	{
	  tree rhs = gimple_assign_rhs1 (def_stmt);
	  tree name = TREE_OPERAND (rhs, 0);
	  if (TREE_CODE (name) != SSA_NAME)
	    return 0;
	  if (!has_single_use (name))
	    single_use_op0 = false;
	  /* Here we update the def_stmt through this VIEW_CONVERT_EXPR,
	     but still keep the code to indicate it comes from
	     VIEW_CONVERT_EXPR.  */
	  def_stmt = SSA_NAME_DEF_STMT (name);
	  if (!def_stmt || !is_gimple_assign (def_stmt))
	    return 0;
	  if (gimple_assign_rhs_code (def_stmt) != CONSTRUCTOR)
	    return 0;
	}
      if (!can_propagate_from (def_stmt))
	return 0;
      arg0 = gimple_assign_rhs1 (def_stmt);
    }
  else
    return 0;

  /* Two consecutive shuffles.  */
  if (code == VEC_PERM_EXPR)
    {
      tree orig;
      int ident;

      if (op0 != op1)
	return 0;
      op3 = gimple_assign_rhs3 (def_stmt);
      if (TREE_CODE (op3) != VECTOR_CST)
	return 0;
      ident = is_combined_permutation_identity (op3, op2);
      if (!ident)
	return 0;
      orig = (ident == 1) ? gimple_assign_rhs1 (def_stmt)
			  : gimple_assign_rhs2 (def_stmt);
      gimple_assign_set_rhs1 (stmt, unshare_expr (orig));
      gimple_assign_set_rhs_code (stmt, TREE_CODE (orig));
      gimple_set_num_ops (stmt, 2);
      update_stmt (stmt);
      return remove_prop_source_from_use (op0) ? 2 : 1;
    }
  else if (code == CONSTRUCTOR
	   || code == VECTOR_CST
	   || code == VIEW_CONVERT_EXPR)
    {
      if (op0 != op1)
	{
	  if (TREE_CODE (op0) == SSA_NAME && !single_use_op0)
	    return 0;

	  if (TREE_CODE (op1) == VECTOR_CST)
	    arg1 = op1;
	  else if (TREE_CODE (op1) == SSA_NAME)
	    {
	      gimple *def_stmt2 = get_prop_source_stmt (op1, true, NULL);
	      if (!def_stmt2)
		return 0;
	      code2 = gimple_assign_rhs_code (def_stmt2);
	      if (code2 == VIEW_CONVERT_EXPR)
		{
		  tree rhs = gimple_assign_rhs1 (def_stmt2);
		  tree name = TREE_OPERAND (rhs, 0);
		  if (TREE_CODE (name) != SSA_NAME)
		    return 0;
		  if (!has_single_use (name))
		    return 0;
		  def_stmt2 = SSA_NAME_DEF_STMT (name);
		  if (!def_stmt2 || !is_gimple_assign (def_stmt2))
		    return 0;
		  if (gimple_assign_rhs_code (def_stmt2) != CONSTRUCTOR)
		    return 0;
		}
	      else if (code2 != CONSTRUCTOR && code2 != VECTOR_CST)
		return 0;
	      if (!can_propagate_from (def_stmt2))
		return 0;
	      arg1 = gimple_assign_rhs1 (def_stmt2);
	    }
	  else
	    return 0;
	}
      else
	{
	  /* Already used twice in this statement.  */
	  if (TREE_CODE (op0) == SSA_NAME && num_imm_uses (op0) > 2)
	    return 0;
	  arg1 = arg0;
	}

      /* If there are any VIEW_CONVERT_EXPRs found when finding permutation
	 operands source, check whether it's valid to transform and prepare
	 the required new operands.  */
      if (code == VIEW_CONVERT_EXPR || code2 == VIEW_CONVERT_EXPR)
	{
	  /* Figure out the target vector type to which operands should be
	     converted.  If both are CONSTRUCTOR, the types should be the
	     same, otherwise, use the one of CONSTRUCTOR.  */
	  tree tgt_type = NULL_TREE;
	  if (code == VIEW_CONVERT_EXPR)
	    {
	      gcc_assert (gimple_assign_rhs_code (def_stmt) == CONSTRUCTOR);
	      code = CONSTRUCTOR;
	      tgt_type = TREE_TYPE (arg0);
	    }
	  if (code2 == VIEW_CONVERT_EXPR)
	    {
	      tree arg1_type = TREE_TYPE (arg1);
	      if (tgt_type == NULL_TREE)
		tgt_type = arg1_type;
	      else if (tgt_type != arg1_type)
		return 0;
	    }

	  if (!VECTOR_TYPE_P (tgt_type))
	    return 0;
	  tree op2_type = TREE_TYPE (op2);

	  /* Figure out the shrunk factor.  */
	  poly_uint64 tgt_units = TYPE_VECTOR_SUBPARTS (tgt_type);
	  poly_uint64 op2_units = TYPE_VECTOR_SUBPARTS (op2_type);
	  if (maybe_gt (tgt_units, op2_units))
	    return 0;
	  unsigned int factor;
	  if (!constant_multiple_p (op2_units, tgt_units, &factor))
	    return 0;

	  /* Build the new permutation control vector as target vector.  */
	  vec_perm_builder builder;
	  if (!tree_to_vec_perm_builder (&builder, op2))
	    return 0;
	  vec_perm_indices indices (builder, 2, op2_units);
	  vec_perm_indices new_indices;
	  if (new_indices.new_shrunk_vector (indices, factor))
	    {
	      tree mask_type = tgt_type;
	      if (!VECTOR_INTEGER_TYPE_P (mask_type))
		{
		  tree elem_type = TREE_TYPE (mask_type);
		  unsigned elem_size = TREE_INT_CST_LOW (TYPE_SIZE (elem_type));
		  tree int_type = build_nonstandard_integer_type (elem_size, 0);
		  mask_type = build_vector_type (int_type, tgt_units);
		}
	      op2 = vec_perm_indices_to_tree (mask_type, new_indices);
	    }
	  else
	    return 0;

	  /* Convert the VECTOR_CST to the appropriate vector type.  */
	  if (tgt_type != TREE_TYPE (arg0))
	    arg0 = fold_build1 (VIEW_CONVERT_EXPR, tgt_type, arg0);
	  else if (tgt_type != TREE_TYPE (arg1))
	    arg1 = fold_build1 (VIEW_CONVERT_EXPR, tgt_type, arg1);
	}

      /* VIEW_CONVERT_EXPR should be updated to CONSTRUCTOR before.  */
      gcc_assert (code == CONSTRUCTOR || code == VECTOR_CST);

      /* Shuffle of a constructor.  */
      bool ret = false;
      tree res_type
	= build_vector_type (TREE_TYPE (TREE_TYPE (arg0)),
			     TYPE_VECTOR_SUBPARTS (TREE_TYPE (op2)));
      tree opt = fold_ternary (VEC_PERM_EXPR, res_type, arg0, arg1, op2);
      if (!opt
	  || (TREE_CODE (opt) != CONSTRUCTOR && TREE_CODE (opt) != VECTOR_CST))
	return 0;
      /* Found VIEW_CONVERT_EXPR before, need one explicit conversion.  */
      if (res_type != TREE_TYPE (op0))
	{
	  tree name = make_ssa_name (TREE_TYPE (opt));
	  gimple *ass_stmt = gimple_build_assign (name, opt);
	  gsi_insert_before (gsi, ass_stmt, GSI_SAME_STMT);
	  opt = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (op0), name);
	}
      gimple_assign_set_rhs_from_tree (gsi, opt);
      update_stmt (gsi_stmt (*gsi));
      if (TREE_CODE (op0) == SSA_NAME)
	ret = remove_prop_source_from_use (op0);
      if (op0 != op1 && TREE_CODE (op1) == SSA_NAME)
	ret |= remove_prop_source_from_use (op1);
      return ret ? 2 : 1;
    }

  return 0;
}

/* Get the BIT_FIELD_REF definition of VAL, if any, looking through
   conversions with code CONV_CODE or update it if still ERROR_MARK.
   Return NULL_TREE if no such matching def was found.  */

static tree
get_bit_field_ref_def (tree val, enum tree_code &conv_code)
{
  if (TREE_CODE (val) != SSA_NAME)
    return NULL_TREE;
  gimple *def_stmt = get_prop_source_stmt (val, false, NULL);
  if (!def_stmt)
    return NULL_TREE;
  enum tree_code code = gimple_assign_rhs_code (def_stmt);
  if (code == FLOAT_EXPR
      || code == FIX_TRUNC_EXPR
      || CONVERT_EXPR_CODE_P (code))
    {
      tree op1 = gimple_assign_rhs1 (def_stmt);
      if (conv_code == ERROR_MARK)
	conv_code = code;
      else if (conv_code != code)
	return NULL_TREE;
      if (TREE_CODE (op1) != SSA_NAME)
	return NULL_TREE;
      def_stmt = SSA_NAME_DEF_STMT (op1);
      if (! is_gimple_assign (def_stmt))
	return NULL_TREE;
      code = gimple_assign_rhs_code (def_stmt);
    }
  if (code != BIT_FIELD_REF)
    return NULL_TREE;
  return gimple_assign_rhs1 (def_stmt);
}

/* Recognize a VEC_PERM_EXPR.  Returns true if there were any changes.  */

static bool
simplify_vector_constructor (gimple_stmt_iterator *gsi)
{
  gimple *stmt = gsi_stmt (*gsi);
  tree op, orig[2], type, elem_type;
  unsigned elem_size, i;
  unsigned HOST_WIDE_INT nelts;
  unsigned HOST_WIDE_INT refnelts;
  enum tree_code conv_code;
  constructor_elt *elt;

  op = gimple_assign_rhs1 (stmt);
  type = TREE_TYPE (op);
  gcc_checking_assert (TREE_CODE (op) == CONSTRUCTOR
		       && TREE_CODE (type) == VECTOR_TYPE);

  if (!TYPE_VECTOR_SUBPARTS (type).is_constant (&nelts))
    return false;
  elem_type = TREE_TYPE (type);
  elem_size = TREE_INT_CST_LOW (TYPE_SIZE (elem_type));

  orig[0] = NULL;
  orig[1] = NULL;
  conv_code = ERROR_MARK;
  bool maybe_ident = true;
  bool maybe_blend[2] = { true, true };
  tree one_constant = NULL_TREE;
  tree one_nonconstant = NULL_TREE;
  auto_vec<tree> constants;
  constants.safe_grow_cleared (nelts, true);
  auto_vec<std::pair<unsigned, unsigned>, 64> elts;
  FOR_EACH_VEC_SAFE_ELT (CONSTRUCTOR_ELTS (op), i, elt)
    {
      tree ref, op1;
      unsigned int elem;

      /* Look for elements extracted and possibly converted from
	 another vector.  */
      op1 = get_bit_field_ref_def (elt->value, conv_code);
      if (op1
	  && TREE_CODE ((ref = TREE_OPERAND (op1, 0))) == SSA_NAME
	  && VECTOR_TYPE_P (TREE_TYPE (ref))
	  && useless_type_conversion_p (TREE_TYPE (op1),
					TREE_TYPE (TREE_TYPE (ref)))
	  && constant_multiple_p (bit_field_offset (op1),
				  bit_field_size (op1), &elem)
	  && TYPE_VECTOR_SUBPARTS (TREE_TYPE (ref)).is_constant (&refnelts))
	{
	  unsigned int j;
	  for (j = 0; j < 2; ++j)
	    {
	      if (!orig[j])
		{
		  if (j == 0
		      || useless_type_conversion_p (TREE_TYPE (orig[0]),
						    TREE_TYPE (ref)))
		    break;
		}
	      else if (ref == orig[j])
		break;
	    }
	  /* Found a suitable vector element.  */
	  if (j < 2)
	    {
	      orig[j] = ref;
	      if (elem != i || j != 0)
		maybe_ident = false;
	      if (elem != i)
		maybe_blend[j] = false;
	      elts.safe_push (std::make_pair (j, elem));
	      continue;
	    }
	  /* Else fallthru.  */
	}
      /* Handle elements not extracted from a vector.
	  1. constants by permuting with constant vector
	  2. a unique non-constant element by permuting with a splat vector  */
      if (orig[1]
	  && orig[1] != error_mark_node)
	return false;
      orig[1] = error_mark_node;
      if (CONSTANT_CLASS_P (elt->value))
	{
	  if (one_nonconstant)
	    return false;
	  if (!one_constant)
	    one_constant = elt->value;
	  constants[i] = elt->value;
	}
      else
	{
	  if (!one_nonconstant)
	    one_nonconstant = elt->value;
	  else if (!operand_equal_p (one_nonconstant, elt->value, 0))
	    return false;
	}
      elts.safe_push (std::make_pair (1, i));
      maybe_ident = false;
    }

  if (! orig[0]
      || ! VECTOR_TYPE_P (TREE_TYPE (orig[0])))
    return false;
  refnelts = TYPE_VECTOR_SUBPARTS (TREE_TYPE (orig[0])).to_constant ();
  /* We currently do not handle larger destination vectors.  */
  if (refnelts < nelts)
    return false;

  if (maybe_ident)
    {
      tree conv_src_type
	= (nelts != refnelts
	   ? (conv_code != ERROR_MARK
	      ? build_vector_type (TREE_TYPE (TREE_TYPE (orig[0])), nelts)
	      : type)
	   : TREE_TYPE (orig[0]));
      if (conv_code != ERROR_MARK
	  && !supportable_convert_operation (conv_code, type, conv_src_type,
					     &conv_code))
	{
	  /* Only few targets implement direct conversion patterns so try
	     some simple special cases via VEC_[UN]PACK[_FLOAT]_LO_EXPR.  */
	  optab optab;
	  tree halfvectype, dblvectype;
	  enum tree_code unpack_op;

	  if (!BYTES_BIG_ENDIAN)
	    unpack_op = (FLOAT_TYPE_P (TREE_TYPE (type))
			 ? VEC_UNPACK_FLOAT_LO_EXPR
			 : VEC_UNPACK_LO_EXPR);
	  else
	    unpack_op = (FLOAT_TYPE_P (TREE_TYPE (type))
			 ? VEC_UNPACK_FLOAT_HI_EXPR
			 : VEC_UNPACK_HI_EXPR);

	  /* Conversions between DFP and FP have no special tree code
	     but we cannot handle those since all relevant vector conversion
	     optabs only have a single mode.  */
	  if (CONVERT_EXPR_CODE_P (conv_code)
	      && FLOAT_TYPE_P (TREE_TYPE (type))
	      && (DECIMAL_FLOAT_TYPE_P (TREE_TYPE (type))
		  != DECIMAL_FLOAT_TYPE_P (TREE_TYPE (conv_src_type))))
	    return false;

	  if (CONVERT_EXPR_CODE_P (conv_code)
	      && (2 * TYPE_PRECISION (TREE_TYPE (TREE_TYPE (orig[0])))
		  == TYPE_PRECISION (TREE_TYPE (type)))
	      && mode_for_vector (as_a <scalar_mode>
				  (TYPE_MODE (TREE_TYPE (TREE_TYPE (orig[0])))),
				  nelts * 2).exists ()
	      && (dblvectype
		    = build_vector_type (TREE_TYPE (TREE_TYPE (orig[0])),
					 nelts * 2))
	      /* Only use it for vector modes or for vector booleans
		 represented as scalar bitmasks.  See PR95528.  */
	      && (VECTOR_MODE_P (TYPE_MODE (dblvectype))
		  || VECTOR_BOOLEAN_TYPE_P (dblvectype))
	      && (optab = optab_for_tree_code (unpack_op,
					       dblvectype,
					       optab_default))
	      && (optab_handler (optab, TYPE_MODE (dblvectype))
		  != CODE_FOR_nothing))
	    {
	      gimple_seq stmts = NULL;
	      tree dbl;
	      if (refnelts == nelts)
		{
		  /* ???  Paradoxical subregs don't exist, so insert into
		     the lower half of a wider zero vector.  */
		  dbl = gimple_build (&stmts, BIT_INSERT_EXPR, dblvectype,
				      build_zero_cst (dblvectype), orig[0],
				      bitsize_zero_node);
		}
	      else if (refnelts == 2 * nelts)
		dbl = orig[0];
	      else
		dbl = gimple_build (&stmts, BIT_FIELD_REF, dblvectype,
				    orig[0], TYPE_SIZE (dblvectype),
				    bitsize_zero_node);
	      gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
	      gimple_assign_set_rhs_with_ops (gsi, unpack_op, dbl);
	    }
	  else if (CONVERT_EXPR_CODE_P (conv_code)
		   && (TYPE_PRECISION (TREE_TYPE (TREE_TYPE (orig[0])))
		       == 2 * TYPE_PRECISION (TREE_TYPE (type)))
		   && mode_for_vector (as_a <scalar_mode>
					 (TYPE_MODE
					    (TREE_TYPE (TREE_TYPE (orig[0])))),
				       nelts / 2).exists ()
		   && (halfvectype
			 = build_vector_type (TREE_TYPE (TREE_TYPE (orig[0])),
					      nelts / 2))
		   /* Only use it for vector modes or for vector booleans
		      represented as scalar bitmasks.  See PR95528.  */
		   && (VECTOR_MODE_P (TYPE_MODE (halfvectype))
		       || VECTOR_BOOLEAN_TYPE_P (halfvectype))
		   && (optab = optab_for_tree_code (VEC_PACK_TRUNC_EXPR,
						    halfvectype,
						    optab_default))
		   && (optab_handler (optab, TYPE_MODE (halfvectype))
		       != CODE_FOR_nothing))
	    {
	      gimple_seq stmts = NULL;
	      tree low = gimple_build (&stmts, BIT_FIELD_REF, halfvectype,
				       orig[0], TYPE_SIZE (halfvectype),
				       bitsize_zero_node);
	      tree hig = gimple_build (&stmts, BIT_FIELD_REF, halfvectype,
				       orig[0], TYPE_SIZE (halfvectype),
				       TYPE_SIZE (halfvectype));
	      gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
	      gimple_assign_set_rhs_with_ops (gsi, VEC_PACK_TRUNC_EXPR,
					      low, hig);
	    }
	  else
	    return false;
	  update_stmt (gsi_stmt (*gsi));
	  return true;
	}
      if (nelts != refnelts)
	{
	  gassign *lowpart
	    = gimple_build_assign (make_ssa_name (conv_src_type),
				   build3 (BIT_FIELD_REF, conv_src_type,
					   orig[0], TYPE_SIZE (conv_src_type),
					   bitsize_zero_node));
	  gsi_insert_before (gsi, lowpart, GSI_SAME_STMT);
	  orig[0] = gimple_assign_lhs (lowpart);
	}
      if (conv_code == ERROR_MARK)
	{
	  tree src_type = TREE_TYPE (orig[0]);
	  if (!useless_type_conversion_p (type, src_type))
	    {
	      gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (type),
				    TYPE_VECTOR_SUBPARTS (src_type))
			  && useless_type_conversion_p (TREE_TYPE (type),
							TREE_TYPE (src_type)));
	      tree rhs = build1 (VIEW_CONVERT_EXPR, type, orig[0]);
	      orig[0] = make_ssa_name (type);
	      gassign *assign = gimple_build_assign (orig[0], rhs);
	      gsi_insert_before (gsi, assign, GSI_SAME_STMT);
	    }
	  gimple_assign_set_rhs_from_tree (gsi, orig[0]);
	}
      else
	gimple_assign_set_rhs_with_ops (gsi, conv_code, orig[0],
					NULL_TREE, NULL_TREE);
    }
  else
    {
      /* If we combine a vector with a non-vector avoid cases where
	 we'll obviously end up with more GIMPLE stmts which is when
	 we'll later not fold this to a single insert into the vector
	 and we had a single extract originally.  See PR92819.  */
      if (nelts == 2
	  && refnelts > 2
	  && orig[1] == error_mark_node
	  && !maybe_blend[0])
	return false;
      tree mask_type, perm_type, conv_src_type;
      perm_type = TREE_TYPE (orig[0]);
      conv_src_type = (nelts == refnelts
		       ? perm_type
		       : build_vector_type (TREE_TYPE (perm_type), nelts));
      if (conv_code != ERROR_MARK
	  && !supportable_convert_operation (conv_code, type, conv_src_type,
					     &conv_code))
	return false;

      /* Now that we know the number of elements of the source build the
	 permute vector.
	 ???  When the second vector has constant values we can shuffle
	 it and its source indexes to make the permutation supported.
	 For now it mimics a blend.  */
      vec_perm_builder sel (refnelts, refnelts, 1);
      bool all_same_p = true;
      for (i = 0; i < elts.length (); ++i)
	{
	  sel.quick_push (elts[i].second + elts[i].first * refnelts);
	  all_same_p &= known_eq (sel[i], sel[0]);
	}
      /* And fill the tail with "something".  It's really don't care,
         and ideally we'd allow VEC_PERM to have a smaller destination
	 vector.  As a heuristic:

	 (a) if what we have so far duplicates a single element, make the
	     tail do the same

	 (b) otherwise preserve a uniform orig[0].  This facilitates
	     later pattern-matching of VEC_PERM_EXPR to a BIT_INSERT_EXPR.  */
      for (; i < refnelts; ++i)
	sel.quick_push (all_same_p
			? sel[0]
			: (elts[0].second == 0 && elts[0].first == 0
			   ? 0 : refnelts) + i);
      vec_perm_indices indices (sel, orig[1] ? 2 : 1, refnelts);
      machine_mode vmode = TYPE_MODE (perm_type);
      if (!can_vec_perm_const_p (vmode, vmode, indices))
	return false;
      mask_type
	= build_vector_type (build_nonstandard_integer_type (elem_size, 1),
			     refnelts);
      if (GET_MODE_CLASS (TYPE_MODE (mask_type)) != MODE_VECTOR_INT
	  || maybe_ne (GET_MODE_SIZE (TYPE_MODE (mask_type)),
		       GET_MODE_SIZE (TYPE_MODE (perm_type))))
	return false;
      tree op2 = vec_perm_indices_to_tree (mask_type, indices);
      bool converted_orig1 = false;
      gimple_seq stmts = NULL;
      if (!orig[1])
	orig[1] = orig[0];
      else if (orig[1] == error_mark_node
	       && one_nonconstant)
	{
	  /* ???  We can see if we can safely convert to the original
	     element type.  */
	  converted_orig1 = conv_code != ERROR_MARK;
	  orig[1] = gimple_build_vector_from_val (&stmts, UNKNOWN_LOCATION,
						  converted_orig1
						  ? type : perm_type,
						  one_nonconstant);
	}
      else if (orig[1] == error_mark_node)
	{
	  /* ???  See if we can convert the vector to the original type.  */
	  converted_orig1 = conv_code != ERROR_MARK;
	  unsigned n = converted_orig1 ? nelts : refnelts;
	  tree_vector_builder vec (converted_orig1
				   ? type : perm_type, n, 1);
	  for (unsigned i = 0; i < n; ++i)
	    if (i < nelts && constants[i])
	      vec.quick_push (constants[i]);
	    else
	      /* ??? Push a don't-care value.  */
	      vec.quick_push (one_constant);
	  orig[1] = vec.build ();
	}
      tree blend_op2 = NULL_TREE;
      if (converted_orig1)
	{
	  /* Make sure we can do a blend in the target type.  */
	  vec_perm_builder sel (nelts, nelts, 1);
	  for (i = 0; i < elts.length (); ++i)
	    sel.quick_push (elts[i].first
			    ? elts[i].second + nelts : i);
	  vec_perm_indices indices (sel, 2, nelts);
	  machine_mode vmode = TYPE_MODE (type);
	  if (!can_vec_perm_const_p (vmode, vmode, indices))
	    return false;
	  mask_type
	    = build_vector_type (build_nonstandard_integer_type (elem_size, 1),
				 nelts);
	  if (GET_MODE_CLASS (TYPE_MODE (mask_type)) != MODE_VECTOR_INT
	      || maybe_ne (GET_MODE_SIZE (TYPE_MODE (mask_type)),
			   GET_MODE_SIZE (TYPE_MODE (type))))
	    return false;
	  blend_op2 = vec_perm_indices_to_tree (mask_type, indices);
	}
      tree orig1_for_perm
	= converted_orig1 ? build_zero_cst (perm_type) : orig[1];
      tree res = gimple_build (&stmts, VEC_PERM_EXPR, perm_type,
			       orig[0], orig1_for_perm, op2);
      if (nelts != refnelts)
	res = gimple_build (&stmts, BIT_FIELD_REF,
			    conv_code != ERROR_MARK ? conv_src_type : type,
			    res, TYPE_SIZE (type), bitsize_zero_node);
      if (conv_code != ERROR_MARK)
	res = gimple_build (&stmts, conv_code, type, res);
      else if (!useless_type_conversion_p (type, TREE_TYPE (res)))
	{
	  gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (type),
				TYPE_VECTOR_SUBPARTS (perm_type))
		      && useless_type_conversion_p (TREE_TYPE (type),
						    TREE_TYPE (perm_type)));
	  res = gimple_build (&stmts, VIEW_CONVERT_EXPR, type, res);
	}
      /* Blend in the actual constant.  */
      if (converted_orig1)
	res = gimple_build (&stmts, VEC_PERM_EXPR, type,
			    res, orig[1], blend_op2);
      gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
      gimple_assign_set_rhs_with_ops (gsi, SSA_NAME, res);
    }
  update_stmt (gsi_stmt (*gsi));
  return true;
}

/* Rewrite the vector load at *GSI to component-wise loads if the load
   is only used in BIT_FIELD_REF extractions with eventual intermediate
   widening.  */

static void
optimize_vector_load (gimple_stmt_iterator *gsi)
{
  gimple *stmt = gsi_stmt (*gsi);
  tree lhs = gimple_assign_lhs (stmt);
  tree rhs = gimple_assign_rhs1 (stmt);

  /* Gather BIT_FIELD_REFs to rewrite, looking through
     VEC_UNPACK_{LO,HI}_EXPR.  */
  use_operand_p use_p;
  imm_use_iterator iter;
  bool rewrite = true;
  auto_vec<gimple *, 8> bf_stmts;
  auto_vec<tree, 8> worklist;
  worklist.quick_push (lhs);
  do
    {
      tree def = worklist.pop ();
      unsigned HOST_WIDE_INT def_eltsize
	= TREE_INT_CST_LOW (TYPE_SIZE (TREE_TYPE (TREE_TYPE (def))));
      FOR_EACH_IMM_USE_FAST (use_p, iter, def)
	{
	  gimple *use_stmt = USE_STMT (use_p);
	  if (is_gimple_debug (use_stmt))
	    continue;
	  if (!is_gimple_assign (use_stmt))
	    {
	      rewrite = false;
	      break;
	    }
	  enum tree_code use_code = gimple_assign_rhs_code (use_stmt);
	  tree use_rhs = gimple_assign_rhs1 (use_stmt);
	  if (use_code == BIT_FIELD_REF
	      && TREE_OPERAND (use_rhs, 0) == def
	      /* If it's on the VEC_UNPACK_{HI,LO}_EXPR
		 def need to verify it is element aligned.  */
	      && (def == lhs
		  || (known_eq (bit_field_size (use_rhs), def_eltsize)
		      && constant_multiple_p (bit_field_offset (use_rhs),
					      def_eltsize)
		      /* We can simulate the VEC_UNPACK_{HI,LO}_EXPR
			 via a NOP_EXPR only for integral types.
			 ???  Support VEC_UNPACK_FLOAT_{HI,LO}_EXPR.  */
		      && INTEGRAL_TYPE_P (TREE_TYPE (use_rhs)))))
	    {
	      bf_stmts.safe_push (use_stmt);
	      continue;
	    }
	  /* Walk through one level of VEC_UNPACK_{LO,HI}_EXPR.  */
	  if (def == lhs
	      && (use_code == VEC_UNPACK_HI_EXPR
		  || use_code == VEC_UNPACK_LO_EXPR)
	      && use_rhs == lhs)
	    {
	      worklist.safe_push (gimple_assign_lhs (use_stmt));
	      continue;
	    }
	  rewrite = false;
	  break;
	}
      if (!rewrite)
	break;
    }
  while (!worklist.is_empty ());

  if (!rewrite)
    {
      gsi_next (gsi);
      return;
    }
  /* We now have all ultimate uses of the load to rewrite in bf_stmts.  */

  /* Prepare the original ref to be wrapped in adjusted BIT_FIELD_REFs.
     For TARGET_MEM_REFs we have to separate the LEA from the reference.  */
  tree load_rhs = rhs;
  if (TREE_CODE (load_rhs) == TARGET_MEM_REF)
    {
      if (TREE_CODE (TREE_OPERAND (load_rhs, 0)) == ADDR_EXPR)
	mark_addressable (TREE_OPERAND (TREE_OPERAND (load_rhs, 0), 0));
      tree ptrtype = build_pointer_type (TREE_TYPE (load_rhs));
      tree tem = make_ssa_name (ptrtype);
      gimple *new_stmt
	= gimple_build_assign (tem, build1 (ADDR_EXPR, TREE_TYPE (tem),
					    unshare_expr (load_rhs)));
      gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
      load_rhs = build2_loc (EXPR_LOCATION (load_rhs),
			     MEM_REF, TREE_TYPE (load_rhs), tem,
			     build_int_cst
			       (TREE_TYPE (TREE_OPERAND (load_rhs, 1)), 0));
    }

  /* Rewrite the BIT_FIELD_REFs to be actual loads, re-emitting them at
     the place of the original load.  */
  for (gimple *use_stmt : bf_stmts)
    {
      tree bfr = gimple_assign_rhs1 (use_stmt);
      tree new_rhs = unshare_expr (load_rhs);
      if (TREE_OPERAND (bfr, 0) != lhs)
	{
	  /* When the BIT_FIELD_REF is on the promoted vector we have to
	     adjust it and emit a conversion afterwards.  */
	  gimple *def_stmt
	    = SSA_NAME_DEF_STMT (TREE_OPERAND (bfr, 0));
	  enum tree_code def_code
	    = gimple_assign_rhs_code (def_stmt);

	  /* The adjusted BIT_FIELD_REF is of the promotion source
	     vector size and at half of the offset...  */
	  new_rhs = fold_build3 (BIT_FIELD_REF,
				 TREE_TYPE (TREE_TYPE (lhs)),
				 new_rhs,
				 TYPE_SIZE (TREE_TYPE (TREE_TYPE (lhs))),
				 size_binop (EXACT_DIV_EXPR,
					     TREE_OPERAND (bfr, 2),
					     size_int (2)));
	  /* ... and offsetted by half of the vector if VEC_UNPACK_HI_EXPR.  */
	  if (def_code == (!BYTES_BIG_ENDIAN
			   ? VEC_UNPACK_HI_EXPR : VEC_UNPACK_LO_EXPR))
	    TREE_OPERAND (new_rhs, 2)
	      = size_binop (PLUS_EXPR, TREE_OPERAND (new_rhs, 2),
			    size_binop (EXACT_DIV_EXPR,
					TYPE_SIZE (TREE_TYPE (lhs)),
					size_int (2)));
	  tree tem = make_ssa_name (TREE_TYPE (TREE_TYPE (lhs)));
	  gimple *new_stmt = gimple_build_assign (tem, new_rhs);
	  location_t loc = gimple_location (use_stmt);
	  gimple_set_location (new_stmt, loc);
	  gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
	  /* Perform scalar promotion.  */
	  new_stmt = gimple_build_assign (gimple_assign_lhs (use_stmt),
					  NOP_EXPR, tem);
	  gimple_set_location (new_stmt, loc);
	  gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
	}
      else
	{
	  /* When the BIT_FIELD_REF is on the original load result
	     we can just wrap that.  */
	  tree new_rhs = fold_build3 (BIT_FIELD_REF, TREE_TYPE (bfr),
				      unshare_expr (load_rhs),
				      TREE_OPERAND (bfr, 1),
				      TREE_OPERAND (bfr, 2));
	  gimple *new_stmt = gimple_build_assign (gimple_assign_lhs (use_stmt),
						  new_rhs);
	  location_t loc = gimple_location (use_stmt);
	  gimple_set_location (new_stmt, loc);
	  gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
	}
      gimple_stmt_iterator gsi2 = gsi_for_stmt (use_stmt);
      unlink_stmt_vdef (use_stmt);
      gsi_remove (&gsi2, true);
    }

  /* Finally get rid of the intermediate stmts.  */
  gimple *use_stmt;
  FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
    {
      if (is_gimple_debug (use_stmt))
	{
	  if (gimple_debug_bind_p (use_stmt))
	    {
	      gimple_debug_bind_reset_value (use_stmt);
	      update_stmt (use_stmt);
	    }
	  continue;
	}
      gimple_stmt_iterator gsi2 = gsi_for_stmt (use_stmt);
      unlink_stmt_vdef (use_stmt);
      release_defs (use_stmt);
      gsi_remove (&gsi2, true);
    }
  /* And the original load.  */
  release_defs (stmt);
  gsi_remove (gsi, true);
}

/* Primitive "lattice" function for gimple_simplify.  */

static tree
fwprop_ssa_val (tree name)
{
  /* First valueize NAME.  */
  if (TREE_CODE (name) == SSA_NAME
      && SSA_NAME_VERSION (name) < lattice.length ())
    {
      tree val = lattice[SSA_NAME_VERSION (name)];
      if (val)
	name = val;
    }
  /* We continue matching along SSA use-def edges for SSA names
     that are not single-use.  Currently there are no patterns
     that would cause any issues with that.  */
  return name;
}

/* Main entry point for the forward propagation and statement combine
   optimizer.  */

namespace {

const pass_data pass_data_forwprop =
{
  GIMPLE_PASS, /* type */
  "forwprop", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_FORWPROP, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_forwprop : public gimple_opt_pass
{
public:
  pass_forwprop (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_forwprop, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () final override { return new pass_forwprop (m_ctxt); }
  bool gate (function *) final override { return flag_tree_forwprop; }
  unsigned int execute (function *) final override;

}; // class pass_forwprop

unsigned int
pass_forwprop::execute (function *fun)
{
  unsigned int todoflags = 0;

  cfg_changed = false;

  /* Combine stmts with the stmts defining their operands.  Do that
     in an order that guarantees visiting SSA defs before SSA uses.  */
  lattice.create (num_ssa_names);
  lattice.quick_grow_cleared (num_ssa_names);
  int *postorder = XNEWVEC (int, n_basic_blocks_for_fn (fun));
  int postorder_num = pre_and_rev_post_order_compute_fn (fun, NULL,
							  postorder, false);
  int *bb_to_rpo = XNEWVEC (int, last_basic_block_for_fn (fun));
  for (int i = 0; i < postorder_num; ++i)
    {
      bb_to_rpo[postorder[i]] = i;
      edge_iterator ei;
      edge e;
      FOR_EACH_EDGE (e, ei, BASIC_BLOCK_FOR_FN (fun, postorder[i])->succs)
	e->flags &= ~EDGE_EXECUTABLE;
    }
  single_succ_edge (BASIC_BLOCK_FOR_FN (fun, ENTRY_BLOCK))->flags
    |= EDGE_EXECUTABLE;
  auto_vec<gimple *, 4> to_fixup;
  auto_vec<gimple *, 32> to_remove;
  to_purge = BITMAP_ALLOC (NULL);
  bitmap need_ab_cleanup = BITMAP_ALLOC (NULL);
  for (int i = 0; i < postorder_num; ++i)
    {
      gimple_stmt_iterator gsi;
      basic_block bb = BASIC_BLOCK_FOR_FN (fun, postorder[i]);
      edge_iterator ei;
      edge e;

      /* Skip processing not executable blocks.  We could improve
	 single_use tracking by at least unlinking uses from unreachable
	 blocks but since blocks with uses are not processed in a
	 meaningful order this is probably not worth it.  */
      bool any = false;
      FOR_EACH_EDGE (e, ei, bb->preds)
	{
	  if ((e->flags & EDGE_EXECUTABLE)
	      /* With dominators we could improve backedge handling
		 when e->src is dominated by bb.  But for irreducible
		 regions we have to take all backedges conservatively.
		 We can handle single-block cycles as we know the
		 dominator relationship here.  */
	      || bb_to_rpo[e->src->index] > i)
	    {
	      any = true;
	      break;
	    }
	}
      if (!any)
	continue;

      /* Record degenerate PHIs in the lattice.  */
      for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
	   gsi_next (&si))
	{
	  gphi *phi = si.phi ();
	  tree res = gimple_phi_result (phi);
	  if (virtual_operand_p (res))
	    continue;

	  tree first = NULL_TREE;
	  bool all_same = true;
	  edge_iterator ei;
	  edge e;
	  FOR_EACH_EDGE (e, ei, bb->preds)
	    {
	      /* Ignore not executable forward edges.  */
	      if (!(e->flags & EDGE_EXECUTABLE))
		{
		  if (bb_to_rpo[e->src->index] < i)
		    continue;
		  /* Avoid equivalences from backedges - while we might
		     be able to make irreducible regions reducible and
		     thus turning a back into a forward edge we do not
		     want to deal with the intermediate SSA issues that
		     exposes.  */
		  all_same = false;
		}
	      tree use = PHI_ARG_DEF_FROM_EDGE (phi, e);
	      if (use == res)
		/* The PHI result can also appear on a backedge, if so
		   we can ignore this case for the purpose of determining
		   the singular value.  */
		;
	      else if (! first)
		first = use;
	      else if (! operand_equal_p (first, use, 0))
		{
		  all_same = false;
		  break;
		}
	    }
	  if (all_same)
	    {
	      if (may_propagate_copy (res, first))
		to_remove.safe_push (phi);
	      fwprop_set_lattice_val (res, first);
	    }
	}

      /* Apply forward propagation to all stmts in the basic-block.
	 Note we update GSI within the loop as necessary.  */
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
	{
	  gimple *stmt = gsi_stmt (gsi);
	  tree lhs, rhs;
	  enum tree_code code;

	  if (!is_gimple_assign (stmt))
	    {
	      gsi_next (&gsi);
	      continue;
	    }

	  lhs = gimple_assign_lhs (stmt);
	  rhs = gimple_assign_rhs1 (stmt);
	  code = gimple_assign_rhs_code (stmt);
	  if (TREE_CODE (lhs) != SSA_NAME
	      || has_zero_uses (lhs))
	    {
	      gsi_next (&gsi);
	      continue;
	    }

	  /* If this statement sets an SSA_NAME to an address,
	     try to propagate the address into the uses of the SSA_NAME.  */
	  if ((code == ADDR_EXPR
	       /* Handle pointer conversions on invariant addresses
		  as well, as this is valid gimple.  */
	       || (CONVERT_EXPR_CODE_P (code)
		   && TREE_CODE (rhs) == ADDR_EXPR
		   && POINTER_TYPE_P (TREE_TYPE (lhs))))
	      && TREE_CODE (TREE_OPERAND (rhs, 0)) != TARGET_MEM_REF)
	    {
	      tree base = get_base_address (TREE_OPERAND (rhs, 0));
	      if ((!base
		   || !DECL_P (base)
		   || decl_address_invariant_p (base))
		  && !stmt_references_abnormal_ssa_name (stmt)
		  && forward_propagate_addr_expr (lhs, rhs, true))
		{
		  fwprop_invalidate_lattice (gimple_get_lhs (stmt));
		  release_defs (stmt);
		  gsi_remove (&gsi, true);
		}
	      else
		gsi_next (&gsi);
	    }
	  else if (code == POINTER_PLUS_EXPR)
	    {
	      tree off = gimple_assign_rhs2 (stmt);
	      if (TREE_CODE (off) == INTEGER_CST
		  && can_propagate_from (stmt)
		  && !simple_iv_increment_p (stmt)
		  /* ???  Better adjust the interface to that function
		     instead of building new trees here.  */
		  && forward_propagate_addr_expr
		       (lhs,
			build1_loc (gimple_location (stmt),
				    ADDR_EXPR, TREE_TYPE (rhs),
				    fold_build2 (MEM_REF,
						 TREE_TYPE (TREE_TYPE (rhs)),
						 rhs,
						 fold_convert (ptr_type_node,
							       off))), true))
		{
		  fwprop_invalidate_lattice (gimple_get_lhs (stmt));
		  release_defs (stmt);
		  gsi_remove (&gsi, true);
		}
	      else if (is_gimple_min_invariant (rhs))
		{
		  /* Make sure to fold &a[0] + off_1 here.  */
		  fold_stmt_inplace (&gsi);
		  update_stmt (stmt);
		  if (gimple_assign_rhs_code (stmt) == POINTER_PLUS_EXPR)
		    gsi_next (&gsi);
		}
	      else
		gsi_next (&gsi);
	    }
	  else if (TREE_CODE (TREE_TYPE (lhs)) == COMPLEX_TYPE
		   && gimple_assign_load_p (stmt)
		   && !gimple_has_volatile_ops (stmt)
		   && (TREE_CODE (gimple_assign_rhs1 (stmt))
		       != TARGET_MEM_REF)
		   && !stmt_can_throw_internal (fun, stmt))
	    {
	      /* Rewrite loads used only in real/imagpart extractions to
		 component-wise loads.  */
	      use_operand_p use_p;
	      imm_use_iterator iter;
	      bool rewrite = true;
	      FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
		{
		  gimple *use_stmt = USE_STMT (use_p);
		  if (is_gimple_debug (use_stmt))
		    continue;
		  if (!is_gimple_assign (use_stmt)
		      || (gimple_assign_rhs_code (use_stmt) != REALPART_EXPR
			  && gimple_assign_rhs_code (use_stmt) != IMAGPART_EXPR)
		      || TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0) != lhs)
		    {
		      rewrite = false;
		      break;
		    }
		}
	      if (rewrite)
		{
		  gimple *use_stmt;
		  FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
		    {
		      if (is_gimple_debug (use_stmt))
			{
			  if (gimple_debug_bind_p (use_stmt))
			    {
			      gimple_debug_bind_reset_value (use_stmt);
			      update_stmt (use_stmt);
			    }
			  continue;
			}

		      tree new_rhs = build1 (gimple_assign_rhs_code (use_stmt),
					     TREE_TYPE (TREE_TYPE (rhs)),
					     unshare_expr (rhs));
		      gimple *new_stmt
			= gimple_build_assign (gimple_assign_lhs (use_stmt),
					       new_rhs);

		      location_t loc = gimple_location (use_stmt);
		      gimple_set_location (new_stmt, loc);
		      gimple_stmt_iterator gsi2 = gsi_for_stmt (use_stmt);
		      unlink_stmt_vdef (use_stmt);
		      gsi_remove (&gsi2, true);

		      gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
		    }

		  release_defs (stmt);
		  gsi_remove (&gsi, true);
		}
	      else
		gsi_next (&gsi);
	    }
	  else if (TREE_CODE (TREE_TYPE (lhs)) == VECTOR_TYPE
		   && (TYPE_MODE (TREE_TYPE (lhs)) == BLKmode
		       /* After vector lowering rewrite all loads, but
			  initially do not since this conflicts with
			  vector CONSTRUCTOR to shuffle optimization.  */
		       || (fun->curr_properties & PROP_gimple_lvec))
		   && gimple_assign_load_p (stmt)
		   && !gimple_has_volatile_ops (stmt)
		   && !stmt_can_throw_internal (fun, stmt)
		   && (!VAR_P (rhs) || !DECL_HARD_REGISTER (rhs)))
	    optimize_vector_load (&gsi);

	  else if (code == COMPLEX_EXPR)
	    {
	      /* Rewrite stores of a single-use complex build expression
		 to component-wise stores.  */
	      use_operand_p use_p;
	      gimple *use_stmt, *def1, *def2;
	      tree rhs2;
	      if (single_imm_use (lhs, &use_p, &use_stmt)
		  && gimple_store_p (use_stmt)
		  && !gimple_has_volatile_ops (use_stmt)
		  && is_gimple_assign (use_stmt)
		  && (TREE_CODE (gimple_assign_lhs (use_stmt))
		      != TARGET_MEM_REF))
		{
		  tree use_lhs = gimple_assign_lhs (use_stmt);
		  if (auto_var_p (use_lhs))
		    DECL_NOT_GIMPLE_REG_P (use_lhs) = 1;
		  tree new_lhs = build1 (REALPART_EXPR,
					 TREE_TYPE (TREE_TYPE (use_lhs)),
					 unshare_expr (use_lhs));
		  gimple *new_stmt = gimple_build_assign (new_lhs, rhs);
		  location_t loc = gimple_location (use_stmt);
		  gimple_set_location (new_stmt, loc);
		  gimple_set_vuse (new_stmt, gimple_vuse (use_stmt));
		  gimple_set_vdef (new_stmt, make_ssa_name (gimple_vop (fun)));
		  SSA_NAME_DEF_STMT (gimple_vdef (new_stmt)) = new_stmt;
		  gimple_set_vuse (use_stmt, gimple_vdef (new_stmt));
		  gimple_stmt_iterator gsi2 = gsi_for_stmt (use_stmt);
		  gsi_insert_before (&gsi2, new_stmt, GSI_SAME_STMT);

		  new_lhs = build1 (IMAGPART_EXPR,
				    TREE_TYPE (TREE_TYPE (use_lhs)),
				    unshare_expr (use_lhs));
		  gimple_assign_set_lhs (use_stmt, new_lhs);
		  gimple_assign_set_rhs1 (use_stmt, gimple_assign_rhs2 (stmt));
		  update_stmt (use_stmt);

		  release_defs (stmt);
		  gsi_remove (&gsi, true);
		}
	      /* Rewrite a component-wise load of a complex to a complex
		 load if the components are not used separately.  */
	      else if (TREE_CODE (rhs) == SSA_NAME
		       && has_single_use (rhs)
		       && ((rhs2 = gimple_assign_rhs2 (stmt)), true)
		       && TREE_CODE (rhs2) == SSA_NAME
		       && has_single_use (rhs2)
		       && (def1 = SSA_NAME_DEF_STMT (rhs),
			   gimple_assign_load_p (def1))
		       && (def2 = SSA_NAME_DEF_STMT (rhs2),
			   gimple_assign_load_p (def2))
		       && (gimple_vuse (def1) == gimple_vuse (def2))
		       && !gimple_has_volatile_ops (def1)
		       && !gimple_has_volatile_ops (def2)
		       && !stmt_can_throw_internal (fun, def1)
		       && !stmt_can_throw_internal (fun, def2)
		       && gimple_assign_rhs_code (def1) == REALPART_EXPR
		       && gimple_assign_rhs_code (def2) == IMAGPART_EXPR
		       && operand_equal_p (TREE_OPERAND (gimple_assign_rhs1
							   (def1), 0),
					   TREE_OPERAND (gimple_assign_rhs1
							   (def2), 0), 0))
		{
		  tree cl = TREE_OPERAND (gimple_assign_rhs1 (def1), 0);
		  gimple_assign_set_rhs_from_tree (&gsi, unshare_expr (cl));
		  gcc_assert (gsi_stmt (gsi) == stmt);
		  gimple_set_vuse (stmt, gimple_vuse (def1));
		  gimple_set_modified (stmt, true);
		  gimple_stmt_iterator gsi2 = gsi_for_stmt (def1);
		  gsi_remove (&gsi, false);
		  gsi_insert_after (&gsi2, stmt, GSI_SAME_STMT);
		}
	      else
		gsi_next (&gsi);
	    }
	  else if (code == CONSTRUCTOR
		   && VECTOR_TYPE_P (TREE_TYPE (rhs))
		   && TYPE_MODE (TREE_TYPE (rhs)) == BLKmode
		   && CONSTRUCTOR_NELTS (rhs) > 0
		   && (!VECTOR_TYPE_P (TREE_TYPE (CONSTRUCTOR_ELT (rhs, 0)->value))
		       || (TYPE_MODE (TREE_TYPE (CONSTRUCTOR_ELT (rhs, 0)->value))
			   == BLKmode)))
	    {
	      /* Rewrite stores of a single-use vector constructors
		 to component-wise stores if the mode isn't supported.  */
	      use_operand_p use_p;
	      gimple *use_stmt;
	      if (single_imm_use (lhs, &use_p, &use_stmt)
		  && gimple_store_p (use_stmt)
		  && !gimple_has_volatile_ops (use_stmt)
		  && !stmt_can_throw_internal (fun, use_stmt)
		  && is_gimple_assign (use_stmt)
		  && (TREE_CODE (gimple_assign_lhs (use_stmt))
		      != TARGET_MEM_REF))
		{
		  tree elt_t = TREE_TYPE (CONSTRUCTOR_ELT (rhs, 0)->value);
		  unsigned HOST_WIDE_INT elt_w
		    = tree_to_uhwi (TYPE_SIZE (elt_t));
		  unsigned HOST_WIDE_INT n
		    = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (rhs)));
		  tree use_lhs = gimple_assign_lhs (use_stmt);
		  if (auto_var_p (use_lhs))
		    DECL_NOT_GIMPLE_REG_P (use_lhs) = 1;
		  for (unsigned HOST_WIDE_INT bi = 0; bi < n; bi += elt_w)
		    {
		      unsigned HOST_WIDE_INT ci = bi / elt_w;
		      tree new_rhs;
		      if (ci < CONSTRUCTOR_NELTS (rhs))
			new_rhs = CONSTRUCTOR_ELT (rhs, ci)->value;
		      else
			new_rhs = build_zero_cst (elt_t);
		      tree new_lhs = build3 (BIT_FIELD_REF,
					     elt_t,
					     unshare_expr (use_lhs),
					     bitsize_int (elt_w),
					     bitsize_int (bi));
		      gimple *new_stmt = gimple_build_assign (new_lhs, new_rhs);
		      location_t loc = gimple_location (use_stmt);
		      gimple_set_location (new_stmt, loc);
		      gimple_set_vuse (new_stmt, gimple_vuse (use_stmt));
		      gimple_set_vdef (new_stmt,
				       make_ssa_name (gimple_vop (fun)));
		      SSA_NAME_DEF_STMT (gimple_vdef (new_stmt)) = new_stmt;
		      gimple_set_vuse (use_stmt, gimple_vdef (new_stmt));
		      gimple_stmt_iterator gsi2 = gsi_for_stmt (use_stmt);
		      gsi_insert_before (&gsi2, new_stmt, GSI_SAME_STMT);
		    }
		  gimple_stmt_iterator gsi2 = gsi_for_stmt (use_stmt);
		  unlink_stmt_vdef (use_stmt);
		  release_defs (use_stmt);
		  gsi_remove (&gsi2, true);
		  release_defs (stmt);
		  gsi_remove (&gsi, true);
		}
	      else
		gsi_next (&gsi);
	    }
	  else
	    gsi_next (&gsi);
	}

      /* Combine stmts with the stmts defining their operands.
	 Note we update GSI within the loop as necessary.  */
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);

	  /* Mark stmt as potentially needing revisiting.  */
	  gimple_set_plf (stmt, GF_PLF_1, false);

	  bool can_make_abnormal_goto = (is_gimple_call (stmt)
					 && stmt_can_make_abnormal_goto (stmt));

	  /* Substitute from our lattice.  We need to do so only once.  */
	  bool substituted_p = false;
	  use_operand_p usep;
	  ssa_op_iter iter;
	  FOR_EACH_SSA_USE_OPERAND (usep, stmt, iter, SSA_OP_USE)
	    {
	      tree use = USE_FROM_PTR (usep);
	      tree val = fwprop_ssa_val (use);
	      if (val && val != use && may_propagate_copy (use, val))
		{
		  propagate_value (usep, val);
		  substituted_p = true;
		}
	    }
	  if (substituted_p
	      && is_gimple_assign (stmt)
	      && gimple_assign_rhs_code (stmt) == ADDR_EXPR)
	    recompute_tree_invariant_for_addr_expr (gimple_assign_rhs1 (stmt));
	  if (substituted_p
	      && can_make_abnormal_goto
	      && !stmt_can_make_abnormal_goto (stmt))
	    bitmap_set_bit (need_ab_cleanup, bb->index);

	  bool changed;
	  do
	    {
	      gimple *orig_stmt = stmt = gsi_stmt (gsi);
	      bool was_noreturn = (is_gimple_call (stmt)
				   && gimple_call_noreturn_p (stmt));
	      changed = false;

	      if (fold_stmt (&gsi, fwprop_ssa_val))
		{
		  changed = true;
		  stmt = gsi_stmt (gsi);
		  /* Cleanup the CFG if we simplified a condition to
		     true or false.  */
		  if (gcond *cond = dyn_cast <gcond *> (stmt))
		    if (gimple_cond_true_p (cond)
			|| gimple_cond_false_p (cond))
		      cfg_changed = true;
		}

	      if (changed || substituted_p)
		{
		  if (maybe_clean_or_replace_eh_stmt (orig_stmt, stmt))
		    bitmap_set_bit (to_purge, bb->index);
		  if (!was_noreturn
		      && is_gimple_call (stmt) && gimple_call_noreturn_p (stmt))
		    to_fixup.safe_push (stmt);
		  substituted_p = false;
		}

	      switch (gimple_code (stmt))
		{
		case GIMPLE_ASSIGN:
		  {
		    tree rhs1 = gimple_assign_rhs1 (stmt);
		    enum tree_code code = gimple_assign_rhs_code (stmt);

		    if (TREE_CODE_CLASS (code) == tcc_comparison)
		      {
			int did_something;
			did_something = forward_propagate_into_comparison (&gsi);
			if (maybe_clean_or_replace_eh_stmt (stmt, gsi_stmt (gsi)))
			  bitmap_set_bit (to_purge, bb->index);
			if (did_something == 2)
			  cfg_changed = true;
			changed = did_something != 0;
		      }
		    else if ((code == PLUS_EXPR
			      || code == BIT_IOR_EXPR
			      || code == BIT_XOR_EXPR)
			     && simplify_rotate (&gsi))
		      changed = true;
		    else if (code == VEC_PERM_EXPR)
		      {
			int did_something = simplify_permutation (&gsi);
			if (did_something == 2)
			  cfg_changed = true;
			changed = did_something != 0;
		      }
		    else if (code == BIT_FIELD_REF)
		      changed = simplify_bitfield_ref (&gsi);
		    else if (code == CONSTRUCTOR
			     && TREE_CODE (TREE_TYPE (rhs1)) == VECTOR_TYPE)
		      changed = simplify_vector_constructor (&gsi);
		    else if (code == ARRAY_REF)
		      changed = simplify_count_trailing_zeroes (&gsi);
		    break;
		  }

		case GIMPLE_SWITCH:
		  changed = simplify_gimple_switch (as_a <gswitch *> (stmt));
		  break;

		case GIMPLE_COND:
		  {
		    int did_something = forward_propagate_into_gimple_cond
					  (as_a <gcond *> (stmt));
		    if (did_something == 2)
		      cfg_changed = true;
		    changed = did_something != 0;
		    break;
		  }

		case GIMPLE_CALL:
		  {
		    tree callee = gimple_call_fndecl (stmt);
		    if (callee != NULL_TREE
			&& fndecl_built_in_p (callee, BUILT_IN_NORMAL))
		      changed = simplify_builtin_call (&gsi, callee);
		    break;
		  }

		default:;
		}

	      if (changed)
		{
		  /* If the stmt changed then re-visit it and the statements
		     inserted before it.  */
		  for (; !gsi_end_p (gsi); gsi_prev (&gsi))
		    if (gimple_plf (gsi_stmt (gsi), GF_PLF_1))
		      break;
		  if (gsi_end_p (gsi))
		    gsi = gsi_start_bb (bb);
		  else
		    gsi_next (&gsi);
		}
	    }
	  while (changed);

	  /* Stmt no longer needs to be revisited.  */
	  stmt = gsi_stmt (gsi);
	  gcc_checking_assert (!gimple_plf (stmt, GF_PLF_1));
	  gimple_set_plf (stmt, GF_PLF_1, true);

	  /* Fill up the lattice.  */
	  if (gimple_assign_single_p (stmt))
	    {
	      tree lhs = gimple_assign_lhs (stmt);
	      tree rhs = gimple_assign_rhs1 (stmt);
	      if (TREE_CODE (lhs) == SSA_NAME)
		{
		  tree val = lhs;
		  if (TREE_CODE (rhs) == SSA_NAME)
		    val = fwprop_ssa_val (rhs);
		  else if (is_gimple_min_invariant (rhs))
		    val = rhs;
		  /* If we can propagate the lattice-value mark the
		     stmt for removal.  */
		  if (val != lhs
		      && may_propagate_copy (lhs, val))
		    to_remove.safe_push (stmt);
		  fwprop_set_lattice_val (lhs, val);
		}
	    }
	  else if (gimple_nop_p (stmt))
	    to_remove.safe_push (stmt);
	}

      /* Substitute in destination PHI arguments.  */
      FOR_EACH_EDGE (e, ei, bb->succs)
	for (gphi_iterator gsi = gsi_start_phis (e->dest);
	     !gsi_end_p (gsi); gsi_next (&gsi))
	  {
	    gphi *phi = gsi.phi ();
	    use_operand_p use_p = PHI_ARG_DEF_PTR_FROM_EDGE (phi, e);
	    tree arg = USE_FROM_PTR (use_p);
	    if (TREE_CODE (arg) != SSA_NAME
		|| virtual_operand_p (arg))
	      continue;
	    tree val = fwprop_ssa_val (arg);
	    if (val != arg
		&& may_propagate_copy (arg, val))
	      propagate_value (use_p, val);
	  }

      /* Mark outgoing executable edges.  */
      if (edge e = find_taken_edge (bb, NULL))
	{
	  e->flags |= EDGE_EXECUTABLE;
	  if (EDGE_COUNT (bb->succs) > 1)
	    cfg_changed = true;
	}
      else
	{
	  FOR_EACH_EDGE (e, ei, bb->succs)
	    e->flags |= EDGE_EXECUTABLE;
	}
    }
  free (postorder);
  free (bb_to_rpo);
  lattice.release ();

  /* Remove stmts in reverse order to make debug stmt creation possible.  */
  while (!to_remove.is_empty ())
    {
      gimple *stmt = to_remove.pop ();
      /* For example remove_prop_source_from_use can remove stmts queued
	 for removal.  Deal with this gracefully.  */
      if (!gimple_bb (stmt))
	continue;
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Removing dead stmt ");
	  print_gimple_stmt (dump_file, stmt, 0);
	  fprintf (dump_file, "\n");
	}
      gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
      if (gimple_code (stmt) == GIMPLE_PHI)
	remove_phi_node (&gsi, true);
      else
	{
	  unlink_stmt_vdef (stmt);
	  gsi_remove (&gsi, true);
	  release_defs (stmt);
	}
    }

  /* Fixup stmts that became noreturn calls.  This may require splitting
     blocks and thus isn't possible during the walk.  Do this
     in reverse order so we don't inadvertently remove a stmt we want to
     fixup by visiting a dominating now noreturn call first.  */
  while (!to_fixup.is_empty ())
    {
      gimple *stmt = to_fixup.pop ();
      if (dump_file && dump_flags & TDF_DETAILS)
	{
	  fprintf (dump_file, "Fixing up noreturn call ");
	  print_gimple_stmt (dump_file, stmt, 0);
	  fprintf (dump_file, "\n");
	}
      cfg_changed |= fixup_noreturn_call (stmt);
    }

  cfg_changed |= gimple_purge_all_dead_eh_edges (to_purge);
  cfg_changed |= gimple_purge_all_dead_abnormal_call_edges (need_ab_cleanup);
  BITMAP_FREE (to_purge);
  BITMAP_FREE (need_ab_cleanup);

  if (get_range_query (fun) != get_global_range_query ())
    disable_ranger (fun);

  if (cfg_changed)
    todoflags |= TODO_cleanup_cfg;

  return todoflags;
}

} // anon namespace

gimple_opt_pass *
make_pass_forwprop (gcc::context *ctxt)
{
  return new pass_forwprop (ctxt);
}