1 /* If-conversion for vectorizer.
2 Copyright (C) 2004-2021 Free Software Foundation, Inc.
3 Contributed by Devang Patel <dpatel@apple.com>
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 /* This pass implements a tree level if-conversion of loops. Its
22 initial goal is to help the vectorizer to vectorize loops with
25 A short description of if-conversion:
27 o Decide if a loop is if-convertible or not.
28 o Walk all loop basic blocks in breadth first order (BFS order).
29 o Remove conditional statements (at the end of basic block)
30 and propagate condition into destination basic blocks'
32 o Replace modify expression with conditional modify expression
33 using current basic block's condition.
34 o Merge all basic blocks
35 o Replace phi nodes with conditional modify expr
36 o Merge all basic blocks into header
38 Sample transformation:
43 # i_23 = PHI <0(0), i_18(10)>;
46 if (j_15 > 41) goto <L1>; else goto <L17>;
53 # iftmp.2_4 = PHI <0(8), 42(2)>;
57 if (i_18 <= 15) goto <L19>; else goto <L18>;
67 # i_23 = PHI <0(0), i_18(10)>;
72 iftmp.2_4 = j_15 > 41 ? 42 : 0;
75 if (i_18 <= 15) goto <L19>; else goto <L18>;
85 #include "coretypes.h"
91 #include "tree-pass.h"
94 #include "optabs-query.h"
95 #include "gimple-pretty-print.h"
97 #include "fold-const.h"
98 #include "stor-layout.h"
99 #include "gimple-fold.h"
100 #include "gimplify.h"
101 #include "gimple-iterator.h"
102 #include "gimplify-me.h"
103 #include "tree-cfg.h"
104 #include "tree-into-ssa.h"
105 #include "tree-ssa.h"
107 #include "tree-data-ref.h"
108 #include "tree-scalar-evolution.h"
109 #include "tree-ssa-loop.h"
110 #include "tree-ssa-loop-niter.h"
111 #include "tree-ssa-loop-ivopts.h"
112 #include "tree-ssa-address.h"
114 #include "tree-hash-traits.h"
116 #include "builtins.h"
118 #include "internal-fn.h"
119 #include "fold-const.h"
120 #include "tree-ssa-sccvn.h"
121 #include "tree-cfgcleanup.h"
122 #include "tree-ssa-dse.h"
124 /* Only handle PHIs with no more arguments unless we are asked to by
126 #define MAX_PHI_ARG_NUM \
127 ((unsigned) param_max_tree_if_conversion_phi_args)
129 /* True if we've converted a statement that was only executed when some
130 condition C was true, and if for correctness we need to predicate the
131 statement to ensure that it is a no-op when C is false. See
132 predicate_statements for the kinds of predication we support. */
133 static bool need_to_predicate
;
135 /* Indicate if there are any complicated PHIs that need to be handled in
136 if-conversion. Complicated PHI has more than two arguments and can't
137 be degenerated to two arguments PHI. See more information in comment
138 before phi_convertible_by_degenerating_args. */
139 static bool any_complicated_phi
;
141 /* Hash for struct innermost_loop_behavior. It depends on the user to
144 struct innermost_loop_behavior_hash
: nofree_ptr_hash
<innermost_loop_behavior
>
146 static inline hashval_t
hash (const value_type
&);
147 static inline bool equal (const value_type
&,
148 const compare_type
&);
152 innermost_loop_behavior_hash::hash (const value_type
&e
)
156 hash
= iterative_hash_expr (e
->base_address
, 0);
157 hash
= iterative_hash_expr (e
->offset
, hash
);
158 hash
= iterative_hash_expr (e
->init
, hash
);
159 return iterative_hash_expr (e
->step
, hash
);
163 innermost_loop_behavior_hash::equal (const value_type
&e1
,
164 const compare_type
&e2
)
166 if ((e1
->base_address
&& !e2
->base_address
)
167 || (!e1
->base_address
&& e2
->base_address
)
168 || (!e1
->offset
&& e2
->offset
)
169 || (e1
->offset
&& !e2
->offset
)
170 || (!e1
->init
&& e2
->init
)
171 || (e1
->init
&& !e2
->init
)
172 || (!e1
->step
&& e2
->step
)
173 || (e1
->step
&& !e2
->step
))
176 if (e1
->base_address
&& e2
->base_address
177 && !operand_equal_p (e1
->base_address
, e2
->base_address
, 0))
179 if (e1
->offset
&& e2
->offset
180 && !operand_equal_p (e1
->offset
, e2
->offset
, 0))
182 if (e1
->init
&& e2
->init
183 && !operand_equal_p (e1
->init
, e2
->init
, 0))
185 if (e1
->step
&& e2
->step
186 && !operand_equal_p (e1
->step
, e2
->step
, 0))
192 /* List of basic blocks in if-conversion-suitable order. */
193 static basic_block
*ifc_bbs
;
195 /* Hash table to store <DR's innermost loop behavior, DR> pairs. */
196 static hash_map
<innermost_loop_behavior_hash
,
197 data_reference_p
> *innermost_DR_map
;
199 /* Hash table to store <base reference, DR> pairs. */
200 static hash_map
<tree_operand_hash
, data_reference_p
> *baseref_DR_map
;
202 /* List of redundant SSA names: the first should be replaced by the second. */
203 static vec
< std::pair
<tree
, tree
> > redundant_ssa_names
;
205 /* Structure used to predicate basic blocks. This is attached to the
206 ->aux field of the BBs in the loop to be if-converted. */
207 struct bb_predicate
{
209 /* The condition under which this basic block is executed. */
212 /* PREDICATE is gimplified, and the sequence of statements is
213 recorded here, in order to avoid the duplication of computations
214 that occur in previous conditions. See PR44483. */
215 gimple_seq predicate_gimplified_stmts
;
218 /* Returns true when the basic block BB has a predicate. */
221 bb_has_predicate (basic_block bb
)
223 return bb
->aux
!= NULL
;
226 /* Returns the gimplified predicate for basic block BB. */
229 bb_predicate (basic_block bb
)
231 return ((struct bb_predicate
*) bb
->aux
)->predicate
;
234 /* Sets the gimplified predicate COND for basic block BB. */
237 set_bb_predicate (basic_block bb
, tree cond
)
239 gcc_assert ((TREE_CODE (cond
) == TRUTH_NOT_EXPR
240 && is_gimple_condexpr (TREE_OPERAND (cond
, 0)))
241 || is_gimple_condexpr (cond
));
242 ((struct bb_predicate
*) bb
->aux
)->predicate
= cond
;
245 /* Returns the sequence of statements of the gimplification of the
246 predicate for basic block BB. */
248 static inline gimple_seq
249 bb_predicate_gimplified_stmts (basic_block bb
)
251 return ((struct bb_predicate
*) bb
->aux
)->predicate_gimplified_stmts
;
254 /* Sets the sequence of statements STMTS of the gimplification of the
255 predicate for basic block BB. */
258 set_bb_predicate_gimplified_stmts (basic_block bb
, gimple_seq stmts
)
260 ((struct bb_predicate
*) bb
->aux
)->predicate_gimplified_stmts
= stmts
;
263 /* Adds the sequence of statements STMTS to the sequence of statements
264 of the predicate for basic block BB. */
267 add_bb_predicate_gimplified_stmts (basic_block bb
, gimple_seq stmts
)
269 /* We might have updated some stmts in STMTS via force_gimple_operand
270 calling fold_stmt and that producing multiple stmts. Delink immediate
271 uses so update_ssa after loop versioning doesn't get confused for
272 the not yet inserted predicates.
273 ??? This should go away once we reliably avoid updating stmts
275 for (gimple_stmt_iterator gsi
= gsi_start (stmts
);
276 !gsi_end_p (gsi
); gsi_next (&gsi
))
278 gimple
*stmt
= gsi_stmt (gsi
);
279 delink_stmt_imm_use (stmt
);
280 gimple_set_modified (stmt
, true);
282 gimple_seq_add_seq_without_update
283 (&(((struct bb_predicate
*) bb
->aux
)->predicate_gimplified_stmts
), stmts
);
286 /* Initializes to TRUE the predicate of basic block BB. */
289 init_bb_predicate (basic_block bb
)
291 bb
->aux
= XNEW (struct bb_predicate
);
292 set_bb_predicate_gimplified_stmts (bb
, NULL
);
293 set_bb_predicate (bb
, boolean_true_node
);
296 /* Release the SSA_NAMEs associated with the predicate of basic block BB. */
299 release_bb_predicate (basic_block bb
)
301 gimple_seq stmts
= bb_predicate_gimplified_stmts (bb
);
304 /* Ensure that these stmts haven't yet been added to a bb. */
306 for (gimple_stmt_iterator i
= gsi_start (stmts
);
307 !gsi_end_p (i
); gsi_next (&i
))
308 gcc_assert (! gimple_bb (gsi_stmt (i
)));
311 gimple_seq_discard (stmts
);
312 set_bb_predicate_gimplified_stmts (bb
, NULL
);
316 /* Free the predicate of basic block BB. */
319 free_bb_predicate (basic_block bb
)
321 if (!bb_has_predicate (bb
))
324 release_bb_predicate (bb
);
329 /* Reinitialize predicate of BB with the true predicate. */
332 reset_bb_predicate (basic_block bb
)
334 if (!bb_has_predicate (bb
))
335 init_bb_predicate (bb
);
338 release_bb_predicate (bb
);
339 set_bb_predicate (bb
, boolean_true_node
);
343 /* Returns a new SSA_NAME of type TYPE that is assigned the value of
344 the expression EXPR. Inserts the statement created for this
345 computation before GSI and leaves the iterator GSI at the same
349 ifc_temp_var (tree type
, tree expr
, gimple_stmt_iterator
*gsi
)
351 tree new_name
= make_temp_ssa_name (type
, NULL
, "_ifc_");
352 gimple
*stmt
= gimple_build_assign (new_name
, expr
);
353 gimple_set_vuse (stmt
, gimple_vuse (gsi_stmt (*gsi
)));
354 gsi_insert_before (gsi
, stmt
, GSI_SAME_STMT
);
358 /* Return true when COND is a false predicate. */
361 is_false_predicate (tree cond
)
363 return (cond
!= NULL_TREE
364 && (cond
== boolean_false_node
365 || integer_zerop (cond
)));
368 /* Return true when COND is a true predicate. */
371 is_true_predicate (tree cond
)
373 return (cond
== NULL_TREE
374 || cond
== boolean_true_node
375 || integer_onep (cond
));
378 /* Returns true when BB has a predicate that is not trivial: true or
382 is_predicated (basic_block bb
)
384 return !is_true_predicate (bb_predicate (bb
));
387 /* Parses the predicate COND and returns its comparison code and
388 operands OP0 and OP1. */
390 static enum tree_code
391 parse_predicate (tree cond
, tree
*op0
, tree
*op1
)
395 if (TREE_CODE (cond
) == SSA_NAME
396 && is_gimple_assign (s
= SSA_NAME_DEF_STMT (cond
)))
398 if (TREE_CODE_CLASS (gimple_assign_rhs_code (s
)) == tcc_comparison
)
400 *op0
= gimple_assign_rhs1 (s
);
401 *op1
= gimple_assign_rhs2 (s
);
402 return gimple_assign_rhs_code (s
);
405 else if (gimple_assign_rhs_code (s
) == TRUTH_NOT_EXPR
)
407 tree op
= gimple_assign_rhs1 (s
);
408 tree type
= TREE_TYPE (op
);
409 enum tree_code code
= parse_predicate (op
, op0
, op1
);
411 return code
== ERROR_MARK
? ERROR_MARK
412 : invert_tree_comparison (code
, HONOR_NANS (type
));
418 if (COMPARISON_CLASS_P (cond
))
420 *op0
= TREE_OPERAND (cond
, 0);
421 *op1
= TREE_OPERAND (cond
, 1);
422 return TREE_CODE (cond
);
428 /* Returns the fold of predicate C1 OR C2 at location LOC. */
431 fold_or_predicates (location_t loc
, tree c1
, tree c2
)
433 tree op1a
, op1b
, op2a
, op2b
;
434 enum tree_code code1
= parse_predicate (c1
, &op1a
, &op1b
);
435 enum tree_code code2
= parse_predicate (c2
, &op2a
, &op2b
);
437 if (code1
!= ERROR_MARK
&& code2
!= ERROR_MARK
)
439 tree t
= maybe_fold_or_comparisons (boolean_type_node
, code1
, op1a
, op1b
,
445 return fold_build2_loc (loc
, TRUTH_OR_EXPR
, boolean_type_node
, c1
, c2
);
448 /* Returns either a COND_EXPR or the folded expression if the folded
449 expression is a MIN_EXPR, a MAX_EXPR, an ABS_EXPR,
450 a constant or a SSA_NAME. */
453 fold_build_cond_expr (tree type
, tree cond
, tree rhs
, tree lhs
)
455 tree rhs1
, lhs1
, cond_expr
;
457 /* If COND is comparison r != 0 and r has boolean type, convert COND
458 to SSA_NAME to accept by vect bool pattern. */
459 if (TREE_CODE (cond
) == NE_EXPR
)
461 tree op0
= TREE_OPERAND (cond
, 0);
462 tree op1
= TREE_OPERAND (cond
, 1);
463 if (TREE_CODE (op0
) == SSA_NAME
464 && TREE_CODE (TREE_TYPE (op0
)) == BOOLEAN_TYPE
465 && (integer_zerop (op1
)))
468 cond_expr
= fold_ternary (COND_EXPR
, type
, cond
, rhs
, lhs
);
470 if (cond_expr
== NULL_TREE
)
471 return build3 (COND_EXPR
, type
, cond
, rhs
, lhs
);
473 STRIP_USELESS_TYPE_CONVERSION (cond_expr
);
475 if (is_gimple_val (cond_expr
))
478 if (TREE_CODE (cond_expr
) == ABS_EXPR
)
480 rhs1
= TREE_OPERAND (cond_expr
, 1);
481 STRIP_USELESS_TYPE_CONVERSION (rhs1
);
482 if (is_gimple_val (rhs1
))
483 return build1 (ABS_EXPR
, type
, rhs1
);
486 if (TREE_CODE (cond_expr
) == MIN_EXPR
487 || TREE_CODE (cond_expr
) == MAX_EXPR
)
489 lhs1
= TREE_OPERAND (cond_expr
, 0);
490 STRIP_USELESS_TYPE_CONVERSION (lhs1
);
491 rhs1
= TREE_OPERAND (cond_expr
, 1);
492 STRIP_USELESS_TYPE_CONVERSION (rhs1
);
493 if (is_gimple_val (rhs1
) && is_gimple_val (lhs1
))
494 return build2 (TREE_CODE (cond_expr
), type
, lhs1
, rhs1
);
496 return build3 (COND_EXPR
, type
, cond
, rhs
, lhs
);
499 /* Add condition NC to the predicate list of basic block BB. LOOP is
500 the loop to be if-converted. Use predicate of cd-equivalent block
501 for join bb if it exists: we call basic blocks bb1 and bb2
502 cd-equivalent if they are executed under the same condition. */
505 add_to_predicate_list (class loop
*loop
, basic_block bb
, tree nc
)
510 if (is_true_predicate (nc
))
513 /* If dominance tells us this basic block is always executed,
514 don't record any predicates for it. */
515 if (dominated_by_p (CDI_DOMINATORS
, loop
->latch
, bb
))
518 dom_bb
= get_immediate_dominator (CDI_DOMINATORS
, bb
);
519 /* We use notion of cd equivalence to get simpler predicate for
520 join block, e.g. if join block has 2 predecessors with predicates
521 p1 & p2 and p1 & !p2, we'd like to get p1 for it instead of
522 p1 & p2 | p1 & !p2. */
523 if (dom_bb
!= loop
->header
524 && get_immediate_dominator (CDI_POST_DOMINATORS
, dom_bb
) == bb
)
526 gcc_assert (flow_bb_inside_loop_p (loop
, dom_bb
));
527 bc
= bb_predicate (dom_bb
);
528 if (!is_true_predicate (bc
))
529 set_bb_predicate (bb
, bc
);
531 gcc_assert (is_true_predicate (bb_predicate (bb
)));
532 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
533 fprintf (dump_file
, "Use predicate of bb#%d for bb#%d\n",
534 dom_bb
->index
, bb
->index
);
538 if (!is_predicated (bb
))
542 bc
= bb_predicate (bb
);
543 bc
= fold_or_predicates (EXPR_LOCATION (bc
), nc
, bc
);
544 if (is_true_predicate (bc
))
546 reset_bb_predicate (bb
);
551 /* Allow a TRUTH_NOT_EXPR around the main predicate. */
552 if (TREE_CODE (bc
) == TRUTH_NOT_EXPR
)
553 tp
= &TREE_OPERAND (bc
, 0);
556 if (!is_gimple_condexpr (*tp
))
559 *tp
= force_gimple_operand_1 (*tp
, &stmts
, is_gimple_condexpr
, NULL_TREE
);
560 add_bb_predicate_gimplified_stmts (bb
, stmts
);
562 set_bb_predicate (bb
, bc
);
565 /* Add the condition COND to the previous condition PREV_COND, and add
566 this to the predicate list of the destination of edge E. LOOP is
567 the loop to be if-converted. */
570 add_to_dst_predicate_list (class loop
*loop
, edge e
,
571 tree prev_cond
, tree cond
)
573 if (!flow_bb_inside_loop_p (loop
, e
->dest
))
576 if (!is_true_predicate (prev_cond
))
577 cond
= fold_build2 (TRUTH_AND_EXPR
, boolean_type_node
,
580 if (!dominated_by_p (CDI_DOMINATORS
, loop
->latch
, e
->dest
))
581 add_to_predicate_list (loop
, e
->dest
, cond
);
584 /* Return true if one of the successor edges of BB exits LOOP. */
587 bb_with_exit_edge_p (class loop
*loop
, basic_block bb
)
592 FOR_EACH_EDGE (e
, ei
, bb
->succs
)
593 if (loop_exit_edge_p (loop
, e
))
599 /* Given PHI which has more than two arguments, this function checks if
600 it's if-convertible by degenerating its arguments. Specifically, if
601 below two conditions are satisfied:
603 1) Number of PHI arguments with different values equals to 2 and one
604 argument has the only occurrence.
605 2) The edge corresponding to the unique argument isn't critical edge.
607 Such PHI can be handled as PHIs have only two arguments. For example,
610 res = PHI <A_1(e1), A_1(e2), A_2(e3)>;
612 can be transformed into:
614 res = (predicate of e3) ? A_2 : A_1;
616 Return TRUE if it is the case, FALSE otherwise. */
619 phi_convertible_by_degenerating_args (gphi
*phi
)
622 tree arg
, t1
= NULL
, t2
= NULL
;
623 unsigned int i
, i1
= 0, i2
= 0, n1
= 0, n2
= 0;
624 unsigned int num_args
= gimple_phi_num_args (phi
);
626 gcc_assert (num_args
> 2);
628 for (i
= 0; i
< num_args
; i
++)
630 arg
= gimple_phi_arg_def (phi
, i
);
631 if (t1
== NULL
|| operand_equal_p (t1
, arg
, 0))
637 else if (t2
== NULL
|| operand_equal_p (t2
, arg
, 0))
647 if (n1
!= 1 && n2
!= 1)
650 /* Check if the edge corresponding to the unique arg is critical. */
651 e
= gimple_phi_arg_edge (phi
, (n1
== 1) ? i1
: i2
);
652 if (EDGE_COUNT (e
->src
->succs
) > 1)
658 /* Return true when PHI is if-convertible. PHI is part of loop LOOP
659 and it belongs to basic block BB. Note at this point, it is sure
660 that PHI is if-convertible. This function updates global variable
661 ANY_COMPLICATED_PHI if PHI is complicated. */
664 if_convertible_phi_p (class loop
*loop
, basic_block bb
, gphi
*phi
)
666 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
668 fprintf (dump_file
, "-------------------------\n");
669 print_gimple_stmt (dump_file
, phi
, 0, TDF_SLIM
);
672 if (bb
!= loop
->header
673 && gimple_phi_num_args (phi
) > 2
674 && !phi_convertible_by_degenerating_args (phi
))
675 any_complicated_phi
= true;
680 /* Records the status of a data reference. This struct is attached to
681 each DR->aux field. */
684 bool rw_unconditionally
;
685 bool w_unconditionally
;
686 bool written_at_least_once
;
690 tree base_w_predicate
;
693 #define IFC_DR(DR) ((struct ifc_dr *) (DR)->aux)
694 #define DR_BASE_W_UNCONDITIONALLY(DR) (IFC_DR (DR)->written_at_least_once)
695 #define DR_RW_UNCONDITIONALLY(DR) (IFC_DR (DR)->rw_unconditionally)
696 #define DR_W_UNCONDITIONALLY(DR) (IFC_DR (DR)->w_unconditionally)
698 /* Iterates over DR's and stores refs, DR and base refs, DR pairs in
699 HASH tables. While storing them in HASH table, it checks if the
700 reference is unconditionally read or written and stores that as a flag
701 information. For base reference it checks if it is written atlest once
702 unconditionally and stores it as flag information along with DR.
703 In other words for every data reference A in STMT there exist other
704 accesses to a data reference with the same base with predicates that
705 add up (OR-up) to the true predicate: this ensures that the data
706 reference A is touched (read or written) on every iteration of the
707 if-converted loop. */
709 hash_memrefs_baserefs_and_store_DRs_read_written_info (data_reference_p a
)
712 data_reference_p
*master_dr
, *base_master_dr
;
713 tree base_ref
= DR_BASE_OBJECT (a
);
714 innermost_loop_behavior
*innermost
= &DR_INNERMOST (a
);
715 tree ca
= bb_predicate (gimple_bb (DR_STMT (a
)));
718 master_dr
= &innermost_DR_map
->get_or_insert (innermost
, &exist1
);
724 IFC_DR (*master_dr
)->w_predicate
725 = fold_or_predicates (UNKNOWN_LOCATION
, ca
,
726 IFC_DR (*master_dr
)->w_predicate
);
727 if (is_true_predicate (IFC_DR (*master_dr
)->w_predicate
))
728 DR_W_UNCONDITIONALLY (*master_dr
) = true;
730 IFC_DR (*master_dr
)->rw_predicate
731 = fold_or_predicates (UNKNOWN_LOCATION
, ca
,
732 IFC_DR (*master_dr
)->rw_predicate
);
733 if (is_true_predicate (IFC_DR (*master_dr
)->rw_predicate
))
734 DR_RW_UNCONDITIONALLY (*master_dr
) = true;
738 base_master_dr
= &baseref_DR_map
->get_or_insert (base_ref
, &exist2
);
741 IFC_DR (*base_master_dr
)->base_w_predicate
742 = fold_or_predicates (UNKNOWN_LOCATION
, ca
,
743 IFC_DR (*base_master_dr
)->base_w_predicate
);
744 if (is_true_predicate (IFC_DR (*base_master_dr
)->base_w_predicate
))
745 DR_BASE_W_UNCONDITIONALLY (*base_master_dr
) = true;
749 /* Return TRUE if can prove the index IDX of an array reference REF is
750 within array bound. Return false otherwise. */
753 idx_within_array_bound (tree ref
, tree
*idx
, void *dta
)
755 wi::overflow_type overflow
;
756 widest_int niter
, valid_niter
, delta
, wi_step
;
759 class loop
*loop
= (class loop
*) dta
;
761 /* Only support within-bound access for array references. */
762 if (TREE_CODE (ref
) != ARRAY_REF
)
765 /* For arrays at the end of the structure, we are not guaranteed that they
766 do not really extend over their declared size. However, for arrays of
767 size greater than one, this is unlikely to be intended. */
768 if (array_at_struct_end_p (ref
))
771 ev
= analyze_scalar_evolution (loop
, *idx
);
772 ev
= instantiate_parameters (loop
, ev
);
773 init
= initial_condition (ev
);
774 step
= evolution_part_in_loop_num (ev
, loop
->num
);
776 if (!init
|| TREE_CODE (init
) != INTEGER_CST
777 || (step
&& TREE_CODE (step
) != INTEGER_CST
))
780 low
= array_ref_low_bound (ref
);
781 high
= array_ref_up_bound (ref
);
783 /* The case of nonconstant bounds could be handled, but it would be
785 if (TREE_CODE (low
) != INTEGER_CST
786 || !high
|| TREE_CODE (high
) != INTEGER_CST
)
789 /* Check if the intial idx is within bound. */
790 if (wi::to_widest (init
) < wi::to_widest (low
)
791 || wi::to_widest (init
) > wi::to_widest (high
))
794 /* The idx is always within bound. */
795 if (!step
|| integer_zerop (step
))
798 if (!max_loop_iterations (loop
, &niter
))
801 if (wi::to_widest (step
) < 0)
803 delta
= wi::to_widest (init
) - wi::to_widest (low
);
804 wi_step
= -wi::to_widest (step
);
808 delta
= wi::to_widest (high
) - wi::to_widest (init
);
809 wi_step
= wi::to_widest (step
);
812 valid_niter
= wi::div_floor (delta
, wi_step
, SIGNED
, &overflow
);
813 /* The iteration space of idx is within array bound. */
814 if (!overflow
&& niter
<= valid_niter
)
820 /* Return TRUE if ref is a within bound array reference. */
823 ref_within_array_bound (gimple
*stmt
, tree ref
)
825 class loop
*loop
= loop_containing_stmt (stmt
);
827 gcc_assert (loop
!= NULL
);
828 return for_each_index (&ref
, idx_within_array_bound
, loop
);
832 /* Given a memory reference expression T, return TRUE if base object
833 it refers to is writable. The base object of a memory reference
834 is the main object being referenced, which is returned by function
838 base_object_writable (tree ref
)
840 tree base_tree
= get_base_address (ref
);
843 && DECL_P (base_tree
)
844 && decl_binds_to_current_def_p (base_tree
)
845 && !TREE_READONLY (base_tree
));
848 /* Return true when the memory references of STMT won't trap in the
849 if-converted code. There are two things that we have to check for:
851 - writes to memory occur to writable memory: if-conversion of
852 memory writes transforms the conditional memory writes into
853 unconditional writes, i.e. "if (cond) A[i] = foo" is transformed
854 into "A[i] = cond ? foo : A[i]", and as the write to memory may not
855 be executed at all in the original code, it may be a readonly
856 memory. To check that A is not const-qualified, we check that
857 there exists at least an unconditional write to A in the current
860 - reads or writes to memory are valid memory accesses for every
861 iteration. To check that the memory accesses are correctly formed
862 and that we are allowed to read and write in these locations, we
863 check that the memory accesses to be if-converted occur at every
864 iteration unconditionally.
866 Returns true for the memory reference in STMT, same memory reference
867 is read or written unconditionally atleast once and the base memory
868 reference is written unconditionally once. This is to check reference
869 will not write fault. Also retuns true if the memory reference is
870 unconditionally read once then we are conditionally writing to memory
871 which is defined as read and write and is bound to the definition
874 ifcvt_memrefs_wont_trap (gimple
*stmt
, vec
<data_reference_p
> drs
)
876 /* If DR didn't see a reference here we can't use it to tell
877 whether the ref traps or not. */
878 if (gimple_uid (stmt
) == 0)
881 data_reference_p
*master_dr
, *base_master_dr
;
882 data_reference_p a
= drs
[gimple_uid (stmt
) - 1];
884 tree base
= DR_BASE_OBJECT (a
);
885 innermost_loop_behavior
*innermost
= &DR_INNERMOST (a
);
887 gcc_assert (DR_STMT (a
) == stmt
);
888 gcc_assert (DR_BASE_ADDRESS (a
) || DR_OFFSET (a
)
889 || DR_INIT (a
) || DR_STEP (a
));
891 master_dr
= innermost_DR_map
->get (innermost
);
892 gcc_assert (master_dr
!= NULL
);
894 base_master_dr
= baseref_DR_map
->get (base
);
896 /* If a is unconditionally written to it doesn't trap. */
897 if (DR_W_UNCONDITIONALLY (*master_dr
))
900 /* If a is unconditionally accessed then ...
902 Even a is conditional access, we can treat it as an unconditional
903 one if it's an array reference and all its index are within array
905 if (DR_RW_UNCONDITIONALLY (*master_dr
)
906 || ref_within_array_bound (stmt
, DR_REF (a
)))
908 /* an unconditional read won't trap. */
912 /* an unconditionaly write won't trap if the base is written
913 to unconditionally. */
915 && DR_BASE_W_UNCONDITIONALLY (*base_master_dr
))
916 return flag_store_data_races
;
917 /* or the base is known to be not readonly. */
918 else if (base_object_writable (DR_REF (a
)))
919 return flag_store_data_races
;
925 /* Return true if STMT could be converted into a masked load or store
926 (conditional load or store based on a mask computed from bb predicate). */
929 ifcvt_can_use_mask_load_store (gimple
*stmt
)
931 /* Check whether this is a load or store. */
932 tree lhs
= gimple_assign_lhs (stmt
);
935 if (gimple_store_p (stmt
))
937 if (!is_gimple_val (gimple_assign_rhs1 (stmt
)))
942 else if (gimple_assign_load_p (stmt
))
945 ref
= gimple_assign_rhs1 (stmt
);
950 if (may_be_nonaddressable_p (ref
))
953 /* Mask should be integer mode of the same size as the load/store
955 machine_mode mode
= TYPE_MODE (TREE_TYPE (lhs
));
956 if (!int_mode_for_mode (mode
).exists () || VECTOR_MODE_P (mode
))
959 if (can_vec_mask_load_store_p (mode
, VOIDmode
, is_load
))
965 /* Return true if STMT could be converted from an operation that is
966 unconditional to one that is conditional on a bb predicate mask. */
969 ifcvt_can_predicate (gimple
*stmt
)
971 basic_block bb
= gimple_bb (stmt
);
973 if (!(flag_tree_loop_vectorize
|| bb
->loop_father
->force_vectorize
)
974 || bb
->loop_father
->dont_vectorize
975 || gimple_has_volatile_ops (stmt
))
978 if (gimple_assign_single_p (stmt
))
979 return ifcvt_can_use_mask_load_store (stmt
);
981 tree_code code
= gimple_assign_rhs_code (stmt
);
982 tree lhs_type
= TREE_TYPE (gimple_assign_lhs (stmt
));
983 tree rhs_type
= TREE_TYPE (gimple_assign_rhs1 (stmt
));
984 if (!types_compatible_p (lhs_type
, rhs_type
))
986 internal_fn cond_fn
= get_conditional_internal_fn (code
);
987 return (cond_fn
!= IFN_LAST
988 && vectorized_internal_fn_supported_p (cond_fn
, lhs_type
));
991 /* Return true when STMT is if-convertible.
993 GIMPLE_ASSIGN statement is not if-convertible if,
996 - LHS is not var decl. */
999 if_convertible_gimple_assign_stmt_p (gimple
*stmt
,
1000 vec
<data_reference_p
> refs
)
1002 tree lhs
= gimple_assign_lhs (stmt
);
1004 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1006 fprintf (dump_file
, "-------------------------\n");
1007 print_gimple_stmt (dump_file
, stmt
, 0, TDF_SLIM
);
1010 if (!is_gimple_reg_type (TREE_TYPE (lhs
)))
1013 /* Some of these constrains might be too conservative. */
1014 if (stmt_ends_bb_p (stmt
)
1015 || gimple_has_volatile_ops (stmt
)
1016 || (TREE_CODE (lhs
) == SSA_NAME
1017 && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs
))
1018 || gimple_has_side_effects (stmt
))
1020 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1021 fprintf (dump_file
, "stmt not suitable for ifcvt\n");
1025 /* tree-into-ssa.c uses GF_PLF_1, so avoid it, because
1026 in between if_convertible_loop_p and combine_blocks
1027 we can perform loop versioning. */
1028 gimple_set_plf (stmt
, GF_PLF_2
, false);
1030 if ((! gimple_vuse (stmt
)
1031 || gimple_could_trap_p_1 (stmt
, false, false)
1032 || ! ifcvt_memrefs_wont_trap (stmt
, refs
))
1033 && gimple_could_trap_p (stmt
))
1035 if (ifcvt_can_predicate (stmt
))
1037 gimple_set_plf (stmt
, GF_PLF_2
, true);
1038 need_to_predicate
= true;
1041 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1042 fprintf (dump_file
, "tree could trap...\n");
1046 /* When if-converting stores force versioning, likewise if we
1047 ended up generating store data races. */
1048 if (gimple_vdef (stmt
))
1049 need_to_predicate
= true;
1054 /* Return true when STMT is if-convertible.
1056 A statement is if-convertible if:
1057 - it is an if-convertible GIMPLE_ASSIGN,
1058 - it is a GIMPLE_LABEL or a GIMPLE_COND,
1059 - it is builtins call. */
1062 if_convertible_stmt_p (gimple
*stmt
, vec
<data_reference_p
> refs
)
1064 switch (gimple_code (stmt
))
1072 return if_convertible_gimple_assign_stmt_p (stmt
, refs
);
1076 tree fndecl
= gimple_call_fndecl (stmt
);
1079 int flags
= gimple_call_flags (stmt
);
1080 if ((flags
& ECF_CONST
)
1081 && !(flags
& ECF_LOOPING_CONST_OR_PURE
)
1082 /* We can only vectorize some builtins at the moment,
1083 so restrict if-conversion to those. */
1084 && fndecl_built_in_p (fndecl
))
1091 /* Don't know what to do with 'em so don't do anything. */
1092 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1094 fprintf (dump_file
, "don't know what to do\n");
1095 print_gimple_stmt (dump_file
, stmt
, 0, TDF_SLIM
);
1103 /* Assumes that BB has more than 1 predecessors.
1104 Returns false if at least one successor is not on critical edge
1105 and true otherwise. */
1108 all_preds_critical_p (basic_block bb
)
1113 FOR_EACH_EDGE (e
, ei
, bb
->preds
)
1114 if (EDGE_COUNT (e
->src
->succs
) == 1)
1119 /* Return true when BB is if-convertible. This routine does not check
1120 basic block's statements and phis.
1122 A basic block is not if-convertible if:
1123 - it is non-empty and it is after the exit block (in BFS order),
1124 - it is after the exit block but before the latch,
1125 - its edges are not normal.
1127 EXIT_BB is the basic block containing the exit of the LOOP. BB is
1131 if_convertible_bb_p (class loop
*loop
, basic_block bb
, basic_block exit_bb
)
1136 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1137 fprintf (dump_file
, "----------[%d]-------------\n", bb
->index
);
1139 if (EDGE_COUNT (bb
->succs
) > 2)
1142 gimple
*last
= last_stmt (bb
);
1143 if (gcall
*call
= safe_dyn_cast
<gcall
*> (last
))
1144 if (gimple_call_ctrl_altering_p (call
))
1149 if (bb
!= loop
->latch
)
1151 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1152 fprintf (dump_file
, "basic block after exit bb but before latch\n");
1155 else if (!empty_block_p (bb
))
1157 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1158 fprintf (dump_file
, "non empty basic block after exit bb\n");
1161 else if (bb
== loop
->latch
1163 && !dominated_by_p (CDI_DOMINATORS
, bb
, exit_bb
))
1165 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1166 fprintf (dump_file
, "latch is not dominated by exit_block\n");
1171 /* Be less adventurous and handle only normal edges. */
1172 FOR_EACH_EDGE (e
, ei
, bb
->succs
)
1173 if (e
->flags
& (EDGE_EH
| EDGE_ABNORMAL
| EDGE_IRREDUCIBLE_LOOP
))
1175 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1176 fprintf (dump_file
, "Difficult to handle edges\n");
1183 /* Return true when all predecessor blocks of BB are visited. The
1184 VISITED bitmap keeps track of the visited blocks. */
1187 pred_blocks_visited_p (basic_block bb
, bitmap
*visited
)
1191 FOR_EACH_EDGE (e
, ei
, bb
->preds
)
1192 if (!bitmap_bit_p (*visited
, e
->src
->index
))
1198 /* Get body of a LOOP in suitable order for if-conversion. It is
1199 caller's responsibility to deallocate basic block list.
1200 If-conversion suitable order is, breadth first sort (BFS) order
1201 with an additional constraint: select a block only if all its
1202 predecessors are already selected. */
1204 static basic_block
*
1205 get_loop_body_in_if_conv_order (const class loop
*loop
)
1207 basic_block
*blocks
, *blocks_in_bfs_order
;
1210 unsigned int index
= 0;
1211 unsigned int visited_count
= 0;
1213 gcc_assert (loop
->num_nodes
);
1214 gcc_assert (loop
->latch
!= EXIT_BLOCK_PTR_FOR_FN (cfun
));
1216 blocks
= XCNEWVEC (basic_block
, loop
->num_nodes
);
1217 visited
= BITMAP_ALLOC (NULL
);
1219 blocks_in_bfs_order
= get_loop_body_in_bfs_order (loop
);
1222 while (index
< loop
->num_nodes
)
1224 bb
= blocks_in_bfs_order
[index
];
1226 if (bb
->flags
& BB_IRREDUCIBLE_LOOP
)
1228 free (blocks_in_bfs_order
);
1229 BITMAP_FREE (visited
);
1234 if (!bitmap_bit_p (visited
, bb
->index
))
1236 if (pred_blocks_visited_p (bb
, &visited
)
1237 || bb
== loop
->header
)
1239 /* This block is now visited. */
1240 bitmap_set_bit (visited
, bb
->index
);
1241 blocks
[visited_count
++] = bb
;
1247 if (index
== loop
->num_nodes
1248 && visited_count
!= loop
->num_nodes
)
1252 free (blocks_in_bfs_order
);
1253 BITMAP_FREE (visited
);
1257 /* Returns true when the analysis of the predicates for all the basic
1258 blocks in LOOP succeeded.
1260 predicate_bbs first allocates the predicates of the basic blocks.
1261 These fields are then initialized with the tree expressions
1262 representing the predicates under which a basic block is executed
1263 in the LOOP. As the loop->header is executed at each iteration, it
1264 has the "true" predicate. Other statements executed under a
1265 condition are predicated with that condition, for example
1272 S1 will be predicated with "x", and
1273 S2 will be predicated with "!x". */
1276 predicate_bbs (loop_p loop
)
1280 for (i
= 0; i
< loop
->num_nodes
; i
++)
1281 init_bb_predicate (ifc_bbs
[i
]);
1283 for (i
= 0; i
< loop
->num_nodes
; i
++)
1285 basic_block bb
= ifc_bbs
[i
];
1289 /* The loop latch and loop exit block are always executed and
1290 have no extra conditions to be processed: skip them. */
1291 if (bb
== loop
->latch
1292 || bb_with_exit_edge_p (loop
, bb
))
1294 reset_bb_predicate (bb
);
1298 cond
= bb_predicate (bb
);
1299 stmt
= last_stmt (bb
);
1300 if (stmt
&& gimple_code (stmt
) == GIMPLE_COND
)
1303 edge true_edge
, false_edge
;
1304 location_t loc
= gimple_location (stmt
);
1305 tree c
= build2_loc (loc
, gimple_cond_code (stmt
),
1307 gimple_cond_lhs (stmt
),
1308 gimple_cond_rhs (stmt
));
1310 /* Add new condition into destination's predicate list. */
1311 extract_true_false_edges_from_block (gimple_bb (stmt
),
1312 &true_edge
, &false_edge
);
1314 /* If C is true, then TRUE_EDGE is taken. */
1315 add_to_dst_predicate_list (loop
, true_edge
, unshare_expr (cond
),
1318 /* If C is false, then FALSE_EDGE is taken. */
1319 c2
= build1_loc (loc
, TRUTH_NOT_EXPR
, boolean_type_node
,
1321 add_to_dst_predicate_list (loop
, false_edge
,
1322 unshare_expr (cond
), c2
);
1327 /* If current bb has only one successor, then consider it as an
1328 unconditional goto. */
1329 if (single_succ_p (bb
))
1331 basic_block bb_n
= single_succ (bb
);
1333 /* The successor bb inherits the predicate of its
1334 predecessor. If there is no predicate in the predecessor
1335 bb, then consider the successor bb as always executed. */
1336 if (cond
== NULL_TREE
)
1337 cond
= boolean_true_node
;
1339 add_to_predicate_list (loop
, bb_n
, cond
);
1343 /* The loop header is always executed. */
1344 reset_bb_predicate (loop
->header
);
1345 gcc_assert (bb_predicate_gimplified_stmts (loop
->header
) == NULL
1346 && bb_predicate_gimplified_stmts (loop
->latch
) == NULL
);
1349 /* Build region by adding loop pre-header and post-header blocks. */
1351 static vec
<basic_block
>
1352 build_region (class loop
*loop
)
1354 vec
<basic_block
> region
= vNULL
;
1355 basic_block exit_bb
= NULL
;
1357 gcc_assert (ifc_bbs
);
1358 /* The first element is loop pre-header. */
1359 region
.safe_push (loop_preheader_edge (loop
)->src
);
1361 for (unsigned int i
= 0; i
< loop
->num_nodes
; i
++)
1363 basic_block bb
= ifc_bbs
[i
];
1364 region
.safe_push (bb
);
1365 /* Find loop postheader. */
1368 FOR_EACH_EDGE (e
, ei
, bb
->succs
)
1369 if (loop_exit_edge_p (loop
, e
))
1375 /* The last element is loop post-header. */
1376 gcc_assert (exit_bb
);
1377 region
.safe_push (exit_bb
);
1381 /* Return true when LOOP is if-convertible. This is a helper function
1382 for if_convertible_loop_p. REFS and DDRS are initialized and freed
1383 in if_convertible_loop_p. */
1386 if_convertible_loop_p_1 (class loop
*loop
, vec
<data_reference_p
> *refs
)
1389 basic_block exit_bb
= NULL
;
1390 vec
<basic_block
> region
;
1392 if (find_data_references_in_loop (loop
, refs
) == chrec_dont_know
)
1395 calculate_dominance_info (CDI_DOMINATORS
);
1397 /* Allow statements that can be handled during if-conversion. */
1398 ifc_bbs
= get_loop_body_in_if_conv_order (loop
);
1401 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1402 fprintf (dump_file
, "Irreducible loop\n");
1406 for (i
= 0; i
< loop
->num_nodes
; i
++)
1408 basic_block bb
= ifc_bbs
[i
];
1410 if (!if_convertible_bb_p (loop
, bb
, exit_bb
))
1413 if (bb_with_exit_edge_p (loop
, bb
))
1417 for (i
= 0; i
< loop
->num_nodes
; i
++)
1419 basic_block bb
= ifc_bbs
[i
];
1420 gimple_stmt_iterator gsi
;
1422 for (gsi
= gsi_start_bb (bb
); !gsi_end_p (gsi
); gsi_next (&gsi
))
1423 switch (gimple_code (gsi_stmt (gsi
)))
1430 gimple_set_uid (gsi_stmt (gsi
), 0);
1437 data_reference_p dr
;
1440 = new hash_map
<innermost_loop_behavior_hash
, data_reference_p
>;
1441 baseref_DR_map
= new hash_map
<tree_operand_hash
, data_reference_p
>;
1443 /* Compute post-dominator tree locally. */
1444 region
= build_region (loop
);
1445 calculate_dominance_info_for_region (CDI_POST_DOMINATORS
, region
);
1447 predicate_bbs (loop
);
1449 /* Free post-dominator tree since it is not used after predication. */
1450 free_dominance_info_for_region (cfun
, CDI_POST_DOMINATORS
, region
);
1453 for (i
= 0; refs
->iterate (i
, &dr
); i
++)
1455 tree ref
= DR_REF (dr
);
1457 dr
->aux
= XNEW (struct ifc_dr
);
1458 DR_BASE_W_UNCONDITIONALLY (dr
) = false;
1459 DR_RW_UNCONDITIONALLY (dr
) = false;
1460 DR_W_UNCONDITIONALLY (dr
) = false;
1461 IFC_DR (dr
)->rw_predicate
= boolean_false_node
;
1462 IFC_DR (dr
)->w_predicate
= boolean_false_node
;
1463 IFC_DR (dr
)->base_w_predicate
= boolean_false_node
;
1464 if (gimple_uid (DR_STMT (dr
)) == 0)
1465 gimple_set_uid (DR_STMT (dr
), i
+ 1);
1467 /* If DR doesn't have innermost loop behavior or it's a compound
1468 memory reference, we synthesize its innermost loop behavior
1470 if (TREE_CODE (ref
) == COMPONENT_REF
1471 || TREE_CODE (ref
) == IMAGPART_EXPR
1472 || TREE_CODE (ref
) == REALPART_EXPR
1473 || !(DR_BASE_ADDRESS (dr
) || DR_OFFSET (dr
)
1474 || DR_INIT (dr
) || DR_STEP (dr
)))
1476 while (TREE_CODE (ref
) == COMPONENT_REF
1477 || TREE_CODE (ref
) == IMAGPART_EXPR
1478 || TREE_CODE (ref
) == REALPART_EXPR
)
1479 ref
= TREE_OPERAND (ref
, 0);
1481 memset (&DR_INNERMOST (dr
), 0, sizeof (DR_INNERMOST (dr
)));
1482 DR_BASE_ADDRESS (dr
) = ref
;
1484 hash_memrefs_baserefs_and_store_DRs_read_written_info (dr
);
1487 for (i
= 0; i
< loop
->num_nodes
; i
++)
1489 basic_block bb
= ifc_bbs
[i
];
1490 gimple_stmt_iterator itr
;
1492 /* Check the if-convertibility of statements in predicated BBs. */
1493 if (!dominated_by_p (CDI_DOMINATORS
, loop
->latch
, bb
))
1494 for (itr
= gsi_start_bb (bb
); !gsi_end_p (itr
); gsi_next (&itr
))
1495 if (!if_convertible_stmt_p (gsi_stmt (itr
), *refs
))
1499 /* Checking PHIs needs to be done after stmts, as the fact whether there
1500 are any masked loads or stores affects the tests. */
1501 for (i
= 0; i
< loop
->num_nodes
; i
++)
1503 basic_block bb
= ifc_bbs
[i
];
1506 for (itr
= gsi_start_phis (bb
); !gsi_end_p (itr
); gsi_next (&itr
))
1507 if (!if_convertible_phi_p (loop
, bb
, itr
.phi ()))
1512 fprintf (dump_file
, "Applying if-conversion\n");
1517 /* Return true when LOOP is if-convertible.
1518 LOOP is if-convertible if:
1520 - it has two or more basic blocks,
1521 - it has only one exit,
1522 - loop header is not the exit edge,
1523 - if its basic blocks and phi nodes are if convertible. */
1526 if_convertible_loop_p (class loop
*loop
)
1531 vec
<data_reference_p
> refs
;
1533 /* Handle only innermost loop. */
1534 if (!loop
|| loop
->inner
)
1536 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1537 fprintf (dump_file
, "not innermost loop\n");
1541 /* If only one block, no need for if-conversion. */
1542 if (loop
->num_nodes
<= 2)
1544 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1545 fprintf (dump_file
, "less than 2 basic blocks\n");
1549 /* More than one loop exit is too much to handle. */
1550 if (!single_exit (loop
))
1552 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1553 fprintf (dump_file
, "multiple exits\n");
1557 /* If one of the loop header's edge is an exit edge then do not
1558 apply if-conversion. */
1559 FOR_EACH_EDGE (e
, ei
, loop
->header
->succs
)
1560 if (loop_exit_edge_p (loop
, e
))
1564 res
= if_convertible_loop_p_1 (loop
, &refs
);
1566 data_reference_p dr
;
1568 for (i
= 0; refs
.iterate (i
, &dr
); i
++)
1571 free_data_refs (refs
);
1573 delete innermost_DR_map
;
1574 innermost_DR_map
= NULL
;
1576 delete baseref_DR_map
;
1577 baseref_DR_map
= NULL
;
1582 /* Return reduc_1 if has_nop.
1585 tmp1 = (unsigned type) reduc_1;
1587 reduc_3 = (signed type) tmp2. */
1589 strip_nop_cond_scalar_reduction (bool has_nop
, tree op
)
1594 if (TREE_CODE (op
) != SSA_NAME
)
1597 gassign
*stmt
= safe_dyn_cast
<gassign
*> (SSA_NAME_DEF_STMT (op
));
1599 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt
))
1600 || !tree_nop_conversion_p (TREE_TYPE (op
), TREE_TYPE
1601 (gimple_assign_rhs1 (stmt
))))
1604 return gimple_assign_rhs1 (stmt
);
1607 /* Returns true if def-stmt for phi argument ARG is simple increment/decrement
1608 which is in predicated basic block.
1609 In fact, the following PHI pattern is searching:
1611 reduc_1 = PHI <..., reduc_2>
1615 reduc_2 = PHI <reduc_1, reduc_3>
1617 ARG_0 and ARG_1 are correspondent PHI arguments.
1618 REDUC, OP0 and OP1 contain reduction stmt and its operands.
1619 EXTENDED is true if PHI has > 2 arguments. */
1622 is_cond_scalar_reduction (gimple
*phi
, gimple
**reduc
, tree arg_0
, tree arg_1
,
1623 tree
*op0
, tree
*op1
, bool extended
, bool* has_nop
,
1626 tree lhs
, r_op1
, r_op2
, r_nop1
, r_nop2
;
1628 gimple
*header_phi
= NULL
;
1629 enum tree_code reduction_op
;
1630 basic_block bb
= gimple_bb (phi
);
1631 class loop
*loop
= bb
->loop_father
;
1632 edge latch_e
= loop_latch_edge (loop
);
1633 imm_use_iterator imm_iter
;
1634 use_operand_p use_p
;
1637 bool result
= *has_nop
= false;
1638 if (TREE_CODE (arg_0
) != SSA_NAME
|| TREE_CODE (arg_1
) != SSA_NAME
)
1641 if (!extended
&& gimple_code (SSA_NAME_DEF_STMT (arg_0
)) == GIMPLE_PHI
)
1644 header_phi
= SSA_NAME_DEF_STMT (arg_0
);
1645 stmt
= SSA_NAME_DEF_STMT (arg_1
);
1647 else if (gimple_code (SSA_NAME_DEF_STMT (arg_1
)) == GIMPLE_PHI
)
1650 header_phi
= SSA_NAME_DEF_STMT (arg_1
);
1651 stmt
= SSA_NAME_DEF_STMT (arg_0
);
1655 if (gimple_bb (header_phi
) != loop
->header
)
1658 if (PHI_ARG_DEF_FROM_EDGE (header_phi
, latch_e
) != PHI_RESULT (phi
))
1661 if (gimple_code (stmt
) != GIMPLE_ASSIGN
1662 || gimple_has_volatile_ops (stmt
))
1665 if (!flow_bb_inside_loop_p (loop
, gimple_bb (stmt
)))
1668 if (!is_predicated (gimple_bb (stmt
)))
1671 /* Check that stmt-block is predecessor of phi-block. */
1672 FOR_EACH_EDGE (e
, ei
, gimple_bb (stmt
)->succs
)
1681 if (!has_single_use (lhs
))
1684 reduction_op
= gimple_assign_rhs_code (stmt
);
1686 /* Catch something like below
1689 reduc_1 = PHI <..., reduc_2>
1692 tmp1 = (unsigned type) reduc_1;
1694 reduc_3 = (signed type) tmp2;
1696 reduc_2 = PHI <reduc_1, reduc_3>
1700 reduc_2 = PHI <0, reduc_3>
1701 tmp1 = (unsigned type)reduce_1;
1702 ifcvt = cond_expr ? rhs2 : 0
1703 tmp2 = tmp1 +/- ifcvt;
1704 reduce_1 = (signed type)tmp2; */
1706 if (CONVERT_EXPR_CODE_P (reduction_op
))
1708 lhs
= gimple_assign_rhs1 (stmt
);
1709 if (TREE_CODE (lhs
) != SSA_NAME
1710 || !has_single_use (lhs
))
1714 stmt
= SSA_NAME_DEF_STMT (lhs
);
1715 if (gimple_bb (stmt
) != gimple_bb (*nop_reduc
)
1716 || !is_gimple_assign (stmt
))
1720 reduction_op
= gimple_assign_rhs_code (stmt
);
1723 if (reduction_op
!= PLUS_EXPR
&& reduction_op
!= MINUS_EXPR
)
1725 r_op1
= gimple_assign_rhs1 (stmt
);
1726 r_op2
= gimple_assign_rhs2 (stmt
);
1728 r_nop1
= strip_nop_cond_scalar_reduction (*has_nop
, r_op1
);
1729 r_nop2
= strip_nop_cond_scalar_reduction (*has_nop
, r_op2
);
1731 /* Make R_OP1 to hold reduction variable. */
1732 if (r_nop2
== PHI_RESULT (header_phi
)
1733 && reduction_op
== PLUS_EXPR
)
1735 std::swap (r_op1
, r_op2
);
1736 std::swap (r_nop1
, r_nop2
);
1738 else if (r_nop1
!= PHI_RESULT (header_phi
))
1743 /* Check that R_NOP1 is used in nop_stmt or in PHI only. */
1744 FOR_EACH_IMM_USE_FAST (use_p
, imm_iter
, r_nop1
)
1746 gimple
*use_stmt
= USE_STMT (use_p
);
1747 if (is_gimple_debug (use_stmt
))
1749 if (use_stmt
== SSA_NAME_DEF_STMT (r_op1
))
1751 if (use_stmt
!= phi
)
1756 /* Check that R_OP1 is used in reduction stmt or in PHI only. */
1757 FOR_EACH_IMM_USE_FAST (use_p
, imm_iter
, r_op1
)
1759 gimple
*use_stmt
= USE_STMT (use_p
);
1760 if (is_gimple_debug (use_stmt
))
1762 if (use_stmt
== stmt
)
1764 if (gimple_code (use_stmt
) != GIMPLE_PHI
)
1768 *op0
= r_op1
; *op1
= r_op2
;
1773 /* Converts conditional scalar reduction into unconditional form, e.g.
1775 if (_5 != 0) goto bb_5 else goto bb_6
1781 # res_2 = PHI <res_13(4), res_6(5)>
1784 will be converted into sequence
1785 _ifc__1 = _5 != 0 ? 1 : 0;
1786 res_2 = res_13 + _ifc__1;
1787 Argument SWAP tells that arguments of conditional expression should be
1789 Returns rhs of resulting PHI assignment. */
1792 convert_scalar_cond_reduction (gimple
*reduc
, gimple_stmt_iterator
*gsi
,
1793 tree cond
, tree op0
, tree op1
, bool swap
,
1794 bool has_nop
, gimple
* nop_reduc
)
1796 gimple_stmt_iterator stmt_it
;
1799 tree rhs1
= gimple_assign_rhs1 (reduc
);
1800 tree tmp
= make_temp_ssa_name (TREE_TYPE (rhs1
), NULL
, "_ifc_");
1802 tree zero
= build_zero_cst (TREE_TYPE (rhs1
));
1803 gimple_seq stmts
= NULL
;
1805 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1807 fprintf (dump_file
, "Found cond scalar reduction.\n");
1808 print_gimple_stmt (dump_file
, reduc
, 0, TDF_SLIM
);
1811 /* Build cond expression using COND and constant operand
1812 of reduction rhs. */
1813 c
= fold_build_cond_expr (TREE_TYPE (rhs1
),
1814 unshare_expr (cond
),
1818 /* Create assignment stmt and insert it at GSI. */
1819 new_assign
= gimple_build_assign (tmp
, c
);
1820 gsi_insert_before (gsi
, new_assign
, GSI_SAME_STMT
);
1821 /* Build rhs for unconditional increment/decrement. */
1822 rhs
= gimple_build (&stmts
, gimple_assign_rhs_code (reduc
),
1823 TREE_TYPE (rhs1
), op0
, tmp
);
1827 rhs
= gimple_convert (&stmts
,
1828 TREE_TYPE (gimple_assign_lhs (nop_reduc
)), rhs
);
1829 stmt_it
= gsi_for_stmt (nop_reduc
);
1830 gsi_remove (&stmt_it
, true);
1831 release_defs (nop_reduc
);
1833 gsi_insert_seq_before (gsi
, stmts
, GSI_SAME_STMT
);
1835 /* Delete original reduction stmt. */
1836 stmt_it
= gsi_for_stmt (reduc
);
1837 gsi_remove (&stmt_it
, true);
1838 release_defs (reduc
);
1842 /* Produce condition for all occurrences of ARG in PHI node. */
1845 gen_phi_arg_condition (gphi
*phi
, vec
<int> *occur
,
1846 gimple_stmt_iterator
*gsi
)
1850 tree cond
= NULL_TREE
;
1854 len
= occur
->length ();
1855 gcc_assert (len
> 0);
1856 for (i
= 0; i
< len
; i
++)
1858 e
= gimple_phi_arg_edge (phi
, (*occur
)[i
]);
1859 c
= bb_predicate (e
->src
);
1860 if (is_true_predicate (c
))
1865 c
= force_gimple_operand_gsi_1 (gsi
, unshare_expr (c
),
1866 is_gimple_condexpr
, NULL_TREE
,
1867 true, GSI_SAME_STMT
);
1868 if (cond
!= NULL_TREE
)
1870 /* Must build OR expression. */
1871 cond
= fold_or_predicates (EXPR_LOCATION (c
), c
, cond
);
1872 cond
= force_gimple_operand_gsi_1 (gsi
, unshare_expr (cond
),
1873 is_gimple_condexpr
, NULL_TREE
,
1874 true, GSI_SAME_STMT
);
1879 gcc_assert (cond
!= NULL_TREE
);
1883 /* Local valueization callback that follows all-use SSA edges. */
1886 ifcvt_follow_ssa_use_edges (tree val
)
1891 /* Replace a scalar PHI node with a COND_EXPR using COND as condition.
1892 This routine can handle PHI nodes with more than two arguments.
1895 S1: A = PHI <x1(1), x2(5)>
1897 S2: A = cond ? x1 : x2;
1899 The generated code is inserted at GSI that points to the top of
1900 basic block's statement list.
1901 If PHI node has more than two arguments a chain of conditional
1902 expression is produced. */
1906 predicate_scalar_phi (gphi
*phi
, gimple_stmt_iterator
*gsi
)
1908 gimple
*new_stmt
= NULL
, *reduc
, *nop_reduc
;
1909 tree rhs
, res
, arg0
, arg1
, op0
, op1
, scev
;
1911 unsigned int index0
;
1912 unsigned int max
, args_len
;
1918 res
= gimple_phi_result (phi
);
1919 if (virtual_operand_p (res
))
1922 if ((rhs
= degenerate_phi_result (phi
))
1923 || ((scev
= analyze_scalar_evolution (gimple_bb (phi
)->loop_father
,
1925 && !chrec_contains_undetermined (scev
)
1927 && (rhs
= gimple_phi_arg_def (phi
, 0))))
1929 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1931 fprintf (dump_file
, "Degenerate phi!\n");
1932 print_gimple_stmt (dump_file
, phi
, 0, TDF_SLIM
);
1934 new_stmt
= gimple_build_assign (res
, rhs
);
1935 gsi_insert_before (gsi
, new_stmt
, GSI_SAME_STMT
);
1936 update_stmt (new_stmt
);
1940 bb
= gimple_bb (phi
);
1941 if (EDGE_COUNT (bb
->preds
) == 2)
1943 /* Predicate ordinary PHI node with 2 arguments. */
1944 edge first_edge
, second_edge
;
1945 basic_block true_bb
;
1946 first_edge
= EDGE_PRED (bb
, 0);
1947 second_edge
= EDGE_PRED (bb
, 1);
1948 cond
= bb_predicate (first_edge
->src
);
1949 if (TREE_CODE (cond
) == TRUTH_NOT_EXPR
)
1950 std::swap (first_edge
, second_edge
);
1951 if (EDGE_COUNT (first_edge
->src
->succs
) > 1)
1953 cond
= bb_predicate (second_edge
->src
);
1954 if (TREE_CODE (cond
) == TRUTH_NOT_EXPR
)
1955 cond
= TREE_OPERAND (cond
, 0);
1957 first_edge
= second_edge
;
1960 cond
= bb_predicate (first_edge
->src
);
1961 /* Gimplify the condition to a valid cond-expr conditonal operand. */
1962 cond
= force_gimple_operand_gsi_1 (gsi
, unshare_expr (cond
),
1963 is_gimple_condexpr
, NULL_TREE
,
1964 true, GSI_SAME_STMT
);
1965 true_bb
= first_edge
->src
;
1966 if (EDGE_PRED (bb
, 1)->src
== true_bb
)
1968 arg0
= gimple_phi_arg_def (phi
, 1);
1969 arg1
= gimple_phi_arg_def (phi
, 0);
1973 arg0
= gimple_phi_arg_def (phi
, 0);
1974 arg1
= gimple_phi_arg_def (phi
, 1);
1976 if (is_cond_scalar_reduction (phi
, &reduc
, arg0
, arg1
,
1977 &op0
, &op1
, false, &has_nop
,
1980 /* Convert reduction stmt into vectorizable form. */
1981 rhs
= convert_scalar_cond_reduction (reduc
, gsi
, cond
, op0
, op1
,
1982 true_bb
!= gimple_bb (reduc
),
1983 has_nop
, nop_reduc
);
1984 redundant_ssa_names
.safe_push (std::make_pair (res
, rhs
));
1987 /* Build new RHS using selected condition and arguments. */
1988 rhs
= fold_build_cond_expr (TREE_TYPE (res
), unshare_expr (cond
),
1990 new_stmt
= gimple_build_assign (res
, rhs
);
1991 gsi_insert_before (gsi
, new_stmt
, GSI_SAME_STMT
);
1992 gimple_stmt_iterator new_gsi
= gsi_for_stmt (new_stmt
);
1993 if (fold_stmt (&new_gsi
, ifcvt_follow_ssa_use_edges
))
1995 new_stmt
= gsi_stmt (new_gsi
);
1996 update_stmt (new_stmt
);
1999 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
2001 fprintf (dump_file
, "new phi replacement stmt\n");
2002 print_gimple_stmt (dump_file
, new_stmt
, 0, TDF_SLIM
);
2007 /* Create hashmap for PHI node which contain vector of argument indexes
2008 having the same value. */
2010 hash_map
<tree_operand_hash
, auto_vec
<int> > phi_arg_map
;
2011 unsigned int num_args
= gimple_phi_num_args (phi
);
2013 /* Vector of different PHI argument values. */
2014 auto_vec
<tree
> args (num_args
);
2016 /* Compute phi_arg_map. */
2017 for (i
= 0; i
< num_args
; i
++)
2021 arg
= gimple_phi_arg_def (phi
, i
);
2022 if (!phi_arg_map
.get (arg
))
2023 args
.quick_push (arg
);
2024 phi_arg_map
.get_or_insert (arg
).safe_push (i
);
2027 /* Determine element with max number of occurrences. */
2030 args_len
= args
.length ();
2031 for (i
= 0; i
< args_len
; i
++)
2034 if ((len
= phi_arg_map
.get (args
[i
])->length ()) > max
)
2041 /* Put element with max number of occurences to the end of ARGS. */
2042 if (max_ind
!= -1 && max_ind
+1 != (int) args_len
)
2043 std::swap (args
[args_len
- 1], args
[max_ind
]);
2045 /* Handle one special case when number of arguments with different values
2046 is equal 2 and one argument has the only occurrence. Such PHI can be
2047 handled as if would have only 2 arguments. */
2048 if (args_len
== 2 && phi_arg_map
.get (args
[0])->length () == 1)
2051 indexes
= phi_arg_map
.get (args
[0]);
2052 index0
= (*indexes
)[0];
2055 e
= gimple_phi_arg_edge (phi
, index0
);
2056 cond
= bb_predicate (e
->src
);
2057 if (TREE_CODE (cond
) == TRUTH_NOT_EXPR
)
2060 cond
= TREE_OPERAND (cond
, 0);
2062 /* Gimplify the condition to a valid cond-expr conditonal operand. */
2063 cond
= force_gimple_operand_gsi_1 (gsi
, unshare_expr (cond
),
2064 is_gimple_condexpr
, NULL_TREE
,
2065 true, GSI_SAME_STMT
);
2066 if (!(is_cond_scalar_reduction (phi
, &reduc
, arg0
, arg1
,
2067 &op0
, &op1
, true, &has_nop
, &nop_reduc
)))
2068 rhs
= fold_build_cond_expr (TREE_TYPE (res
), unshare_expr (cond
),
2073 /* Convert reduction stmt into vectorizable form. */
2074 rhs
= convert_scalar_cond_reduction (reduc
, gsi
, cond
, op0
, op1
,
2075 swap
,has_nop
, nop_reduc
);
2076 redundant_ssa_names
.safe_push (std::make_pair (res
, rhs
));
2078 new_stmt
= gimple_build_assign (res
, rhs
);
2079 gsi_insert_before (gsi
, new_stmt
, GSI_SAME_STMT
);
2080 update_stmt (new_stmt
);
2086 tree type
= TREE_TYPE (gimple_phi_result (phi
));
2089 for (i
= 0; i
< args_len
; i
++)
2092 indexes
= phi_arg_map
.get (args
[i
]);
2093 if (i
!= args_len
- 1)
2094 lhs
= make_temp_ssa_name (type
, NULL
, "_ifc_");
2097 cond
= gen_phi_arg_condition (phi
, indexes
, gsi
);
2098 rhs
= fold_build_cond_expr (type
, unshare_expr (cond
),
2100 new_stmt
= gimple_build_assign (lhs
, rhs
);
2101 gsi_insert_before (gsi
, new_stmt
, GSI_SAME_STMT
);
2102 update_stmt (new_stmt
);
2107 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
2109 fprintf (dump_file
, "new extended phi replacement stmt\n");
2110 print_gimple_stmt (dump_file
, new_stmt
, 0, TDF_SLIM
);
2114 /* Replaces in LOOP all the scalar phi nodes other than those in the
2115 LOOP->header block with conditional modify expressions. */
2118 predicate_all_scalar_phis (class loop
*loop
)
2121 unsigned int orig_loop_num_nodes
= loop
->num_nodes
;
2124 for (i
= 1; i
< orig_loop_num_nodes
; i
++)
2127 gimple_stmt_iterator gsi
;
2128 gphi_iterator phi_gsi
;
2131 if (bb
== loop
->header
)
2134 phi_gsi
= gsi_start_phis (bb
);
2135 if (gsi_end_p (phi_gsi
))
2138 gsi
= gsi_after_labels (bb
);
2139 while (!gsi_end_p (phi_gsi
))
2141 phi
= phi_gsi
.phi ();
2142 if (virtual_operand_p (gimple_phi_result (phi
)))
2143 gsi_next (&phi_gsi
);
2146 predicate_scalar_phi (phi
, &gsi
);
2147 remove_phi_node (&phi_gsi
, false);
2153 /* Insert in each basic block of LOOP the statements produced by the
2154 gimplification of the predicates. */
2157 insert_gimplified_predicates (loop_p loop
)
2161 for (i
= 0; i
< loop
->num_nodes
; i
++)
2163 basic_block bb
= ifc_bbs
[i
];
2165 if (!is_predicated (bb
))
2166 gcc_assert (bb_predicate_gimplified_stmts (bb
) == NULL
);
2167 if (!is_predicated (bb
))
2169 /* Do not insert statements for a basic block that is not
2170 predicated. Also make sure that the predicate of the
2171 basic block is set to true. */
2172 reset_bb_predicate (bb
);
2176 stmts
= bb_predicate_gimplified_stmts (bb
);
2179 if (need_to_predicate
)
2181 /* Insert the predicate of the BB just after the label,
2182 as the if-conversion of memory writes will use this
2184 gimple_stmt_iterator gsi
= gsi_after_labels (bb
);
2185 gsi_insert_seq_before (&gsi
, stmts
, GSI_SAME_STMT
);
2189 /* Insert the predicate of the BB at the end of the BB
2190 as this would reduce the register pressure: the only
2191 use of this predicate will be in successor BBs. */
2192 gimple_stmt_iterator gsi
= gsi_last_bb (bb
);
2195 || stmt_ends_bb_p (gsi_stmt (gsi
)))
2196 gsi_insert_seq_before (&gsi
, stmts
, GSI_SAME_STMT
);
2198 gsi_insert_seq_after (&gsi
, stmts
, GSI_SAME_STMT
);
2201 /* Once the sequence is code generated, set it to NULL. */
2202 set_bb_predicate_gimplified_stmts (bb
, NULL
);
2207 /* Helper function for predicate_statements. Returns index of existent
2208 mask if it was created for given SIZE and -1 otherwise. */
2211 mask_exists (int size
, const vec
<int> &vec
)
2215 FOR_EACH_VEC_ELT (vec
, ix
, v
)
2221 /* Helper function for predicate_statements. STMT is a memory read or
2222 write and it needs to be predicated by MASK. Return a statement
2226 predicate_load_or_store (gimple_stmt_iterator
*gsi
, gassign
*stmt
, tree mask
)
2230 tree lhs
= gimple_assign_lhs (stmt
);
2231 tree rhs
= gimple_assign_rhs1 (stmt
);
2232 tree ref
= TREE_CODE (lhs
) == SSA_NAME
? rhs
: lhs
;
2233 mark_addressable (ref
);
2234 tree addr
= force_gimple_operand_gsi (gsi
, build_fold_addr_expr (ref
),
2235 true, NULL_TREE
, true, GSI_SAME_STMT
);
2236 tree ptr
= build_int_cst (reference_alias_ptr_type (ref
),
2237 get_object_alignment (ref
));
2238 /* Copy points-to info if possible. */
2239 if (TREE_CODE (addr
) == SSA_NAME
&& !SSA_NAME_PTR_INFO (addr
))
2240 copy_ref_info (build2 (MEM_REF
, TREE_TYPE (ref
), addr
, ptr
),
2242 if (TREE_CODE (lhs
) == SSA_NAME
)
2245 = gimple_build_call_internal (IFN_MASK_LOAD
, 3, addr
,
2247 gimple_call_set_lhs (new_stmt
, lhs
);
2248 gimple_set_vuse (new_stmt
, gimple_vuse (stmt
));
2253 = gimple_build_call_internal (IFN_MASK_STORE
, 4, addr
, ptr
,
2255 gimple_move_vops (new_stmt
, stmt
);
2257 gimple_call_set_nothrow (new_stmt
, true);
2261 /* STMT uses OP_LHS. Check whether it is equivalent to:
2263 ... = OP_MASK ? OP_LHS : X;
2265 Return X if so, otherwise return null. OP_MASK is an SSA_NAME that is
2266 known to have value OP_COND. */
2269 check_redundant_cond_expr (gimple
*stmt
, tree op_mask
, tree op_cond
,
2272 gassign
*assign
= dyn_cast
<gassign
*> (stmt
);
2273 if (!assign
|| gimple_assign_rhs_code (assign
) != COND_EXPR
)
2276 tree use_cond
= gimple_assign_rhs1 (assign
);
2277 tree if_true
= gimple_assign_rhs2 (assign
);
2278 tree if_false
= gimple_assign_rhs3 (assign
);
2280 if ((use_cond
== op_mask
|| operand_equal_p (use_cond
, op_cond
, 0))
2281 && if_true
== op_lhs
)
2284 if (inverse_conditions_p (use_cond
, op_cond
) && if_false
== op_lhs
)
2290 /* Return true if VALUE is available for use at STMT. SSA_NAMES is
2291 the set of SSA names defined earlier in STMT's block. */
2294 value_available_p (gimple
*stmt
, hash_set
<tree_ssa_name_hash
> *ssa_names
,
2297 if (is_gimple_min_invariant (value
))
2300 if (TREE_CODE (value
) == SSA_NAME
)
2302 if (SSA_NAME_IS_DEFAULT_DEF (value
))
2305 basic_block def_bb
= gimple_bb (SSA_NAME_DEF_STMT (value
));
2306 basic_block use_bb
= gimple_bb (stmt
);
2307 return (def_bb
== use_bb
2308 ? ssa_names
->contains (value
)
2309 : dominated_by_p (CDI_DOMINATORS
, use_bb
, def_bb
));
2315 /* Helper function for predicate_statements. STMT is a potentially-trapping
2316 arithmetic operation that needs to be predicated by MASK, an SSA_NAME that
2317 has value COND. Return a statement that does so. SSA_NAMES is the set of
2318 SSA names defined earlier in STMT's block. */
2321 predicate_rhs_code (gassign
*stmt
, tree mask
, tree cond
,
2322 hash_set
<tree_ssa_name_hash
> *ssa_names
)
2324 tree lhs
= gimple_assign_lhs (stmt
);
2325 tree_code code
= gimple_assign_rhs_code (stmt
);
2326 unsigned int nops
= gimple_num_ops (stmt
);
2327 internal_fn cond_fn
= get_conditional_internal_fn (code
);
2329 /* Construct the arguments to the conditional internal function. */
2330 auto_vec
<tree
, 8> args
;
2331 args
.safe_grow (nops
+ 1, true);
2333 for (unsigned int i
= 1; i
< nops
; ++i
)
2334 args
[i
] = gimple_op (stmt
, i
);
2335 args
[nops
] = NULL_TREE
;
2337 /* Look for uses of the result to see whether they are COND_EXPRs that can
2338 be folded into the conditional call. */
2339 imm_use_iterator imm_iter
;
2341 FOR_EACH_IMM_USE_STMT (use_stmt
, imm_iter
, lhs
)
2343 tree new_else
= check_redundant_cond_expr (use_stmt
, mask
, cond
, lhs
);
2344 if (new_else
&& value_available_p (stmt
, ssa_names
, new_else
))
2347 args
[nops
] = new_else
;
2348 if (operand_equal_p (new_else
, args
[nops
], 0))
2352 LHS = IFN_COND (MASK, ..., ELSE);
2353 X = MASK ? LHS : ELSE;
2355 which makes X equivalent to LHS. */
2356 tree use_lhs
= gimple_assign_lhs (use_stmt
);
2357 redundant_ssa_names
.safe_push (std::make_pair (use_lhs
, lhs
));
2362 args
[nops
] = targetm
.preferred_else_value (cond_fn
, TREE_TYPE (lhs
),
2363 nops
- 1, &args
[1]);
2365 /* Create and insert the call. */
2366 gcall
*new_stmt
= gimple_build_call_internal_vec (cond_fn
, args
);
2367 gimple_call_set_lhs (new_stmt
, lhs
);
2368 gimple_call_set_nothrow (new_stmt
, true);
2373 /* Predicate each write to memory in LOOP.
2375 This function transforms control flow constructs containing memory
2378 | for (i = 0; i < N; i++)
2382 into the following form that does not contain control flow:
2384 | for (i = 0; i < N; i++)
2385 | A[i] = cond ? expr : A[i];
2387 The original CFG looks like this:
2394 | if (i < N) goto bb_5 else goto bb_2
2398 | cond = some_computation;
2399 | if (cond) goto bb_3 else goto bb_4
2411 insert_gimplified_predicates inserts the computation of the COND
2412 expression at the beginning of the destination basic block:
2419 | if (i < N) goto bb_5 else goto bb_2
2423 | cond = some_computation;
2424 | if (cond) goto bb_3 else goto bb_4
2428 | cond = some_computation;
2437 predicate_statements is then predicating the memory write as follows:
2444 | if (i < N) goto bb_5 else goto bb_2
2448 | if (cond) goto bb_3 else goto bb_4
2452 | cond = some_computation;
2453 | A[i] = cond ? expr : A[i];
2461 and finally combine_blocks removes the basic block boundaries making
2462 the loop vectorizable:
2466 | if (i < N) goto bb_5 else goto bb_1
2470 | cond = some_computation;
2471 | A[i] = cond ? expr : A[i];
2472 | if (i < N) goto bb_5 else goto bb_4
2481 predicate_statements (loop_p loop
)
2483 unsigned int i
, orig_loop_num_nodes
= loop
->num_nodes
;
2484 auto_vec
<int, 1> vect_sizes
;
2485 auto_vec
<tree
, 1> vect_masks
;
2486 hash_set
<tree_ssa_name_hash
> ssa_names
;
2488 for (i
= 1; i
< orig_loop_num_nodes
; i
++)
2490 gimple_stmt_iterator gsi
;
2491 basic_block bb
= ifc_bbs
[i
];
2492 tree cond
= bb_predicate (bb
);
2496 if (is_true_predicate (cond
))
2500 if (TREE_CODE (cond
) == TRUTH_NOT_EXPR
)
2503 cond
= TREE_OPERAND (cond
, 0);
2506 vect_sizes
.truncate (0);
2507 vect_masks
.truncate (0);
2509 for (gsi
= gsi_start_bb (bb
); !gsi_end_p (gsi
);)
2511 gassign
*stmt
= dyn_cast
<gassign
*> (gsi_stmt (gsi
));
2514 else if (is_false_predicate (cond
)
2515 && gimple_vdef (stmt
))
2517 unlink_stmt_vdef (stmt
);
2518 gsi_remove (&gsi
, true);
2519 release_defs (stmt
);
2522 else if (gimple_plf (stmt
, GF_PLF_2
))
2524 tree lhs
= gimple_assign_lhs (stmt
);
2527 gimple_seq stmts
= NULL
;
2528 machine_mode mode
= TYPE_MODE (TREE_TYPE (lhs
));
2529 /* We checked before setting GF_PLF_2 that an equivalent
2530 integer mode exists. */
2531 int bitsize
= GET_MODE_BITSIZE (mode
).to_constant ();
2532 if (!vect_sizes
.is_empty ()
2533 && (index
= mask_exists (bitsize
, vect_sizes
)) != -1)
2534 /* Use created mask. */
2535 mask
= vect_masks
[index
];
2538 if (COMPARISON_CLASS_P (cond
))
2539 mask
= gimple_build (&stmts
, TREE_CODE (cond
),
2541 TREE_OPERAND (cond
, 0),
2542 TREE_OPERAND (cond
, 1));
2549 = constant_boolean_node (true, TREE_TYPE (mask
));
2550 mask
= gimple_build (&stmts
, BIT_XOR_EXPR
,
2551 TREE_TYPE (mask
), mask
, true_val
);
2553 gsi_insert_seq_before (&gsi
, stmts
, GSI_SAME_STMT
);
2555 /* Save mask and its size for further use. */
2556 vect_sizes
.safe_push (bitsize
);
2557 vect_masks
.safe_push (mask
);
2559 if (gimple_assign_single_p (stmt
))
2560 new_stmt
= predicate_load_or_store (&gsi
, stmt
, mask
);
2562 new_stmt
= predicate_rhs_code (stmt
, mask
, cond
, &ssa_names
);
2564 gsi_replace (&gsi
, new_stmt
, true);
2566 else if (gimple_vdef (stmt
))
2568 tree lhs
= gimple_assign_lhs (stmt
);
2569 tree rhs
= gimple_assign_rhs1 (stmt
);
2570 tree type
= TREE_TYPE (lhs
);
2572 lhs
= ifc_temp_var (type
, unshare_expr (lhs
), &gsi
);
2573 rhs
= ifc_temp_var (type
, unshare_expr (rhs
), &gsi
);
2575 std::swap (lhs
, rhs
);
2576 cond
= force_gimple_operand_gsi_1 (&gsi
, unshare_expr (cond
),
2577 is_gimple_condexpr
, NULL_TREE
,
2578 true, GSI_SAME_STMT
);
2579 rhs
= fold_build_cond_expr (type
, unshare_expr (cond
), rhs
, lhs
);
2580 gimple_assign_set_rhs1 (stmt
, ifc_temp_var (type
, rhs
, &gsi
));
2583 tree lhs
= gimple_get_lhs (gsi_stmt (gsi
));
2584 if (lhs
&& TREE_CODE (lhs
) == SSA_NAME
)
2585 ssa_names
.add (lhs
);
2592 /* Remove all GIMPLE_CONDs and GIMPLE_LABELs of all the basic blocks
2593 other than the exit and latch of the LOOP. Also resets the
2594 GIMPLE_DEBUG information. */
/* Strip GIMPLE_CONDs and GIMPLE_LABELs from each basic block of LOOP,
   except for blocks that source an exit edge and the latch, and reset
   the values of GIMPLE_DEBUG binds (their bound values may become
   invalid once the blocks are merged by combine_blocks).
   NOTE(review): this extract is missing interior lines (return type,
   braces, switch cases, the declaration of `i'); comments below
   describe only the statements that are visible.  */
2597 remove_conditions_and_labels (loop_p loop
)
2599 gimple_stmt_iterator gsi
;
/* Walk every block of the loop via the global ifc_bbs array.  */
2602 for (i
= 0; i
< loop
->num_nodes
; i
++)
2604 basic_block bb
= ifc_bbs
[i
];
/* Blocks with exit edges and the latch keep their control flow;
   their conditions must survive the merge.  */
2606 if (bb_with_exit_edge_p (loop
, bb
)
2607 || bb
== loop
->latch
)
/* Remove conds/labels (the elided switch cases); for debug binds,
   drop the bound value instead of the statement.  */
2610 for (gsi
= gsi_start_bb (bb
); !gsi_end_p (gsi
); )
2611 switch (gimple_code (gsi_stmt (gsi
)))
2615 gsi_remove (&gsi
, true);
2619 /* ??? Should there be conditional GIMPLE_DEBUG_BINDs? */
2620 if (gimple_debug_bind_p (gsi_stmt (gsi
)))
2622 gimple_debug_bind_reset_value (gsi_stmt (gsi
));
2623 update_stmt (gsi_stmt (gsi
));
2634 /* Combine all the basic blocks from LOOP into one or two super basic
2635 blocks. Replace PHI nodes with conditional modify expressions. */
/* Combine all basic blocks of LOOP into one (or two, when a separate
   exit block must be kept) superblock: predicate statements and PHIs,
   splice every block's statement list into the header, keep virtual
   SSA operands consistent while doing so, then delete the emptied
   blocks and rewire the remaining CFG edges and dominators.
   NOTE(review): this extract is missing interior lines (braces and
   declarations such as `i', `e', `ei', `use_stmt', plus several
   statements); comments describe only the visible code.  */
2638 combine_blocks (class loop
*loop
)
2640 basic_block bb
, exit_bb
, merge_target_bb
;
2641 unsigned int orig_loop_num_nodes
= loop
->num_nodes
;
/* First rewrite conditional code into predicated form.  */
2646 remove_conditions_and_labels (loop
);
2647 insert_gimplified_predicates (loop
);
2648 predicate_all_scalar_phis (loop
);
2650 if (need_to_predicate
)
2651 predicate_statements (loop
);
2653 /* Merge basic blocks. */
/* Record which blocks were actually predicated and find the single
   block carrying the loop's exit edge.  */
2655 bool *predicated
= XNEWVEC (bool, orig_loop_num_nodes
);
2656 for (i
= 0; i
< orig_loop_num_nodes
; i
++)
2659 predicated
[i
] = !is_true_predicate (bb_predicate (bb
));
2660 free_bb_predicate (bb
);
2661 if (bb_with_exit_edge_p (loop
, bb
))
2663 gcc_assert (exit_bb
== NULL
);
2667 gcc_assert (exit_bb
!= loop
->latch
);
/* All statements end up in the loop header.  */
2669 merge_target_bb
= loop
->header
;
2671 /* Get at the virtual def valid for uses starting at the first block
2672 we merge into the header. Without a virtual PHI the loop has the
2673 same virtual use on all stmts. */
2674 gphi
*vphi
= get_virtual_phi (loop
->header
);
2675 tree last_vdef
= NULL_TREE
;
2678 last_vdef
= gimple_phi_result (vphi
);
2679 for (gimple_stmt_iterator gsi
= gsi_start_bb (loop
->header
);
2680 ! gsi_end_p (gsi
); gsi_next (&gsi
))
2681 if (gimple_vdef (gsi_stmt (gsi
)))
2682 last_vdef
= gimple_vdef (gsi_stmt (gsi
));
/* Splice the statements of every non-header block (except the exit
   block and the latch) into the header, tracking LAST_VDEF.  */
2684 for (i
= 1; i
< orig_loop_num_nodes
; i
++)
2686 gimple_stmt_iterator gsi
;
2687 gimple_stmt_iterator last
;
2691 if (bb
== exit_bb
|| bb
== loop
->latch
)
2694 /* We release virtual PHIs late because we have to propagate them
2695 out using the current VUSE. The def might be the one used
2697 vphi
= get_virtual_phi (bb
);
2700 /* When there's just loads inside the loop a stray virtual
2701 PHI merging the uses can appear, update last_vdef from
2704 last_vdef
= gimple_phi_arg_def (vphi
, 0);
2705 imm_use_iterator iter
;
2706 use_operand_p use_p
;
/* Replace every use of the virtual PHI's result by LAST_VDEF,
   then remove the PHI.  */
2708 FOR_EACH_IMM_USE_STMT (use_stmt
, iter
, gimple_phi_result (vphi
))
2710 FOR_EACH_IMM_USE_ON_STMT (use_p
, iter
)
2711 SET_USE (use_p
, last_vdef
);
2713 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_phi_result (vphi
)))
2714 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (last_vdef
) = 1;
2715 gsi
= gsi_for_stmt (vphi
);
2716 remove_phi_node (&gsi
, true);
2719 /* Make stmts member of loop->header and clear range info from all stmts
2720 in BB which is now no longer executed conditional on a predicate we
2721 could have derived it from. */
2722 for (gsi
= gsi_start_bb (bb
); !gsi_end_p (gsi
); gsi_next (&gsi
))
2724 gimple
*stmt
= gsi_stmt (gsi
);
2725 gimple_set_bb (stmt
, merge_target_bb
);
2726 /* Update virtual operands. */
2729 use_operand_p use_p
= ssa_vuse_operand (stmt
);
2731 && USE_FROM_PTR (use_p
) != last_vdef
)
2732 SET_USE (use_p
, last_vdef
);
2733 if (gimple_vdef (stmt
))
2734 last_vdef
= gimple_vdef (stmt
);
2737 /* If this is the first load we arrive at update last_vdef
2738 so we handle stray PHIs correctly. */
2739 last_vdef
= gimple_vuse (stmt
);
/* Flow-sensitive info on defs is no longer valid once the stmt
   executes unconditionally.  */
2744 FOR_EACH_SSA_TREE_OPERAND (op
, stmt
, i
, SSA_OP_DEF
)
2745 reset_flow_sensitive_info (op
);
2749 /* Update stmt list. */
2750 last
= gsi_last_bb (merge_target_bb
);
2751 gsi_insert_seq_after_without_update (&last
, bb_seq (bb
), GSI_NEW_STMT
);
2752 set_bb_seq (bb
, NULL
);
2755 /* Fixup virtual operands in the exit block. */
2757 && exit_bb
!= loop
->header
)
2759 /* We release virtual PHIs late because we have to propagate them
2760 out using the current VUSE. The def might be the one used
2762 vphi
= get_virtual_phi (exit_bb
);
2765 /* When there's just loads inside the loop a stray virtual
2766 PHI merging the uses can appear, update last_vdef from
2769 last_vdef
= gimple_phi_arg_def (vphi
, 0);
2770 imm_use_iterator iter
;
2771 use_operand_p use_p
;
2773 FOR_EACH_IMM_USE_STMT (use_stmt
, iter
, gimple_phi_result (vphi
))
2775 FOR_EACH_IMM_USE_ON_STMT (use_p
, iter
)
2776 SET_USE (use_p
, last_vdef
);
2778 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_phi_result (vphi
)))
2779 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (last_vdef
) = 1;
2780 gimple_stmt_iterator gsi
= gsi_for_stmt (vphi
);
2781 remove_phi_node (&gsi
, true);
2785 /* Now remove all the edges in the loop, except for those from the exit
2786 block and delete the blocks we elided. */
2787 for (i
= 1; i
< orig_loop_num_nodes
; i
++)
2791 for (ei
= ei_start (bb
->preds
); (e
= ei_safe_edge (ei
));)
2793 if (e
->src
== exit_bb
)
2799 for (i
= 1; i
< orig_loop_num_nodes
; i
++)
2803 if (bb
== exit_bb
|| bb
== loop
->latch
)
2806 delete_basic_block (bb
);
2809 /* Re-connect the exit block. */
2810 if (exit_bb
!= NULL
)
2812 if (exit_bb
!= loop
->header
)
2814 /* Connect this node to loop header. */
2815 make_single_succ_edge (loop
->header
, exit_bb
, EDGE_FALLTHRU
);
2816 set_immediate_dominator (CDI_DOMINATORS
, exit_bb
, loop
->header
);
2819 /* Redirect non-exit edges to loop->latch. */
2820 FOR_EACH_EDGE (e
, ei
, exit_bb
->succs
)
2822 if (!loop_exit_edge_p (loop
, e
))
2823 redirect_edge_and_branch (e
, loop
->latch
);
2825 set_immediate_dominator (CDI_DOMINATORS
, loop
->latch
, exit_bb
);
2829 /* If the loop does not have an exit, reconnect header and latch. */
2830 make_edge (loop
->header
, loop
->latch
, EDGE_FALLTHRU
);
2831 set_immediate_dominator (CDI_DOMINATORS
, loop
->latch
, loop
->header
);
2834 /* If possible, merge loop header to the block with the exit edge.
2835 This reduces the number of basic blocks to two, to please the
2836 vectorizer that handles only loops with two nodes. */
2838 && exit_bb
!= loop
->header
)
2840 if (can_merge_blocks_p (loop
->header
, exit_bb
))
2841 merge_blocks (loop
->header
, exit_bb
);
2849 /* Version LOOP before if-converting it; the original loop
2850 will be if-converted, the new copy of the loop will not,
2851 and the LOOP_VECTORIZED internal call will be guarding which
2852 loop to execute. The vectorizer pass will fold this
2853 internal call into either true or false.
2855 Note that this function intentionally invalidates profile. Both edges
2856 out of LOOP_VECTORIZED must have 100% probability so the profile remains
2857 consistent after the condition is folded in the vectorizer. */
/* Version LOOP (see the comment above this function): duplicate it,
   guard the pair with an IFN_LOOP_VECTORIZED internal call whose
   result COND selects the copy to run, mark the new copy as not to be
   vectorized, record the call in *PREDS, and update SSA form.
   Returns the new loop copy (NULL on failure — the elided lines
   around original line 2893 handle that case).
   NOTE(review): this extract is missing interior lines (return type,
   braces, the declaration of `g', the second call argument at
   original line 2871); comments describe only the visible code.  */
2860 version_loop_for_if_conversion (class loop
*loop
, vec
<gimple
*> *preds
)
2862 basic_block cond_bb
;
2863 tree cond
= make_ssa_name (boolean_type_node
);
2864 class loop
*new_loop
;
2866 gimple_stmt_iterator gsi
;
2867 unsigned int save_length
;
/* Build IFN_LOOP_VECTORIZED (loop->num, <copy num, set below>) and
   capture its boolean result in COND.  */
2869 g
= gimple_build_call_internal (IFN_LOOP_VECTORIZED
, 2,
2870 build_int_cst (integer_type_node
, loop
->num
),
2872 gimple_call_set_lhs (g
, cond
);
2874 /* Save BB->aux around loop_version as that uses the same field. */
2875 save_length
= loop
->inner
? loop
->inner
->num_nodes
: loop
->num_nodes
;
2876 void **saved_preds
= XALLOCAVEC (void *, save_length
);
2877 for (unsigned i
= 0; i
< save_length
; i
++)
2878 saved_preds
[i
] = ifc_bbs
[i
]->aux
;
2880 initialize_original_copy_tables ();
2881 /* At this point we invalidate profile consistency until IFN_LOOP_VECTORIZED
2882 is re-merged in the vectorizer. */
2883 new_loop
= loop_version (loop
, cond
, &cond_bb
,
2884 profile_probability::always (),
2885 profile_probability::always (),
2886 profile_probability::always (),
2887 profile_probability::always (), true);
2888 free_original_copy_tables ();
/* Restore the BB->aux values clobbered by loop_version.  */
2890 for (unsigned i
= 0; i
< save_length
; i
++)
2891 ifc_bbs
[i
]->aux
= saved_preds
[i
];
2893 if (new_loop
== NULL
)
/* The copy keeps its original (non-if-converted) form.  */
2896 new_loop
->dont_vectorize
= true;
2897 new_loop
->force_vectorize
= false;
2898 gsi
= gsi_last_bb (cond_bb
);
/* Now the copy's loop number is known; patch it into arg 1.  */
2899 gimple_call_set_arg (g
, 1, build_int_cst (integer_type_node
, new_loop
->num
));
2901 preds
->safe_push (g
);
2902 gsi_insert_before (&gsi
, g
, GSI_SAME_STMT
);
2903 update_ssa (TODO_update_ssa
);
2907 /* Return true when LOOP satisfies the follow conditions that will
2908 allow it to be recognized by the vectorizer for outer-loop
2910 - The loop is not the root node of the loop tree.
2911 - The loop has exactly one inner loop.
2912 - The loop has a single exit.
2913 - The loop header has a single successor, which is the inner
2915 - Each of the inner and outer loop latches have a single
2917 - The loop exit block has a single predecessor, which is the
2918 inner loop's exit block. */
/* Return true iff LOOP matches the shape required for outer-loop
   vectorization versioning (see the comment above this function):
   it is a non-root loop with exactly one inner loop, a single exit,
   a header falling straight into the inner loop's header, and latch
   predecessors chained through the inner loop's exit block.
   NOTE(review): interior lines (braces, early returns, the dump_file
   guard before the fprintf) are elided in this extract.  */
2921 versionable_outer_loop_p (class loop
*loop
)
/* Reject any loop violating one of the structural requirements.  */
2923 if (!loop_outer (loop
)
2924 || loop
->dont_vectorize
2926 || loop
->inner
->next
2927 || !single_exit (loop
)
2928 || !single_succ_p (loop
->header
)
2929 || single_succ (loop
->header
) != loop
->inner
->header
2930 || !single_pred_p (loop
->latch
)
2931 || !single_pred_p (loop
->inner
->latch
))
/* The outer exit block must be fed solely by the inner loop's
   exit block.  */
2934 basic_block outer_exit
= single_pred (loop
->latch
);
2935 basic_block inner_exit
= single_pred (loop
->inner
->latch
);
2937 if (!single_pred_p (outer_exit
) || single_pred (outer_exit
) != inner_exit
)
2941 fprintf (dump_file
, "Found vectorizable outer loop for versioning\n");
2946 /* Performs splitting of critical edges. Skip splitting and return false
2947 if LOOP will not be converted because:
2949 - LOOP is not well formed.
2950 - LOOP has PHI with more than MAX_PHI_ARG_NUM arguments.
2952 Last restriction is valid only if AGGRESSIVE_IF_CONV is false. */
/* Split the critical edges of LOOP so every block can later be
   predicated along a non-critical edge; return false (via the elided
   early-return paths) when LOOP is malformed or — unless
   AGGRESSIVE_IF_CONV — has a block with more than MAX_PHI_ARG_NUM
   predecessors.  NOTE(review): this extract is missing interior
   lines (return type, braces, declarations of `i', `bb', `body',
   `stmt', `e', `ei', the fprintf's first argument, and the tail that
   performs the actual split_edge calls); comments describe only the
   visible code.  */
2955 ifcvt_split_critical_edges (class loop
*loop
, bool aggressive_if_conv
)
2959 unsigned int num
= loop
->num_nodes
;
2964 auto_vec
<edge
> critical_edges
;
2966 /* Loop is not well formed. */
2967 if (num
<= 2 || loop
->inner
|| !single_exit (loop
))
/* Scan each block, collecting critical successor edges of
   conditional branches that stay inside the loop.  */
2970 body
= get_loop_body (loop
);
2971 for (i
= 0; i
< num
; i
++)
2974 if (!aggressive_if_conv
2976 && EDGE_COUNT (bb
->preds
) > MAX_PHI_ARG_NUM
)
2978 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
2980 "BB %d has complicated PHI with more than %u args.\n",
2981 bb
->index
, MAX_PHI_ARG_NUM
);
2986 if (bb
== loop
->latch
|| bb_with_exit_edge_p (loop
, bb
))
2989 stmt
= last_stmt (bb
);
2990 /* Skip basic blocks not ending with conditional branch. */
2991 if (!stmt
|| gimple_code (stmt
) != GIMPLE_COND
)
2994 FOR_EACH_EDGE (e
, ei
, bb
->succs
)
2995 if (EDGE_CRITICAL_P (e
) && e
->dest
->loop_father
== loop
)
2996 critical_edges
.safe_push (e
);
/* Process the collected edges (split calls are in elided lines).  */
3000 while (critical_edges
.length () > 0)
3002 e
= critical_edges
.pop ();
3003 /* Don't split if bb can be predicated along non-critical edge. */
3004 if (EDGE_COUNT (e
->dest
->preds
) > 2 || all_preds_critical_p (e
->dest
))
3011 /* Delete redundant statements produced by predication which prevents
3012 loop vectorization. */
/* Simple liveness-based dead-code elimination over the single block
   the if-converted LOOP now consists of: seed PHIs, loads/stores,
   calls and conds as live (via the GF_PLF_2 plf flag), propagate
   liveness backwards through SSA uses with a worklist, then walk the
   block backwards deleting statements never marked live; dead stores
   are additionally detected with dse_classify_store.
   NOTE(review): this extract is missing interior lines (return type,
   braces, declarations of `stmt', `stmt1', `phi', `use', `iter',
   `write', several continue/else paths); comments describe only the
   visible code.  */
3015 ifcvt_local_dce (class loop
*loop
)
3020 gimple_stmt_iterator gsi
;
3021 auto_vec
<gimple
*> worklist
;
3022 enum gimple_code code
;
3023 use_operand_p use_p
;
3024 imm_use_iterator imm_iter
;
3026 /* The loop has a single BB only. */
3027 basic_block bb
= loop
->header
;
3028 tree latch_vdef
= NULL_TREE
;
3030 worklist
.create (64);
3031 /* Consider all phi as live statements. */
3032 for (gsi
= gsi_start_phis (bb
); !gsi_end_p (gsi
); gsi_next (&gsi
))
3034 phi
= gsi_stmt (gsi
);
3035 gimple_set_plf (phi
, GF_PLF_2
, true);
3036 worklist
.safe_push (phi
);
/* Remember the virtual def flowing over the latch; used below to
   classify dead stores.  */
3037 if (virtual_operand_p (gimple_phi_result (phi
)))
3038 latch_vdef
= PHI_ARG_DEF_FROM_EDGE (phi
, loop_latch_edge (loop
));
3040 /* Consider load/store statements, CALL and COND as live. */
3041 for (gsi
= gsi_start_bb (bb
); !gsi_end_p (gsi
); gsi_next (&gsi
))
3043 stmt
= gsi_stmt (gsi
);
3044 if (is_gimple_debug (stmt
))
3046 gimple_set_plf (stmt
, GF_PLF_2
, true);
3049 if (gimple_store_p (stmt
) || gimple_assign_load_p (stmt
))
3051 gimple_set_plf (stmt
, GF_PLF_2
, true);
3052 worklist
.safe_push (stmt
);
3055 code
= gimple_code (stmt
);
3056 if (code
== GIMPLE_COND
|| code
== GIMPLE_CALL
)
3058 gimple_set_plf (stmt
, GF_PLF_2
, true);
3059 worklist
.safe_push (stmt
);
/* Everything else starts out dead...  */
3062 gimple_set_plf (stmt
, GF_PLF_2
, false);
/* ...unless its value escapes to a statement outside this block.  */
3064 if (code
== GIMPLE_ASSIGN
)
3066 tree lhs
= gimple_assign_lhs (stmt
);
3067 FOR_EACH_IMM_USE_FAST (use_p
, imm_iter
, lhs
)
3069 stmt1
= USE_STMT (use_p
);
3070 if (!is_gimple_debug (stmt1
) && gimple_bb (stmt1
) != bb
)
3072 gimple_set_plf (stmt
, GF_PLF_2
, true);
3073 worklist
.safe_push (stmt
);
3079 /* Propagate liveness through arguments of live stmt. */
3080 while (worklist
.length () > 0)
3083 use_operand_p use_p
;
3086 stmt
= worklist
.pop ();
3087 FOR_EACH_PHI_OR_STMT_USE (use_p
, stmt
, iter
, SSA_OP_USE
)
3089 use
= USE_FROM_PTR (use_p
);
3090 if (TREE_CODE (use
) != SSA_NAME
)
3092 stmt1
= SSA_NAME_DEF_STMT (use
);
/* Skip defs outside the block or already marked live.  */
3093 if (gimple_bb (stmt1
) != bb
|| gimple_plf (stmt1
, GF_PLF_2
))
3095 gimple_set_plf (stmt1
, GF_PLF_2
, true);
3096 worklist
.safe_push (stmt1
);
3099 /* Delete dead statements. */
3100 gsi
= gsi_last_bb (bb
);
3101 while (!gsi_end_p (gsi
))
3103 gimple_stmt_iterator gsiprev
= gsi
;
3104 gsi_prev (&gsiprev
);
3105 stmt
= gsi_stmt (gsi
);
/* Stores marked live may still be dead to memory — ask the DSE
   machinery, using the latch virtual def as the walk's stop point.  */
3106 if (gimple_store_p (stmt
))
3108 tree lhs
= gimple_get_lhs (stmt
);
3110 ao_ref_init (&write
, lhs
);
3112 if (dse_classify_store (&write
, stmt
, false, NULL
, NULL
, latch_vdef
)
3114 delete_dead_or_redundant_assignment (&gsi
, "dead");
3119 if (gimple_plf (stmt
, GF_PLF_2
))
3124 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
3126 fprintf (dump_file
, "Delete dead stmt in bb#%d\n", bb
->index
);
3127 print_gimple_stmt (dump_file
, stmt
, 0, TDF_SLIM
);
3129 gsi_remove (&gsi
, true);
3130 release_defs (stmt
);
3135 /* If-convert LOOP when it is legal. For the moment this pass has no
3136 profitability analysis. Returns non-zero todo flags when something
/* If-convert LOOP when legal (see comment above this function):
   split critical edges, check convertibility, optionally version the
   loop (or its versionable outer loop) guarded by IFN_LOOP_VECTORIZED,
   combine the blocks into one, then run local value numbering and DCE
   over the result.  Records versioning calls in *PREDS and returns
   accumulated TODO flags.
   NOTE(review): this extract is missing interior lines (braces, goto
   cleanup paths, declarations of `i', `rloop', `vloop', `exit_bbs',
   and the function's return); comments describe only the visible
   code.  */
3140 tree_if_conversion (class loop
*loop
, vec
<gimple
*> *preds
)
3142 unsigned int todo
= 0;
3143 bool aggressive_if_conv
;
/* Reset the per-loop globals consulted by the analysis phase.  */
3150 need_to_predicate
= false;
3151 any_complicated_phi
= false;
3153 /* Apply more aggressive if-conversion when loop or its outer loop were
3154 marked with simd pragma. When that's the case, we try to if-convert
3155 loop containing PHIs with more than MAX_PHI_ARG_NUM arguments. */
3156 aggressive_if_conv
= loop
->force_vectorize
;
3157 if (!aggressive_if_conv
)
3159 class loop
*outer_loop
= loop_outer (loop
);
3160 if (outer_loop
&& outer_loop
->force_vectorize
)
3161 aggressive_if_conv
= true;
3164 if (!ifcvt_split_critical_edges (loop
, aggressive_if_conv
))
3167 if (!if_convertible_loop_p (loop
)
3168 || !dbg_cnt (if_conversion_tree
))
/* Predication/complicated PHIs need a fallback copy; bail out if
   vectorization of this loop is off anyway.  */
3171 if ((need_to_predicate
|| any_complicated_phi
)
3172 && ((!flag_tree_loop_vectorize
&& !loop
->force_vectorize
)
3173 || loop
->dont_vectorize
))
3176 /* Since we have no cost model, always version loops unless the user
3177 specified -ftree-loop-if-convert or unless versioning is required.
3178 Either version this loop, or if the pattern is right for outer-loop
3179 vectorization, version the outer loop. In the latter case we will
3180 still if-convert the original inner loop. */
3181 if (need_to_predicate
3182 || any_complicated_phi
3183 || flag_tree_loop_if_convert
!= 1)
3186 = (versionable_outer_loop_p (loop_outer (loop
))
3187 ? loop_outer (loop
) : loop
);
3188 class loop
*nloop
= version_loop_for_if_conversion (vloop
, preds
);
3193 /* If versionable_outer_loop_p decided to version the
3194 outer loop, version also the inner loop of the non-vectorized
3195 loop copy. So we transform:
3199 if (LOOP_VECTORIZED (1, 3))
3205 loop3 (copy of loop1)
3206 if (LOOP_VECTORIZED (4, 5))
3207 loop4 (copy of loop2)
3209 loop5 (copy of loop4) */
3210 gcc_assert (nloop
->inner
&& nloop
->inner
->next
== NULL
);
3211 rloop
= nloop
->inner
;
3215 /* Now all statements are if-convertible. Combine all the basic
3216 blocks into one huge basic block doing the if-conversion
3218 combine_blocks (loop
);
3220 /* Perform local CSE, this esp. helps the vectorizer analysis if loads
3221 and stores are involved. CSE only the loop body, not the entry
3222 PHIs, those are to be kept in sync with the non-if-converted copy.
3223 ??? We'll still keep dead stores though. */
3224 exit_bbs
= BITMAP_ALLOC (NULL
);
3225 bitmap_set_bit (exit_bbs
, single_exit (loop
)->dest
->index
);
3226 bitmap_set_bit (exit_bbs
, loop
->latch
->index
);
/* Propagate SSA copies recorded as redundant during predication.  */
3228 std::pair
<tree
, tree
> *name_pair
;
3229 unsigned ssa_names_idx
;
3230 FOR_EACH_VEC_ELT (redundant_ssa_names
, ssa_names_idx
, name_pair
)
3231 replace_uses_by (name_pair
->first
, name_pair
->second
);
3232 redundant_ssa_names
.release ();
3234 todo
|= do_rpo_vn (cfun
, loop_preheader_edge (loop
), exit_bbs
);
3236 /* Delete dead predicate computations. */
3237 ifcvt_local_dce (loop
);
3238 BITMAP_FREE (exit_bbs
);
3240 todo
|= TODO_cleanup_cfg
;
/* Cleanup (elided label above): release per-block predicates.  */
3247 for (i
= 0; i
< loop
->num_nodes
; i
++)
3248 free_bb_predicate (ifc_bbs
[i
]);
3262 /* Tree if-conversion pass management. */
/* Pass metadata for the tree if-conversion pass; requires CFG and SSA
   properties and requests no TODOs itself (tree_if_conversion returns
   them dynamically).  NOTE(review): the initializer's braces and the
   name/has_gate-style fields are elided in this extract.  */
3266 const pass_data pass_data_if_conversion
=
3268 GIMPLE_PASS
, /* type */
3270 OPTGROUP_NONE
, /* optinfo_flags */
3271 TV_TREE_LOOP_IFCVT
, /* tv_id */
3272 ( PROP_cfg
| PROP_ssa
), /* properties_required */
3273 0, /* properties_provided */
3274 0, /* properties_destroyed */
3275 0, /* todo_flags_start */
3276 0, /* todo_flags_finish */
/* Gimple opt-pass wrapper around tree if-conversion: gate () decides
   whether the pass runs, execute () performs it (both defined below).  */
3279 class pass_if_conversion
: public gimple_opt_pass
3282 pass_if_conversion (gcc::context
*ctxt
)
3283 : gimple_opt_pass (pass_data_if_conversion
, ctxt
)
3286 /* opt_pass methods: */
3287 virtual bool gate (function
*);
3288 virtual unsigned int execute (function
*);
3290 }; // class pass_if_conversion
/* Run the pass when loop vectorization is enabled (globally or forced
   for some loop in FUN) and -ftree-loop-if-convert is not explicitly
   disabled, or when -ftree-loop-if-convert is explicitly enabled.  */
3293 pass_if_conversion::gate (function
*fun
)
3295 return (((flag_tree_loop_vectorize
|| fun
->has_force_vectorize_loops
)
3296 && flag_tree_loop_if_convert
!= 0)
3297 || flag_tree_loop_if_convert
== 1);
/* Main entry point: if-convert every eligible loop of FUN, then clean
   up the CFG/SSA and fold to false any recorded LOOP_VECTORIZED call
   whose if-converted loop was elided by the cleanup.
   NOTE(review): this extract is missing interior lines (braces, the
   declarations of `todo' and `bb', early return, checking-only
   guards, and the final return); comments describe only the visible
   code.  */
3301 pass_if_conversion::execute (function
*fun
)
3305 if (number_of_loops (fun
) <= 1)
3308 auto_vec
<gimple
*> preds
;
3309 for (auto loop
: loops_list (cfun
, 0))
3310 if (flag_tree_loop_if_convert
== 1
3311 || ((flag_tree_loop_vectorize
|| loop
->force_vectorize
)
3312 && !loop
->dont_vectorize
))
3313 todo
|= tree_if_conversion (loop
, &preds
);
3317 free_numbers_of_iterations_estimates (fun
);
/* Sanity check: no BB->aux left behind by the conversion.  */
3324 FOR_EACH_BB_FN (bb
, fun
)
3325 gcc_assert (!bb
->aux
);
3328 /* Perform IL update now, it might elide some loops. */
3329 if (todo
& TODO_cleanup_cfg
)
3331 cleanup_tree_cfg ();
3332 if (need_ssa_update_p (fun
))
3333 todo
|= TODO_update_ssa
;
3335 if (todo
& TODO_update_ssa_any
)
3336 update_ssa (todo
& TODO_update_ssa_any
);
3338 /* If if-conversion elided the loop fall back to the original one. */
3339 for (unsigned i
= 0; i
< preds
.length (); ++i
)
3341 gimple
*g
= preds
[i
];
/* Arg 0 of the LOOP_VECTORIZED call is the if-converted loop's
   number; if that loop no longer exists, select the original copy.  */
3344 unsigned ifcvt_loop
= tree_to_uhwi (gimple_call_arg (g
, 0));
3345 if (!get_loop (fun
, ifcvt_loop
))
3348 fprintf (dump_file
, "If-converted loop vanished\n");
3349 fold_loop_internal_call (g
, boolean_false_node
);
3359 make_pass_if_conversion (gcc::context
*ctxt
)
3361 return new pass_if_conversion (ctxt
);