1 /* If-conversion for vectorizer.
2 Copyright (C) 2004-2018 Free Software Foundation, Inc.
3 Contributed by Devang Patel <dpatel@apple.com>
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 /* This pass implements a tree level if-conversion of loops. Its
22 initial goal is to help the vectorizer to vectorize loops with
25 A short description of if-conversion:
27 o Decide if a loop is if-convertible or not.
28 o Walk all loop basic blocks in breadth first order (BFS order).
29 o Remove conditional statements (at the end of basic block)
30 and propagate condition into destination basic blocks'
32 o Replace modify expression with conditional modify expression
33 using current basic block's condition.
34 o Merge all basic blocks
35 o Replace phi nodes with conditional modify expr
36 o Merge all basic blocks into header
38 Sample transformation:
43 # i_23 = PHI <0(0), i_18(10)>;
46 if (j_15 > 41) goto <L1>; else goto <L17>;
53 # iftmp.2_4 = PHI <0(8), 42(2)>;
57 if (i_18 <= 15) goto <L19>; else goto <L18>;
67 # i_23 = PHI <0(0), i_18(10)>;
72 iftmp.2_4 = j_15 > 41 ? 42 : 0;
75 if (i_18 <= 15) goto <L19>; else goto <L18>;
85 #include "coretypes.h"
91 #include "tree-pass.h"
94 #include "optabs-query.h"
95 #include "gimple-pretty-print.h"
97 #include "fold-const.h"
98 #include "stor-layout.h"
99 #include "gimple-fold.h"
100 #include "gimplify.h"
101 #include "gimple-iterator.h"
102 #include "gimplify-me.h"
103 #include "tree-cfg.h"
104 #include "tree-into-ssa.h"
105 #include "tree-ssa.h"
107 #include "tree-data-ref.h"
108 #include "tree-scalar-evolution.h"
109 #include "tree-ssa-loop.h"
110 #include "tree-ssa-loop-niter.h"
111 #include "tree-ssa-loop-ivopts.h"
112 #include "tree-ssa-address.h"
114 #include "tree-hash-traits.h"
116 #include "builtins.h"
/* Only handle PHIs with no more arguments unless we are asked to by
   the param.  */
#define MAX_PHI_ARG_NUM \
  ((unsigned) PARAM_VALUE (PARAM_MAX_TREE_IF_CONVERSION_PHI_ARGS))

/* Indicate if new load/store that needs to be predicated is introduced
   during if conversion.  */
static bool any_pred_load_store;

/* Indicate if there are any complicated PHIs that need to be handled in
   if-conversion.  Complicated PHI has more than two arguments and can't
   be degenerated to two arguments PHI.  See more information in comment
   before phi_convertible_by_degenerating_args.  */
static bool any_complicated_phi;
135 /* Hash for struct innermost_loop_behavior. It depends on the user to
138 struct innermost_loop_behavior_hash
: nofree_ptr_hash
<innermost_loop_behavior
>
140 static inline hashval_t
hash (const value_type
&);
141 static inline bool equal (const value_type
&,
142 const compare_type
&);
146 innermost_loop_behavior_hash::hash (const value_type
&e
)
150 hash
= iterative_hash_expr (e
->base_address
, 0);
151 hash
= iterative_hash_expr (e
->offset
, hash
);
152 hash
= iterative_hash_expr (e
->init
, hash
);
153 return iterative_hash_expr (e
->step
, hash
);
157 innermost_loop_behavior_hash::equal (const value_type
&e1
,
158 const compare_type
&e2
)
160 if ((e1
->base_address
&& !e2
->base_address
)
161 || (!e1
->base_address
&& e2
->base_address
)
162 || (!e1
->offset
&& e2
->offset
)
163 || (e1
->offset
&& !e2
->offset
)
164 || (!e1
->init
&& e2
->init
)
165 || (e1
->init
&& !e2
->init
)
166 || (!e1
->step
&& e2
->step
)
167 || (e1
->step
&& !e2
->step
))
170 if (e1
->base_address
&& e2
->base_address
171 && !operand_equal_p (e1
->base_address
, e2
->base_address
, 0))
173 if (e1
->offset
&& e2
->offset
174 && !operand_equal_p (e1
->offset
, e2
->offset
, 0))
176 if (e1
->init
&& e2
->init
177 && !operand_equal_p (e1
->init
, e2
->init
, 0))
179 if (e1
->step
&& e2
->step
180 && !operand_equal_p (e1
->step
, e2
->step
, 0))
186 /* List of basic blocks in if-conversion-suitable order. */
187 static basic_block
*ifc_bbs
;
189 /* Hash table to store <DR's innermost loop behavior, DR> pairs. */
190 static hash_map
<innermost_loop_behavior_hash
,
191 data_reference_p
> *innermost_DR_map
;
193 /* Hash table to store <base reference, DR> pairs. */
194 static hash_map
<tree_operand_hash
, data_reference_p
> *baseref_DR_map
;
196 /* Structure used to predicate basic blocks. This is attached to the
197 ->aux field of the BBs in the loop to be if-converted. */
198 struct bb_predicate
{
200 /* The condition under which this basic block is executed. */
203 /* PREDICATE is gimplified, and the sequence of statements is
204 recorded here, in order to avoid the duplication of computations
205 that occur in previous conditions. See PR44483. */
206 gimple_seq predicate_gimplified_stmts
;
209 /* Returns true when the basic block BB has a predicate. */
212 bb_has_predicate (basic_block bb
)
214 return bb
->aux
!= NULL
;
217 /* Returns the gimplified predicate for basic block BB. */
220 bb_predicate (basic_block bb
)
222 return ((struct bb_predicate
*) bb
->aux
)->predicate
;
225 /* Sets the gimplified predicate COND for basic block BB. */
228 set_bb_predicate (basic_block bb
, tree cond
)
230 gcc_assert ((TREE_CODE (cond
) == TRUTH_NOT_EXPR
231 && is_gimple_condexpr (TREE_OPERAND (cond
, 0)))
232 || is_gimple_condexpr (cond
));
233 ((struct bb_predicate
*) bb
->aux
)->predicate
= cond
;
236 /* Returns the sequence of statements of the gimplification of the
237 predicate for basic block BB. */
239 static inline gimple_seq
240 bb_predicate_gimplified_stmts (basic_block bb
)
242 return ((struct bb_predicate
*) bb
->aux
)->predicate_gimplified_stmts
;
245 /* Sets the sequence of statements STMTS of the gimplification of the
246 predicate for basic block BB. */
249 set_bb_predicate_gimplified_stmts (basic_block bb
, gimple_seq stmts
)
251 ((struct bb_predicate
*) bb
->aux
)->predicate_gimplified_stmts
= stmts
;
254 /* Adds the sequence of statements STMTS to the sequence of statements
255 of the predicate for basic block BB. */
258 add_bb_predicate_gimplified_stmts (basic_block bb
, gimple_seq stmts
)
260 /* We might have updated some stmts in STMTS via force_gimple_operand
261 calling fold_stmt and that producing multiple stmts. Delink immediate
262 uses so update_ssa after loop versioning doesn't get confused for
263 the not yet inserted predicates.
264 ??? This should go away once we reliably avoid updating stmts
266 for (gimple_stmt_iterator gsi
= gsi_start (stmts
);
267 !gsi_end_p (gsi
); gsi_next (&gsi
))
269 gimple
*stmt
= gsi_stmt (gsi
);
270 delink_stmt_imm_use (stmt
);
271 gimple_set_modified (stmt
, true);
273 gimple_seq_add_seq_without_update
274 (&(((struct bb_predicate
*) bb
->aux
)->predicate_gimplified_stmts
), stmts
);
277 /* Initializes to TRUE the predicate of basic block BB. */
280 init_bb_predicate (basic_block bb
)
282 bb
->aux
= XNEW (struct bb_predicate
);
283 set_bb_predicate_gimplified_stmts (bb
, NULL
);
284 set_bb_predicate (bb
, boolean_true_node
);
287 /* Release the SSA_NAMEs associated with the predicate of basic block BB. */
290 release_bb_predicate (basic_block bb
)
292 gimple_seq stmts
= bb_predicate_gimplified_stmts (bb
);
295 /* Ensure that these stmts haven't yet been added to a bb. */
297 for (gimple_stmt_iterator i
= gsi_start (stmts
);
298 !gsi_end_p (i
); gsi_next (&i
))
299 gcc_assert (! gimple_bb (gsi_stmt (i
)));
302 gimple_seq_discard (stmts
);
303 set_bb_predicate_gimplified_stmts (bb
, NULL
);
307 /* Free the predicate of basic block BB. */
310 free_bb_predicate (basic_block bb
)
312 if (!bb_has_predicate (bb
))
315 release_bb_predicate (bb
);
320 /* Reinitialize predicate of BB with the true predicate. */
323 reset_bb_predicate (basic_block bb
)
325 if (!bb_has_predicate (bb
))
326 init_bb_predicate (bb
);
329 release_bb_predicate (bb
);
330 set_bb_predicate (bb
, boolean_true_node
);
334 /* Returns a new SSA_NAME of type TYPE that is assigned the value of
335 the expression EXPR. Inserts the statement created for this
336 computation before GSI and leaves the iterator GSI at the same
340 ifc_temp_var (tree type
, tree expr
, gimple_stmt_iterator
*gsi
)
342 tree new_name
= make_temp_ssa_name (type
, NULL
, "_ifc_");
343 gimple
*stmt
= gimple_build_assign (new_name
, expr
);
344 gimple_set_vuse (stmt
, gimple_vuse (gsi_stmt (*gsi
)));
345 gsi_insert_before (gsi
, stmt
, GSI_SAME_STMT
);
349 /* Return true when COND is a false predicate. */
352 is_false_predicate (tree cond
)
354 return (cond
!= NULL_TREE
355 && (cond
== boolean_false_node
356 || integer_zerop (cond
)));
359 /* Return true when COND is a true predicate. */
362 is_true_predicate (tree cond
)
364 return (cond
== NULL_TREE
365 || cond
== boolean_true_node
366 || integer_onep (cond
));
369 /* Returns true when BB has a predicate that is not trivial: true or
373 is_predicated (basic_block bb
)
375 return !is_true_predicate (bb_predicate (bb
));
378 /* Parses the predicate COND and returns its comparison code and
379 operands OP0 and OP1. */
381 static enum tree_code
382 parse_predicate (tree cond
, tree
*op0
, tree
*op1
)
386 if (TREE_CODE (cond
) == SSA_NAME
387 && is_gimple_assign (s
= SSA_NAME_DEF_STMT (cond
)))
389 if (TREE_CODE_CLASS (gimple_assign_rhs_code (s
)) == tcc_comparison
)
391 *op0
= gimple_assign_rhs1 (s
);
392 *op1
= gimple_assign_rhs2 (s
);
393 return gimple_assign_rhs_code (s
);
396 else if (gimple_assign_rhs_code (s
) == TRUTH_NOT_EXPR
)
398 tree op
= gimple_assign_rhs1 (s
);
399 tree type
= TREE_TYPE (op
);
400 enum tree_code code
= parse_predicate (op
, op0
, op1
);
402 return code
== ERROR_MARK
? ERROR_MARK
403 : invert_tree_comparison (code
, HONOR_NANS (type
));
409 if (COMPARISON_CLASS_P (cond
))
411 *op0
= TREE_OPERAND (cond
, 0);
412 *op1
= TREE_OPERAND (cond
, 1);
413 return TREE_CODE (cond
);
419 /* Returns the fold of predicate C1 OR C2 at location LOC. */
422 fold_or_predicates (location_t loc
, tree c1
, tree c2
)
424 tree op1a
, op1b
, op2a
, op2b
;
425 enum tree_code code1
= parse_predicate (c1
, &op1a
, &op1b
);
426 enum tree_code code2
= parse_predicate (c2
, &op2a
, &op2b
);
428 if (code1
!= ERROR_MARK
&& code2
!= ERROR_MARK
)
430 tree t
= maybe_fold_or_comparisons (code1
, op1a
, op1b
,
436 return fold_build2_loc (loc
, TRUTH_OR_EXPR
, boolean_type_node
, c1
, c2
);
439 /* Returns either a COND_EXPR or the folded expression if the folded
440 expression is a MIN_EXPR, a MAX_EXPR, an ABS_EXPR,
441 a constant or a SSA_NAME. */
444 fold_build_cond_expr (tree type
, tree cond
, tree rhs
, tree lhs
)
446 tree rhs1
, lhs1
, cond_expr
;
448 /* If COND is comparison r != 0 and r has boolean type, convert COND
449 to SSA_NAME to accept by vect bool pattern. */
450 if (TREE_CODE (cond
) == NE_EXPR
)
452 tree op0
= TREE_OPERAND (cond
, 0);
453 tree op1
= TREE_OPERAND (cond
, 1);
454 if (TREE_CODE (op0
) == SSA_NAME
455 && TREE_CODE (TREE_TYPE (op0
)) == BOOLEAN_TYPE
456 && (integer_zerop (op1
)))
459 cond_expr
= fold_ternary (COND_EXPR
, type
, cond
, rhs
, lhs
);
461 if (cond_expr
== NULL_TREE
)
462 return build3 (COND_EXPR
, type
, cond
, rhs
, lhs
);
464 STRIP_USELESS_TYPE_CONVERSION (cond_expr
);
466 if (is_gimple_val (cond_expr
))
469 if (TREE_CODE (cond_expr
) == ABS_EXPR
)
471 rhs1
= TREE_OPERAND (cond_expr
, 1);
472 STRIP_USELESS_TYPE_CONVERSION (rhs1
);
473 if (is_gimple_val (rhs1
))
474 return build1 (ABS_EXPR
, type
, rhs1
);
477 if (TREE_CODE (cond_expr
) == MIN_EXPR
478 || TREE_CODE (cond_expr
) == MAX_EXPR
)
480 lhs1
= TREE_OPERAND (cond_expr
, 0);
481 STRIP_USELESS_TYPE_CONVERSION (lhs1
);
482 rhs1
= TREE_OPERAND (cond_expr
, 1);
483 STRIP_USELESS_TYPE_CONVERSION (rhs1
);
484 if (is_gimple_val (rhs1
) && is_gimple_val (lhs1
))
485 return build2 (TREE_CODE (cond_expr
), type
, lhs1
, rhs1
);
487 return build3 (COND_EXPR
, type
, cond
, rhs
, lhs
);
490 /* Add condition NC to the predicate list of basic block BB. LOOP is
491 the loop to be if-converted. Use predicate of cd-equivalent block
492 for join bb if it exists: we call basic blocks bb1 and bb2
493 cd-equivalent if they are executed under the same condition. */
496 add_to_predicate_list (struct loop
*loop
, basic_block bb
, tree nc
)
501 if (is_true_predicate (nc
))
504 /* If dominance tells us this basic block is always executed,
505 don't record any predicates for it. */
506 if (dominated_by_p (CDI_DOMINATORS
, loop
->latch
, bb
))
509 dom_bb
= get_immediate_dominator (CDI_DOMINATORS
, bb
);
510 /* We use notion of cd equivalence to get simpler predicate for
511 join block, e.g. if join block has 2 predecessors with predicates
512 p1 & p2 and p1 & !p2, we'd like to get p1 for it instead of
513 p1 & p2 | p1 & !p2. */
514 if (dom_bb
!= loop
->header
515 && get_immediate_dominator (CDI_POST_DOMINATORS
, dom_bb
) == bb
)
517 gcc_assert (flow_bb_inside_loop_p (loop
, dom_bb
));
518 bc
= bb_predicate (dom_bb
);
519 if (!is_true_predicate (bc
))
520 set_bb_predicate (bb
, bc
);
522 gcc_assert (is_true_predicate (bb_predicate (bb
)));
523 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
524 fprintf (dump_file
, "Use predicate of bb#%d for bb#%d\n",
525 dom_bb
->index
, bb
->index
);
529 if (!is_predicated (bb
))
533 bc
= bb_predicate (bb
);
534 bc
= fold_or_predicates (EXPR_LOCATION (bc
), nc
, bc
);
535 if (is_true_predicate (bc
))
537 reset_bb_predicate (bb
);
542 /* Allow a TRUTH_NOT_EXPR around the main predicate. */
543 if (TREE_CODE (bc
) == TRUTH_NOT_EXPR
)
544 tp
= &TREE_OPERAND (bc
, 0);
547 if (!is_gimple_condexpr (*tp
))
550 *tp
= force_gimple_operand_1 (*tp
, &stmts
, is_gimple_condexpr
, NULL_TREE
);
551 add_bb_predicate_gimplified_stmts (bb
, stmts
);
553 set_bb_predicate (bb
, bc
);
556 /* Add the condition COND to the previous condition PREV_COND, and add
557 this to the predicate list of the destination of edge E. LOOP is
558 the loop to be if-converted. */
561 add_to_dst_predicate_list (struct loop
*loop
, edge e
,
562 tree prev_cond
, tree cond
)
564 if (!flow_bb_inside_loop_p (loop
, e
->dest
))
567 if (!is_true_predicate (prev_cond
))
568 cond
= fold_build2 (TRUTH_AND_EXPR
, boolean_type_node
,
571 if (!dominated_by_p (CDI_DOMINATORS
, loop
->latch
, e
->dest
))
572 add_to_predicate_list (loop
, e
->dest
, cond
);
575 /* Return true if one of the successor edges of BB exits LOOP. */
578 bb_with_exit_edge_p (struct loop
*loop
, basic_block bb
)
583 FOR_EACH_EDGE (e
, ei
, bb
->succs
)
584 if (loop_exit_edge_p (loop
, e
))
590 /* Given PHI which has more than two arguments, this function checks if
591 it's if-convertible by degenerating its arguments. Specifically, if
592 below two conditions are satisfied:
594 1) Number of PHI arguments with different values equals to 2 and one
595 argument has the only occurrence.
596 2) The edge corresponding to the unique argument isn't critical edge.
598 Such PHI can be handled as PHIs have only two arguments. For example,
601 res = PHI <A_1(e1), A_1(e2), A_2(e3)>;
603 can be transformed into:
605 res = (predicate of e3) ? A_2 : A_1;
607 Return TRUE if it is the case, FALSE otherwise. */
610 phi_convertible_by_degenerating_args (gphi
*phi
)
613 tree arg
, t1
= NULL
, t2
= NULL
;
614 unsigned int i
, i1
= 0, i2
= 0, n1
= 0, n2
= 0;
615 unsigned int num_args
= gimple_phi_num_args (phi
);
617 gcc_assert (num_args
> 2);
619 for (i
= 0; i
< num_args
; i
++)
621 arg
= gimple_phi_arg_def (phi
, i
);
622 if (t1
== NULL
|| operand_equal_p (t1
, arg
, 0))
628 else if (t2
== NULL
|| operand_equal_p (t2
, arg
, 0))
638 if (n1
!= 1 && n2
!= 1)
641 /* Check if the edge corresponding to the unique arg is critical. */
642 e
= gimple_phi_arg_edge (phi
, (n1
== 1) ? i1
: i2
);
643 if (EDGE_COUNT (e
->src
->succs
) > 1)
649 /* Return true when PHI is if-convertible. PHI is part of loop LOOP
650 and it belongs to basic block BB. Note at this point, it is sure
651 that PHI is if-convertible. This function updates global variable
652 ANY_COMPLICATED_PHI if PHI is complicated. */
655 if_convertible_phi_p (struct loop
*loop
, basic_block bb
, gphi
*phi
)
657 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
659 fprintf (dump_file
, "-------------------------\n");
660 print_gimple_stmt (dump_file
, phi
, 0, TDF_SLIM
);
663 if (bb
!= loop
->header
664 && gimple_phi_num_args (phi
) > 2
665 && !phi_convertible_by_degenerating_args (phi
))
666 any_complicated_phi
= true;
671 /* Records the status of a data reference. This struct is attached to
672 each DR->aux field. */
675 bool rw_unconditionally
;
676 bool w_unconditionally
;
677 bool written_at_least_once
;
681 tree base_w_predicate
;
684 #define IFC_DR(DR) ((struct ifc_dr *) (DR)->aux)
685 #define DR_BASE_W_UNCONDITIONALLY(DR) (IFC_DR (DR)->written_at_least_once)
686 #define DR_RW_UNCONDITIONALLY(DR) (IFC_DR (DR)->rw_unconditionally)
687 #define DR_W_UNCONDITIONALLY(DR) (IFC_DR (DR)->w_unconditionally)
689 /* Iterates over DR's and stores refs, DR and base refs, DR pairs in
690 HASH tables. While storing them in HASH table, it checks if the
691 reference is unconditionally read or written and stores that as a flag
692 information. For base reference it checks if it is written atlest once
693 unconditionally and stores it as flag information along with DR.
694 In other words for every data reference A in STMT there exist other
695 accesses to a data reference with the same base with predicates that
696 add up (OR-up) to the true predicate: this ensures that the data
697 reference A is touched (read or written) on every iteration of the
698 if-converted loop. */
700 hash_memrefs_baserefs_and_store_DRs_read_written_info (data_reference_p a
)
703 data_reference_p
*master_dr
, *base_master_dr
;
704 tree base_ref
= DR_BASE_OBJECT (a
);
705 innermost_loop_behavior
*innermost
= &DR_INNERMOST (a
);
706 tree ca
= bb_predicate (gimple_bb (DR_STMT (a
)));
709 master_dr
= &innermost_DR_map
->get_or_insert (innermost
, &exist1
);
715 IFC_DR (*master_dr
)->w_predicate
716 = fold_or_predicates (UNKNOWN_LOCATION
, ca
,
717 IFC_DR (*master_dr
)->w_predicate
);
718 if (is_true_predicate (IFC_DR (*master_dr
)->w_predicate
))
719 DR_W_UNCONDITIONALLY (*master_dr
) = true;
721 IFC_DR (*master_dr
)->rw_predicate
722 = fold_or_predicates (UNKNOWN_LOCATION
, ca
,
723 IFC_DR (*master_dr
)->rw_predicate
);
724 if (is_true_predicate (IFC_DR (*master_dr
)->rw_predicate
))
725 DR_RW_UNCONDITIONALLY (*master_dr
) = true;
729 base_master_dr
= &baseref_DR_map
->get_or_insert (base_ref
, &exist2
);
732 IFC_DR (*base_master_dr
)->base_w_predicate
733 = fold_or_predicates (UNKNOWN_LOCATION
, ca
,
734 IFC_DR (*base_master_dr
)->base_w_predicate
);
735 if (is_true_predicate (IFC_DR (*base_master_dr
)->base_w_predicate
))
736 DR_BASE_W_UNCONDITIONALLY (*base_master_dr
) = true;
740 /* Return TRUE if can prove the index IDX of an array reference REF is
741 within array bound. Return false otherwise. */
744 idx_within_array_bound (tree ref
, tree
*idx
, void *dta
)
747 widest_int niter
, valid_niter
, delta
, wi_step
;
750 struct loop
*loop
= (struct loop
*) dta
;
752 /* Only support within-bound access for array references. */
753 if (TREE_CODE (ref
) != ARRAY_REF
)
756 /* For arrays at the end of the structure, we are not guaranteed that they
757 do not really extend over their declared size. However, for arrays of
758 size greater than one, this is unlikely to be intended. */
759 if (array_at_struct_end_p (ref
))
762 ev
= analyze_scalar_evolution (loop
, *idx
);
763 ev
= instantiate_parameters (loop
, ev
);
764 init
= initial_condition (ev
);
765 step
= evolution_part_in_loop_num (ev
, loop
->num
);
767 if (!init
|| TREE_CODE (init
) != INTEGER_CST
768 || (step
&& TREE_CODE (step
) != INTEGER_CST
))
771 low
= array_ref_low_bound (ref
);
772 high
= array_ref_up_bound (ref
);
774 /* The case of nonconstant bounds could be handled, but it would be
776 if (TREE_CODE (low
) != INTEGER_CST
777 || !high
|| TREE_CODE (high
) != INTEGER_CST
)
780 /* Check if the intial idx is within bound. */
781 if (wi::to_widest (init
) < wi::to_widest (low
)
782 || wi::to_widest (init
) > wi::to_widest (high
))
785 /* The idx is always within bound. */
786 if (!step
|| integer_zerop (step
))
789 if (!max_loop_iterations (loop
, &niter
))
792 if (wi::to_widest (step
) < 0)
794 delta
= wi::to_widest (init
) - wi::to_widest (low
);
795 wi_step
= -wi::to_widest (step
);
799 delta
= wi::to_widest (high
) - wi::to_widest (init
);
800 wi_step
= wi::to_widest (step
);
803 valid_niter
= wi::div_floor (delta
, wi_step
, SIGNED
, &overflow
);
804 /* The iteration space of idx is within array bound. */
805 if (!overflow
&& niter
<= valid_niter
)
811 /* Return TRUE if ref is a within bound array reference. */
814 ref_within_array_bound (gimple
*stmt
, tree ref
)
816 struct loop
*loop
= loop_containing_stmt (stmt
);
818 gcc_assert (loop
!= NULL
);
819 return for_each_index (&ref
, idx_within_array_bound
, loop
);
823 /* Given a memory reference expression T, return TRUE if base object
824 it refers to is writable. The base object of a memory reference
825 is the main object being referenced, which is returned by function
829 base_object_writable (tree ref
)
831 tree base_tree
= get_base_address (ref
);
834 && DECL_P (base_tree
)
835 && decl_binds_to_current_def_p (base_tree
)
836 && !TREE_READONLY (base_tree
));
839 /* Return true when the memory references of STMT won't trap in the
840 if-converted code. There are two things that we have to check for:
842 - writes to memory occur to writable memory: if-conversion of
843 memory writes transforms the conditional memory writes into
844 unconditional writes, i.e. "if (cond) A[i] = foo" is transformed
845 into "A[i] = cond ? foo : A[i]", and as the write to memory may not
846 be executed at all in the original code, it may be a readonly
847 memory. To check that A is not const-qualified, we check that
848 there exists at least an unconditional write to A in the current
851 - reads or writes to memory are valid memory accesses for every
852 iteration. To check that the memory accesses are correctly formed
853 and that we are allowed to read and write in these locations, we
854 check that the memory accesses to be if-converted occur at every
855 iteration unconditionally.
857 Returns true for the memory reference in STMT, same memory reference
858 is read or written unconditionally atleast once and the base memory
859 reference is written unconditionally once. This is to check reference
860 will not write fault. Also retuns true if the memory reference is
861 unconditionally read once then we are conditionally writing to memory
862 which is defined as read and write and is bound to the definition
865 ifcvt_memrefs_wont_trap (gimple
*stmt
, vec
<data_reference_p
> drs
)
867 data_reference_p
*master_dr
, *base_master_dr
;
868 data_reference_p a
= drs
[gimple_uid (stmt
) - 1];
870 tree base
= DR_BASE_OBJECT (a
);
871 innermost_loop_behavior
*innermost
= &DR_INNERMOST (a
);
873 gcc_assert (DR_STMT (a
) == stmt
);
874 gcc_assert (DR_BASE_ADDRESS (a
) || DR_OFFSET (a
)
875 || DR_INIT (a
) || DR_STEP (a
));
877 master_dr
= innermost_DR_map
->get (innermost
);
878 gcc_assert (master_dr
!= NULL
);
880 base_master_dr
= baseref_DR_map
->get (base
);
882 /* If a is unconditionally written to it doesn't trap. */
883 if (DR_W_UNCONDITIONALLY (*master_dr
))
886 /* If a is unconditionally accessed then ...
888 Even a is conditional access, we can treat it as an unconditional
889 one if it's an array reference and all its index are within array
891 if (DR_RW_UNCONDITIONALLY (*master_dr
)
892 || ref_within_array_bound (stmt
, DR_REF (a
)))
894 /* an unconditional read won't trap. */
898 /* an unconditionaly write won't trap if the base is written
899 to unconditionally. */
901 && DR_BASE_W_UNCONDITIONALLY (*base_master_dr
))
902 return PARAM_VALUE (PARAM_ALLOW_STORE_DATA_RACES
);
903 /* or the base is known to be not readonly. */
904 else if (base_object_writable (DR_REF (a
)))
905 return PARAM_VALUE (PARAM_ALLOW_STORE_DATA_RACES
);
911 /* Return true if STMT could be converted into a masked load or store
912 (conditional load or store based on a mask computed from bb predicate). */
915 ifcvt_can_use_mask_load_store (gimple
*stmt
)
919 basic_block bb
= gimple_bb (stmt
);
922 if (!(flag_tree_loop_vectorize
|| bb
->loop_father
->force_vectorize
)
923 || bb
->loop_father
->dont_vectorize
924 || !gimple_assign_single_p (stmt
)
925 || gimple_has_volatile_ops (stmt
))
928 /* Check whether this is a load or store. */
929 lhs
= gimple_assign_lhs (stmt
);
930 if (gimple_store_p (stmt
))
932 if (!is_gimple_val (gimple_assign_rhs1 (stmt
)))
937 else if (gimple_assign_load_p (stmt
))
940 ref
= gimple_assign_rhs1 (stmt
);
945 if (may_be_nonaddressable_p (ref
))
948 /* Mask should be integer mode of the same size as the load/store
950 mode
= TYPE_MODE (TREE_TYPE (lhs
));
951 if (!int_mode_for_mode (mode
).exists () || VECTOR_MODE_P (mode
))
954 if (can_vec_mask_load_store_p (mode
, VOIDmode
, is_load
))
960 /* Return true when STMT is if-convertible.
962 GIMPLE_ASSIGN statement is not if-convertible if,
965 - LHS is not var decl. */
968 if_convertible_gimple_assign_stmt_p (gimple
*stmt
,
969 vec
<data_reference_p
> refs
)
971 tree lhs
= gimple_assign_lhs (stmt
);
973 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
975 fprintf (dump_file
, "-------------------------\n");
976 print_gimple_stmt (dump_file
, stmt
, 0, TDF_SLIM
);
979 if (!is_gimple_reg_type (TREE_TYPE (lhs
)))
982 /* Some of these constrains might be too conservative. */
983 if (stmt_ends_bb_p (stmt
)
984 || gimple_has_volatile_ops (stmt
)
985 || (TREE_CODE (lhs
) == SSA_NAME
986 && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs
))
987 || gimple_has_side_effects (stmt
))
989 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
990 fprintf (dump_file
, "stmt not suitable for ifcvt\n");
994 /* tree-into-ssa.c uses GF_PLF_1, so avoid it, because
995 in between if_convertible_loop_p and combine_blocks
996 we can perform loop versioning. */
997 gimple_set_plf (stmt
, GF_PLF_2
, false);
999 if ((! gimple_vuse (stmt
)
1000 || gimple_could_trap_p_1 (stmt
, false, false)
1001 || ! ifcvt_memrefs_wont_trap (stmt
, refs
))
1002 && gimple_could_trap_p (stmt
))
1004 if (ifcvt_can_use_mask_load_store (stmt
))
1006 gimple_set_plf (stmt
, GF_PLF_2
, true);
1007 any_pred_load_store
= true;
1010 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1011 fprintf (dump_file
, "tree could trap...\n");
1015 /* When if-converting stores force versioning, likewise if we
1016 ended up generating store data races. */
1017 if (gimple_vdef (stmt
))
1018 any_pred_load_store
= true;
1023 /* Return true when STMT is if-convertible.
1025 A statement is if-convertible if:
1026 - it is an if-convertible GIMPLE_ASSIGN,
1027 - it is a GIMPLE_LABEL or a GIMPLE_COND,
1028 - it is builtins call. */
1031 if_convertible_stmt_p (gimple
*stmt
, vec
<data_reference_p
> refs
)
1033 switch (gimple_code (stmt
))
1041 return if_convertible_gimple_assign_stmt_p (stmt
, refs
);
1045 tree fndecl
= gimple_call_fndecl (stmt
);
1048 int flags
= gimple_call_flags (stmt
);
1049 if ((flags
& ECF_CONST
)
1050 && !(flags
& ECF_LOOPING_CONST_OR_PURE
)
1051 /* We can only vectorize some builtins at the moment,
1052 so restrict if-conversion to those. */
1053 && DECL_BUILT_IN (fndecl
))
1060 /* Don't know what to do with 'em so don't do anything. */
1061 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1063 fprintf (dump_file
, "don't know what to do\n");
1064 print_gimple_stmt (dump_file
, stmt
, 0, TDF_SLIM
);
1072 /* Assumes that BB has more than 1 predecessors.
1073 Returns false if at least one successor is not on critical edge
1074 and true otherwise. */
1077 all_preds_critical_p (basic_block bb
)
1082 FOR_EACH_EDGE (e
, ei
, bb
->preds
)
1083 if (EDGE_COUNT (e
->src
->succs
) == 1)
1088 /* Returns true if at least one successor in on critical edge. */
1090 has_pred_critical_p (basic_block bb
)
1095 FOR_EACH_EDGE (e
, ei
, bb
->preds
)
1096 if (EDGE_COUNT (e
->src
->succs
) > 1)
1101 /* Return true when BB is if-convertible. This routine does not check
1102 basic block's statements and phis.
1104 A basic block is not if-convertible if:
1105 - it is non-empty and it is after the exit block (in BFS order),
1106 - it is after the exit block but before the latch,
1107 - its edges are not normal.
1109 EXIT_BB is the basic block containing the exit of the LOOP. BB is
1113 if_convertible_bb_p (struct loop
*loop
, basic_block bb
, basic_block exit_bb
)
1118 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1119 fprintf (dump_file
, "----------[%d]-------------\n", bb
->index
);
1121 if (EDGE_COUNT (bb
->succs
) > 2)
1126 if (bb
!= loop
->latch
)
1128 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1129 fprintf (dump_file
, "basic block after exit bb but before latch\n");
1132 else if (!empty_block_p (bb
))
1134 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1135 fprintf (dump_file
, "non empty basic block after exit bb\n");
1138 else if (bb
== loop
->latch
1140 && !dominated_by_p (CDI_DOMINATORS
, bb
, exit_bb
))
1142 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1143 fprintf (dump_file
, "latch is not dominated by exit_block\n");
1148 /* Be less adventurous and handle only normal edges. */
1149 FOR_EACH_EDGE (e
, ei
, bb
->succs
)
1150 if (e
->flags
& (EDGE_EH
| EDGE_ABNORMAL
| EDGE_IRREDUCIBLE_LOOP
))
1152 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1153 fprintf (dump_file
, "Difficult to handle edges\n");
1160 /* Return true when all predecessor blocks of BB are visited. The
1161 VISITED bitmap keeps track of the visited blocks. */
1164 pred_blocks_visited_p (basic_block bb
, bitmap
*visited
)
1168 FOR_EACH_EDGE (e
, ei
, bb
->preds
)
1169 if (!bitmap_bit_p (*visited
, e
->src
->index
))
1175 /* Get body of a LOOP in suitable order for if-conversion. It is
1176 caller's responsibility to deallocate basic block list.
1177 If-conversion suitable order is, breadth first sort (BFS) order
1178 with an additional constraint: select a block only if all its
1179 predecessors are already selected. */
1181 static basic_block
*
1182 get_loop_body_in_if_conv_order (const struct loop
*loop
)
1184 basic_block
*blocks
, *blocks_in_bfs_order
;
1187 unsigned int index
= 0;
1188 unsigned int visited_count
= 0;
1190 gcc_assert (loop
->num_nodes
);
1191 gcc_assert (loop
->latch
!= EXIT_BLOCK_PTR_FOR_FN (cfun
));
1193 blocks
= XCNEWVEC (basic_block
, loop
->num_nodes
);
1194 visited
= BITMAP_ALLOC (NULL
);
1196 blocks_in_bfs_order
= get_loop_body_in_bfs_order (loop
);
1199 while (index
< loop
->num_nodes
)
1201 bb
= blocks_in_bfs_order
[index
];
1203 if (bb
->flags
& BB_IRREDUCIBLE_LOOP
)
1205 free (blocks_in_bfs_order
);
1206 BITMAP_FREE (visited
);
1211 if (!bitmap_bit_p (visited
, bb
->index
))
1213 if (pred_blocks_visited_p (bb
, &visited
)
1214 || bb
== loop
->header
)
1216 /* This block is now visited. */
1217 bitmap_set_bit (visited
, bb
->index
);
1218 blocks
[visited_count
++] = bb
;
1224 if (index
== loop
->num_nodes
1225 && visited_count
!= loop
->num_nodes
)
1229 free (blocks_in_bfs_order
);
1230 BITMAP_FREE (visited
);
1234 /* Returns true when the analysis of the predicates for all the basic
1235 blocks in LOOP succeeded.
1237 predicate_bbs first allocates the predicates of the basic blocks.
1238 These fields are then initialized with the tree expressions
1239 representing the predicates under which a basic block is executed
1240 in the LOOP. As the loop->header is executed at each iteration, it
1241 has the "true" predicate. Other statements executed under a
1242 condition are predicated with that condition, for example
1249 S1 will be predicated with "x", and
1250 S2 will be predicated with "!x". */
1253 predicate_bbs (loop_p loop
)
1257 for (i
= 0; i
< loop
->num_nodes
; i
++)
1258 init_bb_predicate (ifc_bbs
[i
]);
1260 for (i
= 0; i
< loop
->num_nodes
; i
++)
1262 basic_block bb
= ifc_bbs
[i
];
1266 /* The loop latch and loop exit block are always executed and
1267 have no extra conditions to be processed: skip them. */
1268 if (bb
== loop
->latch
1269 || bb_with_exit_edge_p (loop
, bb
))
1271 reset_bb_predicate (bb
);
1275 cond
= bb_predicate (bb
);
1276 stmt
= last_stmt (bb
);
1277 if (stmt
&& gimple_code (stmt
) == GIMPLE_COND
)
1280 edge true_edge
, false_edge
;
1281 location_t loc
= gimple_location (stmt
);
1282 tree c
= build2_loc (loc
, gimple_cond_code (stmt
),
1284 gimple_cond_lhs (stmt
),
1285 gimple_cond_rhs (stmt
));
1287 /* Add new condition into destination's predicate list. */
1288 extract_true_false_edges_from_block (gimple_bb (stmt
),
1289 &true_edge
, &false_edge
);
1291 /* If C is true, then TRUE_EDGE is taken. */
1292 add_to_dst_predicate_list (loop
, true_edge
, unshare_expr (cond
),
1295 /* If C is false, then FALSE_EDGE is taken. */
1296 c2
= build1_loc (loc
, TRUTH_NOT_EXPR
, boolean_type_node
,
1298 add_to_dst_predicate_list (loop
, false_edge
,
1299 unshare_expr (cond
), c2
);
1304 /* If current bb has only one successor, then consider it as an
1305 unconditional goto. */
1306 if (single_succ_p (bb
))
1308 basic_block bb_n
= single_succ (bb
);
1310 /* The successor bb inherits the predicate of its
1311 predecessor. If there is no predicate in the predecessor
1312 bb, then consider the successor bb as always executed. */
1313 if (cond
== NULL_TREE
)
1314 cond
= boolean_true_node
;
1316 add_to_predicate_list (loop
, bb_n
, cond
);
1320 /* The loop header is always executed. */
1321 reset_bb_predicate (loop
->header
);
1322 gcc_assert (bb_predicate_gimplified_stmts (loop
->header
) == NULL
1323 && bb_predicate_gimplified_stmts (loop
->latch
) == NULL
);
1326 /* Build region by adding loop pre-header and post-header blocks. */
1328 static vec
<basic_block
>
1329 build_region (struct loop
*loop
)
1331 vec
<basic_block
> region
= vNULL
;
1332 basic_block exit_bb
= NULL
;
1334 gcc_assert (ifc_bbs
);
1335 /* The first element is loop pre-header. */
1336 region
.safe_push (loop_preheader_edge (loop
)->src
);
1338 for (unsigned int i
= 0; i
< loop
->num_nodes
; i
++)
1340 basic_block bb
= ifc_bbs
[i
];
1341 region
.safe_push (bb
);
1342 /* Find loop postheader. */
1345 FOR_EACH_EDGE (e
, ei
, bb
->succs
)
1346 if (loop_exit_edge_p (loop
, e
))
1352 /* The last element is loop post-header. */
1353 gcc_assert (exit_bb
);
1354 region
.safe_push (exit_bb
);
1358 /* Return true when LOOP is if-convertible. This is a helper function
1359 for if_convertible_loop_p. REFS and DDRS are initialized and freed
1360 in if_convertible_loop_p. */
1363 if_convertible_loop_p_1 (struct loop
*loop
, vec
<data_reference_p
> *refs
)
1366 basic_block exit_bb
= NULL
;
1367 vec
<basic_block
> region
;
1369 if (find_data_references_in_loop (loop
, refs
) == chrec_dont_know
)
1372 calculate_dominance_info (CDI_DOMINATORS
);
1374 /* Allow statements that can be handled during if-conversion. */
1375 ifc_bbs
= get_loop_body_in_if_conv_order (loop
);
1378 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1379 fprintf (dump_file
, "Irreducible loop\n");
1383 for (i
= 0; i
< loop
->num_nodes
; i
++)
1385 basic_block bb
= ifc_bbs
[i
];
1387 if (!if_convertible_bb_p (loop
, bb
, exit_bb
))
1390 if (bb_with_exit_edge_p (loop
, bb
))
1394 for (i
= 0; i
< loop
->num_nodes
; i
++)
1396 basic_block bb
= ifc_bbs
[i
];
1397 gimple_stmt_iterator gsi
;
1399 for (gsi
= gsi_start_bb (bb
); !gsi_end_p (gsi
); gsi_next (&gsi
))
1400 switch (gimple_code (gsi_stmt (gsi
)))
1407 gimple_set_uid (gsi_stmt (gsi
), 0);
1414 data_reference_p dr
;
1417 = new hash_map
<innermost_loop_behavior_hash
, data_reference_p
>;
1418 baseref_DR_map
= new hash_map
<tree_operand_hash
, data_reference_p
>;
1420 /* Compute post-dominator tree locally. */
1421 region
= build_region (loop
);
1422 calculate_dominance_info_for_region (CDI_POST_DOMINATORS
, region
);
1424 predicate_bbs (loop
);
1426 /* Free post-dominator tree since it is not used after predication. */
1427 free_dominance_info_for_region (cfun
, CDI_POST_DOMINATORS
, region
);
1430 for (i
= 0; refs
->iterate (i
, &dr
); i
++)
1432 tree ref
= DR_REF (dr
);
1434 dr
->aux
= XNEW (struct ifc_dr
);
1435 DR_BASE_W_UNCONDITIONALLY (dr
) = false;
1436 DR_RW_UNCONDITIONALLY (dr
) = false;
1437 DR_W_UNCONDITIONALLY (dr
) = false;
1438 IFC_DR (dr
)->rw_predicate
= boolean_false_node
;
1439 IFC_DR (dr
)->w_predicate
= boolean_false_node
;
1440 IFC_DR (dr
)->base_w_predicate
= boolean_false_node
;
1441 if (gimple_uid (DR_STMT (dr
)) == 0)
1442 gimple_set_uid (DR_STMT (dr
), i
+ 1);
1444 /* If DR doesn't have innermost loop behavior or it's a compound
1445 memory reference, we synthesize its innermost loop behavior
1447 if (TREE_CODE (ref
) == COMPONENT_REF
1448 || TREE_CODE (ref
) == IMAGPART_EXPR
1449 || TREE_CODE (ref
) == REALPART_EXPR
1450 || !(DR_BASE_ADDRESS (dr
) || DR_OFFSET (dr
)
1451 || DR_INIT (dr
) || DR_STEP (dr
)))
1453 while (TREE_CODE (ref
) == COMPONENT_REF
1454 || TREE_CODE (ref
) == IMAGPART_EXPR
1455 || TREE_CODE (ref
) == REALPART_EXPR
)
1456 ref
= TREE_OPERAND (ref
, 0);
1458 memset (&DR_INNERMOST (dr
), 0, sizeof (DR_INNERMOST (dr
)));
1459 DR_BASE_ADDRESS (dr
) = ref
;
1461 hash_memrefs_baserefs_and_store_DRs_read_written_info (dr
);
1464 for (i
= 0; i
< loop
->num_nodes
; i
++)
1466 basic_block bb
= ifc_bbs
[i
];
1467 gimple_stmt_iterator itr
;
1469 /* Check the if-convertibility of statements in predicated BBs. */
1470 if (!dominated_by_p (CDI_DOMINATORS
, loop
->latch
, bb
))
1471 for (itr
= gsi_start_bb (bb
); !gsi_end_p (itr
); gsi_next (&itr
))
1472 if (!if_convertible_stmt_p (gsi_stmt (itr
), *refs
))
1476 /* Checking PHIs needs to be done after stmts, as the fact whether there
1477 are any masked loads or stores affects the tests. */
1478 for (i
= 0; i
< loop
->num_nodes
; i
++)
1480 basic_block bb
= ifc_bbs
[i
];
1483 for (itr
= gsi_start_phis (bb
); !gsi_end_p (itr
); gsi_next (&itr
))
1484 if (!if_convertible_phi_p (loop
, bb
, itr
.phi ()))
1489 fprintf (dump_file
, "Applying if-conversion\n");
1494 /* Return true when LOOP is if-convertible.
1495 LOOP is if-convertible if:
1497 - it has two or more basic blocks,
1498 - it has only one exit,
1499 - loop header is not the exit edge,
1500 - if its basic blocks and phi nodes are if convertible. */
1503 if_convertible_loop_p (struct loop
*loop
)
1508 vec
<data_reference_p
> refs
;
1510 /* Handle only innermost loop. */
1511 if (!loop
|| loop
->inner
)
1513 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1514 fprintf (dump_file
, "not innermost loop\n");
1518 /* If only one block, no need for if-conversion. */
1519 if (loop
->num_nodes
<= 2)
1521 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1522 fprintf (dump_file
, "less than 2 basic blocks\n");
1526 /* More than one loop exit is too much to handle. */
1527 if (!single_exit (loop
))
1529 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1530 fprintf (dump_file
, "multiple exits\n");
1534 /* If one of the loop header's edge is an exit edge then do not
1535 apply if-conversion. */
1536 FOR_EACH_EDGE (e
, ei
, loop
->header
->succs
)
1537 if (loop_exit_edge_p (loop
, e
))
1541 res
= if_convertible_loop_p_1 (loop
, &refs
);
1543 data_reference_p dr
;
1545 for (i
= 0; refs
.iterate (i
, &dr
); i
++)
1548 free_data_refs (refs
);
1550 delete innermost_DR_map
;
1551 innermost_DR_map
= NULL
;
1553 delete baseref_DR_map
;
1554 baseref_DR_map
= NULL
;
1559 /* Returns true if def-stmt for phi argument ARG is simple increment/decrement
1560 which is in predicated basic block.
1561 In fact, the following PHI pattern is searching:
1563 reduc_1 = PHI <..., reduc_2>
1567 reduc_2 = PHI <reduc_1, reduc_3>
1569 ARG_0 and ARG_1 are correspondent PHI arguments.
1570 REDUC, OP0 and OP1 contain reduction stmt and its operands.
1571 EXTENDED is true if PHI has > 2 arguments. */
1574 is_cond_scalar_reduction (gimple
*phi
, gimple
**reduc
, tree arg_0
, tree arg_1
,
1575 tree
*op0
, tree
*op1
, bool extended
)
1577 tree lhs
, r_op1
, r_op2
;
1579 gimple
*header_phi
= NULL
;
1580 enum tree_code reduction_op
;
1581 basic_block bb
= gimple_bb (phi
);
1582 struct loop
*loop
= bb
->loop_father
;
1583 edge latch_e
= loop_latch_edge (loop
);
1584 imm_use_iterator imm_iter
;
1585 use_operand_p use_p
;
1588 bool result
= false;
1589 if (TREE_CODE (arg_0
) != SSA_NAME
|| TREE_CODE (arg_1
) != SSA_NAME
)
1592 if (!extended
&& gimple_code (SSA_NAME_DEF_STMT (arg_0
)) == GIMPLE_PHI
)
1595 header_phi
= SSA_NAME_DEF_STMT (arg_0
);
1596 stmt
= SSA_NAME_DEF_STMT (arg_1
);
1598 else if (gimple_code (SSA_NAME_DEF_STMT (arg_1
)) == GIMPLE_PHI
)
1601 header_phi
= SSA_NAME_DEF_STMT (arg_1
);
1602 stmt
= SSA_NAME_DEF_STMT (arg_0
);
1606 if (gimple_bb (header_phi
) != loop
->header
)
1609 if (PHI_ARG_DEF_FROM_EDGE (header_phi
, latch_e
) != PHI_RESULT (phi
))
1612 if (gimple_code (stmt
) != GIMPLE_ASSIGN
1613 || gimple_has_volatile_ops (stmt
))
1616 if (!flow_bb_inside_loop_p (loop
, gimple_bb (stmt
)))
1619 if (!is_predicated (gimple_bb (stmt
)))
1622 /* Check that stmt-block is predecessor of phi-block. */
1623 FOR_EACH_EDGE (e
, ei
, gimple_bb (stmt
)->succs
)
1632 if (!has_single_use (lhs
))
1635 reduction_op
= gimple_assign_rhs_code (stmt
);
1636 if (reduction_op
!= PLUS_EXPR
&& reduction_op
!= MINUS_EXPR
)
1638 r_op1
= gimple_assign_rhs1 (stmt
);
1639 r_op2
= gimple_assign_rhs2 (stmt
);
1641 /* Make R_OP1 to hold reduction variable. */
1642 if (r_op2
== PHI_RESULT (header_phi
)
1643 && reduction_op
== PLUS_EXPR
)
1644 std::swap (r_op1
, r_op2
);
1645 else if (r_op1
!= PHI_RESULT (header_phi
))
1648 /* Check that R_OP1 is used in reduction stmt or in PHI only. */
1649 FOR_EACH_IMM_USE_FAST (use_p
, imm_iter
, r_op1
)
1651 gimple
*use_stmt
= USE_STMT (use_p
);
1652 if (is_gimple_debug (use_stmt
))
1654 if (use_stmt
== stmt
)
1656 if (gimple_code (use_stmt
) != GIMPLE_PHI
)
1660 *op0
= r_op1
; *op1
= r_op2
;
1665 /* Converts conditional scalar reduction into unconditional form, e.g.
1667 if (_5 != 0) goto bb_5 else goto bb_6
1673 # res_2 = PHI <res_13(4), res_6(5)>
1676 will be converted into sequence
1677 _ifc__1 = _5 != 0 ? 1 : 0;
1678 res_2 = res_13 + _ifc__1;
1679 Argument SWAP tells that arguments of conditional expression should be
1681 Returns rhs of resulting PHI assignment. */
1684 convert_scalar_cond_reduction (gimple
*reduc
, gimple_stmt_iterator
*gsi
,
1685 tree cond
, tree op0
, tree op1
, bool swap
)
1687 gimple_stmt_iterator stmt_it
;
1690 tree rhs1
= gimple_assign_rhs1 (reduc
);
1691 tree tmp
= make_temp_ssa_name (TREE_TYPE (rhs1
), NULL
, "_ifc_");
1693 tree zero
= build_zero_cst (TREE_TYPE (rhs1
));
1695 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1697 fprintf (dump_file
, "Found cond scalar reduction.\n");
1698 print_gimple_stmt (dump_file
, reduc
, 0, TDF_SLIM
);
1701 /* Build cond expression using COND and constant operand
1702 of reduction rhs. */
1703 c
= fold_build_cond_expr (TREE_TYPE (rhs1
),
1704 unshare_expr (cond
),
1708 /* Create assignment stmt and insert it at GSI. */
1709 new_assign
= gimple_build_assign (tmp
, c
);
1710 gsi_insert_before (gsi
, new_assign
, GSI_SAME_STMT
);
1711 /* Build rhs for unconditional increment/decrement. */
1712 rhs
= fold_build2 (gimple_assign_rhs_code (reduc
),
1713 TREE_TYPE (rhs1
), op0
, tmp
);
1715 /* Delete original reduction stmt. */
1716 stmt_it
= gsi_for_stmt (reduc
);
1717 gsi_remove (&stmt_it
, true);
1718 release_defs (reduc
);
1722 /* Produce condition for all occurrences of ARG in PHI node. */
1725 gen_phi_arg_condition (gphi
*phi
, vec
<int> *occur
,
1726 gimple_stmt_iterator
*gsi
)
1730 tree cond
= NULL_TREE
;
1734 len
= occur
->length ();
1735 gcc_assert (len
> 0);
1736 for (i
= 0; i
< len
; i
++)
1738 e
= gimple_phi_arg_edge (phi
, (*occur
)[i
]);
1739 c
= bb_predicate (e
->src
);
1740 if (is_true_predicate (c
))
1745 c
= force_gimple_operand_gsi_1 (gsi
, unshare_expr (c
),
1746 is_gimple_condexpr
, NULL_TREE
,
1747 true, GSI_SAME_STMT
);
1748 if (cond
!= NULL_TREE
)
1750 /* Must build OR expression. */
1751 cond
= fold_or_predicates (EXPR_LOCATION (c
), c
, cond
);
1752 cond
= force_gimple_operand_gsi_1 (gsi
, unshare_expr (cond
),
1753 is_gimple_condexpr
, NULL_TREE
,
1754 true, GSI_SAME_STMT
);
1759 gcc_assert (cond
!= NULL_TREE
);
1763 /* Local valueization callback that follows all-use SSA edges. */
1766 ifcvt_follow_ssa_use_edges (tree val
)
1771 /* Replace a scalar PHI node with a COND_EXPR using COND as condition.
1772 This routine can handle PHI nodes with more than two arguments.
1775 S1: A = PHI <x1(1), x2(5)>
1777 S2: A = cond ? x1 : x2;
1779 The generated code is inserted at GSI that points to the top of
1780 basic block's statement list.
1781 If PHI node has more than two arguments a chain of conditional
1782 expression is produced. */
1786 predicate_scalar_phi (gphi
*phi
, gimple_stmt_iterator
*gsi
)
1788 gimple
*new_stmt
= NULL
, *reduc
;
1789 tree rhs
, res
, arg0
, arg1
, op0
, op1
, scev
;
1791 unsigned int index0
;
1792 unsigned int max
, args_len
;
1797 res
= gimple_phi_result (phi
);
1798 if (virtual_operand_p (res
))
1801 if ((rhs
= degenerate_phi_result (phi
))
1802 || ((scev
= analyze_scalar_evolution (gimple_bb (phi
)->loop_father
,
1804 && !chrec_contains_undetermined (scev
)
1806 && (rhs
= gimple_phi_arg_def (phi
, 0))))
1808 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1810 fprintf (dump_file
, "Degenerate phi!\n");
1811 print_gimple_stmt (dump_file
, phi
, 0, TDF_SLIM
);
1813 new_stmt
= gimple_build_assign (res
, rhs
);
1814 gsi_insert_before (gsi
, new_stmt
, GSI_SAME_STMT
);
1815 update_stmt (new_stmt
);
1819 bb
= gimple_bb (phi
);
1820 if (EDGE_COUNT (bb
->preds
) == 2)
1822 /* Predicate ordinary PHI node with 2 arguments. */
1823 edge first_edge
, second_edge
;
1824 basic_block true_bb
;
1825 first_edge
= EDGE_PRED (bb
, 0);
1826 second_edge
= EDGE_PRED (bb
, 1);
1827 cond
= bb_predicate (first_edge
->src
);
1828 if (TREE_CODE (cond
) == TRUTH_NOT_EXPR
)
1829 std::swap (first_edge
, second_edge
);
1830 if (EDGE_COUNT (first_edge
->src
->succs
) > 1)
1832 cond
= bb_predicate (second_edge
->src
);
1833 if (TREE_CODE (cond
) == TRUTH_NOT_EXPR
)
1834 cond
= TREE_OPERAND (cond
, 0);
1836 first_edge
= second_edge
;
1839 cond
= bb_predicate (first_edge
->src
);
1840 /* Gimplify the condition to a valid cond-expr conditonal operand. */
1841 cond
= force_gimple_operand_gsi_1 (gsi
, unshare_expr (cond
),
1842 is_gimple_condexpr
, NULL_TREE
,
1843 true, GSI_SAME_STMT
);
1844 true_bb
= first_edge
->src
;
1845 if (EDGE_PRED (bb
, 1)->src
== true_bb
)
1847 arg0
= gimple_phi_arg_def (phi
, 1);
1848 arg1
= gimple_phi_arg_def (phi
, 0);
1852 arg0
= gimple_phi_arg_def (phi
, 0);
1853 arg1
= gimple_phi_arg_def (phi
, 1);
1855 if (is_cond_scalar_reduction (phi
, &reduc
, arg0
, arg1
,
1857 /* Convert reduction stmt into vectorizable form. */
1858 rhs
= convert_scalar_cond_reduction (reduc
, gsi
, cond
, op0
, op1
,
1859 true_bb
!= gimple_bb (reduc
));
1861 /* Build new RHS using selected condition and arguments. */
1862 rhs
= fold_build_cond_expr (TREE_TYPE (res
), unshare_expr (cond
),
1864 new_stmt
= gimple_build_assign (res
, rhs
);
1865 gsi_insert_before (gsi
, new_stmt
, GSI_SAME_STMT
);
1866 gimple_stmt_iterator new_gsi
= gsi_for_stmt (new_stmt
);
1867 if (fold_stmt (&new_gsi
, ifcvt_follow_ssa_use_edges
))
1869 new_stmt
= gsi_stmt (new_gsi
);
1870 update_stmt (new_stmt
);
1873 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1875 fprintf (dump_file
, "new phi replacement stmt\n");
1876 print_gimple_stmt (dump_file
, new_stmt
, 0, TDF_SLIM
);
1881 /* Create hashmap for PHI node which contain vector of argument indexes
1882 having the same value. */
1884 hash_map
<tree_operand_hash
, auto_vec
<int> > phi_arg_map
;
1885 unsigned int num_args
= gimple_phi_num_args (phi
);
1887 /* Vector of different PHI argument values. */
1888 auto_vec
<tree
> args (num_args
);
1890 /* Compute phi_arg_map. */
1891 for (i
= 0; i
< num_args
; i
++)
1895 arg
= gimple_phi_arg_def (phi
, i
);
1896 if (!phi_arg_map
.get (arg
))
1897 args
.quick_push (arg
);
1898 phi_arg_map
.get_or_insert (arg
).safe_push (i
);
1901 /* Determine element with max number of occurrences. */
1904 args_len
= args
.length ();
1905 for (i
= 0; i
< args_len
; i
++)
1908 if ((len
= phi_arg_map
.get (args
[i
])->length ()) > max
)
1915 /* Put element with max number of occurences to the end of ARGS. */
1916 if (max_ind
!= -1 && max_ind
+1 != (int) args_len
)
1917 std::swap (args
[args_len
- 1], args
[max_ind
]);
1919 /* Handle one special case when number of arguments with different values
1920 is equal 2 and one argument has the only occurrence. Such PHI can be
1921 handled as if would have only 2 arguments. */
1922 if (args_len
== 2 && phi_arg_map
.get (args
[0])->length () == 1)
1925 indexes
= phi_arg_map
.get (args
[0]);
1926 index0
= (*indexes
)[0];
1929 e
= gimple_phi_arg_edge (phi
, index0
);
1930 cond
= bb_predicate (e
->src
);
1931 if (TREE_CODE (cond
) == TRUTH_NOT_EXPR
)
1934 cond
= TREE_OPERAND (cond
, 0);
1936 /* Gimplify the condition to a valid cond-expr conditonal operand. */
1937 cond
= force_gimple_operand_gsi_1 (gsi
, unshare_expr (cond
),
1938 is_gimple_condexpr
, NULL_TREE
,
1939 true, GSI_SAME_STMT
);
1940 if (!(is_cond_scalar_reduction (phi
, &reduc
, arg0
, arg1
,
1942 rhs
= fold_build_cond_expr (TREE_TYPE (res
), unshare_expr (cond
),
1946 /* Convert reduction stmt into vectorizable form. */
1947 rhs
= convert_scalar_cond_reduction (reduc
, gsi
, cond
, op0
, op1
,
1949 new_stmt
= gimple_build_assign (res
, rhs
);
1950 gsi_insert_before (gsi
, new_stmt
, GSI_SAME_STMT
);
1951 update_stmt (new_stmt
);
1957 tree type
= TREE_TYPE (gimple_phi_result (phi
));
1960 for (i
= 0; i
< args_len
; i
++)
1963 indexes
= phi_arg_map
.get (args
[i
]);
1964 if (i
!= args_len
- 1)
1965 lhs
= make_temp_ssa_name (type
, NULL
, "_ifc_");
1968 cond
= gen_phi_arg_condition (phi
, indexes
, gsi
);
1969 rhs
= fold_build_cond_expr (type
, unshare_expr (cond
),
1971 new_stmt
= gimple_build_assign (lhs
, rhs
);
1972 gsi_insert_before (gsi
, new_stmt
, GSI_SAME_STMT
);
1973 update_stmt (new_stmt
);
1978 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1980 fprintf (dump_file
, "new extended phi replacement stmt\n");
1981 print_gimple_stmt (dump_file
, new_stmt
, 0, TDF_SLIM
);
1985 /* Replaces in LOOP all the scalar phi nodes other than those in the
1986 LOOP->header block with conditional modify expressions. */
1989 predicate_all_scalar_phis (struct loop
*loop
)
1992 unsigned int orig_loop_num_nodes
= loop
->num_nodes
;
1995 for (i
= 1; i
< orig_loop_num_nodes
; i
++)
1998 gimple_stmt_iterator gsi
;
1999 gphi_iterator phi_gsi
;
2002 if (bb
== loop
->header
)
2005 phi_gsi
= gsi_start_phis (bb
);
2006 if (gsi_end_p (phi_gsi
))
2009 gsi
= gsi_after_labels (bb
);
2010 while (!gsi_end_p (phi_gsi
))
2012 phi
= phi_gsi
.phi ();
2013 if (virtual_operand_p (gimple_phi_result (phi
)))
2014 gsi_next (&phi_gsi
);
2017 predicate_scalar_phi (phi
, &gsi
);
2018 remove_phi_node (&phi_gsi
, false);
2024 /* Insert in each basic block of LOOP the statements produced by the
2025 gimplification of the predicates. */
2028 insert_gimplified_predicates (loop_p loop
)
2032 for (i
= 0; i
< loop
->num_nodes
; i
++)
2034 basic_block bb
= ifc_bbs
[i
];
2036 if (!is_predicated (bb
))
2037 gcc_assert (bb_predicate_gimplified_stmts (bb
) == NULL
);
2038 if (!is_predicated (bb
))
2040 /* Do not insert statements for a basic block that is not
2041 predicated. Also make sure that the predicate of the
2042 basic block is set to true. */
2043 reset_bb_predicate (bb
);
2047 stmts
= bb_predicate_gimplified_stmts (bb
);
2050 if (any_pred_load_store
)
2052 /* Insert the predicate of the BB just after the label,
2053 as the if-conversion of memory writes will use this
2055 gimple_stmt_iterator gsi
= gsi_after_labels (bb
);
2056 gsi_insert_seq_before (&gsi
, stmts
, GSI_SAME_STMT
);
2060 /* Insert the predicate of the BB at the end of the BB
2061 as this would reduce the register pressure: the only
2062 use of this predicate will be in successor BBs. */
2063 gimple_stmt_iterator gsi
= gsi_last_bb (bb
);
2066 || stmt_ends_bb_p (gsi_stmt (gsi
)))
2067 gsi_insert_seq_before (&gsi
, stmts
, GSI_SAME_STMT
);
2069 gsi_insert_seq_after (&gsi
, stmts
, GSI_SAME_STMT
);
2072 /* Once the sequence is code generated, set it to NULL. */
2073 set_bb_predicate_gimplified_stmts (bb
, NULL
);
2078 /* Helper function for predicate_mem_writes. Returns index of existent
2079 mask if it was created for given SIZE and -1 otherwise. */
2082 mask_exists (int size
, vec
<int> vec
)
2086 FOR_EACH_VEC_ELT (vec
, ix
, v
)
2092 /* Predicate each write to memory in LOOP.
2094 This function transforms control flow constructs containing memory
2097 | for (i = 0; i < N; i++)
2101 into the following form that does not contain control flow:
2103 | for (i = 0; i < N; i++)
2104 | A[i] = cond ? expr : A[i];
2106 The original CFG looks like this:
2113 | if (i < N) goto bb_5 else goto bb_2
2117 | cond = some_computation;
2118 | if (cond) goto bb_3 else goto bb_4
2130 insert_gimplified_predicates inserts the computation of the COND
2131 expression at the beginning of the destination basic block:
2138 | if (i < N) goto bb_5 else goto bb_2
2142 | cond = some_computation;
2143 | if (cond) goto bb_3 else goto bb_4
2147 | cond = some_computation;
2156 predicate_mem_writes is then predicating the memory write as follows:
2163 | if (i < N) goto bb_5 else goto bb_2
2167 | if (cond) goto bb_3 else goto bb_4
2171 | cond = some_computation;
2172 | A[i] = cond ? expr : A[i];
2180 and finally combine_blocks removes the basic block boundaries making
2181 the loop vectorizable:
2185 | if (i < N) goto bb_5 else goto bb_1
2189 | cond = some_computation;
2190 | A[i] = cond ? expr : A[i];
2191 | if (i < N) goto bb_5 else goto bb_4
2200 predicate_mem_writes (loop_p loop
)
2202 unsigned int i
, orig_loop_num_nodes
= loop
->num_nodes
;
2203 auto_vec
<int, 1> vect_sizes
;
2204 auto_vec
<tree
, 1> vect_masks
;
2206 for (i
= 1; i
< orig_loop_num_nodes
; i
++)
2208 gimple_stmt_iterator gsi
;
2209 basic_block bb
= ifc_bbs
[i
];
2210 tree cond
= bb_predicate (bb
);
2215 if (is_true_predicate (cond
))
2219 if (TREE_CODE (cond
) == TRUTH_NOT_EXPR
)
2222 cond
= TREE_OPERAND (cond
, 0);
2225 vect_sizes
.truncate (0);
2226 vect_masks
.truncate (0);
2228 for (gsi
= gsi_start_bb (bb
); !gsi_end_p (gsi
);)
2230 if (!gimple_assign_single_p (stmt
= gsi_stmt (gsi
)))
2232 else if (is_false_predicate (cond
)
2233 && gimple_vdef (stmt
))
2235 unlink_stmt_vdef (stmt
);
2236 gsi_remove (&gsi
, true);
2237 release_defs (stmt
);
2240 else if (gimple_plf (stmt
, GF_PLF_2
))
2242 tree lhs
= gimple_assign_lhs (stmt
);
2243 tree rhs
= gimple_assign_rhs1 (stmt
);
2244 tree ref
, addr
, ptr
, mask
;
2246 gimple_seq stmts
= NULL
;
2247 machine_mode mode
= TYPE_MODE (TREE_TYPE (lhs
));
2248 /* We checked before setting GF_PLF_2 that an equivalent
2249 integer mode exists. */
2250 int bitsize
= GET_MODE_BITSIZE (mode
).to_constant ();
2251 ref
= TREE_CODE (lhs
) == SSA_NAME
? rhs
: lhs
;
2252 mark_addressable (ref
);
2253 addr
= force_gimple_operand_gsi (&gsi
, build_fold_addr_expr (ref
),
2254 true, NULL_TREE
, true,
2256 if (!vect_sizes
.is_empty ()
2257 && (index
= mask_exists (bitsize
, vect_sizes
)) != -1)
2258 /* Use created mask. */
2259 mask
= vect_masks
[index
];
2262 if (COMPARISON_CLASS_P (cond
))
2263 mask
= gimple_build (&stmts
, TREE_CODE (cond
),
2265 TREE_OPERAND (cond
, 0),
2266 TREE_OPERAND (cond
, 1));
2273 = constant_boolean_node (true, TREE_TYPE (mask
));
2274 mask
= gimple_build (&stmts
, BIT_XOR_EXPR
,
2275 TREE_TYPE (mask
), mask
, true_val
);
2277 gsi_insert_seq_before (&gsi
, stmts
, GSI_SAME_STMT
);
2279 /* Save mask and its size for further use. */
2280 vect_sizes
.safe_push (bitsize
);
2281 vect_masks
.safe_push (mask
);
2283 ptr
= build_int_cst (reference_alias_ptr_type (ref
),
2284 get_object_alignment (ref
));
2285 /* Copy points-to info if possible. */
2286 if (TREE_CODE (addr
) == SSA_NAME
&& !SSA_NAME_PTR_INFO (addr
))
2287 copy_ref_info (build2 (MEM_REF
, TREE_TYPE (ref
), addr
, ptr
),
2289 if (TREE_CODE (lhs
) == SSA_NAME
)
2292 = gimple_build_call_internal (IFN_MASK_LOAD
, 3, addr
,
2294 gimple_call_set_lhs (new_stmt
, lhs
);
2295 gimple_set_vuse (new_stmt
, gimple_vuse (stmt
));
2300 = gimple_build_call_internal (IFN_MASK_STORE
, 4, addr
, ptr
,
2302 gimple_set_vuse (new_stmt
, gimple_vuse (stmt
));
2303 gimple_set_vdef (new_stmt
, gimple_vdef (stmt
));
2304 SSA_NAME_DEF_STMT (gimple_vdef (new_stmt
)) = new_stmt
;
2306 gimple_call_set_nothrow (new_stmt
, true);
2308 gsi_replace (&gsi
, new_stmt
, true);
2310 else if (gimple_vdef (stmt
))
2312 tree lhs
= gimple_assign_lhs (stmt
);
2313 tree rhs
= gimple_assign_rhs1 (stmt
);
2314 tree type
= TREE_TYPE (lhs
);
2316 lhs
= ifc_temp_var (type
, unshare_expr (lhs
), &gsi
);
2317 rhs
= ifc_temp_var (type
, unshare_expr (rhs
), &gsi
);
2319 std::swap (lhs
, rhs
);
2320 cond
= force_gimple_operand_gsi_1 (&gsi
, unshare_expr (cond
),
2321 is_gimple_condexpr
, NULL_TREE
,
2322 true, GSI_SAME_STMT
);
2323 rhs
= fold_build_cond_expr (type
, unshare_expr (cond
), rhs
, lhs
);
2324 gimple_assign_set_rhs1 (stmt
, ifc_temp_var (type
, rhs
, &gsi
));
2332 /* Remove all GIMPLE_CONDs and GIMPLE_LABELs of all the basic blocks
2333 other than the exit and latch of the LOOP. Also resets the
2334 GIMPLE_DEBUG information. */
2337 remove_conditions_and_labels (loop_p loop
)
2339 gimple_stmt_iterator gsi
;
2342 for (i
= 0; i
< loop
->num_nodes
; i
++)
2344 basic_block bb
= ifc_bbs
[i
];
2346 if (bb_with_exit_edge_p (loop
, bb
)
2347 || bb
== loop
->latch
)
2350 for (gsi
= gsi_start_bb (bb
); !gsi_end_p (gsi
); )
2351 switch (gimple_code (gsi_stmt (gsi
)))
2355 gsi_remove (&gsi
, true);
2359 /* ??? Should there be conditional GIMPLE_DEBUG_BINDs? */
2360 if (gimple_debug_bind_p (gsi_stmt (gsi
)))
2362 gimple_debug_bind_reset_value (gsi_stmt (gsi
));
2363 update_stmt (gsi_stmt (gsi
));
2374 /* Combine all the basic blocks from LOOP into one or two super basic
2375 blocks. Replace PHI nodes with conditional modify expressions. */
2378 combine_blocks (struct loop
*loop
)
2380 basic_block bb
, exit_bb
, merge_target_bb
;
2381 unsigned int orig_loop_num_nodes
= loop
->num_nodes
;
2386 remove_conditions_and_labels (loop
);
2387 insert_gimplified_predicates (loop
);
2388 predicate_all_scalar_phis (loop
);
2390 if (any_pred_load_store
)
2391 predicate_mem_writes (loop
);
2393 /* Merge basic blocks: first remove all the edges in the loop,
2394 except for those from the exit block. */
2396 bool *predicated
= XNEWVEC (bool, orig_loop_num_nodes
);
2397 for (i
= 0; i
< orig_loop_num_nodes
; i
++)
2400 predicated
[i
] = !is_true_predicate (bb_predicate (bb
));
2401 free_bb_predicate (bb
);
2402 if (bb_with_exit_edge_p (loop
, bb
))
2404 gcc_assert (exit_bb
== NULL
);
2408 gcc_assert (exit_bb
!= loop
->latch
);
2410 for (i
= 1; i
< orig_loop_num_nodes
; i
++)
2414 for (ei
= ei_start (bb
->preds
); (e
= ei_safe_edge (ei
));)
2416 if (e
->src
== exit_bb
)
2423 if (exit_bb
!= NULL
)
2425 if (exit_bb
!= loop
->header
)
2427 /* Connect this node to loop header. */
2428 make_single_succ_edge (loop
->header
, exit_bb
, EDGE_FALLTHRU
);
2429 set_immediate_dominator (CDI_DOMINATORS
, exit_bb
, loop
->header
);
2432 /* Redirect non-exit edges to loop->latch. */
2433 FOR_EACH_EDGE (e
, ei
, exit_bb
->succs
)
2435 if (!loop_exit_edge_p (loop
, e
))
2436 redirect_edge_and_branch (e
, loop
->latch
);
2438 set_immediate_dominator (CDI_DOMINATORS
, loop
->latch
, exit_bb
);
2442 /* If the loop does not have an exit, reconnect header and latch. */
2443 make_edge (loop
->header
, loop
->latch
, EDGE_FALLTHRU
);
2444 set_immediate_dominator (CDI_DOMINATORS
, loop
->latch
, loop
->header
);
2447 merge_target_bb
= loop
->header
;
2449 /* Get at the virtual def valid for uses starting at the first block
2450 we merge into the header. Without a virtual PHI the loop has the
2451 same virtual use on all stmts. */
2452 gphi
*vphi
= get_virtual_phi (loop
->header
);
2453 tree last_vdef
= NULL_TREE
;
2456 last_vdef
= gimple_phi_result (vphi
);
2457 for (gimple_stmt_iterator gsi
= gsi_start_bb (loop
->header
);
2458 ! gsi_end_p (gsi
); gsi_next (&gsi
))
2459 if (gimple_vdef (gsi_stmt (gsi
)))
2460 last_vdef
= gimple_vdef (gsi_stmt (gsi
));
2462 for (i
= 1; i
< orig_loop_num_nodes
; i
++)
2464 gimple_stmt_iterator gsi
;
2465 gimple_stmt_iterator last
;
2469 if (bb
== exit_bb
|| bb
== loop
->latch
)
2472 /* We release virtual PHIs late because we have to propagate them
2473 out using the current VUSE. The def might be the one used
2475 vphi
= get_virtual_phi (bb
);
2478 imm_use_iterator iter
;
2479 use_operand_p use_p
;
2481 FOR_EACH_IMM_USE_STMT (use_stmt
, iter
, gimple_phi_result (vphi
))
2483 FOR_EACH_IMM_USE_ON_STMT (use_p
, iter
)
2484 SET_USE (use_p
, last_vdef
);
2486 gsi
= gsi_for_stmt (vphi
);
2487 remove_phi_node (&gsi
, true);
2490 /* Make stmts member of loop->header and clear range info from all stmts
2491 in BB which is now no longer executed conditional on a predicate we
2492 could have derived it from. */
2493 for (gsi
= gsi_start_bb (bb
); !gsi_end_p (gsi
); gsi_next (&gsi
))
2495 gimple
*stmt
= gsi_stmt (gsi
);
2496 gimple_set_bb (stmt
, merge_target_bb
);
2497 /* Update virtual operands. */
2500 use_operand_p use_p
= ssa_vuse_operand (stmt
);
2502 && USE_FROM_PTR (use_p
) != last_vdef
)
2503 SET_USE (use_p
, last_vdef
);
2504 if (gimple_vdef (stmt
))
2505 last_vdef
= gimple_vdef (stmt
);
2511 FOR_EACH_SSA_TREE_OPERAND (op
, stmt
, i
, SSA_OP_DEF
)
2512 reset_flow_sensitive_info (op
);
2516 /* Update stmt list. */
2517 last
= gsi_last_bb (merge_target_bb
);
2518 gsi_insert_seq_after_without_update (&last
, bb_seq (bb
), GSI_NEW_STMT
);
2519 set_bb_seq (bb
, NULL
);
2521 delete_basic_block (bb
);
2524 /* If possible, merge loop header to the block with the exit edge.
2525 This reduces the number of basic blocks to two, to please the
2526 vectorizer that handles only loops with two nodes. */
2528 && exit_bb
!= loop
->header
)
2530 /* We release virtual PHIs late because we have to propagate them
2531 out using the current VUSE. The def might be the one used
2533 vphi
= get_virtual_phi (exit_bb
);
2536 imm_use_iterator iter
;
2537 use_operand_p use_p
;
2539 FOR_EACH_IMM_USE_STMT (use_stmt
, iter
, gimple_phi_result (vphi
))
2541 FOR_EACH_IMM_USE_ON_STMT (use_p
, iter
)
2542 SET_USE (use_p
, last_vdef
);
2544 gimple_stmt_iterator gsi
= gsi_for_stmt (vphi
);
2545 remove_phi_node (&gsi
, true);
2548 if (can_merge_blocks_p (loop
->header
, exit_bb
))
2549 merge_blocks (loop
->header
, exit_bb
);
2557 /* Version LOOP before if-converting it; the original loop
2558 will be if-converted, the new copy of the loop will not,
2559 and the LOOP_VECTORIZED internal call will be guarding which
2560 loop to execute. The vectorizer pass will fold this
2561 internal call into either true or false.
2563 Note that this function intentionally invalidates profile. Both edges
2564 out of LOOP_VECTORIZED must have 100% probability so the profile remains
2565 consistent after the condition is folded in the vectorizer. */
2567 static struct loop
*
2568 version_loop_for_if_conversion (struct loop
*loop
)
2570 basic_block cond_bb
;
2571 tree cond
= make_ssa_name (boolean_type_node
);
2572 struct loop
*new_loop
;
2574 gimple_stmt_iterator gsi
;
2575 unsigned int save_length
;
2577 g
= gimple_build_call_internal (IFN_LOOP_VECTORIZED
, 2,
2578 build_int_cst (integer_type_node
, loop
->num
),
2580 gimple_call_set_lhs (g
, cond
);
2582 /* Save BB->aux around loop_version as that uses the same field. */
2583 save_length
= loop
->inner
? loop
->inner
->num_nodes
: loop
->num_nodes
;
2584 void **saved_preds
= XALLOCAVEC (void *, save_length
);
2585 for (unsigned i
= 0; i
< save_length
; i
++)
2586 saved_preds
[i
] = ifc_bbs
[i
]->aux
;
2588 initialize_original_copy_tables ();
2589 /* At this point we invalidate porfile confistency until IFN_LOOP_VECTORIZED
2590 is re-merged in the vectorizer. */
2591 new_loop
= loop_version (loop
, cond
, &cond_bb
,
2592 profile_probability::always (),
2593 profile_probability::always (),
2594 profile_probability::always (),
2595 profile_probability::always (), true);
2596 free_original_copy_tables ();
2598 for (unsigned i
= 0; i
< save_length
; i
++)
2599 ifc_bbs
[i
]->aux
= saved_preds
[i
];
2601 if (new_loop
== NULL
)
2604 new_loop
->dont_vectorize
= true;
2605 new_loop
->force_vectorize
= false;
2606 gsi
= gsi_last_bb (cond_bb
);
2607 gimple_call_set_arg (g
, 1, build_int_cst (integer_type_node
, new_loop
->num
));
2608 gsi_insert_before (&gsi
, g
, GSI_SAME_STMT
);
2609 update_ssa (TODO_update_ssa
);
2613 /* Return true when LOOP satisfies the follow conditions that will
2614 allow it to be recognized by the vectorizer for outer-loop
2616 - The loop is not the root node of the loop tree.
2617 - The loop has exactly one inner loop.
2618 - The loop has a single exit.
2619 - The loop header has a single successor, which is the inner
2621 - Each of the inner and outer loop latches have a single
2623 - The loop exit block has a single predecessor, which is the
2624 inner loop's exit block. */
2627 versionable_outer_loop_p (struct loop
*loop
)
2629 if (!loop_outer (loop
)
2630 || loop
->dont_vectorize
2632 || loop
->inner
->next
2633 || !single_exit (loop
)
2634 || !single_succ_p (loop
->header
)
2635 || single_succ (loop
->header
) != loop
->inner
->header
2636 || !single_pred_p (loop
->latch
)
2637 || !single_pred_p (loop
->inner
->latch
))
2640 basic_block outer_exit
= single_pred (loop
->latch
);
2641 basic_block inner_exit
= single_pred (loop
->inner
->latch
);
2643 if (!single_pred_p (outer_exit
) || single_pred (outer_exit
) != inner_exit
)
2647 fprintf (dump_file
, "Found vectorizable outer loop for versioning\n");
2652 /* Performs splitting of critical edges. Skip splitting and return false
2653 if LOOP will not be converted because:
2655 - LOOP is not well formed.
2656 - LOOP has PHI with more than MAX_PHI_ARG_NUM arguments.
2658 Last restriction is valid only if AGGRESSIVE_IF_CONV is false. */
2661 ifcvt_split_critical_edges (struct loop
*loop
, bool aggressive_if_conv
)
2665 unsigned int num
= loop
->num_nodes
;
2670 auto_vec
<edge
> critical_edges
;
2672 /* Loop is not well formed. */
2673 if (num
<= 2 || loop
->inner
|| !single_exit (loop
))
2676 body
= get_loop_body (loop
);
2677 for (i
= 0; i
< num
; i
++)
2680 if (!aggressive_if_conv
2682 && EDGE_COUNT (bb
->preds
) > MAX_PHI_ARG_NUM
)
2684 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
2686 "BB %d has complicated PHI with more than %u args.\n",
2687 bb
->index
, MAX_PHI_ARG_NUM
);
2692 if (bb
== loop
->latch
|| bb_with_exit_edge_p (loop
, bb
))
2695 stmt
= last_stmt (bb
);
2696 /* Skip basic blocks not ending with conditional branch. */
2697 if (!stmt
|| gimple_code (stmt
) != GIMPLE_COND
)
2700 FOR_EACH_EDGE (e
, ei
, bb
->succs
)
2701 if (EDGE_CRITICAL_P (e
) && e
->dest
->loop_father
== loop
)
2702 critical_edges
.safe_push (e
);
2706 while (critical_edges
.length () > 0)
2708 e
= critical_edges
.pop ();
2709 /* Don't split if bb can be predicated along non-critical edge. */
2710 if (EDGE_COUNT (e
->dest
->preds
) > 2 || all_preds_critical_p (e
->dest
))
/* Delete redundant statements produced by predication which prevents
   loop vectorization.

   This is a single-block DCE: mark obviously-live statements in BB,
   propagate liveness backwards through SSA operands, then sweep away
   everything left unmarked.  The pass-local flag GF_PLF_2 is used as
   the "live" mark.  */

static void
ifcvt_local_dce (basic_block bb)
{
  gimple *stmt;
  gimple *stmt1;
  gimple *phi;
  gimple_stmt_iterator gsi;
  auto_vec<gimple *> worklist;
  enum gimple_code code;
  use_operand_p use_p;
  imm_use_iterator imm_iter;

  worklist.create (64);
  /* Consider all phi as live statements.  */
  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      phi = gsi_stmt (gsi);
      gimple_set_plf (phi, GF_PLF_2, true);
      worklist.safe_push (phi);
    }
  /* Consider load/store statements, CALL and COND as live.  */
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      stmt = gsi_stmt (gsi);
      /* Memory accesses and debug stmts are kept unconditionally.  */
      if (gimple_store_p (stmt)
	  || gimple_assign_load_p (stmt)
	  || is_gimple_debug (stmt))
	{
	  gimple_set_plf (stmt, GF_PLF_2, true);
	  worklist.safe_push (stmt);
	  continue;
	}
      code = gimple_code (stmt);
      /* Conditions and calls may have side effects; keep them.  */
      if (code == GIMPLE_COND || code == GIMPLE_CALL)
	{
	  gimple_set_plf (stmt, GF_PLF_2, true);
	  worklist.safe_push (stmt);
	  continue;
	}
      /* Everything else starts out dead ...  */
      gimple_set_plf (stmt, GF_PLF_2, false);

      /* ... except assignments whose result is used outside BB: those
	 must stay because this DCE only looks inside BB.  */
      if (code == GIMPLE_ASSIGN)
	{
	  tree lhs = gimple_assign_lhs (stmt);
	  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
	    {
	      stmt1 = USE_STMT (use_p);
	      if (gimple_bb (stmt1) != bb)
		{
		  gimple_set_plf (stmt, GF_PLF_2, true);
		  worklist.safe_push (stmt);
		  break;
		}
	    }
	}
    }
  /* Propagate liveness through arguments of live stmt.  */
  while (worklist.length () > 0)
    {
      ssa_op_iter iter;
      use_operand_p use_p;
      tree use;

      stmt = worklist.pop ();
      FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
	{
	  use = USE_FROM_PTR (use_p);
	  if (TREE_CODE (use) != SSA_NAME)
	    continue;
	  stmt1 = SSA_NAME_DEF_STMT (use);
	  /* Only defs in BB matter, and already-live ones are done.  */
	  if (gimple_bb (stmt1) != bb
	      || gimple_plf (stmt1, GF_PLF_2))
	    continue;
	  gimple_set_plf (stmt1, GF_PLF_2, true);
	  worklist.safe_push (stmt1);
	}
    }
  /* Delete dead statements.  */
  gsi = gsi_start_bb (bb);
  while (!gsi_end_p (gsi))
    {
      stmt = gsi_stmt (gsi);
      if (gimple_plf (stmt, GF_PLF_2))
	{
	  gsi_next (&gsi);
	  continue;
	}
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Delete dead stmt in bb#%d\n", bb->index);
	  print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
	}
      /* gsi_remove advances GSI to the next statement.  */
      gsi_remove (&gsi, true);
      release_defs (stmt);
    }
}
2817 /* If-convert LOOP when it is legal. For the moment this pass has no
2818 profitability analysis. Returns non-zero todo flags when something
2822 tree_if_conversion (struct loop
*loop
)
2824 unsigned int todo
= 0;
2825 bool aggressive_if_conv
;
2831 any_pred_load_store
= false;
2832 any_complicated_phi
= false;
2834 /* Apply more aggressive if-conversion when loop or its outer loop were
2835 marked with simd pragma. When that's the case, we try to if-convert
2836 loop containing PHIs with more than MAX_PHI_ARG_NUM arguments. */
2837 aggressive_if_conv
= loop
->force_vectorize
;
2838 if (!aggressive_if_conv
)
2840 struct loop
*outer_loop
= loop_outer (loop
);
2841 if (outer_loop
&& outer_loop
->force_vectorize
)
2842 aggressive_if_conv
= true;
2845 if (!ifcvt_split_critical_edges (loop
, aggressive_if_conv
))
2848 if (!if_convertible_loop_p (loop
)
2849 || !dbg_cnt (if_conversion_tree
))
2852 if ((any_pred_load_store
|| any_complicated_phi
)
2853 && ((!flag_tree_loop_vectorize
&& !loop
->force_vectorize
)
2854 || loop
->dont_vectorize
))
2857 /* Since we have no cost model, always version loops unless the user
2858 specified -ftree-loop-if-convert or unless versioning is required.
2859 Either version this loop, or if the pattern is right for outer-loop
2860 vectorization, version the outer loop. In the latter case we will
2861 still if-convert the original inner loop. */
2862 if (any_pred_load_store
2863 || any_complicated_phi
2864 || flag_tree_loop_if_convert
!= 1)
2867 = (versionable_outer_loop_p (loop_outer (loop
))
2868 ? loop_outer (loop
) : loop
);
2869 struct loop
*nloop
= version_loop_for_if_conversion (vloop
);
2874 /* If versionable_outer_loop_p decided to version the
2875 outer loop, version also the inner loop of the non-vectorized
2876 loop copy. So we transform:
2880 if (LOOP_VECTORIZED (1, 3))
2886 loop3 (copy of loop1)
2887 if (LOOP_VECTORIZED (4, 5))
2888 loop4 (copy of loop2)
2890 loop5 (copy of loop4) */
2891 gcc_assert (nloop
->inner
&& nloop
->inner
->next
== NULL
);
2892 rloop
= nloop
->inner
;
2896 /* Now all statements are if-convertible. Combine all the basic
2897 blocks into one huge basic block doing the if-conversion
2899 combine_blocks (loop
);
2901 /* Delete dead predicate computations. */
2902 ifcvt_local_dce (loop
->header
);
2904 todo
|= TODO_cleanup_cfg
;
2911 for (i
= 0; i
< loop
->num_nodes
; i
++)
2912 free_bb_predicate (ifc_bbs
[i
]);
/* Tree if-conversion pass management.  */

/* Static metadata for the if-conversion pass: dump-file name, timing
   variable, and the IL properties it requires (CFG + SSA).  */

const pass_data pass_data_if_conversion =
{
  GIMPLE_PASS, /* type */
  "ifcvt", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_LOOP_IFCVT, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};
/* Pass-manager wrapper that runs tree if-conversion over all loops of
   a function; see the gate and execute methods below.  */

class pass_if_conversion : public gimple_opt_pass
{
public:
  pass_if_conversion (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_if_conversion, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *);
  virtual unsigned int execute (function *);

}; // class pass_if_conversion
2957 pass_if_conversion::gate (function
*fun
)
2959 return (((flag_tree_loop_vectorize
|| fun
->has_force_vectorize_loops
)
2960 && flag_tree_loop_if_convert
!= 0)
2961 || flag_tree_loop_if_convert
== 1);
/* Run if-conversion on each suitable loop of FUN and return the
   accumulated todo flags.  */

unsigned int
pass_if_conversion::execute (function *fun)
{
  struct loop *loop;
  unsigned int todo = 0;

  /* Loop 0 is the function pseudo-loop; <= 1 means no real loops.  */
  if (number_of_loops (fun) <= 1)
    return 0;

  /* Convert a loop when the user forced it, or when the vectorizer is
     enabled for it and the loop is not marked dont_vectorize.  */
  FOR_EACH_LOOP (loop, 0)
    if (flag_tree_loop_if_convert == 1
	|| ((flag_tree_loop_vectorize || loop->force_vectorize)
	    && !loop->dont_vectorize))
      todo |= tree_if_conversion (loop);

  /* If-conversion changed the CFG, so cached iteration-count and SCEV
     data may be stale.  */
  if (todo)
    {
      free_numbers_of_iterations_estimates (fun);
      scev_reset ();
    }

  if (flag_checking)
    {
      basic_block bb;
      /* tree_if_conversion must have restored every BB->aux it used.  */
      FOR_EACH_BB_FN (bb, fun)
	gcc_assert (!bb->aux);
    }

  return todo;
}
/* Factory used by the pass manager to instantiate the if-conversion
   pass for context CTXT.  */

gimple_opt_pass *
make_pass_if_conversion (gcc::context *ctxt)
{
  return new pass_if_conversion (ctxt);
}