/* If-conversion for vectorizer.
   Copyright (C) 2004-2016 Free Software Foundation, Inc.
   Contributed by Devang Patel <dpatel@apple.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* This pass implements a tree level if-conversion of loops.  Its
   initial goal is to help the vectorizer to vectorize loops with
   conditions.

   A short description of if-conversion:

     o Decide if a loop is if-convertible or not.
     o Walk all loop basic blocks in breadth first order (BFS order).
       o Remove conditional statements (at the end of a basic block)
         and propagate the condition into the destination basic blocks'
         predicate list.
       o Replace modify expressions with conditional modify expressions
         using the current basic block's condition.
     o Merge all basic blocks
       o Replace phi nodes with conditional modify expressions
       o Merge all basic blocks into the header
   Sample transformation:

     INPUT
     -----

     # i_23 = PHI <0(0), i_18(10)>;
     ...
     if (j_15 > 41) goto <L1>; else goto <L17>;
     ...
     # iftmp.2_4 = PHI <0(8), 42(2)>;
     ...
     if (i_18 <= 15) goto <L19>; else goto <L18>;
     ...

     OUTPUT
     ------

     # i_23 = PHI <0(0), i_18(10)>;
     ...
     iftmp.2_4 = j_15 > 41 ? 42 : 0;
     ...
     if (i_18 <= 15) goto <L19>; else goto <L18>;
     ...  */
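/* An illustrative source-level sketch (not taken from this pass; variable
   names are hypothetical): the transformation above corresponds roughly to
   rewriting

     for (i = 0; i <= 15; i++)
       {
	 if (A[i] > 41)
	   A[i] = 42;
	 else
	   A[i] = 0;
       }

   as the branch-free loop

     for (i = 0; i <= 15; i++)
       A[i] = A[i] > 41 ? 42 : 0;

   which the vectorizer can then turn into masked or blended vector code.  */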
85 #include "coretypes.h"
91 #include "tree-pass.h"
94 #include "optabs-query.h"
95 #include "gimple-pretty-print.h"
97 #include "fold-const.h"
98 #include "stor-layout.h"
99 #include "gimple-fold.h"
100 #include "gimplify.h"
101 #include "gimple-iterator.h"
102 #include "gimplify-me.h"
103 #include "tree-cfg.h"
104 #include "tree-into-ssa.h"
105 #include "tree-ssa.h"
107 #include "tree-data-ref.h"
108 #include "tree-scalar-evolution.h"
109 #include "tree-ssa-loop.h"
110 #include "tree-ssa-loop-niter.h"
111 #include "tree-ssa-loop-ivopts.h"
112 #include "tree-ssa-address.h"
114 #include "tree-hash-traits.h"
116 #include "builtins.h"
/* Only handle PHIs with no more arguments unless we are asked to by
   simd pragma.  */
#define MAX_PHI_ARG_NUM \
  ((unsigned) PARAM_VALUE (PARAM_MAX_TREE_IF_CONVERSION_PHI_ARGS))
/* Indicate if new load/store that needs to be predicated is introduced
   during if conversion.  */
static bool any_pred_load_store;
/* Indicate if there are any complicated PHIs that need to be handled in
   if-conversion.  A complicated PHI has more than two arguments and can't
   be degenerated to a two-argument PHI.  See more information in the
   comment before phi_convertible_by_degenerating_args.  */
static bool any_complicated_phi;
/* Hash for struct innermost_loop_behavior.  It depends on the user to
   free the memory.  */

struct innermost_loop_behavior_hash : nofree_ptr_hash <innermost_loop_behavior>
{
  static inline hashval_t hash (const value_type &);
  static inline bool equal (const value_type &, const compare_type &);
};
inline hashval_t
innermost_loop_behavior_hash::hash (const value_type &e)
{
  hashval_t hash;

  hash = iterative_hash_expr (e->base_address, 0);
  hash = iterative_hash_expr (e->offset, hash);
  hash = iterative_hash_expr (e->init, hash);
  return iterative_hash_expr (e->step, hash);
}
inline bool
innermost_loop_behavior_hash::equal (const value_type &e1,
				     const compare_type &e2)
{
  if ((e1->base_address && !e2->base_address)
      || (!e1->base_address && e2->base_address)
      || (!e1->offset && e2->offset)
      || (e1->offset && !e2->offset)
      || (!e1->init && e2->init)
      || (e1->init && !e2->init)
      || (!e1->step && e2->step)
      || (e1->step && !e2->step))
    return false;

  if (e1->base_address && e2->base_address
      && !operand_equal_p (e1->base_address, e2->base_address, 0))
    return false;
  if (e1->offset && e2->offset
      && !operand_equal_p (e1->offset, e2->offset, 0))
    return false;
  if (e1->init && e2->init
      && !operand_equal_p (e1->init, e2->init, 0))
    return false;
  if (e1->step && e2->step
      && !operand_equal_p (e1->step, e2->step, 0))
    return false;

  return true;
}
/* List of basic blocks in if-conversion-suitable order.  */
static basic_block *ifc_bbs;

/* Hash table to store <DR's innermost loop behavior, DR> pairs.  */
static hash_map<innermost_loop_behavior_hash,
		data_reference_p> *innermost_DR_map;

/* Hash table to store <base reference, DR> pairs.  */
static hash_map<tree_operand_hash, data_reference_p> *baseref_DR_map;
/* Structure used to predicate basic blocks.  This is attached to the
   ->aux field of the BBs in the loop to be if-converted.  */
struct bb_predicate {

  /* The condition under which this basic block is executed.  */
  tree predicate;

  /* PREDICATE is gimplified, and the sequence of statements is
     recorded here, in order to avoid the duplication of computations
     that occur in previous conditions.  See PR44483.  */
  gimple_seq predicate_gimplified_stmts;
};
/* Returns true when the basic block BB has a predicate.  */

static inline bool
bb_has_predicate (basic_block bb)
{
  return bb->aux != NULL;
}
/* Returns the gimplified predicate for basic block BB.  */

static inline tree
bb_predicate (basic_block bb)
{
  return ((struct bb_predicate *) bb->aux)->predicate;
}
/* Sets the gimplified predicate COND for basic block BB.  */

static inline void
set_bb_predicate (basic_block bb, tree cond)
{
  gcc_assert ((TREE_CODE (cond) == TRUTH_NOT_EXPR
	       && is_gimple_condexpr (TREE_OPERAND (cond, 0)))
	      || is_gimple_condexpr (cond));
  ((struct bb_predicate *) bb->aux)->predicate = cond;
}
/* Returns the sequence of statements of the gimplification of the
   predicate for basic block BB.  */

static inline gimple_seq
bb_predicate_gimplified_stmts (basic_block bb)
{
  return ((struct bb_predicate *) bb->aux)->predicate_gimplified_stmts;
}
/* Sets the sequence of statements STMTS of the gimplification of the
   predicate for basic block BB.  */

static inline void
set_bb_predicate_gimplified_stmts (basic_block bb, gimple_seq stmts)
{
  ((struct bb_predicate *) bb->aux)->predicate_gimplified_stmts = stmts;
}
/* Adds the sequence of statements STMTS to the sequence of statements
   of the predicate for basic block BB.  */

static inline void
add_bb_predicate_gimplified_stmts (basic_block bb, gimple_seq stmts)
{
  gimple_seq_add_seq_without_update
    (&(((struct bb_predicate *) bb->aux)->predicate_gimplified_stmts), stmts);
}
/* Initializes to TRUE the predicate of basic block BB.  */

static inline void
init_bb_predicate (basic_block bb)
{
  bb->aux = XNEW (struct bb_predicate);
  set_bb_predicate_gimplified_stmts (bb, NULL);
  set_bb_predicate (bb, boolean_true_node);
}
/* Release the SSA_NAMEs associated with the predicate of basic block BB,
   but don't actually free it.  */

static inline void
release_bb_predicate (basic_block bb)
{
  gimple_seq stmts = bb_predicate_gimplified_stmts (bb);
  if (stmts)
    {
      for (gimple_stmt_iterator i = gsi_start (stmts);
	   !gsi_end_p (i); gsi_next (&i))
	gcc_assert (! gimple_use_ops (gsi_stmt (i)));

      set_bb_predicate_gimplified_stmts (bb, NULL);
    }
}
/* Free the predicate of basic block BB.  */

static void
free_bb_predicate (basic_block bb)
{
  if (!bb_has_predicate (bb))
    return;

  release_bb_predicate (bb);
  free (bb->aux);
  bb->aux = NULL;
}
/* Reinitialize predicate of BB with the true predicate.  */

static void
reset_bb_predicate (basic_block bb)
{
  if (!bb_has_predicate (bb))
    init_bb_predicate (bb);
  else
    {
      release_bb_predicate (bb);
      set_bb_predicate (bb, boolean_true_node);
    }
}
/* Returns a new SSA_NAME of type TYPE that is assigned the value of
   the expression EXPR.  Inserts the statement created for this
   computation before GSI and leaves the iterator GSI at the same
   statement.  */

static tree
ifc_temp_var (tree type, tree expr, gimple_stmt_iterator *gsi)
{
  tree new_name = make_temp_ssa_name (type, NULL, "_ifc_");
  gimple *stmt = gimple_build_assign (new_name, expr);
  gimple_set_vuse (stmt, gimple_vuse (gsi_stmt (*gsi)));
  gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
  return new_name;
}
/* Return true when COND is a false predicate.  */

static inline bool
is_false_predicate (tree cond)
{
  return (cond != NULL_TREE
	  && (cond == boolean_false_node
	      || integer_zerop (cond)));
}
/* Return true when COND is a true predicate.  */

static inline bool
is_true_predicate (tree cond)
{
  return (cond == NULL_TREE
	  || cond == boolean_true_node
	  || integer_onep (cond));
}
/* Returns true when BB has a predicate that is not trivial: true or
   NULL_TREE.  */

static inline bool
is_predicated (basic_block bb)
{
  return !is_true_predicate (bb_predicate (bb));
}
363 /* Parses the predicate COND and returns its comparison code and
364 operands OP0 and OP1. */
366 static enum tree_code
367 parse_predicate (tree cond
, tree
*op0
, tree
*op1
)
371 if (TREE_CODE (cond
) == SSA_NAME
372 && is_gimple_assign (s
= SSA_NAME_DEF_STMT (cond
)))
374 if (TREE_CODE_CLASS (gimple_assign_rhs_code (s
)) == tcc_comparison
)
376 *op0
= gimple_assign_rhs1 (s
);
377 *op1
= gimple_assign_rhs2 (s
);
378 return gimple_assign_rhs_code (s
);
381 else if (gimple_assign_rhs_code (s
) == TRUTH_NOT_EXPR
)
383 tree op
= gimple_assign_rhs1 (s
);
384 tree type
= TREE_TYPE (op
);
385 enum tree_code code
= parse_predicate (op
, op0
, op1
);
387 return code
== ERROR_MARK
? ERROR_MARK
388 : invert_tree_comparison (code
, HONOR_NANS (type
));
394 if (COMPARISON_CLASS_P (cond
))
396 *op0
= TREE_OPERAND (cond
, 0);
397 *op1
= TREE_OPERAND (cond
, 1);
398 return TREE_CODE (cond
);
404 /* Returns the fold of predicate C1 OR C2 at location LOC. */
407 fold_or_predicates (location_t loc
, tree c1
, tree c2
)
409 tree op1a
, op1b
, op2a
, op2b
;
410 enum tree_code code1
= parse_predicate (c1
, &op1a
, &op1b
);
411 enum tree_code code2
= parse_predicate (c2
, &op2a
, &op2b
);
413 if (code1
!= ERROR_MARK
&& code2
!= ERROR_MARK
)
415 tree t
= maybe_fold_or_comparisons (code1
, op1a
, op1b
,
421 return fold_build2_loc (loc
, TRUTH_OR_EXPR
, boolean_type_node
, c1
, c2
);
424 /* Returns either a COND_EXPR or the folded expression if the folded
425 expression is a MIN_EXPR, a MAX_EXPR, an ABS_EXPR,
426 a constant or a SSA_NAME. */
429 fold_build_cond_expr (tree type
, tree cond
, tree rhs
, tree lhs
)
431 tree rhs1
, lhs1
, cond_expr
;
433 /* If COND is comparison r != 0 and r has boolean type, convert COND
434 to SSA_NAME to accept by vect bool pattern. */
435 if (TREE_CODE (cond
) == NE_EXPR
)
437 tree op0
= TREE_OPERAND (cond
, 0);
438 tree op1
= TREE_OPERAND (cond
, 1);
439 if (TREE_CODE (op0
) == SSA_NAME
440 && TREE_CODE (TREE_TYPE (op0
)) == BOOLEAN_TYPE
441 && (integer_zerop (op1
)))
444 cond_expr
= fold_ternary (COND_EXPR
, type
, cond
, rhs
, lhs
);
446 if (cond_expr
== NULL_TREE
)
447 return build3 (COND_EXPR
, type
, cond
, rhs
, lhs
);
449 STRIP_USELESS_TYPE_CONVERSION (cond_expr
);
451 if (is_gimple_val (cond_expr
))
454 if (TREE_CODE (cond_expr
) == ABS_EXPR
)
456 rhs1
= TREE_OPERAND (cond_expr
, 1);
457 STRIP_USELESS_TYPE_CONVERSION (rhs1
);
458 if (is_gimple_val (rhs1
))
459 return build1 (ABS_EXPR
, type
, rhs1
);
462 if (TREE_CODE (cond_expr
) == MIN_EXPR
463 || TREE_CODE (cond_expr
) == MAX_EXPR
)
465 lhs1
= TREE_OPERAND (cond_expr
, 0);
466 STRIP_USELESS_TYPE_CONVERSION (lhs1
);
467 rhs1
= TREE_OPERAND (cond_expr
, 1);
468 STRIP_USELESS_TYPE_CONVERSION (rhs1
);
469 if (is_gimple_val (rhs1
) && is_gimple_val (lhs1
))
470 return build2 (TREE_CODE (cond_expr
), type
, lhs1
, rhs1
);
472 return build3 (COND_EXPR
, type
, cond
, rhs
, lhs
);
475 /* Add condition NC to the predicate list of basic block BB. LOOP is
476 the loop to be if-converted. Use predicate of cd-equivalent block
477 for join bb if it exists: we call basic blocks bb1 and bb2
478 cd-equivalent if they are executed under the same condition. */
481 add_to_predicate_list (struct loop
*loop
, basic_block bb
, tree nc
)
486 if (is_true_predicate (nc
))
489 /* If dominance tells us this basic block is always executed,
490 don't record any predicates for it. */
491 if (dominated_by_p (CDI_DOMINATORS
, loop
->latch
, bb
))
494 dom_bb
= get_immediate_dominator (CDI_DOMINATORS
, bb
);
  /* We use the notion of cd equivalence to get a simpler predicate for
     the join block, e.g. if the join block has 2 predecessors with predicates
     p1 & p2 and p1 & !p2, we'd like to get p1 for it instead of
     p1 & p2 | p1 & !p2.  */
499 if (dom_bb
!= loop
->header
500 && get_immediate_dominator (CDI_POST_DOMINATORS
, dom_bb
) == bb
)
502 gcc_assert (flow_bb_inside_loop_p (loop
, dom_bb
));
503 bc
= bb_predicate (dom_bb
);
504 if (!is_true_predicate (bc
))
505 set_bb_predicate (bb
, bc
);
507 gcc_assert (is_true_predicate (bb_predicate (bb
)));
508 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
509 fprintf (dump_file
, "Use predicate of bb#%d for bb#%d\n",
510 dom_bb
->index
, bb
->index
);
514 if (!is_predicated (bb
))
518 bc
= bb_predicate (bb
);
519 bc
= fold_or_predicates (EXPR_LOCATION (bc
), nc
, bc
);
520 if (is_true_predicate (bc
))
522 reset_bb_predicate (bb
);
527 /* Allow a TRUTH_NOT_EXPR around the main predicate. */
528 if (TREE_CODE (bc
) == TRUTH_NOT_EXPR
)
529 tp
= &TREE_OPERAND (bc
, 0);
532 if (!is_gimple_condexpr (*tp
))
535 *tp
= force_gimple_operand_1 (*tp
, &stmts
, is_gimple_condexpr
, NULL_TREE
);
536 add_bb_predicate_gimplified_stmts (bb
, stmts
);
538 set_bb_predicate (bb
, bc
);
541 /* Add the condition COND to the previous condition PREV_COND, and add
542 this to the predicate list of the destination of edge E. LOOP is
543 the loop to be if-converted. */
546 add_to_dst_predicate_list (struct loop
*loop
, edge e
,
547 tree prev_cond
, tree cond
)
549 if (!flow_bb_inside_loop_p (loop
, e
->dest
))
552 if (!is_true_predicate (prev_cond
))
553 cond
= fold_build2 (TRUTH_AND_EXPR
, boolean_type_node
,
556 if (!dominated_by_p (CDI_DOMINATORS
, loop
->latch
, e
->dest
))
557 add_to_predicate_list (loop
, e
->dest
, cond
);
560 /* Return true if one of the successor edges of BB exits LOOP. */
563 bb_with_exit_edge_p (struct loop
*loop
, basic_block bb
)
568 FOR_EACH_EDGE (e
, ei
, bb
->succs
)
569 if (loop_exit_edge_p (loop
, e
))
/* Given PHI which has more than two arguments, this function checks if
   it's if-convertible by degenerating its arguments.  Specifically, if
   the two conditions below are satisfied:

     1) The number of PHI arguments with different values equals 2, and one
	argument has only one occurrence.
     2) The edge corresponding to the unique argument isn't a critical edge.

   Such a PHI can be handled as if it had only two arguments.  For example,
   the PHI below:

     res = PHI <A_1(e1), A_1(e2), A_2(e3)>;

   can be transformed into:

     res = (predicate of e3) ? A_2 : A_1;

   Return TRUE if it is the case, FALSE otherwise.  */
595 phi_convertible_by_degenerating_args (gphi
*phi
)
598 tree arg
, t1
= NULL
, t2
= NULL
;
599 unsigned int i
, i1
= 0, i2
= 0, n1
= 0, n2
= 0;
600 unsigned int num_args
= gimple_phi_num_args (phi
);
602 gcc_assert (num_args
> 2);
604 for (i
= 0; i
< num_args
; i
++)
606 arg
= gimple_phi_arg_def (phi
, i
);
607 if (t1
== NULL
|| operand_equal_p (t1
, arg
, 0))
613 else if (t2
== NULL
|| operand_equal_p (t2
, arg
, 0))
623 if (n1
!= 1 && n2
!= 1)
626 /* Check if the edge corresponding to the unique arg is critical. */
627 e
= gimple_phi_arg_edge (phi
, (n1
== 1) ? i1
: i2
);
628 if (EDGE_COUNT (e
->src
->succs
) > 1)
/* Return true when PHI is if-convertible.  PHI is part of loop LOOP
   and it belongs to basic block BB.  Note that at this point it is
   already known that PHI is if-convertible.  This function updates the
   global variable ANY_COMPLICATED_PHI if PHI is complicated.  */
640 if_convertible_phi_p (struct loop
*loop
, basic_block bb
, gphi
*phi
)
642 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
644 fprintf (dump_file
, "-------------------------\n");
645 print_gimple_stmt (dump_file
, phi
, 0, TDF_SLIM
);
648 if (bb
!= loop
->header
649 && gimple_phi_num_args (phi
) > 2
650 && !phi_convertible_by_degenerating_args (phi
))
651 any_complicated_phi
= true;
/* Records the status of a data reference.  This struct is attached to
   each DR->aux field.  */

struct ifc_dr {
  bool rw_unconditionally;
  bool w_unconditionally;
  bool written_at_least_once;

  tree rw_predicate;
  tree w_predicate;
  tree base_w_predicate;
};
669 #define IFC_DR(DR) ((struct ifc_dr *) (DR)->aux)
670 #define DR_BASE_W_UNCONDITIONALLY(DR) (IFC_DR (DR)->written_at_least_once)
671 #define DR_RW_UNCONDITIONALLY(DR) (IFC_DR (DR)->rw_unconditionally)
672 #define DR_W_UNCONDITIONALLY(DR) (IFC_DR (DR)->w_unconditionally)
/* Iterates over DR's and stores refs, DR and base refs, DR pairs in
   HASH tables.  While storing them in the HASH tables, it checks if the
   reference is unconditionally read or written and stores that as flag
   information.  For the base reference it checks if it is written at least
   once unconditionally and stores that as flag information along with the DR.
   In other words, for every data reference A in STMT there exist other
   accesses to a data reference with the same base with predicates that
   add up (OR-up) to the true predicate: this ensures that the data
   reference A is touched (read or written) on every iteration of the
   if-converted loop.  */
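/* A hypothetical illustration (not from this file): if the loop body contains

     if (c)
       A[i] = x;
     else
       y = A[i];

   then the two accesses to A[i] share the same innermost loop behavior, and
   their predicates "c" and "!c" OR up to the true predicate, so the master DR
   for A[i] is marked as accessed unconditionally even though each individual
   access is conditional.  */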
685 hash_memrefs_baserefs_and_store_DRs_read_written_info (data_reference_p a
)
688 data_reference_p
*master_dr
, *base_master_dr
;
689 tree base_ref
= DR_BASE_OBJECT (a
);
690 innermost_loop_behavior
*innermost
= &DR_INNERMOST (a
);
691 tree ca
= bb_predicate (gimple_bb (DR_STMT (a
)));
694 master_dr
= &innermost_DR_map
->get_or_insert (innermost
, &exist1
);
700 IFC_DR (*master_dr
)->w_predicate
701 = fold_or_predicates (UNKNOWN_LOCATION
, ca
,
702 IFC_DR (*master_dr
)->w_predicate
);
703 if (is_true_predicate (IFC_DR (*master_dr
)->w_predicate
))
704 DR_W_UNCONDITIONALLY (*master_dr
) = true;
706 IFC_DR (*master_dr
)->rw_predicate
707 = fold_or_predicates (UNKNOWN_LOCATION
, ca
,
708 IFC_DR (*master_dr
)->rw_predicate
);
709 if (is_true_predicate (IFC_DR (*master_dr
)->rw_predicate
))
710 DR_RW_UNCONDITIONALLY (*master_dr
) = true;
714 base_master_dr
= &baseref_DR_map
->get_or_insert (base_ref
, &exist2
);
717 IFC_DR (*base_master_dr
)->base_w_predicate
718 = fold_or_predicates (UNKNOWN_LOCATION
, ca
,
719 IFC_DR (*base_master_dr
)->base_w_predicate
);
720 if (is_true_predicate (IFC_DR (*base_master_dr
)->base_w_predicate
))
721 DR_BASE_W_UNCONDITIONALLY (*base_master_dr
) = true;
/* Return TRUE if we can prove that the index IDX of an array reference REF
   is within the array bounds.  Return FALSE otherwise.  */
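/* For example (illustrative only, hypothetical names): given "int a[16]"
   accessed as a[i] in a loop where i starts at 0 and steps by 1, the index
   provably stays within [0, 15] as long as the loop iterates at most 16
   times, so the access can be treated as never trapping.  */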
729 idx_within_array_bound (tree ref
, tree
*idx
, void *dta
)
732 widest_int niter
, valid_niter
, delta
, wi_step
;
735 struct loop
*loop
= (struct loop
*) dta
;
737 /* Only support within-bound access for array references. */
738 if (TREE_CODE (ref
) != ARRAY_REF
)
741 /* For arrays at the end of the structure, we are not guaranteed that they
742 do not really extend over their declared size. However, for arrays of
743 size greater than one, this is unlikely to be intended. */
744 if (array_at_struct_end_p (ref
))
747 ev
= analyze_scalar_evolution (loop
, *idx
);
748 ev
= instantiate_parameters (loop
, ev
);
749 init
= initial_condition (ev
);
750 step
= evolution_part_in_loop_num (ev
, loop
->num
);
752 if (!init
|| TREE_CODE (init
) != INTEGER_CST
753 || (step
&& TREE_CODE (step
) != INTEGER_CST
))
756 low
= array_ref_low_bound (ref
);
757 high
= array_ref_up_bound (ref
);
  /* The case of nonconstant bounds could be handled, but it would be
     complicated.  */
761 if (TREE_CODE (low
) != INTEGER_CST
762 || !high
|| TREE_CODE (high
) != INTEGER_CST
)
  /* Check if the initial idx is within bound.  */
766 if (wi::to_widest (init
) < wi::to_widest (low
)
767 || wi::to_widest (init
) > wi::to_widest (high
))
770 /* The idx is always within bound. */
771 if (!step
|| integer_zerop (step
))
774 if (!max_loop_iterations (loop
, &niter
))
777 if (wi::to_widest (step
) < 0)
779 delta
= wi::to_widest (init
) - wi::to_widest (low
);
780 wi_step
= -wi::to_widest (step
);
784 delta
= wi::to_widest (high
) - wi::to_widest (init
);
785 wi_step
= wi::to_widest (step
);
788 valid_niter
= wi::div_floor (delta
, wi_step
, SIGNED
, &overflow
);
789 /* The iteration space of idx is within array bound. */
790 if (!overflow
&& niter
<= valid_niter
)
/* Return TRUE if REF is a within-bound array reference.  */
799 ref_within_array_bound (gimple
*stmt
, tree ref
)
801 struct loop
*loop
= loop_containing_stmt (stmt
);
803 gcc_assert (loop
!= NULL
);
804 return for_each_index (&ref
, idx_within_array_bound
, loop
);
/* Given a memory reference expression REF, return TRUE if the base object
   it refers to is writable.  The base object of a memory reference is the
   main object being referenced, which is returned by function
   get_base_address.  */
814 base_object_writable (tree ref
)
816 tree base_tree
= get_base_address (ref
);
819 && DECL_P (base_tree
)
820 && decl_binds_to_current_def_p (base_tree
)
821 && !TREE_READONLY (base_tree
));
/* Return true when the memory references of STMT won't trap in the
   if-converted code.  There are two things that we have to check for:

   - writes to memory occur to writable memory: if-conversion of
   memory writes transforms the conditional memory writes into
   unconditional writes, i.e. "if (cond) A[i] = foo" is transformed
   into "A[i] = cond ? foo : A[i]", and as the write to memory may not
   be executed at all in the original code, it may be a readonly
   memory.  To check that A is not const-qualified, we check that
   there exists at least an unconditional write to A in the current
   function.

   - reads or writes to memory are valid memory accesses for every
   iteration.  To check that the memory accesses are correctly formed
   and that we are allowed to read and write in these locations, we
   check that the memory accesses to be if-converted occur at every
   iteration unconditionally.

   Returns true for the memory reference in STMT if the same memory
   reference is read or written unconditionally at least once and the
   base memory reference is written unconditionally once.  This is to
   check that the reference will not fault on write.  Also returns true
   if the memory reference is unconditionally read once, and we are
   conditionally writing to memory which is defined as read and write
   and is bound to the definition we are seeing.  */
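/* Illustrative sketch (hypothetical code, not from this pass): in

     if (cond)
       A[i] = v;

   the store becomes unconditional after if-conversion
   ("A[i] = cond ? v : A[i]"), which also introduces an unconditional read of
   A[i]; this is only safe if A[i] is known to be accessed on every iteration
   anyway (or is provably in bounds) and A is known to be writable.  */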
850 ifcvt_memrefs_wont_trap (gimple
*stmt
, vec
<data_reference_p
> drs
)
852 data_reference_p
*master_dr
, *base_master_dr
;
853 data_reference_p a
= drs
[gimple_uid (stmt
) - 1];
855 tree base
= DR_BASE_OBJECT (a
);
856 innermost_loop_behavior
*innermost
= &DR_INNERMOST (a
);
858 gcc_assert (DR_STMT (a
) == stmt
);
859 gcc_assert (DR_BASE_ADDRESS (a
) || DR_OFFSET (a
)
860 || DR_INIT (a
) || DR_STEP (a
));
862 master_dr
= innermost_DR_map
->get (innermost
);
863 gcc_assert (master_dr
!= NULL
);
865 base_master_dr
= baseref_DR_map
->get (base
);
867 /* If a is unconditionally written to it doesn't trap. */
868 if (DR_W_UNCONDITIONALLY (*master_dr
))
  /* If A is unconditionally accessed then ...

     Even if A is accessed conditionally, we can treat it as an unconditional
     access if it's an array reference and all its indexes are within the
     array bounds.  */
876 if (DR_RW_UNCONDITIONALLY (*master_dr
)
877 || ref_within_array_bound (stmt
, DR_REF (a
)))
879 /* an unconditional read won't trap. */
      /* An unconditional write won't trap if the base is written
	 to unconditionally.  */
886 && DR_BASE_W_UNCONDITIONALLY (*base_master_dr
))
887 return PARAM_VALUE (PARAM_ALLOW_STORE_DATA_RACES
);
888 /* or the base is known to be not readonly. */
889 else if (base_object_writable (DR_REF (a
)))
890 return PARAM_VALUE (PARAM_ALLOW_STORE_DATA_RACES
);
896 /* Return true if STMT could be converted into a masked load or store
897 (conditional load or store based on a mask computed from bb predicate). */
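/* For instance (conceptual, not from this pass): a conditional load

     if (cond)
       x = A[i];

   can instead become a masked load, IFN_MASK_LOAD (&A[i], align, mask),
   where the mask is computed from the block predicate; lanes with a false
   mask bit are not accessed, so no extra unconditional read is introduced.  */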
900 ifcvt_can_use_mask_load_store (gimple
*stmt
)
904 basic_block bb
= gimple_bb (stmt
);
907 if (!(flag_tree_loop_vectorize
|| bb
->loop_father
->force_vectorize
)
908 || bb
->loop_father
->dont_vectorize
909 || !gimple_assign_single_p (stmt
)
910 || gimple_has_volatile_ops (stmt
))
913 /* Check whether this is a load or store. */
914 lhs
= gimple_assign_lhs (stmt
);
915 if (gimple_store_p (stmt
))
917 if (!is_gimple_val (gimple_assign_rhs1 (stmt
)))
922 else if (gimple_assign_load_p (stmt
))
925 ref
= gimple_assign_rhs1 (stmt
);
930 if (may_be_nonaddressable_p (ref
))
  /* Mask should be integer mode of the same size as the load/store
     mode.  */
935 mode
= TYPE_MODE (TREE_TYPE (lhs
));
936 if (int_mode_for_mode (mode
) == BLKmode
937 || VECTOR_MODE_P (mode
))
940 if (can_vec_mask_load_store_p (mode
, VOIDmode
, is_load
))
/* Return true when STMT is if-convertible.

   A GIMPLE_ASSIGN statement is not if-convertible if,
   - it is not movable,
   - it could trap,
   - LHS is not var decl.  */
954 if_convertible_gimple_assign_stmt_p (gimple
*stmt
,
955 vec
<data_reference_p
> refs
)
957 tree lhs
= gimple_assign_lhs (stmt
);
959 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
961 fprintf (dump_file
, "-------------------------\n");
962 print_gimple_stmt (dump_file
, stmt
, 0, TDF_SLIM
);
965 if (!is_gimple_reg_type (TREE_TYPE (lhs
)))
  /* Some of these constraints might be too conservative.  */
969 if (stmt_ends_bb_p (stmt
)
970 || gimple_has_volatile_ops (stmt
)
971 || (TREE_CODE (lhs
) == SSA_NAME
972 && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs
))
973 || gimple_has_side_effects (stmt
))
975 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
976 fprintf (dump_file
, "stmt not suitable for ifcvt\n");
980 /* tree-into-ssa.c uses GF_PLF_1, so avoid it, because
981 in between if_convertible_loop_p and combine_blocks
982 we can perform loop versioning. */
983 gimple_set_plf (stmt
, GF_PLF_2
, false);
985 if ((! gimple_vuse (stmt
)
986 || gimple_could_trap_p_1 (stmt
, false, false)
987 || ! ifcvt_memrefs_wont_trap (stmt
, refs
))
988 && gimple_could_trap_p (stmt
))
990 if (ifcvt_can_use_mask_load_store (stmt
))
992 gimple_set_plf (stmt
, GF_PLF_2
, true);
993 any_pred_load_store
= true;
996 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
997 fprintf (dump_file
, "tree could trap...\n");
1001 /* When if-converting stores force versioning, likewise if we
1002 ended up generating store data races. */
1003 if (gimple_vdef (stmt
))
1004 any_pred_load_store
= true;
/* Return true when STMT is if-convertible.

   A statement is if-convertible if:
   - it is an if-convertible GIMPLE_ASSIGN,
   - it is a GIMPLE_LABEL or a GIMPLE_COND,
   - it is a builtin call.  */
1017 if_convertible_stmt_p (gimple
*stmt
, vec
<data_reference_p
> refs
)
1019 switch (gimple_code (stmt
))
1027 return if_convertible_gimple_assign_stmt_p (stmt
, refs
);
1031 tree fndecl
= gimple_call_fndecl (stmt
);
1034 int flags
= gimple_call_flags (stmt
);
1035 if ((flags
& ECF_CONST
)
1036 && !(flags
& ECF_LOOPING_CONST_OR_PURE
)
1037 /* We can only vectorize some builtins at the moment,
1038 so restrict if-conversion to those. */
1039 && DECL_BUILT_IN (fndecl
))
1046 /* Don't know what to do with 'em so don't do anything. */
1047 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1049 fprintf (dump_file
, "don't know what to do\n");
1050 print_gimple_stmt (dump_file
, stmt
, 0, TDF_SLIM
);
/* Assumes that BB has more than one predecessor.
   Returns false if at least one predecessor edge is not critical,
   and true otherwise.  */
1063 all_preds_critical_p (basic_block bb
)
1068 FOR_EACH_EDGE (e
, ei
, bb
->preds
)
1069 if (EDGE_COUNT (e
->src
->succs
) == 1)
/* Returns true if at least one predecessor edge of BB is critical.  */
1076 has_pred_critical_p (basic_block bb
)
1081 FOR_EACH_EDGE (e
, ei
, bb
->preds
)
1082 if (EDGE_COUNT (e
->src
->succs
) > 1)
/* Return true when BB is if-convertible.  This routine does not check
   basic block's statements and phis.

   A basic block is not if-convertible if:
   - it is non-empty and it is after the exit block (in BFS order),
   - it is after the exit block but before the latch,
   - its edges are not normal.

   EXIT_BB is the basic block containing the exit of the LOOP.  BB is
   inside LOOP.  */
1099 if_convertible_bb_p (struct loop
*loop
, basic_block bb
, basic_block exit_bb
)
1104 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1105 fprintf (dump_file
, "----------[%d]-------------\n", bb
->index
);
1107 if (EDGE_COUNT (bb
->succs
) > 2)
1112 if (bb
!= loop
->latch
)
1114 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1115 fprintf (dump_file
, "basic block after exit bb but before latch\n");
1118 else if (!empty_block_p (bb
))
1120 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1121 fprintf (dump_file
, "non empty basic block after exit bb\n");
1124 else if (bb
== loop
->latch
1126 && !dominated_by_p (CDI_DOMINATORS
, bb
, exit_bb
))
1128 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1129 fprintf (dump_file
, "latch is not dominated by exit_block\n");
1134 /* Be less adventurous and handle only normal edges. */
1135 FOR_EACH_EDGE (e
, ei
, bb
->succs
)
1136 if (e
->flags
& (EDGE_EH
| EDGE_ABNORMAL
| EDGE_IRREDUCIBLE_LOOP
))
1138 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1139 fprintf (dump_file
, "Difficult to handle edges\n");
1146 /* Return true when all predecessor blocks of BB are visited. The
1147 VISITED bitmap keeps track of the visited blocks. */
1150 pred_blocks_visited_p (basic_block bb
, bitmap
*visited
)
1154 FOR_EACH_EDGE (e
, ei
, bb
->preds
)
1155 if (!bitmap_bit_p (*visited
, e
->src
->index
))
/* Get the body of a LOOP in suitable order for if-conversion.  It is
   the caller's responsibility to deallocate the basic block list.
   If-conversion-suitable order is breadth-first search (BFS) order
   with an additional constraint: select a block only if all its
   predecessors are already selected.  */
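/* For a simple diamond inside the loop (header -> then/else -> join -> latch)
   this yields e.g. header, then, else, join, latch: the join block is only
   selected once both of its predecessors have been selected.  Illustrative
   ordering only; no particular tie-breaking between the two arms is
   implied.  */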
1167 static basic_block
*
1168 get_loop_body_in_if_conv_order (const struct loop
*loop
)
1170 basic_block
*blocks
, *blocks_in_bfs_order
;
1173 unsigned int index
= 0;
1174 unsigned int visited_count
= 0;
1176 gcc_assert (loop
->num_nodes
);
1177 gcc_assert (loop
->latch
!= EXIT_BLOCK_PTR_FOR_FN (cfun
));
1179 blocks
= XCNEWVEC (basic_block
, loop
->num_nodes
);
1180 visited
= BITMAP_ALLOC (NULL
);
1182 blocks_in_bfs_order
= get_loop_body_in_bfs_order (loop
);
1185 while (index
< loop
->num_nodes
)
1187 bb
= blocks_in_bfs_order
[index
];
1189 if (bb
->flags
& BB_IRREDUCIBLE_LOOP
)
1191 free (blocks_in_bfs_order
);
1192 BITMAP_FREE (visited
);
1197 if (!bitmap_bit_p (visited
, bb
->index
))
1199 if (pred_blocks_visited_p (bb
, &visited
)
1200 || bb
== loop
->header
)
1202 /* This block is now visited. */
1203 bitmap_set_bit (visited
, bb
->index
);
1204 blocks
[visited_count
++] = bb
;
1210 if (index
== loop
->num_nodes
1211 && visited_count
!= loop
->num_nodes
)
1215 free (blocks_in_bfs_order
);
1216 BITMAP_FREE (visited
);
/* Returns true when the analysis of the predicates for all the basic
   blocks in LOOP succeeded.

   predicate_bbs first allocates the predicates of the basic blocks.
   These fields are then initialized with the tree expressions
   representing the predicates under which a basic block is executed
   in the LOOP.  As the loop->header is executed at each iteration, it
   has the "true" predicate.  Other statements executed under a
   condition are predicated with that condition, for example

     if (x)
       S1;
     else
       S2;

   S1 will be predicated with "x", and
   S2 will be predicated with "!x".  */
1239 predicate_bbs (loop_p loop
)
1243 for (i
= 0; i
< loop
->num_nodes
; i
++)
1244 init_bb_predicate (ifc_bbs
[i
]);
1246 for (i
= 0; i
< loop
->num_nodes
; i
++)
1248 basic_block bb
= ifc_bbs
[i
];
1252 /* The loop latch and loop exit block are always executed and
1253 have no extra conditions to be processed: skip them. */
1254 if (bb
== loop
->latch
1255 || bb_with_exit_edge_p (loop
, bb
))
1257 reset_bb_predicate (bb
);
1261 cond
= bb_predicate (bb
);
1262 stmt
= last_stmt (bb
);
1263 if (stmt
&& gimple_code (stmt
) == GIMPLE_COND
)
1266 edge true_edge
, false_edge
;
1267 location_t loc
= gimple_location (stmt
);
1268 tree c
= build2_loc (loc
, gimple_cond_code (stmt
),
1270 gimple_cond_lhs (stmt
),
1271 gimple_cond_rhs (stmt
));
1273 /* Add new condition into destination's predicate list. */
1274 extract_true_false_edges_from_block (gimple_bb (stmt
),
1275 &true_edge
, &false_edge
);
1277 /* If C is true, then TRUE_EDGE is taken. */
1278 add_to_dst_predicate_list (loop
, true_edge
, unshare_expr (cond
),
1281 /* If C is false, then FALSE_EDGE is taken. */
1282 c2
= build1_loc (loc
, TRUTH_NOT_EXPR
, boolean_type_node
,
1284 add_to_dst_predicate_list (loop
, false_edge
,
1285 unshare_expr (cond
), c2
);
1290 /* If current bb has only one successor, then consider it as an
1291 unconditional goto. */
1292 if (single_succ_p (bb
))
1294 basic_block bb_n
= single_succ (bb
);
1296 /* The successor bb inherits the predicate of its
1297 predecessor. If there is no predicate in the predecessor
1298 bb, then consider the successor bb as always executed. */
1299 if (cond
== NULL_TREE
)
1300 cond
= boolean_true_node
;
1302 add_to_predicate_list (loop
, bb_n
, cond
);
1306 /* The loop header is always executed. */
1307 reset_bb_predicate (loop
->header
);
1308 gcc_assert (bb_predicate_gimplified_stmts (loop
->header
) == NULL
1309 && bb_predicate_gimplified_stmts (loop
->latch
) == NULL
);
1312 /* Return true when LOOP is if-convertible. This is a helper function
1313 for if_convertible_loop_p. REFS and DDRS are initialized and freed
1314 in if_convertible_loop_p. */
1317 if_convertible_loop_p_1 (struct loop
*loop
, vec
<data_reference_p
> *refs
)
1320 basic_block exit_bb
= NULL
;
1322 if (find_data_references_in_loop (loop
, refs
) == chrec_dont_know
)
1325 calculate_dominance_info (CDI_DOMINATORS
);
1327 /* Allow statements that can be handled during if-conversion. */
1328 ifc_bbs
= get_loop_body_in_if_conv_order (loop
);
1331 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1332 fprintf (dump_file
, "Irreducible loop\n");
1336 for (i
= 0; i
< loop
->num_nodes
; i
++)
1338 basic_block bb
= ifc_bbs
[i
];
1340 if (!if_convertible_bb_p (loop
, bb
, exit_bb
))
1343 if (bb_with_exit_edge_p (loop
, bb
))
1347 for (i
= 0; i
< loop
->num_nodes
; i
++)
1349 basic_block bb
= ifc_bbs
[i
];
1350 gimple_stmt_iterator gsi
;
1352 for (gsi
= gsi_start_bb (bb
); !gsi_end_p (gsi
); gsi_next (&gsi
))
1353 switch (gimple_code (gsi_stmt (gsi
)))
1360 gimple_set_uid (gsi_stmt (gsi
), 0);
1367 data_reference_p dr
;
1370 = new hash_map
<innermost_loop_behavior_hash
, data_reference_p
>;
1371 baseref_DR_map
= new hash_map
<tree_operand_hash
, data_reference_p
>;
1373 calculate_dominance_info (CDI_POST_DOMINATORS
);
1374 predicate_bbs (loop
);
1376 for (i
= 0; refs
->iterate (i
, &dr
); i
++)
1378 tree ref
= DR_REF (dr
);
1380 dr
->aux
= XNEW (struct ifc_dr
);
1381 DR_BASE_W_UNCONDITIONALLY (dr
) = false;
1382 DR_RW_UNCONDITIONALLY (dr
) = false;
1383 DR_W_UNCONDITIONALLY (dr
) = false;
1384 IFC_DR (dr
)->rw_predicate
= boolean_false_node
;
1385 IFC_DR (dr
)->w_predicate
= boolean_false_node
;
1386 IFC_DR (dr
)->base_w_predicate
= boolean_false_node
;
1387 if (gimple_uid (DR_STMT (dr
)) == 0)
1388 gimple_set_uid (DR_STMT (dr
), i
+ 1);
      /* If DR doesn't have innermost loop behavior or it's a compound
	 memory reference, we synthesize its innermost loop behavior
	 for hashing.  */
1393 if (TREE_CODE (ref
) == COMPONENT_REF
1394 || TREE_CODE (ref
) == IMAGPART_EXPR
1395 || TREE_CODE (ref
) == REALPART_EXPR
1396 || !(DR_BASE_ADDRESS (dr
) || DR_OFFSET (dr
)
1397 || DR_INIT (dr
) || DR_STEP (dr
)))
1399 while (TREE_CODE (ref
) == COMPONENT_REF
1400 || TREE_CODE (ref
) == IMAGPART_EXPR
1401 || TREE_CODE (ref
) == REALPART_EXPR
)
1402 ref
= TREE_OPERAND (ref
, 0);
1404 DR_BASE_ADDRESS (dr
) = ref
;
1405 DR_OFFSET (dr
) = NULL
;
1406 DR_INIT (dr
) = NULL
;
1407 DR_STEP (dr
) = NULL
;
1408 DR_ALIGNED_TO (dr
) = NULL
;
1410 hash_memrefs_baserefs_and_store_DRs_read_written_info (dr
);
1413 for (i
= 0; i
< loop
->num_nodes
; i
++)
1415 basic_block bb
= ifc_bbs
[i
];
1416 gimple_stmt_iterator itr
;
1418 /* Check the if-convertibility of statements in predicated BBs. */
1419 if (!dominated_by_p (CDI_DOMINATORS
, loop
->latch
, bb
))
1420 for (itr
= gsi_start_bb (bb
); !gsi_end_p (itr
); gsi_next (&itr
))
1421 if (!if_convertible_stmt_p (gsi_stmt (itr
), *refs
))
  /* Checking PHIs needs to be done after stmts, as whether there
     are any masked loads or stores affects the tests.  */
1427 for (i
= 0; i
< loop
->num_nodes
; i
++)
1429 basic_block bb
= ifc_bbs
[i
];
1432 for (itr
= gsi_start_phis (bb
); !gsi_end_p (itr
); gsi_next (&itr
))
1433 if (!if_convertible_phi_p (loop
, bb
, itr
.phi ()))
1438 fprintf (dump_file
, "Applying if-conversion\n");
/* Return true when LOOP is if-convertible.
   LOOP is if-convertible if:
   - it has two or more basic blocks,
   - it has only one exit,
   - its loop header is not the exit edge,
   - its basic blocks and phi nodes are if-convertible.  */
1452 if_convertible_loop_p (struct loop
*loop
)
1457 vec
<data_reference_p
> refs
;
1459 /* Handle only innermost loop. */
1460 if (!loop
|| loop
->inner
)
1462 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1463 fprintf (dump_file
, "not innermost loop\n");
1467 /* If only one block, no need for if-conversion. */
1468 if (loop
->num_nodes
<= 2)
1470 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1471 fprintf (dump_file
, "less than 2 basic blocks\n");
1475 /* More than one loop exit is too much to handle. */
1476 if (!single_exit (loop
))
1478 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1479 fprintf (dump_file
, "multiple exits\n");
  /* If one of the loop header's edges is an exit edge then do not
     apply if-conversion.  */
1485 FOR_EACH_EDGE (e
, ei
, loop
->header
->succs
)
1486 if (loop_exit_edge_p (loop
, e
))
1490 res
= if_convertible_loop_p_1 (loop
, &refs
);
1492 data_reference_p dr
;
1494 for (i
= 0; refs
.iterate (i
, &dr
); i
++)
1497 free_data_refs (refs
);
1499 delete innermost_DR_map
;
1500 innermost_DR_map
= NULL
;
1502 delete baseref_DR_map
;
1503 baseref_DR_map
= NULL
;
/* Returns true if the def-stmt for the phi argument ARG is a simple
   increment/decrement which is in a predicated basic block.
   In fact, we search for the following PHI pattern:

     loop_header:
       reduc_1 = PHI <..., reduc_2>
     ...
       if (...)
	 reduc_3 = ...;
       reduc_2 = PHI <reduc_1, reduc_3>

   ARG_0 and ARG_1 are the corresponding PHI arguments.
   REDUC, OP0 and OP1 contain the reduction stmt and its operands.
   EXTENDED is true if PHI has > 2 arguments.  */
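/* At the source level this corresponds to a conditional reduction such as
   (hypothetical example, not from this pass):

     for (i = 0; i < n; i++)
       if (a[i] > 0)
	 sum += 1;

   which, once recognized here, can be rewritten without the branch as

     for (i = 0; i < n; i++)
       sum += a[i] > 0 ? 1 : 0;

   by convert_scalar_cond_reduction below.  */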
1523 is_cond_scalar_reduction (gimple
*phi
, gimple
**reduc
, tree arg_0
, tree arg_1
,
1524 tree
*op0
, tree
*op1
, bool extended
)
1526 tree lhs
, r_op1
, r_op2
;
1528 gimple
*header_phi
= NULL
;
1529 enum tree_code reduction_op
;
1530 basic_block bb
= gimple_bb (phi
);
1531 struct loop
*loop
= bb
->loop_father
;
1532 edge latch_e
= loop_latch_edge (loop
);
1533 imm_use_iterator imm_iter
;
1534 use_operand_p use_p
;
1537 bool result
= false;
1538 if (TREE_CODE (arg_0
) != SSA_NAME
|| TREE_CODE (arg_1
) != SSA_NAME
)
1541 if (!extended
&& gimple_code (SSA_NAME_DEF_STMT (arg_0
)) == GIMPLE_PHI
)
1544 header_phi
= SSA_NAME_DEF_STMT (arg_0
);
1545 stmt
= SSA_NAME_DEF_STMT (arg_1
);
1547 else if (gimple_code (SSA_NAME_DEF_STMT (arg_1
)) == GIMPLE_PHI
)
1550 header_phi
= SSA_NAME_DEF_STMT (arg_1
);
1551 stmt
= SSA_NAME_DEF_STMT (arg_0
);
1555 if (gimple_bb (header_phi
) != loop
->header
)
1558 if (PHI_ARG_DEF_FROM_EDGE (header_phi
, latch_e
) != PHI_RESULT (phi
))
1561 if (gimple_code (stmt
) != GIMPLE_ASSIGN
1562 || gimple_has_volatile_ops (stmt
))
1565 if (!flow_bb_inside_loop_p (loop
, gimple_bb (stmt
)))
1568 if (!is_predicated (gimple_bb (stmt
)))
1571 /* Check that stmt-block is predecessor of phi-block. */
1572 FOR_EACH_EDGE (e
, ei
, gimple_bb (stmt
)->succs
)
1581 if (!has_single_use (lhs
))
1584 reduction_op
= gimple_assign_rhs_code (stmt
);
1585 if (reduction_op
!= PLUS_EXPR
&& reduction_op
!= MINUS_EXPR
)
1587 r_op1
= gimple_assign_rhs1 (stmt
);
1588 r_op2
= gimple_assign_rhs2 (stmt
);
  /* Make R_OP1 hold the reduction variable.  */
1591 if (r_op2
== PHI_RESULT (header_phi
)
1592 && reduction_op
== PLUS_EXPR
)
1593 std::swap (r_op1
, r_op2
);
1594 else if (r_op1
!= PHI_RESULT (header_phi
))
1597 /* Check that R_OP1 is used in reduction stmt or in PHI only. */
1598 FOR_EACH_IMM_USE_FAST (use_p
, imm_iter
, r_op1
)
1600 gimple
*use_stmt
= USE_STMT (use_p
);
1601 if (is_gimple_debug (use_stmt
))
1603 if (use_stmt
== stmt
)
1605 if (gimple_code (use_stmt
) != GIMPLE_PHI
)
1609 *op0
= r_op1
; *op1
= r_op2
;
/* Converts a conditional scalar reduction into unconditional form, e.g.

     bb_4
       if (_5 != 0) goto bb_5 else goto bb_6
     end_bb_4
     bb_5
       res_6 = res_13 + 1;
     end_bb_5
     bb_6
       # res_2 = PHI <res_13(4), res_6(5)>
     end_bb_6

   will be converted into the sequence

     _ifc__1 = _5 != 0 ? 1 : 0;
     res_2 = res_13 + _ifc__1;

   Argument SWAP tells that the arguments of the conditional expression
   should be swapped.
   Returns the rhs of the resulting PHI assignment.  */
1633 convert_scalar_cond_reduction (gimple
*reduc
, gimple_stmt_iterator
*gsi
,
1634 tree cond
, tree op0
, tree op1
, bool swap
)
1636 gimple_stmt_iterator stmt_it
;
1639 tree rhs1
= gimple_assign_rhs1 (reduc
);
1640 tree tmp
= make_temp_ssa_name (TREE_TYPE (rhs1
), NULL
, "_ifc_");
1642 tree zero
= build_zero_cst (TREE_TYPE (rhs1
));
1644 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1646 fprintf (dump_file
, "Found cond scalar reduction.\n");
1647 print_gimple_stmt (dump_file
, reduc
, 0, TDF_SLIM
);
1650 /* Build cond expression using COND and constant operand
1651 of reduction rhs. */
1652 c
= fold_build_cond_expr (TREE_TYPE (rhs1
),
1653 unshare_expr (cond
),
1657 /* Create assignment stmt and insert it at GSI. */
1658 new_assign
= gimple_build_assign (tmp
, c
);
1659 gsi_insert_before (gsi
, new_assign
, GSI_SAME_STMT
);
1660 /* Build rhs for unconditional increment/decrement. */
1661 rhs
= fold_build2 (gimple_assign_rhs_code (reduc
),
1662 TREE_TYPE (rhs1
), op0
, tmp
);
1664 /* Delete original reduction stmt. */
1665 stmt_it
= gsi_for_stmt (reduc
);
1666 gsi_remove (&stmt_it
, true);
1667 release_defs (reduc
);
1671 /* Produce condition for all occurrences of ARG in PHI node. */
1674 gen_phi_arg_condition (gphi
*phi
, vec
<int> *occur
,
1675 gimple_stmt_iterator
*gsi
)
1679 tree cond
= NULL_TREE
;
1683 len
= occur
->length ();
1684 gcc_assert (len
> 0);
1685 for (i
= 0; i
< len
; i
++)
1687 e
= gimple_phi_arg_edge (phi
, (*occur
)[i
]);
1688 c
= bb_predicate (e
->src
);
1689 if (is_true_predicate (c
))
1694 c
= force_gimple_operand_gsi_1 (gsi
, unshare_expr (c
),
1695 is_gimple_condexpr
, NULL_TREE
,
1696 true, GSI_SAME_STMT
);
1697 if (cond
!= NULL_TREE
)
1699 /* Must build OR expression. */
1700 cond
= fold_or_predicates (EXPR_LOCATION (c
), c
, cond
);
1701 cond
= force_gimple_operand_gsi_1 (gsi
, unshare_expr (cond
),
1702 is_gimple_condexpr
, NULL_TREE
,
1703 true, GSI_SAME_STMT
);
1708 gcc_assert (cond
!= NULL_TREE
);
/* Replace a scalar PHI node with a COND_EXPR using COND as condition.
   This routine can handle PHI nodes with more than two arguments.

   For example,
     S1: A = PHI <x1(1), x2(5)>
   is converted into:
     S2: A = cond ? x1 : x2;

   The generated code is inserted at GSI, which points to the top of the
   basic block's statement list.
   If the PHI node has more than two arguments a chain of conditional
   expressions is produced.  */
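/* For example (an illustrative sketch with hypothetical SSA names, not the
   exact emitted sequence): a PHI such as

     A = PHI <x1(e1), x2(e2), x3(e3)>

   with three distinct values becomes a chain of COND_EXPR assignments,
   conceptually

     _ifc_1 = (predicate of e2) ? x2 : x3;
     A      = (predicate of e1) ? x1 : _ifc_1;

   where each condition is built by gen_phi_arg_condition from the predicates
   of the corresponding predecessor blocks.  */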
1727 predicate_scalar_phi (gphi
*phi
, gimple_stmt_iterator
*gsi
)
1729 gimple
*new_stmt
= NULL
, *reduc
;
1730 tree rhs
, res
, arg0
, arg1
, op0
, op1
, scev
;
1732 unsigned int index0
;
1733 unsigned int max
, args_len
;
1738 res
= gimple_phi_result (phi
);
1739 if (virtual_operand_p (res
))
1742 if ((rhs
= degenerate_phi_result (phi
))
1743 || ((scev
= analyze_scalar_evolution (gimple_bb (phi
)->loop_father
,
1745 && !chrec_contains_undetermined (scev
)
1747 && (rhs
= gimple_phi_arg_def (phi
, 0))))
1749 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1751 fprintf (dump_file
, "Degenerate phi!\n");
1752 print_gimple_stmt (dump_file
, phi
, 0, TDF_SLIM
);
1754 new_stmt
= gimple_build_assign (res
, rhs
);
1755 gsi_insert_before (gsi
, new_stmt
, GSI_SAME_STMT
);
1756 update_stmt (new_stmt
);
1760 bb
= gimple_bb (phi
);
1761 if (EDGE_COUNT (bb
->preds
) == 2)
1763 /* Predicate ordinary PHI node with 2 arguments. */
1764 edge first_edge
, second_edge
;
1765 basic_block true_bb
;
1766 first_edge
= EDGE_PRED (bb
, 0);
1767 second_edge
= EDGE_PRED (bb
, 1);
1768 cond
= bb_predicate (first_edge
->src
);
1769 if (TREE_CODE (cond
) == TRUTH_NOT_EXPR
)
1770 std::swap (first_edge
, second_edge
);
1771 if (EDGE_COUNT (first_edge
->src
->succs
) > 1)
1773 cond
= bb_predicate (second_edge
->src
);
1774 if (TREE_CODE (cond
) == TRUTH_NOT_EXPR
)
1775 cond
= TREE_OPERAND (cond
, 0);
1777 first_edge
= second_edge
;
1780 cond
= bb_predicate (first_edge
->src
);
      /* Gimplify the condition to a valid cond-expr conditional operand.  */
1782 cond
= force_gimple_operand_gsi_1 (gsi
, unshare_expr (cond
),
1783 is_gimple_condexpr
, NULL_TREE
,
1784 true, GSI_SAME_STMT
);
1785 true_bb
= first_edge
->src
;
1786 if (EDGE_PRED (bb
, 1)->src
== true_bb
)
1788 arg0
= gimple_phi_arg_def (phi
, 1);
1789 arg1
= gimple_phi_arg_def (phi
, 0);
1793 arg0
= gimple_phi_arg_def (phi
, 0);
1794 arg1
= gimple_phi_arg_def (phi
, 1);
1796 if (is_cond_scalar_reduction (phi
, &reduc
, arg0
, arg1
,
1798 /* Convert reduction stmt into vectorizable form. */
1799 rhs
= convert_scalar_cond_reduction (reduc
, gsi
, cond
, op0
, op1
,
1800 true_bb
!= gimple_bb (reduc
));
1802 /* Build new RHS using selected condition and arguments. */
1803 rhs
= fold_build_cond_expr (TREE_TYPE (res
), unshare_expr (cond
),
1805 new_stmt
= gimple_build_assign (res
, rhs
);
1806 gsi_insert_before (gsi
, new_stmt
, GSI_SAME_STMT
);
1807 update_stmt (new_stmt
);
1809 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1811 fprintf (dump_file
, "new phi replacement stmt\n");
1812 print_gimple_stmt (dump_file
, new_stmt
, 0, TDF_SLIM
);
  /* Create a hashmap for the PHI node, mapping each argument value to the
     vector of argument indexes having that value.  */
1820 hash_map
<tree_operand_hash
, auto_vec
<int> > phi_arg_map
;
1821 unsigned int num_args
= gimple_phi_num_args (phi
);
1823 /* Vector of different PHI argument values. */
1824 auto_vec
<tree
> args (num_args
);
1826 /* Compute phi_arg_map. */
1827 for (i
= 0; i
< num_args
; i
++)
1831 arg
= gimple_phi_arg_def (phi
, i
);
1832 if (!phi_arg_map
.get (arg
))
1833 args
.quick_push (arg
);
1834 phi_arg_map
.get_or_insert (arg
).safe_push (i
);
1837 /* Determine element with max number of occurrences. */
1840 args_len
= args
.length ();
1841 for (i
= 0; i
< args_len
; i
++)
1844 if ((len
= phi_arg_map
.get (args
[i
])->length ()) > max
)
  /* Put the element with the max number of occurrences at the end of ARGS.  */
1852 if (max_ind
!= -1 && max_ind
+1 != (int) args_len
)
1853 std::swap (args
[args_len
- 1], args
[max_ind
]);
  /* Handle one special case when the number of arguments with different values
     is equal to 2 and one argument has only one occurrence.  Such a PHI can be
     handled as if it had only 2 arguments.  */
1858 if (args_len
== 2 && phi_arg_map
.get (args
[0])->length () == 1)
1861 indexes
= phi_arg_map
.get (args
[0]);
1862 index0
= (*indexes
)[0];
1865 e
= gimple_phi_arg_edge (phi
, index0
);
1866 cond
= bb_predicate (e
->src
);
1867 if (TREE_CODE (cond
) == TRUTH_NOT_EXPR
)
1870 cond
= TREE_OPERAND (cond
, 0);
      /* Gimplify the condition to a valid cond-expr conditional operand.  */
1873 cond
= force_gimple_operand_gsi_1 (gsi
, unshare_expr (cond
),
1874 is_gimple_condexpr
, NULL_TREE
,
1875 true, GSI_SAME_STMT
);
1876 if (!(is_cond_scalar_reduction (phi
, &reduc
, arg0
, arg1
,
1878 rhs
= fold_build_cond_expr (TREE_TYPE (res
), unshare_expr (cond
),
1882 /* Convert reduction stmt into vectorizable form. */
1883 rhs
= convert_scalar_cond_reduction (reduc
, gsi
, cond
, op0
, op1
,
1885 new_stmt
= gimple_build_assign (res
, rhs
);
1886 gsi_insert_before (gsi
, new_stmt
, GSI_SAME_STMT
);
1887 update_stmt (new_stmt
);
1893 tree type
= TREE_TYPE (gimple_phi_result (phi
));
1896 for (i
= 0; i
< args_len
; i
++)
1899 indexes
= phi_arg_map
.get (args
[i
]);
1900 if (i
!= args_len
- 1)
1901 lhs
= make_temp_ssa_name (type
, NULL
, "_ifc_");
1904 cond
= gen_phi_arg_condition (phi
, indexes
, gsi
);
1905 rhs
= fold_build_cond_expr (type
, unshare_expr (cond
),
1907 new_stmt
= gimple_build_assign (lhs
, rhs
);
1908 gsi_insert_before (gsi
, new_stmt
, GSI_SAME_STMT
);
1909 update_stmt (new_stmt
);
1914 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1916 fprintf (dump_file
, "new extended phi replacement stmt\n");
1917 print_gimple_stmt (dump_file
, new_stmt
, 0, TDF_SLIM
);
1921 /* Replaces in LOOP all the scalar phi nodes other than those in the
1922 LOOP->header block with conditional modify expressions. */
1925 predicate_all_scalar_phis (struct loop
*loop
)
1928 unsigned int orig_loop_num_nodes
= loop
->num_nodes
;
1931 for (i
= 1; i
< orig_loop_num_nodes
; i
++)
1934 gimple_stmt_iterator gsi
;
1935 gphi_iterator phi_gsi
;
1938 if (bb
== loop
->header
)
1941 phi_gsi
= gsi_start_phis (bb
);
1942 if (gsi_end_p (phi_gsi
))
1945 gsi
= gsi_after_labels (bb
);
1946 while (!gsi_end_p (phi_gsi
))
1948 phi
= phi_gsi
.phi ();
1949 if (virtual_operand_p (gimple_phi_result (phi
)))
1950 gsi_next (&phi_gsi
);
1953 predicate_scalar_phi (phi
, &gsi
);
1954 remove_phi_node (&phi_gsi
, false);
1960 /* Insert in each basic block of LOOP the statements produced by the
1961 gimplification of the predicates. */
1964 insert_gimplified_predicates (loop_p loop
)
1968 for (i
= 0; i
< loop
->num_nodes
; i
++)
1970 basic_block bb
= ifc_bbs
[i
];
1972 if (!is_predicated (bb
))
1973 gcc_assert (bb_predicate_gimplified_stmts (bb
) == NULL
);
1974 if (!is_predicated (bb
))
1976 /* Do not insert statements for a basic block that is not
1977 predicated. Also make sure that the predicate of the
1978 basic block is set to true. */
1979 reset_bb_predicate (bb
);
1983 stmts
= bb_predicate_gimplified_stmts (bb
);
1986 if (any_pred_load_store
)
1988 /* Insert the predicate of the BB just after the label,
1989 as the if-conversion of memory writes will use this
1991 gimple_stmt_iterator gsi
= gsi_after_labels (bb
);
1992 gsi_insert_seq_before (&gsi
, stmts
, GSI_SAME_STMT
);
1996 /* Insert the predicate of the BB at the end of the BB
1997 as this would reduce the register pressure: the only
1998 use of this predicate will be in successor BBs. */
1999 gimple_stmt_iterator gsi
= gsi_last_bb (bb
);
2002 || stmt_ends_bb_p (gsi_stmt (gsi
)))
2003 gsi_insert_seq_before (&gsi
, stmts
, GSI_SAME_STMT
);
2005 gsi_insert_seq_after (&gsi
, stmts
, GSI_SAME_STMT
);
2008 /* Once the sequence is code generated, set it to NULL. */
2009 set_bb_predicate_gimplified_stmts (bb
, NULL
);
/* Helper function for predicate_mem_writes.  Returns the index of an existing
   mask if one was created for the given SIZE, and -1 otherwise.  */
2018 mask_exists (int size
, vec
<int> vec
)
2022 FOR_EACH_VEC_ELT (vec
, ix
, v
)
/* Predicate each write to memory in LOOP.

   This function transforms control flow constructs containing memory
   writes of the form:

   | for (i = 0; i < N; i++)
   |   if (cond)
   |     A[i] = expr;

   into the following form that does not contain control flow:

   | for (i = 0; i < N; i++)
   |   A[i] = cond ? expr : A[i];
2042 The original CFG looks like this:
2049 | if (i < N) goto bb_5 else goto bb_2
2053 | cond = some_computation;
2054 | if (cond) goto bb_3 else goto bb_4
2066 insert_gimplified_predicates inserts the computation of the COND
2067 expression at the beginning of the destination basic block:
2074 | if (i < N) goto bb_5 else goto bb_2
2078 | cond = some_computation;
2079 | if (cond) goto bb_3 else goto bb_4
2083 | cond = some_computation;
2092 predicate_mem_writes is then predicating the memory write as follows:
2099 | if (i < N) goto bb_5 else goto bb_2
2103 | if (cond) goto bb_3 else goto bb_4
2107 | cond = some_computation;
2108 | A[i] = cond ? expr : A[i];
2116 and finally combine_blocks removes the basic block boundaries making
2117 the loop vectorizable:
2121 | if (i < N) goto bb_5 else goto bb_1
2125 | cond = some_computation;
2126 | A[i] = cond ? expr : A[i];
2127 | if (i < N) goto bb_5 else goto bb_4
2136 predicate_mem_writes (loop_p loop
)
2138 unsigned int i
, orig_loop_num_nodes
= loop
->num_nodes
;
2139 auto_vec
<int, 1> vect_sizes
;
2140 auto_vec
<tree
, 1> vect_masks
;
2142 for (i
= 1; i
< orig_loop_num_nodes
; i
++)
2144 gimple_stmt_iterator gsi
;
2145 basic_block bb
= ifc_bbs
[i
];
2146 tree cond
= bb_predicate (bb
);
2151 if (is_true_predicate (cond
) || is_false_predicate (cond
))
2155 if (TREE_CODE (cond
) == TRUTH_NOT_EXPR
)
2158 cond
= TREE_OPERAND (cond
, 0);
2161 vect_sizes
.truncate (0);
2162 vect_masks
.truncate (0);
2164 for (gsi
= gsi_start_bb (bb
); !gsi_end_p (gsi
); gsi_next (&gsi
))
2165 if (!gimple_assign_single_p (stmt
= gsi_stmt (gsi
)))
2167 else if (gimple_plf (stmt
, GF_PLF_2
))
2169 tree lhs
= gimple_assign_lhs (stmt
);
2170 tree rhs
= gimple_assign_rhs1 (stmt
);
2171 tree ref
, addr
, ptr
, mask
;
2173 gimple_seq stmts
= NULL
;
2174 int bitsize
= GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (lhs
)));
2175 ref
= TREE_CODE (lhs
) == SSA_NAME
? rhs
: lhs
;
2176 mark_addressable (ref
);
2177 addr
= force_gimple_operand_gsi (&gsi
, build_fold_addr_expr (ref
),
2178 true, NULL_TREE
, true,
2180 if (!vect_sizes
.is_empty ()
2181 && (index
= mask_exists (bitsize
, vect_sizes
)) != -1)
2182 /* Use created mask. */
2183 mask
= vect_masks
[index
];
2186 if (COMPARISON_CLASS_P (cond
))
2187 mask
= gimple_build (&stmts
, TREE_CODE (cond
),
2189 TREE_OPERAND (cond
, 0),
2190 TREE_OPERAND (cond
, 1));
2193 gcc_assert (TREE_CODE (cond
) == SSA_NAME
);
2200 = constant_boolean_node (true, TREE_TYPE (mask
));
2201 mask
= gimple_build (&stmts
, BIT_XOR_EXPR
,
2202 TREE_TYPE (mask
), mask
, true_val
);
2204 gsi_insert_seq_before (&gsi
, stmts
, GSI_SAME_STMT
);
2206 mask
= ifc_temp_var (TREE_TYPE (mask
), mask
, &gsi
);
2207 /* Save mask and its size for further use. */
2208 vect_sizes
.safe_push (bitsize
);
2209 vect_masks
.safe_push (mask
);
2211 ptr
= build_int_cst (reference_alias_ptr_type (ref
),
2212 get_object_alignment (ref
));
2213 /* Copy points-to info if possible. */
2214 if (TREE_CODE (addr
) == SSA_NAME
&& !SSA_NAME_PTR_INFO (addr
))
2215 copy_ref_info (build2 (MEM_REF
, TREE_TYPE (ref
), addr
, ptr
),
2217 if (TREE_CODE (lhs
) == SSA_NAME
)
2220 = gimple_build_call_internal (IFN_MASK_LOAD
, 3, addr
,
2222 gimple_call_set_lhs (new_stmt
, lhs
);
2223 gimple_set_vuse (new_stmt
, gimple_vuse (stmt
));
2228 = gimple_build_call_internal (IFN_MASK_STORE
, 4, addr
, ptr
,
2230 gimple_set_vuse (new_stmt
, gimple_vuse (stmt
));
2231 gimple_set_vdef (new_stmt
, gimple_vdef (stmt
));
2232 SSA_NAME_DEF_STMT (gimple_vdef (new_stmt
)) = new_stmt
;
2235 gsi_replace (&gsi
, new_stmt
, true);
2237 else if (gimple_vdef (stmt
))
2239 tree lhs
= gimple_assign_lhs (stmt
);
2240 tree rhs
= gimple_assign_rhs1 (stmt
);
2241 tree type
= TREE_TYPE (lhs
);
2243 lhs
= ifc_temp_var (type
, unshare_expr (lhs
), &gsi
);
2244 rhs
= ifc_temp_var (type
, unshare_expr (rhs
), &gsi
);
2246 std::swap (lhs
, rhs
);
2247 cond
= force_gimple_operand_gsi_1 (&gsi
, unshare_expr (cond
),
2248 is_gimple_condexpr
, NULL_TREE
,
2249 true, GSI_SAME_STMT
);
2250 rhs
= fold_build_cond_expr (type
, unshare_expr (cond
), rhs
, lhs
);
2251 gimple_assign_set_rhs1 (stmt
, ifc_temp_var (type
, rhs
, &gsi
));
2257 /* Remove all GIMPLE_CONDs and GIMPLE_LABELs of all the basic blocks
2258 other than the exit and latch of the LOOP. Also resets the
2259 GIMPLE_DEBUG information. */
2262 remove_conditions_and_labels (loop_p loop
)
2264 gimple_stmt_iterator gsi
;
2267 for (i
= 0; i
< loop
->num_nodes
; i
++)
2269 basic_block bb
= ifc_bbs
[i
];
2271 if (bb_with_exit_edge_p (loop
, bb
)
2272 || bb
== loop
->latch
)
2275 for (gsi
= gsi_start_bb (bb
); !gsi_end_p (gsi
); )
2276 switch (gimple_code (gsi_stmt (gsi
)))
2280 gsi_remove (&gsi
, true);
2284 /* ??? Should there be conditional GIMPLE_DEBUG_BINDs? */
2285 if (gimple_debug_bind_p (gsi_stmt (gsi
)))
2287 gimple_debug_bind_reset_value (gsi_stmt (gsi
));
2288 update_stmt (gsi_stmt (gsi
));
/* Combine all the basic blocks from LOOP into one or two super basic
   blocks.  Replace PHI nodes with conditional modify expressions.  */

static void
combine_blocks (struct loop *loop)
{
  basic_block bb, exit_bb, merge_target_bb;
  unsigned int orig_loop_num_nodes = loop->num_nodes;
  unsigned int i;
  edge e;
  edge_iterator ei;

  remove_conditions_and_labels (loop);
  insert_gimplified_predicates (loop);
  predicate_all_scalar_phis (loop);

  if (any_pred_load_store)
    predicate_mem_writes (loop);

  /* Merge basic blocks: first remove all the edges in the loop,
     except for those from the exit block.  */
  exit_bb = NULL;
  bool *predicated = XNEWVEC (bool, orig_loop_num_nodes);
  for (i = 0; i < orig_loop_num_nodes; i++)
    {
      bb = ifc_bbs[i];
      predicated[i] = !is_true_predicate (bb_predicate (bb));
      free_bb_predicate (bb);
      if (bb_with_exit_edge_p (loop, bb))
	{
	  gcc_assert (exit_bb == NULL);
	  exit_bb = bb;
	}
    }
  gcc_assert (exit_bb != loop->latch);

  for (i = 1; i < orig_loop_num_nodes; i++)
    {
      bb = ifc_bbs[i];

      for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei));)
	{
	  if (e->src == exit_bb)
	    ei_next (&ei);
	  else
	    remove_edge (e);
	}
    }

  if (exit_bb != NULL)
    {
      if (exit_bb != loop->header)
	{
	  /* Connect this node to loop header.  */
	  make_edge (loop->header, exit_bb, EDGE_FALLTHRU);
	  set_immediate_dominator (CDI_DOMINATORS, exit_bb, loop->header);
	}

      /* Redirect non-exit edges to loop->latch.  */
      FOR_EACH_EDGE (e, ei, exit_bb->succs)
	{
	  if (!loop_exit_edge_p (loop, e))
	    redirect_edge_and_branch (e, loop->latch);
	}
      set_immediate_dominator (CDI_DOMINATORS, loop->latch, exit_bb);
    }
  else
    {
      /* If the loop does not have an exit, reconnect header and latch.  */
      make_edge (loop->header, loop->latch, EDGE_FALLTHRU);
      set_immediate_dominator (CDI_DOMINATORS, loop->latch, loop->header);
    }

  merge_target_bb = loop->header;

  /* Get at the virtual def valid for uses starting at the first block
     we merge into the header.  Without a virtual PHI the loop has the
     same virtual use on all stmts.  */
  gphi *vphi = get_virtual_phi (loop->header);
  tree last_vdef = NULL_TREE;
  if (vphi)
    {
      last_vdef = gimple_phi_result (vphi);
      for (gimple_stmt_iterator gsi = gsi_start_bb (loop->header);
	   ! gsi_end_p (gsi); gsi_next (&gsi))
	if (gimple_vdef (gsi_stmt (gsi)))
	  last_vdef = gimple_vdef (gsi_stmt (gsi));
    }
  for (i = 1; i < orig_loop_num_nodes; i++)
    {
      gimple_stmt_iterator gsi;
      gimple_stmt_iterator last;

      bb = ifc_bbs[i];

      if (bb == exit_bb || bb == loop->latch)
	continue;

      /* We release virtual PHIs late because we have to propagate them
	 out using the current VUSE.  The def might be the one used
	 after the loop.  */
      vphi = get_virtual_phi (bb);
      if (vphi)
	{
	  imm_use_iterator iter;
	  use_operand_p use_p;
	  gimple *use_stmt;
	  FOR_EACH_IMM_USE_STMT (use_stmt, iter, gimple_phi_result (vphi))
	    {
	      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
		SET_USE (use_p, last_vdef);
	    }
	  gsi = gsi_for_stmt (vphi);
	  remove_phi_node (&gsi, true);
	}

      /* Make stmts member of loop->header and clear range info from all stmts
	 in BB which is now no longer executed conditional on a predicate we
	 could have derived it from.  */
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);
	  gimple_set_bb (stmt, merge_target_bb);
	  /* Update virtual operands.  */
	  if (last_vdef)
	    {
	      use_operand_p use_p = ssa_vuse_operand (stmt);
	      if (use_p
		  && USE_FROM_PTR (use_p) != last_vdef)
		SET_USE (use_p, last_vdef);
	      if (gimple_vdef (stmt))
		last_vdef = gimple_vdef (stmt);
	    }
	  if (predicated[i])
	    {
	      ssa_op_iter i;
	      tree op;
	      FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF)
		reset_flow_sensitive_info (op);
	    }
	}

      /* Update stmt list.  */
      last = gsi_last_bb (merge_target_bb);
      gsi_insert_seq_after_without_update (&last, bb_seq (bb), GSI_NEW_STMT);
      set_bb_seq (bb, NULL);

      delete_basic_block (bb);
    }

  /* If possible, merge loop header to the block with the exit edge.
     This reduces the number of basic blocks to two, to please the
     vectorizer that handles only loops with two nodes.  */
  if (exit_bb
      && exit_bb != loop->header)
    {
      /* We release virtual PHIs late because we have to propagate them
	 out using the current VUSE.  The def might be the one used
	 after the loop.  */
      vphi = get_virtual_phi (exit_bb);
      if (vphi)
	{
	  imm_use_iterator iter;
	  use_operand_p use_p;
	  gimple *use_stmt;
	  FOR_EACH_IMM_USE_STMT (use_stmt, iter, gimple_phi_result (vphi))
	    {
	      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
		SET_USE (use_p, last_vdef);
	    }
	  gimple_stmt_iterator gsi = gsi_for_stmt (vphi);
	  remove_phi_node (&gsi, true);
	}

      if (can_merge_blocks_p (loop->header, exit_bb))
	merge_blocks (loop->header, exit_bb);
    }

  free (predicated);
}
/* Version LOOP before if-converting it; the original loop
   will be if-converted, the new copy of the loop will not,
   and the LOOP_VECTORIZED internal call will be guarding which
   loop to execute.  The vectorizer pass will fold this
   internal call into either true or false.  */

static bool
version_loop_for_if_conversion (struct loop *loop)
{
  basic_block cond_bb;
  tree cond = make_ssa_name (boolean_type_node);
  struct loop *new_loop;
  gimple *g;
  gimple_stmt_iterator gsi;

  g = gimple_build_call_internal (IFN_LOOP_VECTORIZED, 2,
				  build_int_cst (integer_type_node, loop->num),
				  integer_zero_node);
  gimple_call_set_lhs (g, cond);

  /* Save BB->aux around loop_version as that uses the same field.  */
  void **saved_preds = XALLOCAVEC (void *, loop->num_nodes);
  for (unsigned i = 0; i < loop->num_nodes; i++)
    saved_preds[i] = ifc_bbs[i]->aux;

  initialize_original_copy_tables ();
  new_loop = loop_version (loop, cond, &cond_bb,
			   REG_BR_PROB_BASE, REG_BR_PROB_BASE,
			   REG_BR_PROB_BASE, true);
  free_original_copy_tables ();

  for (unsigned i = 0; i < loop->num_nodes; i++)
    ifc_bbs[i]->aux = saved_preds[i];

  if (new_loop == NULL)
    return false;

  new_loop->dont_vectorize = true;
  new_loop->force_vectorize = false;
  gsi = gsi_last_bb (cond_bb);
  gimple_call_set_arg (g, 1, build_int_cst (integer_type_node, new_loop->num));
  gsi_insert_before (&gsi, g, GSI_SAME_STMT);
  update_ssa (TODO_update_ssa);
  return true;
}
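/* Rough sketch of the CFG produced above (the loop numbers 1 and 2 are
   invented):

       _t = LOOP_VECTORIZED (1, 2);
       if (_t != 0)
	 <loop 1>   /* original loop; gets if-converted below  */
       else
	 <loop 2>   /* unmodified copy with dont_vectorize set  */

   The vectorizer later folds the LOOP_VECTORIZED call to true if it
   manages to vectorize loop 1 and to false otherwise, so only one of the
   two versions survives.  */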
/* Performs splitting of critical edges.  Skip splitting and return false
   if LOOP will not be converted because:

     - LOOP is not well formed.
     - LOOP has PHI with more than MAX_PHI_ARG_NUM arguments.

   Last restriction is valid only if AGGRESSIVE_IF_CONV is false.  */

static bool
ifcvt_split_critical_edges (struct loop *loop, bool aggressive_if_conv)
{
  basic_block *body;
  basic_block bb;
  unsigned int num = loop->num_nodes;
  unsigned int i;
  gimple *stmt;
  edge e;
  edge_iterator ei;
  auto_vec<edge> critical_edges;

  /* Loop is not well formed.  */
  if (num <= 2 || loop->inner || !single_exit (loop))
    return false;

  body = get_loop_body (loop);
  for (i = 0; i < num; i++)
    {
      bb = body[i];
      if (!aggressive_if_conv
	  && phi_nodes (bb)
	  && EDGE_COUNT (bb->preds) > MAX_PHI_ARG_NUM)
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file,
		     "BB %d has complicated PHI with more than %u args.\n",
		     bb->index, MAX_PHI_ARG_NUM);

	  free (body);
	  return false;
	}
      if (bb == loop->latch || bb_with_exit_edge_p (loop, bb))
	continue;

      stmt = last_stmt (bb);
      /* Skip basic blocks not ending with conditional branch.  */
      if (!stmt || gimple_code (stmt) != GIMPLE_COND)
	continue;

      FOR_EACH_EDGE (e, ei, bb->succs)
	if (EDGE_CRITICAL_P (e) && e->dest->loop_father == loop)
	  critical_edges.safe_push (e);
    }
  free (body);

  while (critical_edges.length () > 0)
    {
      e = critical_edges.pop ();
      /* Don't split if bb can be predicated along non-critical edge.  */
      if (EDGE_COUNT (e->dest->preds) > 2 || all_preds_critical_p (e->dest))
	continue;

      split_edge (e);
    }

  return true;
}
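/* Example of the splitting done above (block numbers and names are
   invented):

       bb1: if (c_4 != 0) goto bb2; else goto bb3;
       bb2: ...
	    goto bb3;
       bb3: # x_1 = PHI <a_2 (bb1), b_3 (bb2)>

   The edge bb1->bb3 is critical: bb1 has two successors and bb3 has two
   predecessors.  Splitting it inserts an empty forwarder block on that
   edge so each predecessor of the join block can later carry its own
   predicate.  */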
/* Delete redundant statements produced by predication which prevents
   loop vectorization.  */

static void
ifcvt_local_dce (basic_block bb)
{
  gimple *stmt;
  gimple *stmt1;
  gimple *phi;
  gimple_stmt_iterator gsi;
  auto_vec<gimple *> worklist;
  enum gimple_code code;
  use_operand_p use_p;
  imm_use_iterator imm_iter;

  worklist.create (64);
  /* Consider all phi as live statements.  */
  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      phi = gsi_stmt (gsi);
      gimple_set_plf (phi, GF_PLF_2, true);
      worklist.safe_push (phi);
    }
  /* Consider load/store statements, CALL and COND as live.  */
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      stmt = gsi_stmt (gsi);
      if (gimple_store_p (stmt)
	  || gimple_assign_load_p (stmt)
	  || is_gimple_debug (stmt))
	{
	  gimple_set_plf (stmt, GF_PLF_2, true);
	  worklist.safe_push (stmt);
	  continue;
	}
      code = gimple_code (stmt);
      if (code == GIMPLE_COND || code == GIMPLE_CALL)
	{
	  gimple_set_plf (stmt, GF_PLF_2, true);
	  worklist.safe_push (stmt);
	  continue;
	}
      gimple_set_plf (stmt, GF_PLF_2, false);

      if (code == GIMPLE_ASSIGN)
	{
	  tree lhs = gimple_assign_lhs (stmt);
	  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
	    {
	      stmt1 = USE_STMT (use_p);
	      if (gimple_bb (stmt1) != bb)
		{
		  gimple_set_plf (stmt, GF_PLF_2, true);
		  worklist.safe_push (stmt);
		  break;
		}
	    }
	}
    }
  /* Propagate liveness through arguments of live stmt.  */
  while (worklist.length () > 0)
    {
      ssa_op_iter iter;
      use_operand_p use_p;
      tree use;

      stmt = worklist.pop ();
      FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
	{
	  use = USE_FROM_PTR (use_p);
	  if (TREE_CODE (use) != SSA_NAME)
	    continue;
	  stmt1 = SSA_NAME_DEF_STMT (use);
	  if (gimple_bb (stmt1) != bb
	      || gimple_plf (stmt1, GF_PLF_2))
	    continue;
	  gimple_set_plf (stmt1, GF_PLF_2, true);
	  worklist.safe_push (stmt1);
	}
    }
  /* Delete dead statements.  */
  gsi = gsi_start_bb (bb);
  while (!gsi_end_p (gsi))
    {
      stmt = gsi_stmt (gsi);
      if (gimple_plf (stmt, GF_PLF_2))
	{
	  gsi_next (&gsi);
	  continue;
	}
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Delete dead stmt in bb#%d\n", bb->index);
	  print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
	}
      gsi_remove (&gsi, true);
      release_defs (stmt);
    }
}
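/* Example of what this local DCE removes (SSA names are invented):
   predication can leave computations that no longer have any use in the
   merged header, e.g. a comparison

       _1 = a_5 > 16;

   that only fed a predicate which turned out not to be needed by any
   surviving COND_EXPR or masked access.  Such statements are deleted here
   so they do not get in the way of vectorization.  */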
/* If-convert LOOP when it is legal.  For the moment this pass has no
   profitability analysis.  Returns non-zero todo flags when something
   changed.  */

static unsigned int
tree_if_conversion (struct loop *loop)
{
  unsigned int todo = 0;
  bool aggressive_if_conv;
  unsigned int i;

  ifc_bbs = NULL;
  any_pred_load_store = false;
  any_complicated_phi = false;

  /* Apply more aggressive if-conversion when loop or its outer loop were
     marked with simd pragma.  When that's the case, we try to if-convert
     loop containing PHIs with more than MAX_PHI_ARG_NUM arguments.  */
  aggressive_if_conv = loop->force_vectorize;
  if (!aggressive_if_conv)
    {
      struct loop *outer_loop = loop_outer (loop);
      if (outer_loop && outer_loop->force_vectorize)
	aggressive_if_conv = true;
    }

  if (!ifcvt_split_critical_edges (loop, aggressive_if_conv))
    goto cleanup;

  if (!if_convertible_loop_p (loop)
      || !dbg_cnt (if_conversion_tree))
    goto cleanup;

  if ((any_pred_load_store || any_complicated_phi)
      && ((!flag_tree_loop_vectorize && !loop->force_vectorize)
	  || loop->dont_vectorize))
    goto cleanup;

  if ((any_pred_load_store || any_complicated_phi)
      && !version_loop_for_if_conversion (loop))
    goto cleanup;

  /* Now all statements are if-convertible.  Combine all the basic
     blocks into one huge basic block doing the if-conversion
     on-the-fly.  */
  combine_blocks (loop);

  /* Delete dead predicate computations.  */
  ifcvt_local_dce (loop->header);

  todo |= TODO_cleanup_cfg;

 cleanup:
  if (ifc_bbs)
    {
      for (i = 0; i < loop->num_nodes; i++)
	free_bb_predicate (ifc_bbs[i]);

      free (ifc_bbs);
      ifc_bbs = NULL;
    }
  free_dominance_info (CDI_POST_DOMINATORS);

  return todo;
}
/* Tree if-conversion pass management.  */

namespace {

const pass_data pass_data_if_conversion =
{
  GIMPLE_PASS, /* type */
  "ifcvt", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_LOOP_IFCVT, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_if_conversion : public gimple_opt_pass
{
public:
  pass_if_conversion (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_if_conversion, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *);
  virtual unsigned int execute (function *);

}; // class pass_if_conversion

bool
pass_if_conversion::gate (function *fun)
{
  return (((flag_tree_loop_vectorize || fun->has_force_vectorize_loops)
	   && flag_tree_loop_if_convert != 0)
	  || flag_tree_loop_if_convert == 1
	  || flag_tree_loop_if_convert_stores == 1);
}
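/* In terms of user options (mapping assumed from the usual flag names):
   the pass is gated on -ftree-loop-vectorize being enabled (or the
   function containing a force_vectorize loop, e.g. from #pragma omp simd)
   with -ftree-loop-if-convert not explicitly disabled, or on
   -ftree-loop-if-convert / -ftree-loop-if-convert-stores being requested
   explicitly.  */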
unsigned int
pass_if_conversion::execute (function *fun)
{
  struct loop *loop;
  unsigned int todo = 0;

  if (number_of_loops (fun) <= 1)
    return 0;

  /* If there are infinite loops, during CDI_POST_DOMINATORS computation
     we can pick pretty much random bb inside of the infinite loop that
     has the fake edge.  If we are unlucky enough, this can confuse the
     add_to_predicate_list post-dominator check to optimize as if that
     bb or some other one is a join block when it actually is not.  */
  connect_infinite_loops_to_exit ();

  FOR_EACH_LOOP (loop, 0)
    if (flag_tree_loop_if_convert == 1
	|| flag_tree_loop_if_convert_stores == 1
	|| ((flag_tree_loop_vectorize || loop->force_vectorize)
	    && !loop->dont_vectorize))
      todo |= tree_if_conversion (loop);

  remove_fake_exit_edges ();

  if (flag_checking)
    {
      basic_block bb;
      FOR_EACH_BB_FN (bb, fun)
	gcc_assert (!bb->aux);
    }

  return todo;
}

} // anon namespace

gimple_opt_pass *
make_pass_if_conversion (gcc::context *ctxt)
{
  return new pass_if_conversion (ctxt);
}