1 /* If-conversion for vectorizer.
2 Copyright (C) 2004-2016 Free Software Foundation, Inc.
3 Contributed by Devang Patel <dpatel@apple.com>
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 /* This pass implements a tree level if-conversion of loops. Its
22 initial goal is to help the vectorizer to vectorize loops with
25 A short description of if-conversion:
27 o Decide if a loop is if-convertible or not.
28 o Walk all loop basic blocks in breadth first order (BFS order).
29 o Remove conditional statements (at the end of basic block)
30 and propagate condition into destination basic blocks'
32 o Replace modify expression with conditional modify expression
33 using current basic block's condition.
34 o Merge all basic blocks
35 o Replace phi nodes with conditional modify expr
36 o Merge all basic blocks into header
38 Sample transformation:
43 # i_23 = PHI <0(0), i_18(10)>;
46 if (j_15 > 41) goto <L1>; else goto <L17>;
53 # iftmp.2_4 = PHI <0(8), 42(2)>;
57 if (i_18 <= 15) goto <L19>; else goto <L18>;
67 # i_23 = PHI <0(0), i_18(10)>;
72 iftmp.2_4 = j_15 > 41 ? 42 : 0;
75 if (i_18 <= 15) goto <L19>; else goto <L18>;
85 #include "coretypes.h"
91 #include "tree-pass.h"
94 #include "optabs-query.h"
95 #include "gimple-pretty-print.h"
97 #include "fold-const.h"
98 #include "stor-layout.h"
99 #include "gimple-fold.h"
100 #include "gimplify.h"
101 #include "gimple-iterator.h"
102 #include "gimplify-me.h"
103 #include "tree-cfg.h"
104 #include "tree-into-ssa.h"
105 #include "tree-ssa.h"
107 #include "tree-data-ref.h"
108 #include "tree-scalar-evolution.h"
109 #include "tree-ssa-loop-ivopts.h"
110 #include "tree-ssa-address.h"
112 #include "tree-hash-traits.h"
114 #include "builtins.h"
117 /* Hash for struct innermost_loop_behavior. It depends on the user to
120 struct innermost_loop_behavior_hash
: nofree_ptr_hash
<innermost_loop_behavior
>
122 static inline hashval_t
hash (const value_type
&);
123 static inline bool equal (const value_type
&,
124 const compare_type
&);
128 innermost_loop_behavior_hash::hash (const value_type
&e
)
132 hash
= iterative_hash_expr (e
->base_address
, 0);
133 hash
= iterative_hash_expr (e
->offset
, hash
);
134 hash
= iterative_hash_expr (e
->init
, hash
);
135 return iterative_hash_expr (e
->step
, hash
);
139 innermost_loop_behavior_hash::equal (const value_type
&e1
,
140 const compare_type
&e2
)
142 if ((e1
->base_address
&& !e2
->base_address
)
143 || (!e1
->base_address
&& e2
->base_address
)
144 || (!e1
->offset
&& e2
->offset
)
145 || (e1
->offset
&& !e2
->offset
)
146 || (!e1
->init
&& e2
->init
)
147 || (e1
->init
&& !e2
->init
)
148 || (!e1
->step
&& e2
->step
)
149 || (e1
->step
&& !e2
->step
))
152 if (e1
->base_address
&& e2
->base_address
153 && !operand_equal_p (e1
->base_address
, e2
->base_address
, 0))
155 if (e1
->offset
&& e2
->offset
156 && !operand_equal_p (e1
->offset
, e2
->offset
, 0))
158 if (e1
->init
&& e2
->init
159 && !operand_equal_p (e1
->init
, e2
->init
, 0))
161 if (e1
->step
&& e2
->step
162 && !operand_equal_p (e1
->step
, e2
->step
, 0))
168 /* List of basic blocks in if-conversion-suitable order. */
169 static basic_block
*ifc_bbs
;
171 /* Apply more aggressive (extended) if-conversion if true. */
172 static bool aggressive_if_conv
;
174 /* Hash table to store <DR's innermost loop behavior, DR> pairs. */
175 static hash_map
<innermost_loop_behavior_hash
,
176 data_reference_p
> *innermost_DR_map
;
178 /* Hash table to store <base reference, DR> pairs. */
179 static hash_map
<tree_operand_hash
, data_reference_p
> *baseref_DR_map
;
181 /* Structure used to predicate basic blocks. This is attached to the
182 ->aux field of the BBs in the loop to be if-converted. */
183 struct bb_predicate
{
185 /* The condition under which this basic block is executed. */
188 /* PREDICATE is gimplified, and the sequence of statements is
189 recorded here, in order to avoid the duplication of computations
190 that occur in previous conditions. See PR44483. */
191 gimple_seq predicate_gimplified_stmts
;
194 /* Returns true when the basic block BB has a predicate. */
197 bb_has_predicate (basic_block bb
)
199 return bb
->aux
!= NULL
;
202 /* Returns the gimplified predicate for basic block BB. */
205 bb_predicate (basic_block bb
)
207 return ((struct bb_predicate
*) bb
->aux
)->predicate
;
210 /* Sets the gimplified predicate COND for basic block BB. */
213 set_bb_predicate (basic_block bb
, tree cond
)
215 gcc_assert ((TREE_CODE (cond
) == TRUTH_NOT_EXPR
216 && is_gimple_condexpr (TREE_OPERAND (cond
, 0)))
217 || is_gimple_condexpr (cond
));
218 ((struct bb_predicate
*) bb
->aux
)->predicate
= cond
;
221 /* Returns the sequence of statements of the gimplification of the
222 predicate for basic block BB. */
224 static inline gimple_seq
225 bb_predicate_gimplified_stmts (basic_block bb
)
227 return ((struct bb_predicate
*) bb
->aux
)->predicate_gimplified_stmts
;
230 /* Sets the sequence of statements STMTS of the gimplification of the
231 predicate for basic block BB. */
234 set_bb_predicate_gimplified_stmts (basic_block bb
, gimple_seq stmts
)
236 ((struct bb_predicate
*) bb
->aux
)->predicate_gimplified_stmts
= stmts
;
239 /* Adds the sequence of statements STMTS to the sequence of statements
240 of the predicate for basic block BB. */
243 add_bb_predicate_gimplified_stmts (basic_block bb
, gimple_seq stmts
)
246 (&(((struct bb_predicate
*) bb
->aux
)->predicate_gimplified_stmts
), stmts
);
249 /* Initializes to TRUE the predicate of basic block BB. */
252 init_bb_predicate (basic_block bb
)
254 bb
->aux
= XNEW (struct bb_predicate
);
255 set_bb_predicate_gimplified_stmts (bb
, NULL
);
256 set_bb_predicate (bb
, boolean_true_node
);
259 /* Release the SSA_NAMEs associated with the predicate of basic block BB,
260 but don't actually free it. */
263 release_bb_predicate (basic_block bb
)
265 gimple_seq stmts
= bb_predicate_gimplified_stmts (bb
);
268 gimple_stmt_iterator i
;
270 for (i
= gsi_start (stmts
); !gsi_end_p (i
); gsi_next (&i
))
271 free_stmt_operands (cfun
, gsi_stmt (i
));
272 set_bb_predicate_gimplified_stmts (bb
, NULL
);
276 /* Free the predicate of basic block BB. */
279 free_bb_predicate (basic_block bb
)
281 if (!bb_has_predicate (bb
))
284 release_bb_predicate (bb
);
289 /* Reinitialize predicate of BB with the true predicate. */
292 reset_bb_predicate (basic_block bb
)
294 if (!bb_has_predicate (bb
))
295 init_bb_predicate (bb
);
298 release_bb_predicate (bb
);
299 set_bb_predicate (bb
, boolean_true_node
);
303 /* Returns a new SSA_NAME of type TYPE that is assigned the value of
304 the expression EXPR. Inserts the statement created for this
305 computation before GSI and leaves the iterator GSI at the same
309 ifc_temp_var (tree type
, tree expr
, gimple_stmt_iterator
*gsi
)
311 tree new_name
= make_temp_ssa_name (type
, NULL
, "_ifc_");
312 gimple
*stmt
= gimple_build_assign (new_name
, expr
);
313 gsi_insert_before (gsi
, stmt
, GSI_SAME_STMT
);
317 /* Return true when COND is a false predicate. */
320 is_false_predicate (tree cond
)
322 return (cond
!= NULL_TREE
323 && (cond
== boolean_false_node
324 || integer_zerop (cond
)));
327 /* Return true when COND is a true predicate. */
330 is_true_predicate (tree cond
)
332 return (cond
== NULL_TREE
333 || cond
== boolean_true_node
334 || integer_onep (cond
));
337 /* Returns true when BB has a predicate that is not trivial: true or
341 is_predicated (basic_block bb
)
343 return !is_true_predicate (bb_predicate (bb
));
346 /* Parses the predicate COND and returns its comparison code and
347 operands OP0 and OP1. */
349 static enum tree_code
350 parse_predicate (tree cond
, tree
*op0
, tree
*op1
)
354 if (TREE_CODE (cond
) == SSA_NAME
355 && is_gimple_assign (s
= SSA_NAME_DEF_STMT (cond
)))
357 if (TREE_CODE_CLASS (gimple_assign_rhs_code (s
)) == tcc_comparison
)
359 *op0
= gimple_assign_rhs1 (s
);
360 *op1
= gimple_assign_rhs2 (s
);
361 return gimple_assign_rhs_code (s
);
364 else if (gimple_assign_rhs_code (s
) == TRUTH_NOT_EXPR
)
366 tree op
= gimple_assign_rhs1 (s
);
367 tree type
= TREE_TYPE (op
);
368 enum tree_code code
= parse_predicate (op
, op0
, op1
);
370 return code
== ERROR_MARK
? ERROR_MARK
371 : invert_tree_comparison (code
, HONOR_NANS (type
));
377 if (COMPARISON_CLASS_P (cond
))
379 *op0
= TREE_OPERAND (cond
, 0);
380 *op1
= TREE_OPERAND (cond
, 1);
381 return TREE_CODE (cond
);
387 /* Returns the fold of predicate C1 OR C2 at location LOC. */
390 fold_or_predicates (location_t loc
, tree c1
, tree c2
)
392 tree op1a
, op1b
, op2a
, op2b
;
393 enum tree_code code1
= parse_predicate (c1
, &op1a
, &op1b
);
394 enum tree_code code2
= parse_predicate (c2
, &op2a
, &op2b
);
396 if (code1
!= ERROR_MARK
&& code2
!= ERROR_MARK
)
398 tree t
= maybe_fold_or_comparisons (code1
, op1a
, op1b
,
404 return fold_build2_loc (loc
, TRUTH_OR_EXPR
, boolean_type_node
, c1
, c2
);
407 /* Returns true if N is either a constant or a SSA_NAME. */
410 constant_or_ssa_name (tree n
)
412 switch (TREE_CODE (n
))
425 /* Returns either a COND_EXPR or the folded expression if the folded
426 expression is a MIN_EXPR, a MAX_EXPR, an ABS_EXPR,
427 a constant or a SSA_NAME. */
430 fold_build_cond_expr (tree type
, tree cond
, tree rhs
, tree lhs
)
432 tree rhs1
, lhs1
, cond_expr
;
434 /* If COND is comparison r != 0 and r has boolean type, convert COND
435 to SSA_NAME to accept by vect bool pattern. */
436 if (TREE_CODE (cond
) == NE_EXPR
)
438 tree op0
= TREE_OPERAND (cond
, 0);
439 tree op1
= TREE_OPERAND (cond
, 1);
440 if (TREE_CODE (op0
) == SSA_NAME
441 && TREE_CODE (TREE_TYPE (op0
)) == BOOLEAN_TYPE
442 && (integer_zerop (op1
)))
445 cond_expr
= fold_ternary (COND_EXPR
, type
, cond
,
448 if (cond_expr
== NULL_TREE
)
449 return build3 (COND_EXPR
, type
, cond
, rhs
, lhs
);
451 STRIP_USELESS_TYPE_CONVERSION (cond_expr
);
453 if (constant_or_ssa_name (cond_expr
))
456 if (TREE_CODE (cond_expr
) == ABS_EXPR
)
458 rhs1
= TREE_OPERAND (cond_expr
, 1);
459 STRIP_USELESS_TYPE_CONVERSION (rhs1
);
460 if (constant_or_ssa_name (rhs1
))
461 return build1 (ABS_EXPR
, type
, rhs1
);
464 if (TREE_CODE (cond_expr
) == MIN_EXPR
465 || TREE_CODE (cond_expr
) == MAX_EXPR
)
467 lhs1
= TREE_OPERAND (cond_expr
, 0);
468 STRIP_USELESS_TYPE_CONVERSION (lhs1
);
469 rhs1
= TREE_OPERAND (cond_expr
, 1);
470 STRIP_USELESS_TYPE_CONVERSION (rhs1
);
471 if (constant_or_ssa_name (rhs1
)
472 && constant_or_ssa_name (lhs1
))
473 return build2 (TREE_CODE (cond_expr
), type
, lhs1
, rhs1
);
475 return build3 (COND_EXPR
, type
, cond
, rhs
, lhs
);
478 /* Add condition NC to the predicate list of basic block BB. LOOP is
479 the loop to be if-converted. Use predicate of cd-equivalent block
480 for join bb if it exists: we call basic blocks bb1 and bb2
481 cd-equivalent if they are executed under the same condition. */
484 add_to_predicate_list (struct loop
*loop
, basic_block bb
, tree nc
)
489 if (is_true_predicate (nc
))
492 /* If dominance tells us this basic block is always executed,
493 don't record any predicates for it. */
494 if (dominated_by_p (CDI_DOMINATORS
, loop
->latch
, bb
))
497 dom_bb
= get_immediate_dominator (CDI_DOMINATORS
, bb
);
498 /* We use notion of cd equivalence to get simpler predicate for
499 join block, e.g. if join block has 2 predecessors with predicates
500 p1 & p2 and p1 & !p2, we'd like to get p1 for it instead of
501 p1 & p2 | p1 & !p2. */
502 if (dom_bb
!= loop
->header
503 && get_immediate_dominator (CDI_POST_DOMINATORS
, dom_bb
) == bb
)
505 gcc_assert (flow_bb_inside_loop_p (loop
, dom_bb
));
506 bc
= bb_predicate (dom_bb
);
507 if (!is_true_predicate (bc
))
508 set_bb_predicate (bb
, bc
);
510 gcc_assert (is_true_predicate (bb_predicate (bb
)));
511 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
512 fprintf (dump_file
, "Use predicate of bb#%d for bb#%d\n",
513 dom_bb
->index
, bb
->index
);
517 if (!is_predicated (bb
))
521 bc
= bb_predicate (bb
);
522 bc
= fold_or_predicates (EXPR_LOCATION (bc
), nc
, bc
);
523 if (is_true_predicate (bc
))
525 reset_bb_predicate (bb
);
530 /* Allow a TRUTH_NOT_EXPR around the main predicate. */
531 if (TREE_CODE (bc
) == TRUTH_NOT_EXPR
)
532 tp
= &TREE_OPERAND (bc
, 0);
535 if (!is_gimple_condexpr (*tp
))
538 *tp
= force_gimple_operand_1 (*tp
, &stmts
, is_gimple_condexpr
, NULL_TREE
);
539 add_bb_predicate_gimplified_stmts (bb
, stmts
);
541 set_bb_predicate (bb
, bc
);
544 /* Add the condition COND to the previous condition PREV_COND, and add
545 this to the predicate list of the destination of edge E. LOOP is
546 the loop to be if-converted. */
549 add_to_dst_predicate_list (struct loop
*loop
, edge e
,
550 tree prev_cond
, tree cond
)
552 if (!flow_bb_inside_loop_p (loop
, e
->dest
))
555 if (!is_true_predicate (prev_cond
))
556 cond
= fold_build2 (TRUTH_AND_EXPR
, boolean_type_node
,
559 if (!dominated_by_p (CDI_DOMINATORS
, loop
->latch
, e
->dest
))
560 add_to_predicate_list (loop
, e
->dest
, cond
);
563 /* Return true if one of the successor edges of BB exits LOOP. */
566 bb_with_exit_edge_p (struct loop
*loop
, basic_block bb
)
571 FOR_EACH_EDGE (e
, ei
, bb
->succs
)
572 if (loop_exit_edge_p (loop
, e
))
578 /* Given PHI which has more than two arguments, this function checks if
579 it's if-convertible by degenerating its arguments. Specifically, if
580 below two conditions are satisfied:
582 1) Number of PHI arguments with different values equals to 2 and one
583 argument has the only occurrence.
584 2) The edge corresponding to the unique argument isn't critical edge.
586 Such PHI can be handled as PHIs have only two arguments. For example,
589 res = PHI <A_1(e1), A_1(e2), A_2(e3)>;
591 can be transformed into:
593 res = (predicate of e3) ? A_2 : A_1;
595 Return TRUE if it is the case, FALSE otherwise. */
598 phi_convertible_by_degenerating_args (gphi
*phi
)
601 tree arg
, t1
= NULL
, t2
= NULL
;
602 unsigned int i
, i1
= 0, i2
= 0, n1
= 0, n2
= 0;
603 unsigned int num_args
= gimple_phi_num_args (phi
);
605 gcc_assert (num_args
> 2);
607 for (i
= 0; i
< num_args
; i
++)
609 arg
= gimple_phi_arg_def (phi
, i
);
610 if (t1
== NULL
|| operand_equal_p (t1
, arg
, 0))
616 else if (t2
== NULL
|| operand_equal_p (t2
, arg
, 0))
626 if (n1
!= 1 && n2
!= 1)
629 /* Check if the edge corresponding to the unique arg is critical. */
630 e
= gimple_phi_arg_edge (phi
, (n1
== 1) ? i1
: i2
);
631 if (EDGE_COUNT (e
->src
->succs
) > 1)
637 /* Return true when PHI is if-convertible. PHI is part of loop LOOP
638 and it belongs to basic block BB.
640 PHI is not if-convertible if:
641 - it has more than 2 arguments.
643 When we didn't see if-convertible stores, PHI is not
645 - a virtual PHI is immediately used in another PHI node,
646 - there is a virtual PHI in a BB other than the loop->header.
647 When the aggressive_if_conv is set, PHI can have more than
651 if_convertible_phi_p (struct loop
*loop
, basic_block bb
, gphi
*phi
,
652 bool any_mask_load_store
)
654 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
656 fprintf (dump_file
, "-------------------------\n");
657 print_gimple_stmt (dump_file
, phi
, 0, TDF_SLIM
);
660 if (bb
!= loop
->header
)
662 if (gimple_phi_num_args (phi
) != 2
663 && !aggressive_if_conv
664 && !phi_convertible_by_degenerating_args (phi
))
666 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
667 fprintf (dump_file
, "Phi can't be predicated by single cond.\n");
672 if (any_mask_load_store
)
675 /* When there were no if-convertible stores, check
676 that there are no memory writes in the branches of the loop to be
678 if (virtual_operand_p (gimple_phi_result (phi
)))
680 imm_use_iterator imm_iter
;
683 if (bb
!= loop
->header
)
685 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
686 fprintf (dump_file
, "Virtual phi not on loop->header.\n");
690 FOR_EACH_IMM_USE_FAST (use_p
, imm_iter
, gimple_phi_result (phi
))
692 if (gimple_code (USE_STMT (use_p
)) == GIMPLE_PHI
693 && USE_STMT (use_p
) != phi
)
695 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
696 fprintf (dump_file
, "Difficult to handle this virtual phi.\n");
705 /* Records the status of a data reference. This struct is attached to
706 each DR->aux field. */
709 bool rw_unconditionally
;
710 bool w_unconditionally
;
711 bool written_at_least_once
;
715 tree base_w_predicate
;
718 #define IFC_DR(DR) ((struct ifc_dr *) (DR)->aux)
719 #define DR_BASE_W_UNCONDITIONALLY(DR) (IFC_DR (DR)->written_at_least_once)
720 #define DR_RW_UNCONDITIONALLY(DR) (IFC_DR (DR)->rw_unconditionally)
721 #define DR_W_UNCONDITIONALLY(DR) (IFC_DR (DR)->w_unconditionally)
723 /* Iterates over DR's and stores refs, DR and base refs, DR pairs in
724 HASH tables. While storing them in HASH table, it checks if the
725 reference is unconditionally read or written and stores that as a flag
726 information. For base reference it checks if it is written atlest once
727 unconditionally and stores it as flag information along with DR.
728 In other words for every data reference A in STMT there exist other
729 accesses to a data reference with the same base with predicates that
730 add up (OR-up) to the true predicate: this ensures that the data
731 reference A is touched (read or written) on every iteration of the
732 if-converted loop. */
734 hash_memrefs_baserefs_and_store_DRs_read_written_info (data_reference_p a
)
737 data_reference_p
*master_dr
, *base_master_dr
;
738 tree base_ref
= DR_BASE_OBJECT (a
);
739 innermost_loop_behavior
*innermost
= &DR_INNERMOST (a
);
740 tree ca
= bb_predicate (gimple_bb (DR_STMT (a
)));
743 master_dr
= &innermost_DR_map
->get_or_insert (innermost
, &exist1
);
749 IFC_DR (*master_dr
)->w_predicate
750 = fold_or_predicates (UNKNOWN_LOCATION
, ca
,
751 IFC_DR (*master_dr
)->w_predicate
);
752 if (is_true_predicate (IFC_DR (*master_dr
)->w_predicate
))
753 DR_W_UNCONDITIONALLY (*master_dr
) = true;
755 IFC_DR (*master_dr
)->rw_predicate
756 = fold_or_predicates (UNKNOWN_LOCATION
, ca
,
757 IFC_DR (*master_dr
)->rw_predicate
);
758 if (is_true_predicate (IFC_DR (*master_dr
)->rw_predicate
))
759 DR_RW_UNCONDITIONALLY (*master_dr
) = true;
763 base_master_dr
= &baseref_DR_map
->get_or_insert (base_ref
, &exist2
);
766 IFC_DR (*base_master_dr
)->base_w_predicate
767 = fold_or_predicates (UNKNOWN_LOCATION
, ca
,
768 IFC_DR (*base_master_dr
)->base_w_predicate
);
769 if (is_true_predicate (IFC_DR (*base_master_dr
)->base_w_predicate
))
770 DR_BASE_W_UNCONDITIONALLY (*base_master_dr
) = true;
774 /* Return true when the memory references of STMT won't trap in the
775 if-converted code. There are two things that we have to check for:
777 - writes to memory occur to writable memory: if-conversion of
778 memory writes transforms the conditional memory writes into
779 unconditional writes, i.e. "if (cond) A[i] = foo" is transformed
780 into "A[i] = cond ? foo : A[i]", and as the write to memory may not
781 be executed at all in the original code, it may be a readonly
782 memory. To check that A is not const-qualified, we check that
783 there exists at least an unconditional write to A in the current
786 - reads or writes to memory are valid memory accesses for every
787 iteration. To check that the memory accesses are correctly formed
788 and that we are allowed to read and write in these locations, we
789 check that the memory accesses to be if-converted occur at every
790 iteration unconditionally.
792 Returns true for the memory reference in STMT, same memory reference
793 is read or written unconditionally atleast once and the base memory
794 reference is written unconditionally once. This is to check reference
795 will not write fault. Also retuns true if the memory reference is
796 unconditionally read once then we are conditionally writing to memory
797 which is defined as read and write and is bound to the definition
800 ifcvt_memrefs_wont_trap (gimple
*stmt
, vec
<data_reference_p
> drs
)
802 data_reference_p
*master_dr
, *base_master_dr
;
803 data_reference_p a
= drs
[gimple_uid (stmt
) - 1];
805 tree base
= DR_BASE_OBJECT (a
);
806 innermost_loop_behavior
*innermost
= &DR_INNERMOST (a
);
808 gcc_assert (DR_STMT (a
) == stmt
);
809 gcc_assert (DR_BASE_ADDRESS (a
) || DR_OFFSET (a
)
810 || DR_INIT (a
) || DR_STEP (a
));
812 master_dr
= innermost_DR_map
->get (innermost
);
813 gcc_assert (master_dr
!= NULL
);
815 base_master_dr
= baseref_DR_map
->get (base
);
817 /* If a is unconditionally written to it doesn't trap. */
818 if (DR_W_UNCONDITIONALLY (*master_dr
))
821 /* If a is unconditionally accessed then ... */
822 if (DR_RW_UNCONDITIONALLY (*master_dr
))
824 /* an unconditional read won't trap. */
828 /* an unconditionaly write won't trap if the base is written
829 to unconditionally. */
831 && DR_BASE_W_UNCONDITIONALLY (*base_master_dr
))
832 return PARAM_VALUE (PARAM_ALLOW_STORE_DATA_RACES
);
835 /* or the base is know to be not readonly. */
836 tree base_tree
= get_base_address (DR_REF (a
));
837 if (DECL_P (base_tree
)
838 && decl_binds_to_current_def_p (base_tree
)
839 && ! TREE_READONLY (base_tree
))
840 return PARAM_VALUE (PARAM_ALLOW_STORE_DATA_RACES
);
846 /* Return true if STMT could be converted into a masked load or store
847 (conditional load or store based on a mask computed from bb predicate). */
850 ifcvt_can_use_mask_load_store (gimple
*stmt
)
854 basic_block bb
= gimple_bb (stmt
);
857 if (!(flag_tree_loop_vectorize
|| bb
->loop_father
->force_vectorize
)
858 || bb
->loop_father
->dont_vectorize
859 || !gimple_assign_single_p (stmt
)
860 || gimple_has_volatile_ops (stmt
))
863 /* Check whether this is a load or store. */
864 lhs
= gimple_assign_lhs (stmt
);
865 if (gimple_store_p (stmt
))
867 if (!is_gimple_val (gimple_assign_rhs1 (stmt
)))
872 else if (gimple_assign_load_p (stmt
))
875 ref
= gimple_assign_rhs1 (stmt
);
880 if (may_be_nonaddressable_p (ref
))
883 /* Mask should be integer mode of the same size as the load/store
885 mode
= TYPE_MODE (TREE_TYPE (lhs
));
886 if (int_mode_for_mode (mode
) == BLKmode
887 || VECTOR_MODE_P (mode
))
890 if (can_vec_mask_load_store_p (mode
, VOIDmode
, is_load
))
896 /* Return true when STMT is if-convertible.
898 GIMPLE_ASSIGN statement is not if-convertible if,
901 - LHS is not var decl. */
904 if_convertible_gimple_assign_stmt_p (gimple
*stmt
,
905 vec
<data_reference_p
> refs
,
906 bool *any_mask_load_store
)
908 tree lhs
= gimple_assign_lhs (stmt
);
910 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
912 fprintf (dump_file
, "-------------------------\n");
913 print_gimple_stmt (dump_file
, stmt
, 0, TDF_SLIM
);
916 if (!is_gimple_reg_type (TREE_TYPE (lhs
)))
919 /* Some of these constrains might be too conservative. */
920 if (stmt_ends_bb_p (stmt
)
921 || gimple_has_volatile_ops (stmt
)
922 || (TREE_CODE (lhs
) == SSA_NAME
923 && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs
))
924 || gimple_has_side_effects (stmt
))
926 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
927 fprintf (dump_file
, "stmt not suitable for ifcvt\n");
931 /* tree-into-ssa.c uses GF_PLF_1, so avoid it, because
932 in between if_convertible_loop_p and combine_blocks
933 we can perform loop versioning. */
934 gimple_set_plf (stmt
, GF_PLF_2
, false);
936 if ((! gimple_vuse (stmt
)
937 || gimple_could_trap_p_1 (stmt
, false, false)
938 || ! ifcvt_memrefs_wont_trap (stmt
, refs
))
939 && gimple_could_trap_p (stmt
))
941 if (ifcvt_can_use_mask_load_store (stmt
))
943 gimple_set_plf (stmt
, GF_PLF_2
, true);
944 *any_mask_load_store
= true;
947 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
948 fprintf (dump_file
, "tree could trap...\n");
952 /* When if-converting stores force versioning, likewise if we
953 ended up generating store data races. */
954 if (gimple_vdef (stmt
))
955 *any_mask_load_store
= true;
960 /* Return true when STMT is if-convertible.
962 A statement is if-convertible if:
963 - it is an if-convertible GIMPLE_ASSIGN,
964 - it is a GIMPLE_LABEL or a GIMPLE_COND,
965 - it is builtins call. */
968 if_convertible_stmt_p (gimple
*stmt
, vec
<data_reference_p
> refs
,
969 bool *any_mask_load_store
)
971 switch (gimple_code (stmt
))
979 return if_convertible_gimple_assign_stmt_p (stmt
, refs
,
980 any_mask_load_store
);
984 tree fndecl
= gimple_call_fndecl (stmt
);
987 int flags
= gimple_call_flags (stmt
);
988 if ((flags
& ECF_CONST
)
989 && !(flags
& ECF_LOOPING_CONST_OR_PURE
)
990 /* We can only vectorize some builtins at the moment,
991 so restrict if-conversion to those. */
992 && DECL_BUILT_IN (fndecl
))
999 /* Don't know what to do with 'em so don't do anything. */
1000 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1002 fprintf (dump_file
, "don't know what to do\n");
1003 print_gimple_stmt (dump_file
, stmt
, 0, TDF_SLIM
);
1012 /* Assumes that BB has more than 1 predecessors.
1013 Returns false if at least one successor is not on critical edge
1014 and true otherwise. */
1017 all_preds_critical_p (basic_block bb
)
1022 FOR_EACH_EDGE (e
, ei
, bb
->preds
)
1023 if (EDGE_COUNT (e
->src
->succs
) == 1)
1028 /* Returns true if at least one successor in on critical edge. */
1030 has_pred_critical_p (basic_block bb
)
1035 FOR_EACH_EDGE (e
, ei
, bb
->preds
)
1036 if (EDGE_COUNT (e
->src
->succs
) > 1)
1041 /* Return true when BB is if-convertible. This routine does not check
1042 basic block's statements and phis.
1044 A basic block is not if-convertible if:
1045 - it is non-empty and it is after the exit block (in BFS order),
1046 - it is after the exit block but before the latch,
1047 - its edges are not normal.
1049 Last restriction is valid if aggressive_if_conv is false.
1051 EXIT_BB is the basic block containing the exit of the LOOP. BB is
1055 if_convertible_bb_p (struct loop
*loop
, basic_block bb
, basic_block exit_bb
)
1060 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1061 fprintf (dump_file
, "----------[%d]-------------\n", bb
->index
);
1063 if (EDGE_COUNT (bb
->succs
) > 2)
1068 if (bb
!= loop
->latch
)
1070 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1071 fprintf (dump_file
, "basic block after exit bb but before latch\n");
1074 else if (!empty_block_p (bb
))
1076 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1077 fprintf (dump_file
, "non empty basic block after exit bb\n");
1080 else if (bb
== loop
->latch
1082 && !dominated_by_p (CDI_DOMINATORS
, bb
, exit_bb
))
1084 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1085 fprintf (dump_file
, "latch is not dominated by exit_block\n");
1090 /* Be less adventurous and handle only normal edges. */
1091 FOR_EACH_EDGE (e
, ei
, bb
->succs
)
1092 if (e
->flags
& (EDGE_EH
| EDGE_ABNORMAL
| EDGE_IRREDUCIBLE_LOOP
))
1094 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1095 fprintf (dump_file
, "Difficult to handle edges\n");
1099 /* At least one incoming edge has to be non-critical as otherwise edge
1100 predicates are not equal to basic-block predicates of the edge
1101 source. This check is skipped if aggressive_if_conv is true. */
1102 if (!aggressive_if_conv
1103 && EDGE_COUNT (bb
->preds
) > 1
1104 && bb
!= loop
->header
1105 && all_preds_critical_p (bb
))
1107 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1108 fprintf (dump_file
, "only critical predecessors\n");
1115 /* Return true when all predecessor blocks of BB are visited. The
1116 VISITED bitmap keeps track of the visited blocks. */
1119 pred_blocks_visited_p (basic_block bb
, bitmap
*visited
)
1123 FOR_EACH_EDGE (e
, ei
, bb
->preds
)
1124 if (!bitmap_bit_p (*visited
, e
->src
->index
))
1130 /* Get body of a LOOP in suitable order for if-conversion. It is
1131 caller's responsibility to deallocate basic block list.
1132 If-conversion suitable order is, breadth first sort (BFS) order
1133 with an additional constraint: select a block only if all its
1134 predecessors are already selected. */
1136 static basic_block
*
1137 get_loop_body_in_if_conv_order (const struct loop
*loop
)
1139 basic_block
*blocks
, *blocks_in_bfs_order
;
1142 unsigned int index
= 0;
1143 unsigned int visited_count
= 0;
1145 gcc_assert (loop
->num_nodes
);
1146 gcc_assert (loop
->latch
!= EXIT_BLOCK_PTR_FOR_FN (cfun
));
1148 blocks
= XCNEWVEC (basic_block
, loop
->num_nodes
);
1149 visited
= BITMAP_ALLOC (NULL
);
1151 blocks_in_bfs_order
= get_loop_body_in_bfs_order (loop
);
1154 while (index
< loop
->num_nodes
)
1156 bb
= blocks_in_bfs_order
[index
];
1158 if (bb
->flags
& BB_IRREDUCIBLE_LOOP
)
1160 free (blocks_in_bfs_order
);
1161 BITMAP_FREE (visited
);
1166 if (!bitmap_bit_p (visited
, bb
->index
))
1168 if (pred_blocks_visited_p (bb
, &visited
)
1169 || bb
== loop
->header
)
1171 /* This block is now visited. */
1172 bitmap_set_bit (visited
, bb
->index
);
1173 blocks
[visited_count
++] = bb
;
1179 if (index
== loop
->num_nodes
1180 && visited_count
!= loop
->num_nodes
)
1184 free (blocks_in_bfs_order
);
1185 BITMAP_FREE (visited
);
1189 /* Returns true when the analysis of the predicates for all the basic
1190 blocks in LOOP succeeded.
1192 predicate_bbs first allocates the predicates of the basic blocks.
1193 These fields are then initialized with the tree expressions
1194 representing the predicates under which a basic block is executed
1195 in the LOOP. As the loop->header is executed at each iteration, it
1196 has the "true" predicate. Other statements executed under a
1197 condition are predicated with that condition, for example
1204 S1 will be predicated with "x", and
1205 S2 will be predicated with "!x". */
1208 predicate_bbs (loop_p loop
)
1212 for (i
= 0; i
< loop
->num_nodes
; i
++)
1213 init_bb_predicate (ifc_bbs
[i
]);
1215 for (i
= 0; i
< loop
->num_nodes
; i
++)
1217 basic_block bb
= ifc_bbs
[i
];
1221 /* The loop latch and loop exit block are always executed and
1222 have no extra conditions to be processed: skip them. */
1223 if (bb
== loop
->latch
1224 || bb_with_exit_edge_p (loop
, bb
))
1226 reset_bb_predicate (bb
);
1230 cond
= bb_predicate (bb
);
1231 stmt
= last_stmt (bb
);
1232 if (stmt
&& gimple_code (stmt
) == GIMPLE_COND
)
1235 edge true_edge
, false_edge
;
1236 location_t loc
= gimple_location (stmt
);
1237 tree c
= build2_loc (loc
, gimple_cond_code (stmt
),
1239 gimple_cond_lhs (stmt
),
1240 gimple_cond_rhs (stmt
));
1242 /* Add new condition into destination's predicate list. */
1243 extract_true_false_edges_from_block (gimple_bb (stmt
),
1244 &true_edge
, &false_edge
);
1246 /* If C is true, then TRUE_EDGE is taken. */
1247 add_to_dst_predicate_list (loop
, true_edge
, unshare_expr (cond
),
1250 /* If C is false, then FALSE_EDGE is taken. */
1251 c2
= build1_loc (loc
, TRUTH_NOT_EXPR
, boolean_type_node
,
1253 add_to_dst_predicate_list (loop
, false_edge
,
1254 unshare_expr (cond
), c2
);
1259 /* If current bb has only one successor, then consider it as an
1260 unconditional goto. */
1261 if (single_succ_p (bb
))
1263 basic_block bb_n
= single_succ (bb
);
1265 /* The successor bb inherits the predicate of its
1266 predecessor. If there is no predicate in the predecessor
1267 bb, then consider the successor bb as always executed. */
1268 if (cond
== NULL_TREE
)
1269 cond
= boolean_true_node
;
1271 add_to_predicate_list (loop
, bb_n
, cond
);
1275 /* The loop header is always executed. */
1276 reset_bb_predicate (loop
->header
);
1277 gcc_assert (bb_predicate_gimplified_stmts (loop
->header
) == NULL
1278 && bb_predicate_gimplified_stmts (loop
->latch
) == NULL
);
1281 /* Return true when LOOP is if-convertible. This is a helper function
1282 for if_convertible_loop_p. REFS and DDRS are initialized and freed
1283 in if_convertible_loop_p. */
1286 if_convertible_loop_p_1 (struct loop
*loop
,
1287 vec
<data_reference_p
> *refs
,
1288 bool *any_mask_load_store
)
1291 basic_block exit_bb
= NULL
;
1293 if (find_data_references_in_loop (loop
, refs
) == chrec_dont_know
)
1296 calculate_dominance_info (CDI_DOMINATORS
);
1297 calculate_dominance_info (CDI_POST_DOMINATORS
);
1299 /* Allow statements that can be handled during if-conversion. */
1300 ifc_bbs
= get_loop_body_in_if_conv_order (loop
);
1303 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1304 fprintf (dump_file
, "Irreducible loop\n");
1308 for (i
= 0; i
< loop
->num_nodes
; i
++)
1310 basic_block bb
= ifc_bbs
[i
];
1312 if (!if_convertible_bb_p (loop
, bb
, exit_bb
))
1315 if (bb_with_exit_edge_p (loop
, bb
))
1319 for (i
= 0; i
< loop
->num_nodes
; i
++)
1321 basic_block bb
= ifc_bbs
[i
];
1322 gimple_stmt_iterator gsi
;
1324 for (gsi
= gsi_start_bb (bb
); !gsi_end_p (gsi
); gsi_next (&gsi
))
1325 switch (gimple_code (gsi_stmt (gsi
)))
1332 gimple_set_uid (gsi_stmt (gsi
), 0);
1339 data_reference_p dr
;
1342 = new hash_map
<innermost_loop_behavior_hash
, data_reference_p
>;
1343 baseref_DR_map
= new hash_map
<tree_operand_hash
, data_reference_p
>;
1345 predicate_bbs (loop
);
1347 for (i
= 0; refs
->iterate (i
, &dr
); i
++)
1349 tree ref
= DR_REF (dr
);
1351 dr
->aux
= XNEW (struct ifc_dr
);
1352 DR_BASE_W_UNCONDITIONALLY (dr
) = false;
1353 DR_RW_UNCONDITIONALLY (dr
) = false;
1354 DR_W_UNCONDITIONALLY (dr
) = false;
1355 IFC_DR (dr
)->rw_predicate
= boolean_false_node
;
1356 IFC_DR (dr
)->w_predicate
= boolean_false_node
;
1357 IFC_DR (dr
)->base_w_predicate
= boolean_false_node
;
1358 if (gimple_uid (DR_STMT (dr
)) == 0)
1359 gimple_set_uid (DR_STMT (dr
), i
+ 1);
1361 /* If DR doesn't have innermost loop behavior or it's a compound
1362 memory reference, we synthesize its innermost loop behavior
1364 if (TREE_CODE (ref
) == COMPONENT_REF
1365 || TREE_CODE (ref
) == IMAGPART_EXPR
1366 || TREE_CODE (ref
) == REALPART_EXPR
1367 || !(DR_BASE_ADDRESS (dr
) || DR_OFFSET (dr
)
1368 || DR_INIT (dr
) || DR_STEP (dr
)))
1370 while (TREE_CODE (ref
) == COMPONENT_REF
1371 || TREE_CODE (ref
) == IMAGPART_EXPR
1372 || TREE_CODE (ref
) == REALPART_EXPR
)
1373 ref
= TREE_OPERAND (ref
, 0);
1375 DR_BASE_ADDRESS (dr
) = ref
;
1376 DR_OFFSET (dr
) = NULL
;
1377 DR_INIT (dr
) = NULL
;
1378 DR_STEP (dr
) = NULL
;
1379 DR_ALIGNED_TO (dr
) = NULL
;
1381 hash_memrefs_baserefs_and_store_DRs_read_written_info (dr
);
1384 for (i
= 0; i
< loop
->num_nodes
; i
++)
1386 basic_block bb
= ifc_bbs
[i
];
1387 gimple_stmt_iterator itr
;
1389 /* Check the if-convertibility of statements in predicated BBs. */
1390 if (!dominated_by_p (CDI_DOMINATORS
, loop
->latch
, bb
))
1391 for (itr
= gsi_start_bb (bb
); !gsi_end_p (itr
); gsi_next (&itr
))
1392 if (!if_convertible_stmt_p (gsi_stmt (itr
), *refs
,
1393 any_mask_load_store
))
1397 for (i
= 0; i
< loop
->num_nodes
; i
++)
1398 free_bb_predicate (ifc_bbs
[i
]);
1400 /* Checking PHIs needs to be done after stmts, as the fact whether there
1401 are any masked loads or stores affects the tests. */
1402 for (i
= 0; i
< loop
->num_nodes
; i
++)
1404 basic_block bb
= ifc_bbs
[i
];
1407 for (itr
= gsi_start_phis (bb
); !gsi_end_p (itr
); gsi_next (&itr
))
1408 if (!if_convertible_phi_p (loop
, bb
, itr
.phi (),
1409 *any_mask_load_store
))
1414 fprintf (dump_file
, "Applying if-conversion\n");
1419 /* Return true when LOOP is if-convertible.
1420 LOOP is if-convertible if:
1422 - it has two or more basic blocks,
1423 - it has only one exit,
1424 - loop header is not the exit edge,
1425 - if its basic blocks and phi nodes are if convertible. */
1428 if_convertible_loop_p (struct loop
*loop
, bool *any_mask_load_store
)
1433 vec
<data_reference_p
> refs
;
1435 /* Handle only innermost loop. */
1436 if (!loop
|| loop
->inner
)
1438 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1439 fprintf (dump_file
, "not innermost loop\n");
1443 /* If only one block, no need for if-conversion. */
1444 if (loop
->num_nodes
<= 2)
1446 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1447 fprintf (dump_file
, "less than 2 basic blocks\n");
1451 /* More than one loop exit is too much to handle. */
1452 if (!single_exit (loop
))
1454 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1455 fprintf (dump_file
, "multiple exits\n");
1459 /* If one of the loop header's edge is an exit edge then do not
1460 apply if-conversion. */
1461 FOR_EACH_EDGE (e
, ei
, loop
->header
->succs
)
1462 if (loop_exit_edge_p (loop
, e
))
1466 res
= if_convertible_loop_p_1 (loop
, &refs
, any_mask_load_store
);
1468 data_reference_p dr
;
1470 for (i
= 0; refs
.iterate (i
, &dr
); i
++)
1473 free_data_refs (refs
);
1475 delete innermost_DR_map
;
1476 innermost_DR_map
= NULL
;
1478 delete baseref_DR_map
;
1479 baseref_DR_map
= NULL
;
1484 /* Returns true if def-stmt for phi argument ARG is simple increment/decrement
1485 which is in predicated basic block.
1486 In fact, the following PHI pattern is searching:
1488 reduc_1 = PHI <..., reduc_2>
1492 reduc_2 = PHI <reduc_1, reduc_3>
1494 ARG_0 and ARG_1 are correspondent PHI arguments.
1495 REDUC, OP0 and OP1 contain reduction stmt and its operands.
1496 EXTENDED is true if PHI has > 2 arguments. */
1499 is_cond_scalar_reduction (gimple
*phi
, gimple
**reduc
, tree arg_0
, tree arg_1
,
1500 tree
*op0
, tree
*op1
, bool extended
)
1502 tree lhs
, r_op1
, r_op2
;
1504 gimple
*header_phi
= NULL
;
1505 enum tree_code reduction_op
;
1506 basic_block bb
= gimple_bb (phi
);
1507 struct loop
*loop
= bb
->loop_father
;
1508 edge latch_e
= loop_latch_edge (loop
);
1509 imm_use_iterator imm_iter
;
1510 use_operand_p use_p
;
1513 bool result
= false;
1514 if (TREE_CODE (arg_0
) != SSA_NAME
|| TREE_CODE (arg_1
) != SSA_NAME
)
1517 if (!extended
&& gimple_code (SSA_NAME_DEF_STMT (arg_0
)) == GIMPLE_PHI
)
1520 header_phi
= SSA_NAME_DEF_STMT (arg_0
);
1521 stmt
= SSA_NAME_DEF_STMT (arg_1
);
1523 else if (gimple_code (SSA_NAME_DEF_STMT (arg_1
)) == GIMPLE_PHI
)
1526 header_phi
= SSA_NAME_DEF_STMT (arg_1
);
1527 stmt
= SSA_NAME_DEF_STMT (arg_0
);
1531 if (gimple_bb (header_phi
) != loop
->header
)
1534 if (PHI_ARG_DEF_FROM_EDGE (header_phi
, latch_e
) != PHI_RESULT (phi
))
1537 if (gimple_code (stmt
) != GIMPLE_ASSIGN
1538 || gimple_has_volatile_ops (stmt
))
1541 if (!flow_bb_inside_loop_p (loop
, gimple_bb (stmt
)))
1544 if (!is_predicated (gimple_bb (stmt
)))
1547 /* Check that stmt-block is predecessor of phi-block. */
1548 FOR_EACH_EDGE (e
, ei
, gimple_bb (stmt
)->succs
)
1557 if (!has_single_use (lhs
))
1560 reduction_op
= gimple_assign_rhs_code (stmt
);
1561 if (reduction_op
!= PLUS_EXPR
&& reduction_op
!= MINUS_EXPR
)
1563 r_op1
= gimple_assign_rhs1 (stmt
);
1564 r_op2
= gimple_assign_rhs2 (stmt
);
1566 /* Make R_OP1 to hold reduction variable. */
1567 if (r_op2
== PHI_RESULT (header_phi
)
1568 && reduction_op
== PLUS_EXPR
)
1569 std::swap (r_op1
, r_op2
);
1570 else if (r_op1
!= PHI_RESULT (header_phi
))
1573 /* Check that R_OP1 is used in reduction stmt or in PHI only. */
1574 FOR_EACH_IMM_USE_FAST (use_p
, imm_iter
, r_op1
)
1576 gimple
*use_stmt
= USE_STMT (use_p
);
1577 if (is_gimple_debug (use_stmt
))
1579 if (use_stmt
== stmt
)
1581 if (gimple_code (use_stmt
) != GIMPLE_PHI
)
1585 *op0
= r_op1
; *op1
= r_op2
;
1590 /* Converts conditional scalar reduction into unconditional form, e.g.
1592 if (_5 != 0) goto bb_5 else goto bb_6
1598 # res_2 = PHI <res_13(4), res_6(5)>
1601 will be converted into sequence
1602 _ifc__1 = _5 != 0 ? 1 : 0;
1603 res_2 = res_13 + _ifc__1;
1604 Argument SWAP tells that arguments of conditional expression should be
1606 Returns rhs of resulting PHI assignment. */
1609 convert_scalar_cond_reduction (gimple
*reduc
, gimple_stmt_iterator
*gsi
,
1610 tree cond
, tree op0
, tree op1
, bool swap
)
1612 gimple_stmt_iterator stmt_it
;
1615 tree rhs1
= gimple_assign_rhs1 (reduc
);
1616 tree tmp
= make_temp_ssa_name (TREE_TYPE (rhs1
), NULL
, "_ifc_");
1618 tree zero
= build_zero_cst (TREE_TYPE (rhs1
));
1620 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1622 fprintf (dump_file
, "Found cond scalar reduction.\n");
1623 print_gimple_stmt (dump_file
, reduc
, 0, TDF_SLIM
);
1626 /* Build cond expression using COND and constant operand
1627 of reduction rhs. */
1628 c
= fold_build_cond_expr (TREE_TYPE (rhs1
),
1629 unshare_expr (cond
),
1633 /* Create assignment stmt and insert it at GSI. */
1634 new_assign
= gimple_build_assign (tmp
, c
);
1635 gsi_insert_before (gsi
, new_assign
, GSI_SAME_STMT
);
1636 /* Build rhs for unconditional increment/decrement. */
1637 rhs
= fold_build2 (gimple_assign_rhs_code (reduc
),
1638 TREE_TYPE (rhs1
), op0
, tmp
);
1640 /* Delete original reduction stmt. */
1641 stmt_it
= gsi_for_stmt (reduc
);
1642 gsi_remove (&stmt_it
, true);
1643 release_defs (reduc
);
1647 /* Produce condition for all occurrences of ARG in PHI node. */
1650 gen_phi_arg_condition (gphi
*phi
, vec
<int> *occur
,
1651 gimple_stmt_iterator
*gsi
)
1655 tree cond
= NULL_TREE
;
1659 len
= occur
->length ();
1660 gcc_assert (len
> 0);
1661 for (i
= 0; i
< len
; i
++)
1663 e
= gimple_phi_arg_edge (phi
, (*occur
)[i
]);
1664 c
= bb_predicate (e
->src
);
1665 if (is_true_predicate (c
))
1667 c
= force_gimple_operand_gsi_1 (gsi
, unshare_expr (c
),
1668 is_gimple_condexpr
, NULL_TREE
,
1669 true, GSI_SAME_STMT
);
1670 if (cond
!= NULL_TREE
)
1672 /* Must build OR expression. */
1673 cond
= fold_or_predicates (EXPR_LOCATION (c
), c
, cond
);
1674 cond
= force_gimple_operand_gsi_1 (gsi
, unshare_expr (cond
),
1675 is_gimple_condexpr
, NULL_TREE
,
1676 true, GSI_SAME_STMT
);
1681 gcc_assert (cond
!= NULL_TREE
);
1685 /* Replace a scalar PHI node with a COND_EXPR using COND as condition.
1686 This routine can handle PHI nodes with more than two arguments.
1689 S1: A = PHI <x1(1), x2(5)>
1691 S2: A = cond ? x1 : x2;
1693 The generated code is inserted at GSI that points to the top of
1694 basic block's statement list.
1695 If PHI node has more than two arguments a chain of conditional
1696 expression is produced. */
1700 predicate_scalar_phi (gphi
*phi
, gimple_stmt_iterator
*gsi
)
1702 gimple
*new_stmt
= NULL
, *reduc
;
1703 tree rhs
, res
, arg0
, arg1
, op0
, op1
, scev
;
1705 unsigned int index0
;
1706 unsigned int max
, args_len
;
1711 res
= gimple_phi_result (phi
);
1712 if (virtual_operand_p (res
))
1715 if ((rhs
= degenerate_phi_result (phi
))
1716 || ((scev
= analyze_scalar_evolution (gimple_bb (phi
)->loop_father
,
1718 && !chrec_contains_undetermined (scev
)
1720 && (rhs
= gimple_phi_arg_def (phi
, 0))))
1722 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1724 fprintf (dump_file
, "Degenerate phi!\n");
1725 print_gimple_stmt (dump_file
, phi
, 0, TDF_SLIM
);
1727 new_stmt
= gimple_build_assign (res
, rhs
);
1728 gsi_insert_before (gsi
, new_stmt
, GSI_SAME_STMT
);
1729 update_stmt (new_stmt
);
1733 bb
= gimple_bb (phi
);
1734 if (EDGE_COUNT (bb
->preds
) == 2)
1736 /* Predicate ordinary PHI node with 2 arguments. */
1737 edge first_edge
, second_edge
;
1738 basic_block true_bb
;
1739 first_edge
= EDGE_PRED (bb
, 0);
1740 second_edge
= EDGE_PRED (bb
, 1);
1741 cond
= bb_predicate (first_edge
->src
);
1742 if (TREE_CODE (cond
) == TRUTH_NOT_EXPR
)
1743 std::swap (first_edge
, second_edge
);
1744 if (EDGE_COUNT (first_edge
->src
->succs
) > 1)
1746 cond
= bb_predicate (second_edge
->src
);
1747 if (TREE_CODE (cond
) == TRUTH_NOT_EXPR
)
1748 cond
= TREE_OPERAND (cond
, 0);
1750 first_edge
= second_edge
;
1753 cond
= bb_predicate (first_edge
->src
);
1754 /* Gimplify the condition to a valid cond-expr conditonal operand. */
1755 cond
= force_gimple_operand_gsi_1 (gsi
, unshare_expr (cond
),
1756 is_gimple_condexpr
, NULL_TREE
,
1757 true, GSI_SAME_STMT
);
1758 true_bb
= first_edge
->src
;
1759 if (EDGE_PRED (bb
, 1)->src
== true_bb
)
1761 arg0
= gimple_phi_arg_def (phi
, 1);
1762 arg1
= gimple_phi_arg_def (phi
, 0);
1766 arg0
= gimple_phi_arg_def (phi
, 0);
1767 arg1
= gimple_phi_arg_def (phi
, 1);
1769 if (is_cond_scalar_reduction (phi
, &reduc
, arg0
, arg1
,
1771 /* Convert reduction stmt into vectorizable form. */
1772 rhs
= convert_scalar_cond_reduction (reduc
, gsi
, cond
, op0
, op1
,
1773 true_bb
!= gimple_bb (reduc
));
1775 /* Build new RHS using selected condition and arguments. */
1776 rhs
= fold_build_cond_expr (TREE_TYPE (res
), unshare_expr (cond
),
1778 new_stmt
= gimple_build_assign (res
, rhs
);
1779 gsi_insert_before (gsi
, new_stmt
, GSI_SAME_STMT
);
1780 update_stmt (new_stmt
);
1782 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1784 fprintf (dump_file
, "new phi replacement stmt\n");
1785 print_gimple_stmt (dump_file
, new_stmt
, 0, TDF_SLIM
);
1790 /* Create hashmap for PHI node which contain vector of argument indexes
1791 having the same value. */
1793 hash_map
<tree_operand_hash
, auto_vec
<int> > phi_arg_map
;
1794 unsigned int num_args
= gimple_phi_num_args (phi
);
1796 /* Vector of different PHI argument values. */
1797 auto_vec
<tree
> args (num_args
);
1799 /* Compute phi_arg_map. */
1800 for (i
= 0; i
< num_args
; i
++)
1804 arg
= gimple_phi_arg_def (phi
, i
);
1805 if (!phi_arg_map
.get (arg
))
1806 args
.quick_push (arg
);
1807 phi_arg_map
.get_or_insert (arg
).safe_push (i
);
1810 /* Determine element with max number of occurrences. */
1813 args_len
= args
.length ();
1814 for (i
= 0; i
< args_len
; i
++)
1817 if ((len
= phi_arg_map
.get (args
[i
])->length ()) > max
)
1824 /* Put element with max number of occurences to the end of ARGS. */
1825 if (max_ind
!= -1 && max_ind
+1 != (int) args_len
)
1826 std::swap (args
[args_len
- 1], args
[max_ind
]);
1828 /* Handle one special case when number of arguments with different values
1829 is equal 2 and one argument has the only occurrence. Such PHI can be
1830 handled as if would have only 2 arguments. */
1831 if (args_len
== 2 && phi_arg_map
.get (args
[0])->length () == 1)
1834 indexes
= phi_arg_map
.get (args
[0]);
1835 index0
= (*indexes
)[0];
1838 e
= gimple_phi_arg_edge (phi
, index0
);
1839 cond
= bb_predicate (e
->src
);
1840 if (TREE_CODE (cond
) == TRUTH_NOT_EXPR
)
1843 cond
= TREE_OPERAND (cond
, 0);
1845 /* Gimplify the condition to a valid cond-expr conditonal operand. */
1846 cond
= force_gimple_operand_gsi_1 (gsi
, unshare_expr (cond
),
1847 is_gimple_condexpr
, NULL_TREE
,
1848 true, GSI_SAME_STMT
);
1849 if (!(is_cond_scalar_reduction (phi
, &reduc
, arg0
, arg1
,
1851 rhs
= fold_build_cond_expr (TREE_TYPE (res
), unshare_expr (cond
),
1855 /* Convert reduction stmt into vectorizable form. */
1856 rhs
= convert_scalar_cond_reduction (reduc
, gsi
, cond
, op0
, op1
,
1858 new_stmt
= gimple_build_assign (res
, rhs
);
1859 gsi_insert_before (gsi
, new_stmt
, GSI_SAME_STMT
);
1860 update_stmt (new_stmt
);
1866 tree type
= TREE_TYPE (gimple_phi_result (phi
));
1869 for (i
= 0; i
< args_len
; i
++)
1872 indexes
= phi_arg_map
.get (args
[i
]);
1873 if (i
!= args_len
- 1)
1874 lhs
= make_temp_ssa_name (type
, NULL
, "_ifc_");
1877 cond
= gen_phi_arg_condition (phi
, indexes
, gsi
);
1878 rhs
= fold_build_cond_expr (type
, unshare_expr (cond
),
1880 new_stmt
= gimple_build_assign (lhs
, rhs
);
1881 gsi_insert_before (gsi
, new_stmt
, GSI_SAME_STMT
);
1882 update_stmt (new_stmt
);
1887 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1889 fprintf (dump_file
, "new extended phi replacement stmt\n");
1890 print_gimple_stmt (dump_file
, new_stmt
, 0, TDF_SLIM
);
1894 /* Replaces in LOOP all the scalar phi nodes other than those in the
1895 LOOP->header block with conditional modify expressions. */
1898 predicate_all_scalar_phis (struct loop
*loop
)
1901 unsigned int orig_loop_num_nodes
= loop
->num_nodes
;
1904 for (i
= 1; i
< orig_loop_num_nodes
; i
++)
1907 gimple_stmt_iterator gsi
;
1908 gphi_iterator phi_gsi
;
1911 if (bb
== loop
->header
)
1914 if (EDGE_COUNT (bb
->preds
) == 1)
1917 phi_gsi
= gsi_start_phis (bb
);
1918 if (gsi_end_p (phi_gsi
))
1921 gsi
= gsi_after_labels (bb
);
1922 while (!gsi_end_p (phi_gsi
))
1924 phi
= phi_gsi
.phi ();
1925 predicate_scalar_phi (phi
, &gsi
);
1926 release_phi_node (phi
);
1927 gsi_next (&phi_gsi
);
1930 set_phi_nodes (bb
, NULL
);
1934 /* Insert in each basic block of LOOP the statements produced by the
1935 gimplification of the predicates. */
1938 insert_gimplified_predicates (loop_p loop
, bool any_mask_load_store
)
1942 for (i
= 0; i
< loop
->num_nodes
; i
++)
1944 basic_block bb
= ifc_bbs
[i
];
1946 if (!is_predicated (bb
))
1947 gcc_assert (bb_predicate_gimplified_stmts (bb
) == NULL
);
1948 if (!is_predicated (bb
))
1950 /* Do not insert statements for a basic block that is not
1951 predicated. Also make sure that the predicate of the
1952 basic block is set to true. */
1953 reset_bb_predicate (bb
);
1957 stmts
= bb_predicate_gimplified_stmts (bb
);
1960 if (any_mask_load_store
)
1962 /* Insert the predicate of the BB just after the label,
1963 as the if-conversion of memory writes will use this
1965 gimple_stmt_iterator gsi
= gsi_after_labels (bb
);
1966 gsi_insert_seq_before (&gsi
, stmts
, GSI_SAME_STMT
);
1970 /* Insert the predicate of the BB at the end of the BB
1971 as this would reduce the register pressure: the only
1972 use of this predicate will be in successor BBs. */
1973 gimple_stmt_iterator gsi
= gsi_last_bb (bb
);
1976 || stmt_ends_bb_p (gsi_stmt (gsi
)))
1977 gsi_insert_seq_before (&gsi
, stmts
, GSI_SAME_STMT
);
1979 gsi_insert_seq_after (&gsi
, stmts
, GSI_SAME_STMT
);
1982 /* Once the sequence is code generated, set it to NULL. */
1983 set_bb_predicate_gimplified_stmts (bb
, NULL
);
1988 /* Helper function for predicate_mem_writes. Returns index of existent
1989 mask if it was created for given SIZE and -1 otherwise. */
1992 mask_exists (int size
, vec
<int> vec
)
1996 FOR_EACH_VEC_ELT (vec
, ix
, v
)
2002 /* Predicate each write to memory in LOOP.
2004 This function transforms control flow constructs containing memory
2007 | for (i = 0; i < N; i++)
2011 into the following form that does not contain control flow:
2013 | for (i = 0; i < N; i++)
2014 | A[i] = cond ? expr : A[i];
2016 The original CFG looks like this:
2023 | if (i < N) goto bb_5 else goto bb_2
2027 | cond = some_computation;
2028 | if (cond) goto bb_3 else goto bb_4
2040 insert_gimplified_predicates inserts the computation of the COND
2041 expression at the beginning of the destination basic block:
2048 | if (i < N) goto bb_5 else goto bb_2
2052 | cond = some_computation;
2053 | if (cond) goto bb_3 else goto bb_4
2057 | cond = some_computation;
2066 predicate_mem_writes is then predicating the memory write as follows:
2073 | if (i < N) goto bb_5 else goto bb_2
2077 | if (cond) goto bb_3 else goto bb_4
2081 | cond = some_computation;
2082 | A[i] = cond ? expr : A[i];
2090 and finally combine_blocks removes the basic block boundaries making
2091 the loop vectorizable:
2095 | if (i < N) goto bb_5 else goto bb_1
2099 | cond = some_computation;
2100 | A[i] = cond ? expr : A[i];
2101 | if (i < N) goto bb_5 else goto bb_4
2110 predicate_mem_writes (loop_p loop
)
2112 unsigned int i
, orig_loop_num_nodes
= loop
->num_nodes
;
2113 auto_vec
<int, 1> vect_sizes
;
2114 auto_vec
<tree
, 1> vect_masks
;
2116 for (i
= 1; i
< orig_loop_num_nodes
; i
++)
2118 gimple_stmt_iterator gsi
;
2119 basic_block bb
= ifc_bbs
[i
];
2120 tree cond
= bb_predicate (bb
);
2125 if (is_true_predicate (cond
) || is_false_predicate (cond
))
2129 if (TREE_CODE (cond
) == TRUTH_NOT_EXPR
)
2132 cond
= TREE_OPERAND (cond
, 0);
2135 vect_sizes
.truncate (0);
2136 vect_masks
.truncate (0);
2138 for (gsi
= gsi_start_bb (bb
); !gsi_end_p (gsi
); gsi_next (&gsi
))
2139 if (!gimple_assign_single_p (stmt
= gsi_stmt (gsi
)))
2141 else if (gimple_plf (stmt
, GF_PLF_2
))
2143 tree lhs
= gimple_assign_lhs (stmt
);
2144 tree rhs
= gimple_assign_rhs1 (stmt
);
2145 tree ref
, addr
, ptr
, mask
;
2147 gimple_seq stmts
= NULL
;
2148 int bitsize
= GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (lhs
)));
2149 ref
= TREE_CODE (lhs
) == SSA_NAME
? rhs
: lhs
;
2150 mark_addressable (ref
);
2151 addr
= force_gimple_operand_gsi (&gsi
, build_fold_addr_expr (ref
),
2152 true, NULL_TREE
, true,
2154 if (!vect_sizes
.is_empty ()
2155 && (index
= mask_exists (bitsize
, vect_sizes
)) != -1)
2156 /* Use created mask. */
2157 mask
= vect_masks
[index
];
2160 if (COMPARISON_CLASS_P (cond
))
2161 mask
= gimple_build (&stmts
, TREE_CODE (cond
),
2163 TREE_OPERAND (cond
, 0),
2164 TREE_OPERAND (cond
, 1));
2167 gcc_assert (TREE_CODE (cond
) == SSA_NAME
);
2174 = constant_boolean_node (true, TREE_TYPE (mask
));
2175 mask
= gimple_build (&stmts
, BIT_XOR_EXPR
,
2176 TREE_TYPE (mask
), mask
, true_val
);
2178 gsi_insert_seq_before (&gsi
, stmts
, GSI_SAME_STMT
);
2180 mask
= ifc_temp_var (TREE_TYPE (mask
), mask
, &gsi
);
2181 /* Save mask and its size for further use. */
2182 vect_sizes
.safe_push (bitsize
);
2183 vect_masks
.safe_push (mask
);
2185 ptr
= build_int_cst (reference_alias_ptr_type (ref
),
2186 get_object_alignment (ref
));
2187 /* Copy points-to info if possible. */
2188 if (TREE_CODE (addr
) == SSA_NAME
&& !SSA_NAME_PTR_INFO (addr
))
2189 copy_ref_info (build2 (MEM_REF
, TREE_TYPE (ref
), addr
, ptr
),
2191 if (TREE_CODE (lhs
) == SSA_NAME
)
2194 = gimple_build_call_internal (IFN_MASK_LOAD
, 3, addr
,
2196 gimple_call_set_lhs (new_stmt
, lhs
);
2200 = gimple_build_call_internal (IFN_MASK_STORE
, 4, addr
, ptr
,
2202 gsi_replace (&gsi
, new_stmt
, true);
2204 else if (gimple_vdef (stmt
))
2206 tree lhs
= gimple_assign_lhs (stmt
);
2207 tree rhs
= gimple_assign_rhs1 (stmt
);
2208 tree type
= TREE_TYPE (lhs
);
2210 lhs
= ifc_temp_var (type
, unshare_expr (lhs
), &gsi
);
2211 rhs
= ifc_temp_var (type
, unshare_expr (rhs
), &gsi
);
2213 std::swap (lhs
, rhs
);
2214 cond
= force_gimple_operand_gsi_1 (&gsi
, unshare_expr (cond
),
2215 is_gimple_condexpr
, NULL_TREE
,
2216 true, GSI_SAME_STMT
);
2217 rhs
= fold_build_cond_expr (type
, unshare_expr (cond
), rhs
, lhs
);
2218 gimple_assign_set_rhs1 (stmt
, ifc_temp_var (type
, rhs
, &gsi
));
2224 /* Remove all GIMPLE_CONDs and GIMPLE_LABELs of all the basic blocks
2225 other than the exit and latch of the LOOP. Also resets the
2226 GIMPLE_DEBUG information. */
2229 remove_conditions_and_labels (loop_p loop
)
2231 gimple_stmt_iterator gsi
;
2234 for (i
= 0; i
< loop
->num_nodes
; i
++)
2236 basic_block bb
= ifc_bbs
[i
];
2238 if (bb_with_exit_edge_p (loop
, bb
)
2239 || bb
== loop
->latch
)
2242 for (gsi
= gsi_start_bb (bb
); !gsi_end_p (gsi
); )
2243 switch (gimple_code (gsi_stmt (gsi
)))
2247 gsi_remove (&gsi
, true);
2251 /* ??? Should there be conditional GIMPLE_DEBUG_BINDs? */
2252 if (gimple_debug_bind_p (gsi_stmt (gsi
)))
2254 gimple_debug_bind_reset_value (gsi_stmt (gsi
));
2255 update_stmt (gsi_stmt (gsi
));
2266 /* Combine all the basic blocks from LOOP into one or two super basic
2267 blocks. Replace PHI nodes with conditional modify expressions. */
2270 combine_blocks (struct loop
*loop
, bool any_mask_load_store
)
2272 basic_block bb
, exit_bb
, merge_target_bb
;
2273 unsigned int orig_loop_num_nodes
= loop
->num_nodes
;
2278 predicate_bbs (loop
);
2279 remove_conditions_and_labels (loop
);
2280 insert_gimplified_predicates (loop
, any_mask_load_store
);
2281 predicate_all_scalar_phis (loop
);
2283 if (any_mask_load_store
)
2284 predicate_mem_writes (loop
);
2286 /* Merge basic blocks: first remove all the edges in the loop,
2287 except for those from the exit block. */
2289 bool *predicated
= XNEWVEC (bool, orig_loop_num_nodes
);
2290 for (i
= 0; i
< orig_loop_num_nodes
; i
++)
2293 predicated
[i
] = !is_true_predicate (bb_predicate (bb
));
2294 free_bb_predicate (bb
);
2295 if (bb_with_exit_edge_p (loop
, bb
))
2297 gcc_assert (exit_bb
== NULL
);
2301 gcc_assert (exit_bb
!= loop
->latch
);
2303 for (i
= 1; i
< orig_loop_num_nodes
; i
++)
2307 for (ei
= ei_start (bb
->preds
); (e
= ei_safe_edge (ei
));)
2309 if (e
->src
== exit_bb
)
2316 if (exit_bb
!= NULL
)
2318 if (exit_bb
!= loop
->header
)
2320 /* Connect this node to loop header. */
2321 make_edge (loop
->header
, exit_bb
, EDGE_FALLTHRU
);
2322 set_immediate_dominator (CDI_DOMINATORS
, exit_bb
, loop
->header
);
2325 /* Redirect non-exit edges to loop->latch. */
2326 FOR_EACH_EDGE (e
, ei
, exit_bb
->succs
)
2328 if (!loop_exit_edge_p (loop
, e
))
2329 redirect_edge_and_branch (e
, loop
->latch
);
2331 set_immediate_dominator (CDI_DOMINATORS
, loop
->latch
, exit_bb
);
2335 /* If the loop does not have an exit, reconnect header and latch. */
2336 make_edge (loop
->header
, loop
->latch
, EDGE_FALLTHRU
);
2337 set_immediate_dominator (CDI_DOMINATORS
, loop
->latch
, loop
->header
);
2340 merge_target_bb
= loop
->header
;
2341 for (i
= 1; i
< orig_loop_num_nodes
; i
++)
2343 gimple_stmt_iterator gsi
;
2344 gimple_stmt_iterator last
;
2348 if (bb
== exit_bb
|| bb
== loop
->latch
)
2351 /* Make stmts member of loop->header and clear range info from all stmts
2352 in BB which is now no longer executed conditional on a predicate we
2353 could have derived it from. */
2354 for (gsi
= gsi_start_bb (bb
); !gsi_end_p (gsi
); gsi_next (&gsi
))
2356 gimple
*stmt
= gsi_stmt (gsi
);
2357 gimple_set_bb (stmt
, merge_target_bb
);
2362 FOR_EACH_SSA_TREE_OPERAND (op
, stmt
, i
, SSA_OP_DEF
)
2363 reset_flow_sensitive_info (op
);
2367 /* Update stmt list. */
2368 last
= gsi_last_bb (merge_target_bb
);
2369 gsi_insert_seq_after (&last
, bb_seq (bb
), GSI_NEW_STMT
);
2370 set_bb_seq (bb
, NULL
);
2372 delete_basic_block (bb
);
2375 /* If possible, merge loop header to the block with the exit edge.
2376 This reduces the number of basic blocks to two, to please the
2377 vectorizer that handles only loops with two nodes. */
2379 && exit_bb
!= loop
->header
2380 && can_merge_blocks_p (loop
->header
, exit_bb
))
2381 merge_blocks (loop
->header
, exit_bb
);
2388 /* Version LOOP before if-converting it; the original loop
2389 will be if-converted, the new copy of the loop will not,
2390 and the LOOP_VECTORIZED internal call will be guarding which
2391 loop to execute. The vectorizer pass will fold this
2392 internal call into either true or false. */
2395 version_loop_for_if_conversion (struct loop
*loop
)
2397 basic_block cond_bb
;
2398 tree cond
= make_ssa_name (boolean_type_node
);
2399 struct loop
*new_loop
;
2401 gimple_stmt_iterator gsi
;
2403 g
= gimple_build_call_internal (IFN_LOOP_VECTORIZED
, 2,
2404 build_int_cst (integer_type_node
, loop
->num
),
2406 gimple_call_set_lhs (g
, cond
);
2408 initialize_original_copy_tables ();
2409 new_loop
= loop_version (loop
, cond
, &cond_bb
,
2410 REG_BR_PROB_BASE
, REG_BR_PROB_BASE
,
2411 REG_BR_PROB_BASE
, true);
2412 free_original_copy_tables ();
2413 if (new_loop
== NULL
)
2415 new_loop
->dont_vectorize
= true;
2416 new_loop
->force_vectorize
= false;
2417 gsi
= gsi_last_bb (cond_bb
);
2418 gimple_call_set_arg (g
, 1, build_int_cst (integer_type_node
, new_loop
->num
));
2419 gsi_insert_before (&gsi
, g
, GSI_SAME_STMT
);
2420 update_ssa (TODO_update_ssa
);
2424 /* Performs splitting of critical edges if aggressive_if_conv is true.
2425 Returns false if loop won't be if converted and true otherwise. */
2428 ifcvt_split_critical_edges (struct loop
*loop
)
2432 unsigned int num
= loop
->num_nodes
;
2442 if (!single_exit (loop
))
2445 body
= get_loop_body (loop
);
2446 for (i
= 0; i
< num
; i
++)
2449 if (bb
== loop
->latch
2450 || bb_with_exit_edge_p (loop
, bb
))
2452 stmt
= last_stmt (bb
);
2453 /* Skip basic blocks not ending with conditional branch. */
2454 if (!(stmt
&& gimple_code (stmt
) == GIMPLE_COND
))
2456 FOR_EACH_EDGE (e
, ei
, bb
->succs
)
2457 if (EDGE_CRITICAL_P (e
) && e
->dest
->loop_father
== loop
)
2464 /* Assumes that lhs of DEF_STMT have multiple uses.
2465 Delete one use by (1) creation of copy DEF_STMT with
2466 unique lhs; (2) change original use of lhs in one
2467 use statement with newly created lhs. */
2470 ifcvt_split_def_stmt (gimple
*def_stmt
, gimple
*use_stmt
)
2475 gimple_stmt_iterator gsi
;
2476 use_operand_p use_p
;
2477 imm_use_iterator imm_iter
;
2479 var
= gimple_assign_lhs (def_stmt
);
2480 copy_stmt
= gimple_copy (def_stmt
);
2481 lhs
= make_temp_ssa_name (TREE_TYPE (var
), NULL
, "_ifc_");
2482 gimple_assign_set_lhs (copy_stmt
, lhs
);
2483 SSA_NAME_DEF_STMT (lhs
) = copy_stmt
;
2484 /* Insert copy of DEF_STMT. */
2485 gsi
= gsi_for_stmt (def_stmt
);
2486 gsi_insert_after (&gsi
, copy_stmt
, GSI_SAME_STMT
);
2487 /* Change use of var to lhs in use_stmt. */
2488 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
2490 fprintf (dump_file
, "Change use of var ");
2491 print_generic_expr (dump_file
, var
, TDF_SLIM
);
2492 fprintf (dump_file
, " to ");
2493 print_generic_expr (dump_file
, lhs
, TDF_SLIM
);
2494 fprintf (dump_file
, "\n");
2496 FOR_EACH_IMM_USE_FAST (use_p
, imm_iter
, var
)
2498 if (USE_STMT (use_p
) != use_stmt
)
2500 SET_USE (use_p
, lhs
);
2505 /* Traverse bool pattern recursively starting from VAR.
2506 Save its def and use statements to defuse_list if VAR does
2507 not have single use. */
2510 ifcvt_walk_pattern_tree (tree var
, vec
<gimple
*> *defuse_list
,
2514 enum tree_code code
;
2517 def_stmt
= SSA_NAME_DEF_STMT (var
);
2518 if (gimple_code (def_stmt
) != GIMPLE_ASSIGN
)
2520 if (!has_single_use (var
))
2522 /* Put def and use stmts into defuse_list. */
2523 defuse_list
->safe_push (def_stmt
);
2524 defuse_list
->safe_push (use_stmt
);
2525 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
2527 fprintf (dump_file
, "Multiple lhs uses in stmt\n");
2528 print_gimple_stmt (dump_file
, def_stmt
, 0, TDF_SLIM
);
2531 rhs1
= gimple_assign_rhs1 (def_stmt
);
2532 code
= gimple_assign_rhs_code (def_stmt
);
2536 ifcvt_walk_pattern_tree (rhs1
, defuse_list
, def_stmt
);
2539 if ((TYPE_PRECISION (TREE_TYPE (rhs1
)) != 1
2540 || !TYPE_UNSIGNED (TREE_TYPE (rhs1
)))
2541 && TREE_CODE (TREE_TYPE (rhs1
)) != BOOLEAN_TYPE
)
2543 ifcvt_walk_pattern_tree (rhs1
, defuse_list
, def_stmt
);
2546 ifcvt_walk_pattern_tree (rhs1
, defuse_list
, def_stmt
);
2551 ifcvt_walk_pattern_tree (rhs1
, defuse_list
, def_stmt
);
2552 rhs2
= gimple_assign_rhs2 (def_stmt
);
2553 ifcvt_walk_pattern_tree (rhs2
, defuse_list
, def_stmt
);
2561 /* Returns true if STMT can be a root of bool pattern applied
2565 stmt_is_root_of_bool_pattern (gimple
*stmt
)
2567 enum tree_code code
;
2570 code
= gimple_assign_rhs_code (stmt
);
2571 if (CONVERT_EXPR_CODE_P (code
))
2573 lhs
= gimple_assign_lhs (stmt
);
2574 rhs
= gimple_assign_rhs1 (stmt
);
2575 if (TREE_CODE (TREE_TYPE (rhs
)) != BOOLEAN_TYPE
)
2577 if (TREE_CODE (TREE_TYPE (lhs
)) == BOOLEAN_TYPE
)
2581 else if (code
== COND_EXPR
)
2583 rhs
= gimple_assign_rhs1 (stmt
);
2584 if (TREE_CODE (rhs
) != SSA_NAME
)
2591 /* Traverse all statements in BB which correspond to loop header to
2592 find out all statements which can start bool pattern applied by
2593 vectorizer and convert multiple uses in it to conform pattern
2594 restrictions. Such case can occur if the same predicate is used both
2595 for phi node conversion and load/store mask. */
2598 ifcvt_repair_bool_pattern (basic_block bb
)
2602 gimple_stmt_iterator gsi
;
2603 vec
<gimple
*> defuse_list
= vNULL
;
2604 vec
<gimple
*> pattern_roots
= vNULL
;
2609 /* Collect all root pattern statements. */
2610 for (gsi
= gsi_start_bb (bb
); !gsi_end_p (gsi
); gsi_next (&gsi
))
2612 stmt
= gsi_stmt (gsi
);
2613 if (gimple_code (stmt
) != GIMPLE_ASSIGN
)
2615 if (!stmt_is_root_of_bool_pattern (stmt
))
2617 pattern_roots
.safe_push (stmt
);
2620 if (pattern_roots
.is_empty ())
2623 /* Split all statements with multiple uses iteratively since splitting
2624 may create new multiple uses. */
2629 FOR_EACH_VEC_ELT (pattern_roots
, ix
, stmt
)
2631 rhs
= gimple_assign_rhs1 (stmt
);
2632 ifcvt_walk_pattern_tree (rhs
, &defuse_list
, stmt
);
2633 while (defuse_list
.length () > 0)
2636 gimple
*def_stmt
, *use_stmt
;
2637 use_stmt
= defuse_list
.pop ();
2638 def_stmt
= defuse_list
.pop ();
2639 ifcvt_split_def_stmt (def_stmt
, use_stmt
);
2644 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
2645 fprintf (dump_file
, "Repair bool pattern takes %d iterations. \n",
2649 /* Delete redundant statements produced by predication which prevents
2650 loop vectorization. */
2653 ifcvt_local_dce (basic_block bb
)
2658 gimple_stmt_iterator gsi
;
2659 auto_vec
<gimple
*> worklist
;
2660 enum gimple_code code
;
2661 use_operand_p use_p
;
2662 imm_use_iterator imm_iter
;
2664 worklist
.create (64);
2665 /* Consider all phi as live statements. */
2666 for (gsi
= gsi_start_phis (bb
); !gsi_end_p (gsi
); gsi_next (&gsi
))
2668 phi
= gsi_stmt (gsi
);
2669 gimple_set_plf (phi
, GF_PLF_2
, true);
2670 worklist
.safe_push (phi
);
2672 /* Consider load/store statements, CALL and COND as live. */
2673 for (gsi
= gsi_start_bb (bb
); !gsi_end_p (gsi
); gsi_next (&gsi
))
2675 stmt
= gsi_stmt (gsi
);
2676 if (gimple_store_p (stmt
)
2677 || gimple_assign_load_p (stmt
)
2678 || is_gimple_debug (stmt
))
2680 gimple_set_plf (stmt
, GF_PLF_2
, true);
2681 worklist
.safe_push (stmt
);
2684 code
= gimple_code (stmt
);
2685 if (code
== GIMPLE_COND
|| code
== GIMPLE_CALL
)
2687 gimple_set_plf (stmt
, GF_PLF_2
, true);
2688 worklist
.safe_push (stmt
);
2691 gimple_set_plf (stmt
, GF_PLF_2
, false);
2693 if (code
== GIMPLE_ASSIGN
)
2695 tree lhs
= gimple_assign_lhs (stmt
);
2696 FOR_EACH_IMM_USE_FAST (use_p
, imm_iter
, lhs
)
2698 stmt1
= USE_STMT (use_p
);
2699 if (gimple_bb (stmt1
) != bb
)
2701 gimple_set_plf (stmt
, GF_PLF_2
, true);
2702 worklist
.safe_push (stmt
);
2708 /* Propagate liveness through arguments of live stmt. */
2709 while (worklist
.length () > 0)
2712 use_operand_p use_p
;
2715 stmt
= worklist
.pop ();
2716 FOR_EACH_PHI_OR_STMT_USE (use_p
, stmt
, iter
, SSA_OP_USE
)
2718 use
= USE_FROM_PTR (use_p
);
2719 if (TREE_CODE (use
) != SSA_NAME
)
2721 stmt1
= SSA_NAME_DEF_STMT (use
);
2722 if (gimple_bb (stmt1
) != bb
2723 || gimple_plf (stmt1
, GF_PLF_2
))
2725 gimple_set_plf (stmt1
, GF_PLF_2
, true);
2726 worklist
.safe_push (stmt1
);
2729 /* Delete dead statements. */
2730 gsi
= gsi_start_bb (bb
);
2731 while (!gsi_end_p (gsi
))
2733 stmt
= gsi_stmt (gsi
);
2734 if (gimple_plf (stmt
, GF_PLF_2
))
2739 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
2741 fprintf (dump_file
, "Delete dead stmt in bb#%d\n", bb
->index
);
2742 print_gimple_stmt (dump_file
, stmt
, 0, TDF_SLIM
);
2744 gsi_remove (&gsi
, true);
2745 release_defs (stmt
);
/* If-convert LOOP when it is legal.  For the moment this pass has no
   profitability analysis.  Returns non-zero todo flags when something
   changed.  */
2754 tree_if_conversion (struct loop
*loop
)
2756 unsigned int todo
= 0;
2758 bool any_mask_load_store
= false;
2760 /* Set up aggressive if-conversion for loops marked with simd pragma. */
2761 aggressive_if_conv
= loop
->force_vectorize
;
2762 /* Check either outer loop was marked with simd pragma. */
2763 if (!aggressive_if_conv
)
2765 struct loop
*outer_loop
= loop_outer (loop
);
2766 if (outer_loop
&& outer_loop
->force_vectorize
)
2767 aggressive_if_conv
= true;
2770 if (aggressive_if_conv
)
2771 if (!ifcvt_split_critical_edges (loop
))
2774 if (!if_convertible_loop_p (loop
, &any_mask_load_store
)
2775 || !dbg_cnt (if_conversion_tree
))
2778 if (any_mask_load_store
2779 && ((!flag_tree_loop_vectorize
&& !loop
->force_vectorize
)
2780 || loop
->dont_vectorize
))
2783 if (any_mask_load_store
&& !version_loop_for_if_conversion (loop
))
2786 /* Now all statements are if-convertible. Combine all the basic
2787 blocks into one huge basic block doing the if-conversion
2789 combine_blocks (loop
, any_mask_load_store
);
2791 /* Delete dead predicate computations and repair tree correspondent
2792 to bool pattern to delete multiple uses of predicates. */
2793 if (aggressive_if_conv
)
2795 ifcvt_local_dce (loop
->header
);
2796 ifcvt_repair_bool_pattern (loop
->header
);
2799 todo
|= TODO_cleanup_cfg
;
2800 if (any_mask_load_store
)
2802 mark_virtual_operands_for_renaming (cfun
);
2803 todo
|= TODO_update_ssa_only_virtuals
;
2811 for (i
= 0; i
< loop
->num_nodes
; i
++)
2812 free_bb_predicate (ifc_bbs
[i
]);
2817 free_dominance_info (CDI_POST_DOMINATORS
);
2822 /* Tree if-conversion pass management. */
2826 const pass_data pass_data_if_conversion
=
2828 GIMPLE_PASS
, /* type */
2830 OPTGROUP_NONE
, /* optinfo_flags */
2831 TV_NONE
, /* tv_id */
2832 ( PROP_cfg
| PROP_ssa
), /* properties_required */
2833 0, /* properties_provided */
2834 0, /* properties_destroyed */
2835 0, /* todo_flags_start */
2836 0, /* todo_flags_finish */
2839 class pass_if_conversion
: public gimple_opt_pass
2842 pass_if_conversion (gcc::context
*ctxt
)
2843 : gimple_opt_pass (pass_data_if_conversion
, ctxt
)
2846 /* opt_pass methods: */
2847 virtual bool gate (function
*);
2848 virtual unsigned int execute (function
*);
2850 }; // class pass_if_conversion
2853 pass_if_conversion::gate (function
*fun
)
2855 return (((flag_tree_loop_vectorize
|| fun
->has_force_vectorize_loops
)
2856 && flag_tree_loop_if_convert
!= 0)
2857 || flag_tree_loop_if_convert
== 1
2858 || flag_tree_loop_if_convert_stores
== 1);
2862 pass_if_conversion::execute (function
*fun
)
2867 if (number_of_loops (fun
) <= 1)
2870 FOR_EACH_LOOP (loop
, 0)
2871 if (flag_tree_loop_if_convert
== 1
2872 || flag_tree_loop_if_convert_stores
== 1
2873 || ((flag_tree_loop_vectorize
|| loop
->force_vectorize
)
2874 && !loop
->dont_vectorize
))
2875 todo
|= tree_if_conversion (loop
);
2880 FOR_EACH_BB_FN (bb
, fun
)
2881 gcc_assert (!bb
->aux
);
2890 make_pass_if_conversion (gcc::context
*ctxt
)
2892 return new pass_if_conversion (ctxt
);