Import GCC-8 to a new vendor branch
[dragonfly.git] / contrib / gcc-8.0 / gcc / tree-if-conv.c
blob 71dac4fb48a1c3f500affcf7bb66cc258c3d5b07
1 /* If-conversion for vectorizer.
2 Copyright (C) 2004-2018 Free Software Foundation, Inc.
3 Contributed by Devang Patel <dpatel@apple.com>
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 /* This pass implements a tree level if-conversion of loops. Its
22 initial goal is to help the vectorizer to vectorize loops with
23 conditions.
25 A short description of if-conversion:
27 o Decide if a loop is if-convertible or not.
28 o Walk all loop basic blocks in breadth first order (BFS order).
29 o Remove conditional statements (at the end of basic block)
30 and propagate condition into destination basic blocks'
31 predicate list.
32 o Replace modify expression with conditional modify expression
33 using current basic block's condition.
34 o Merge all basic blocks
35 o Replace phi nodes with conditional modify expr
36 o Merge all basic blocks into header
38 Sample transformation:
40 INPUT
41 -----
43 # i_23 = PHI <0(0), i_18(10)>;
44 <L0>:;
45 j_15 = A[i_23];
46 if (j_15 > 41) goto <L1>; else goto <L17>;
48 <L17>:;
49 goto <bb 3> (<L3>);
51 <L1>:;
53 # iftmp.2_4 = PHI <0(8), 42(2)>;
54 <L3>:;
55 A[i_23] = iftmp.2_4;
56 i_18 = i_23 + 1;
57 if (i_18 <= 15) goto <L19>; else goto <L18>;
59 <L19>:;
60 goto <bb 1> (<L0>);
62 <L18>:;
64 OUTPUT
65 ------
67 # i_23 = PHI <0(0), i_18(10)>;
68 <L0>:;
69 j_15 = A[i_23];
71 <L3>:;
72 iftmp.2_4 = j_15 > 41 ? 42 : 0;
73 A[i_23] = iftmp.2_4;
74 i_18 = i_23 + 1;
75 if (i_18 <= 15) goto <L19>; else goto <L18>;
77 <L19>:;
78 goto <bb 1> (<L0>);
80 <L18>:;
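
   For illustration (a sketch, with the array name and bounds taken from
   the constants in the example above): at the source level the loop
   corresponds roughly to

   | for (i = 0; i <= 15; i++)
   |   {
   |     int tmp;
   |     if (A[i] > 41)
   |       tmp = 42;
   |     else
   |       tmp = 0;
   |     A[i] = tmp;
   |   }

   and after if-conversion the loop body is the straight-line statement
   A[i] = A[i] > 41 ? 42 : 0;, which the vectorizer can handle.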
83 #include "config.h"
84 #include "system.h"
85 #include "coretypes.h"
86 #include "backend.h"
87 #include "rtl.h"
88 #include "tree.h"
89 #include "gimple.h"
90 #include "cfghooks.h"
91 #include "tree-pass.h"
92 #include "ssa.h"
93 #include "expmed.h"
94 #include "optabs-query.h"
95 #include "gimple-pretty-print.h"
96 #include "alias.h"
97 #include "fold-const.h"
98 #include "stor-layout.h"
99 #include "gimple-fold.h"
100 #include "gimplify.h"
101 #include "gimple-iterator.h"
102 #include "gimplify-me.h"
103 #include "tree-cfg.h"
104 #include "tree-into-ssa.h"
105 #include "tree-ssa.h"
106 #include "cfgloop.h"
107 #include "tree-data-ref.h"
108 #include "tree-scalar-evolution.h"
109 #include "tree-ssa-loop.h"
110 #include "tree-ssa-loop-niter.h"
111 #include "tree-ssa-loop-ivopts.h"
112 #include "tree-ssa-address.h"
113 #include "dbgcnt.h"
114 #include "tree-hash-traits.h"
115 #include "varasm.h"
116 #include "builtins.h"
117 #include "params.h"
118 #include "cfganal.h"
120 /* Only handle PHIs with no more than MAX_PHI_ARG_NUM arguments, unless
121 we are asked to by the simd pragma. */
122 #define MAX_PHI_ARG_NUM \
123 ((unsigned) PARAM_VALUE (PARAM_MAX_TREE_IF_CONVERSION_PHI_ARGS))
125 /* Indicates whether a new load/store that needs to be predicated was
126 introduced during if-conversion. */
127 static bool any_pred_load_store;
129 /* Indicates whether there are any complicated PHIs that need to be handled
130 in if-conversion. A complicated PHI has more than two arguments and can't
131 be degenerated to a two-argument PHI. See the comment before
132 phi_convertible_by_degenerating_args for more information. */
133 static bool any_complicated_phi;
135 /* Hash for struct innermost_loop_behavior. It is up to the user to
136 free the memory. */
138 struct innermost_loop_behavior_hash : nofree_ptr_hash <innermost_loop_behavior>
140 static inline hashval_t hash (const value_type &);
141 static inline bool equal (const value_type &,
142 const compare_type &);
145 inline hashval_t
146 innermost_loop_behavior_hash::hash (const value_type &e)
148 hashval_t hash;
150 hash = iterative_hash_expr (e->base_address, 0);
151 hash = iterative_hash_expr (e->offset, hash);
152 hash = iterative_hash_expr (e->init, hash);
153 return iterative_hash_expr (e->step, hash);
156 inline bool
157 innermost_loop_behavior_hash::equal (const value_type &e1,
158 const compare_type &e2)
160 if ((e1->base_address && !e2->base_address)
161 || (!e1->base_address && e2->base_address)
162 || (!e1->offset && e2->offset)
163 || (e1->offset && !e2->offset)
164 || (!e1->init && e2->init)
165 || (e1->init && !e2->init)
166 || (!e1->step && e2->step)
167 || (e1->step && !e2->step))
168 return false;
170 if (e1->base_address && e2->base_address
171 && !operand_equal_p (e1->base_address, e2->base_address, 0))
172 return false;
173 if (e1->offset && e2->offset
174 && !operand_equal_p (e1->offset, e2->offset, 0))
175 return false;
176 if (e1->init && e2->init
177 && !operand_equal_p (e1->init, e2->init, 0))
178 return false;
179 if (e1->step && e2->step
180 && !operand_equal_p (e1->step, e2->step, 0))
181 return false;
183 return true;
186 /* List of basic blocks in if-conversion-suitable order. */
187 static basic_block *ifc_bbs;
189 /* Hash table to store <DR's innermost loop behavior, DR> pairs. */
190 static hash_map<innermost_loop_behavior_hash,
191 data_reference_p> *innermost_DR_map;
193 /* Hash table to store <base reference, DR> pairs. */
194 static hash_map<tree_operand_hash, data_reference_p> *baseref_DR_map;
196 /* Structure used to predicate basic blocks. This is attached to the
197 ->aux field of the BBs in the loop to be if-converted. */
198 struct bb_predicate {
200 /* The condition under which this basic block is executed. */
201 tree predicate;
203 /* PREDICATE is gimplified, and the sequence of statements is
204 recorded here, in order to avoid the duplication of computations
205 that occur in previous conditions. See PR44483. */
206 gimple_seq predicate_gimplified_stmts;
209 /* Returns true when the basic block BB has a predicate. */
211 static inline bool
212 bb_has_predicate (basic_block bb)
214 return bb->aux != NULL;
217 /* Returns the gimplified predicate for basic block BB. */
219 static inline tree
220 bb_predicate (basic_block bb)
222 return ((struct bb_predicate *) bb->aux)->predicate;
225 /* Sets the gimplified predicate COND for basic block BB. */
227 static inline void
228 set_bb_predicate (basic_block bb, tree cond)
230 gcc_assert ((TREE_CODE (cond) == TRUTH_NOT_EXPR
231 && is_gimple_condexpr (TREE_OPERAND (cond, 0)))
232 || is_gimple_condexpr (cond));
233 ((struct bb_predicate *) bb->aux)->predicate = cond;
236 /* Returns the sequence of statements of the gimplification of the
237 predicate for basic block BB. */
239 static inline gimple_seq
240 bb_predicate_gimplified_stmts (basic_block bb)
242 return ((struct bb_predicate *) bb->aux)->predicate_gimplified_stmts;
245 /* Sets the sequence of statements STMTS of the gimplification of the
246 predicate for basic block BB. */
248 static inline void
249 set_bb_predicate_gimplified_stmts (basic_block bb, gimple_seq stmts)
251 ((struct bb_predicate *) bb->aux)->predicate_gimplified_stmts = stmts;
254 /* Adds the sequence of statements STMTS to the sequence of statements
255 of the predicate for basic block BB. */
257 static inline void
258 add_bb_predicate_gimplified_stmts (basic_block bb, gimple_seq stmts)
260 /* We might have updated some stmts in STMTS via force_gimple_operand
261 calling fold_stmt, which can produce multiple stmts. Delink immediate
262 uses so update_ssa after loop versioning doesn't get confused for
263 the not yet inserted predicates.
264 ??? This should go away once we reliably avoid updating stmts
265 not in any BB. */
266 for (gimple_stmt_iterator gsi = gsi_start (stmts);
267 !gsi_end_p (gsi); gsi_next (&gsi))
269 gimple *stmt = gsi_stmt (gsi);
270 delink_stmt_imm_use (stmt);
271 gimple_set_modified (stmt, true);
273 gimple_seq_add_seq_without_update
274 (&(((struct bb_predicate *) bb->aux)->predicate_gimplified_stmts), stmts);
277 /* Initializes to TRUE the predicate of basic block BB. */
279 static inline void
280 init_bb_predicate (basic_block bb)
282 bb->aux = XNEW (struct bb_predicate);
283 set_bb_predicate_gimplified_stmts (bb, NULL);
284 set_bb_predicate (bb, boolean_true_node);
287 /* Release the SSA_NAMEs associated with the predicate of basic block BB. */
289 static inline void
290 release_bb_predicate (basic_block bb)
292 gimple_seq stmts = bb_predicate_gimplified_stmts (bb);
293 if (stmts)
295 /* Ensure that these stmts haven't yet been added to a bb. */
296 if (flag_checking)
297 for (gimple_stmt_iterator i = gsi_start (stmts);
298 !gsi_end_p (i); gsi_next (&i))
299 gcc_assert (! gimple_bb (gsi_stmt (i)));
301 /* Discard them. */
302 gimple_seq_discard (stmts);
303 set_bb_predicate_gimplified_stmts (bb, NULL);
307 /* Free the predicate of basic block BB. */
309 static inline void
310 free_bb_predicate (basic_block bb)
312 if (!bb_has_predicate (bb))
313 return;
315 release_bb_predicate (bb);
316 free (bb->aux);
317 bb->aux = NULL;
320 /* Reinitialize predicate of BB with the true predicate. */
322 static inline void
323 reset_bb_predicate (basic_block bb)
325 if (!bb_has_predicate (bb))
326 init_bb_predicate (bb);
327 else
329 release_bb_predicate (bb);
330 set_bb_predicate (bb, boolean_true_node);
334 /* Returns a new SSA_NAME of type TYPE that is assigned the value of
335 the expression EXPR. Inserts the statement created for this
336 computation before GSI and leaves the iterator GSI at the same
337 statement. */
339 static tree
340 ifc_temp_var (tree type, tree expr, gimple_stmt_iterator *gsi)
342 tree new_name = make_temp_ssa_name (type, NULL, "_ifc_");
343 gimple *stmt = gimple_build_assign (new_name, expr);
344 gimple_set_vuse (stmt, gimple_vuse (gsi_stmt (*gsi)));
345 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
346 return new_name;
349 /* Return true when COND is a false predicate. */
351 static inline bool
352 is_false_predicate (tree cond)
354 return (cond != NULL_TREE
355 && (cond == boolean_false_node
356 || integer_zerop (cond)));
359 /* Return true when COND is a true predicate. */
361 static inline bool
362 is_true_predicate (tree cond)
364 return (cond == NULL_TREE
365 || cond == boolean_true_node
366 || integer_onep (cond));
369 /* Returns true when BB has a predicate that is not trivial: true or
370 NULL_TREE. */
372 static inline bool
373 is_predicated (basic_block bb)
375 return !is_true_predicate (bb_predicate (bb));
378 /* Parses the predicate COND and returns its comparison code and
379 operands OP0 and OP1. */
381 static enum tree_code
382 parse_predicate (tree cond, tree *op0, tree *op1)
384 gimple *s;
386 if (TREE_CODE (cond) == SSA_NAME
387 && is_gimple_assign (s = SSA_NAME_DEF_STMT (cond)))
389 if (TREE_CODE_CLASS (gimple_assign_rhs_code (s)) == tcc_comparison)
391 *op0 = gimple_assign_rhs1 (s);
392 *op1 = gimple_assign_rhs2 (s);
393 return gimple_assign_rhs_code (s);
396 else if (gimple_assign_rhs_code (s) == TRUTH_NOT_EXPR)
398 tree op = gimple_assign_rhs1 (s);
399 tree type = TREE_TYPE (op);
400 enum tree_code code = parse_predicate (op, op0, op1);
402 return code == ERROR_MARK ? ERROR_MARK
403 : invert_tree_comparison (code, HONOR_NANS (type));
406 return ERROR_MARK;
409 if (COMPARISON_CLASS_P (cond))
411 *op0 = TREE_OPERAND (cond, 0);
412 *op1 = TREE_OPERAND (cond, 1);
413 return TREE_CODE (cond);
416 return ERROR_MARK;
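
/* For illustration (a sketch, using assumed SSA names): if the predicate
   is the SSA name _1 defined by _1 = a_2 < b_3, parse_predicate returns
   LT_EXPR with *OP0 = a_2 and *OP1 = b_3.  If the predicate is an SSA
   name defined as the logical negation of such a comparison, the
   inverted comparison code is returned instead, honoring NaNs for
   floating point operands.  */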
419 /* Returns the fold of predicate C1 OR C2 at location LOC. */
421 static tree
422 fold_or_predicates (location_t loc, tree c1, tree c2)
424 tree op1a, op1b, op2a, op2b;
425 enum tree_code code1 = parse_predicate (c1, &op1a, &op1b);
426 enum tree_code code2 = parse_predicate (c2, &op2a, &op2b);
428 if (code1 != ERROR_MARK && code2 != ERROR_MARK)
430 tree t = maybe_fold_or_comparisons (code1, op1a, op1b,
431 code2, op2a, op2b);
432 if (t)
433 return t;
436 return fold_build2_loc (loc, TRUTH_OR_EXPR, boolean_type_node, c1, c2);
439 /* Returns either a COND_EXPR or the folded expression if the folded
440 expression is a MIN_EXPR, a MAX_EXPR, an ABS_EXPR,
441 a constant or a SSA_NAME. */
443 static tree
444 fold_build_cond_expr (tree type, tree cond, tree rhs, tree lhs)
446 tree rhs1, lhs1, cond_expr;
448 /* If COND is a comparison r != 0 and r has boolean type, convert COND
449 to an SSA_NAME so that it is accepted by the vect bool pattern. */
450 if (TREE_CODE (cond) == NE_EXPR)
452 tree op0 = TREE_OPERAND (cond, 0);
453 tree op1 = TREE_OPERAND (cond, 1);
454 if (TREE_CODE (op0) == SSA_NAME
455 && TREE_CODE (TREE_TYPE (op0)) == BOOLEAN_TYPE
456 && (integer_zerop (op1)))
457 cond = op0;
459 cond_expr = fold_ternary (COND_EXPR, type, cond, rhs, lhs);
461 if (cond_expr == NULL_TREE)
462 return build3 (COND_EXPR, type, cond, rhs, lhs);
464 STRIP_USELESS_TYPE_CONVERSION (cond_expr);
466 if (is_gimple_val (cond_expr))
467 return cond_expr;
469 if (TREE_CODE (cond_expr) == ABS_EXPR)
471 rhs1 = TREE_OPERAND (cond_expr, 1);
472 STRIP_USELESS_TYPE_CONVERSION (rhs1);
473 if (is_gimple_val (rhs1))
474 return build1 (ABS_EXPR, type, rhs1);
477 if (TREE_CODE (cond_expr) == MIN_EXPR
478 || TREE_CODE (cond_expr) == MAX_EXPR)
480 lhs1 = TREE_OPERAND (cond_expr, 0);
481 STRIP_USELESS_TYPE_CONVERSION (lhs1);
482 rhs1 = TREE_OPERAND (cond_expr, 1);
483 STRIP_USELESS_TYPE_CONVERSION (rhs1);
484 if (is_gimple_val (rhs1) && is_gimple_val (lhs1))
485 return build2 (TREE_CODE (cond_expr), type, lhs1, rhs1);
487 return build3 (COND_EXPR, type, cond, rhs, lhs);
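
/* For illustration (a sketch): a call such as
   fold_build_cond_expr (type, a < b, a, b) may fold to the MIN_EXPR
   <a, b> shape handled above, while a condition that does not fold is
   returned as a plain three-operand COND_EXPR.  */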
490 /* Add condition NC to the predicate list of basic block BB. LOOP is
491 the loop to be if-converted. Use predicate of cd-equivalent block
492 for join bb if it exists: we call basic blocks bb1 and bb2
493 cd-equivalent if they are executed under the same condition. */
495 static inline void
496 add_to_predicate_list (struct loop *loop, basic_block bb, tree nc)
498 tree bc, *tp;
499 basic_block dom_bb;
501 if (is_true_predicate (nc))
502 return;
504 /* If dominance tells us this basic block is always executed,
505 don't record any predicates for it. */
506 if (dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
507 return;
509 dom_bb = get_immediate_dominator (CDI_DOMINATORS, bb);
510 /* We use the notion of cd-equivalence to get a simpler predicate for
511 the join block, e.g. if the join block has 2 predecessors with predicates
512 p1 & p2 and p1 & !p2, we'd like to get p1 for it instead of
513 p1 & p2 | p1 & !p2. */
514 if (dom_bb != loop->header
515 && get_immediate_dominator (CDI_POST_DOMINATORS, dom_bb) == bb)
517 gcc_assert (flow_bb_inside_loop_p (loop, dom_bb));
518 bc = bb_predicate (dom_bb);
519 if (!is_true_predicate (bc))
520 set_bb_predicate (bb, bc);
521 else
522 gcc_assert (is_true_predicate (bb_predicate (bb)));
523 if (dump_file && (dump_flags & TDF_DETAILS))
524 fprintf (dump_file, "Use predicate of bb#%d for bb#%d\n",
525 dom_bb->index, bb->index);
526 return;
529 if (!is_predicated (bb))
530 bc = nc;
531 else
533 bc = bb_predicate (bb);
534 bc = fold_or_predicates (EXPR_LOCATION (bc), nc, bc);
535 if (is_true_predicate (bc))
537 reset_bb_predicate (bb);
538 return;
542 /* Allow a TRUTH_NOT_EXPR around the main predicate. */
543 if (TREE_CODE (bc) == TRUTH_NOT_EXPR)
544 tp = &TREE_OPERAND (bc, 0);
545 else
546 tp = &bc;
547 if (!is_gimple_condexpr (*tp))
549 gimple_seq stmts;
550 *tp = force_gimple_operand_1 (*tp, &stmts, is_gimple_condexpr, NULL_TREE);
551 add_bb_predicate_gimplified_stmts (bb, stmts);
553 set_bb_predicate (bb, bc);
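
/* For illustration (a sketch using assumed variable names): in a nest like

   | if (p1)
   |   {
   |     if (p2)
   |       x = 1;
   |     else
   |       x = 2;
   |     y = x;
   |   }

   the block computing y = x joins the two inner arms and is cd-equivalent
   to the block testing p2, so it receives the predicate p1 rather than
   the equivalent but larger p1 & p2 | p1 & !p2.  */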
556 /* Add the condition COND to the previous condition PREV_COND, and add
557 this to the predicate list of the destination of edge E. LOOP is
558 the loop to be if-converted. */
560 static void
561 add_to_dst_predicate_list (struct loop *loop, edge e,
562 tree prev_cond, tree cond)
564 if (!flow_bb_inside_loop_p (loop, e->dest))
565 return;
567 if (!is_true_predicate (prev_cond))
568 cond = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
569 prev_cond, cond);
571 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, e->dest))
572 add_to_predicate_list (loop, e->dest, cond);
575 /* Return true if one of the successor edges of BB exits LOOP. */
577 static bool
578 bb_with_exit_edge_p (struct loop *loop, basic_block bb)
580 edge e;
581 edge_iterator ei;
583 FOR_EACH_EDGE (e, ei, bb->succs)
584 if (loop_exit_edge_p (loop, e))
585 return true;
587 return false;
590 /* Given a PHI which has more than two arguments, this function checks if
591 it's if-convertible by degenerating its arguments. Specifically, this is
592 the case if the two conditions below are satisfied:
594 1) The number of PHI arguments with different values is 2, and one of the
595 arguments occurs only once.
596 2) The edge corresponding to the unique argument isn't a critical edge.
598 Such a PHI can be handled as if it had only two arguments. For example,
599 the PHI below:
601 res = PHI <A_1(e1), A_1(e2), A_2(e3)>;
603 can be transformed into:
605 res = (predicate of e3) ? A_2 : A_1;
607 Return TRUE if it is the case, FALSE otherwise. */
609 static bool
610 phi_convertible_by_degenerating_args (gphi *phi)
612 edge e;
613 tree arg, t1 = NULL, t2 = NULL;
614 unsigned int i, i1 = 0, i2 = 0, n1 = 0, n2 = 0;
615 unsigned int num_args = gimple_phi_num_args (phi);
617 gcc_assert (num_args > 2);
619 for (i = 0; i < num_args; i++)
621 arg = gimple_phi_arg_def (phi, i);
622 if (t1 == NULL || operand_equal_p (t1, arg, 0))
624 n1++;
625 i1 = i;
626 t1 = arg;
628 else if (t2 == NULL || operand_equal_p (t2, arg, 0))
630 n2++;
631 i2 = i;
632 t2 = arg;
634 else
635 return false;
638 if (n1 != 1 && n2 != 1)
639 return false;
641 /* Check if the edge corresponding to the unique arg is critical. */
642 e = gimple_phi_arg_edge (phi, (n1 == 1) ? i1 : i2);
643 if (EDGE_COUNT (e->src->succs) > 1)
644 return false;
646 return true;
649 /* Return true when PHI is if-convertible. PHI is part of loop LOOP
650 and it belongs to basic block BB. Note that at this point it is already
651 known that PHI is if-convertible. This function updates the global
652 variable ANY_COMPLICATED_PHI if PHI is complicated. */
654 static bool
655 if_convertible_phi_p (struct loop *loop, basic_block bb, gphi *phi)
657 if (dump_file && (dump_flags & TDF_DETAILS))
659 fprintf (dump_file, "-------------------------\n");
660 print_gimple_stmt (dump_file, phi, 0, TDF_SLIM);
663 if (bb != loop->header
664 && gimple_phi_num_args (phi) > 2
665 && !phi_convertible_by_degenerating_args (phi))
666 any_complicated_phi = true;
668 return true;
671 /* Records the status of a data reference. This struct is attached to
672 each DR->aux field. */
674 struct ifc_dr {
675 bool rw_unconditionally;
676 bool w_unconditionally;
677 bool written_at_least_once;
679 tree rw_predicate;
680 tree w_predicate;
681 tree base_w_predicate;
684 #define IFC_DR(DR) ((struct ifc_dr *) (DR)->aux)
685 #define DR_BASE_W_UNCONDITIONALLY(DR) (IFC_DR (DR)->written_at_least_once)
686 #define DR_RW_UNCONDITIONALLY(DR) (IFC_DR (DR)->rw_unconditionally)
687 #define DR_W_UNCONDITIONALLY(DR) (IFC_DR (DR)->w_unconditionally)
689 /* Iterates over DRs and stores <ref, DR> and <base ref, DR> pairs in
690 hash tables. While storing them in the hash tables, it checks if the
691 reference is unconditionally read or written and stores that as flag
692 information. For the base reference it checks if it is written at least
693 once unconditionally and stores that as flag information along with the DR.
694 In other words for every data reference A in STMT there exist other
695 accesses to a data reference with the same base with predicates that
696 add up (OR-up) to the true predicate: this ensures that the data
697 reference A is touched (read or written) on every iteration of the
698 if-converted loop. */
699 static void
700 hash_memrefs_baserefs_and_store_DRs_read_written_info (data_reference_p a)
703 data_reference_p *master_dr, *base_master_dr;
704 tree base_ref = DR_BASE_OBJECT (a);
705 innermost_loop_behavior *innermost = &DR_INNERMOST (a);
706 tree ca = bb_predicate (gimple_bb (DR_STMT (a)));
707 bool exist1, exist2;
709 master_dr = &innermost_DR_map->get_or_insert (innermost, &exist1);
710 if (!exist1)
711 *master_dr = a;
713 if (DR_IS_WRITE (a))
715 IFC_DR (*master_dr)->w_predicate
716 = fold_or_predicates (UNKNOWN_LOCATION, ca,
717 IFC_DR (*master_dr)->w_predicate);
718 if (is_true_predicate (IFC_DR (*master_dr)->w_predicate))
719 DR_W_UNCONDITIONALLY (*master_dr) = true;
721 IFC_DR (*master_dr)->rw_predicate
722 = fold_or_predicates (UNKNOWN_LOCATION, ca,
723 IFC_DR (*master_dr)->rw_predicate);
724 if (is_true_predicate (IFC_DR (*master_dr)->rw_predicate))
725 DR_RW_UNCONDITIONALLY (*master_dr) = true;
727 if (DR_IS_WRITE (a))
729 base_master_dr = &baseref_DR_map->get_or_insert (base_ref, &exist2);
730 if (!exist2)
731 *base_master_dr = a;
732 IFC_DR (*base_master_dr)->base_w_predicate
733 = fold_or_predicates (UNKNOWN_LOCATION, ca,
734 IFC_DR (*base_master_dr)->base_w_predicate);
735 if (is_true_predicate (IFC_DR (*base_master_dr)->base_w_predicate))
736 DR_BASE_W_UNCONDITIONALLY (*base_master_dr) = true;
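
/* For illustration (a sketch): with

   | if (cond)
   |   A[i] = x;
   | else
   |   A[i] = y;

   both stores share the same innermost loop behavior and map to the same
   master DR; their block predicates cond and !cond OR up to the true
   predicate, so the write is recorded as unconditional and the
   if-converted store to A[i] cannot fault where the original code did
   not.  */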
740 /* Return TRUE if we can prove that the index IDX of an array reference REF
741 is within the array bound. Return FALSE otherwise. */
743 static bool
744 idx_within_array_bound (tree ref, tree *idx, void *dta)
746 bool overflow;
747 widest_int niter, valid_niter, delta, wi_step;
748 tree ev, init, step;
749 tree low, high;
750 struct loop *loop = (struct loop*) dta;
752 /* Only support within-bound access for array references. */
753 if (TREE_CODE (ref) != ARRAY_REF)
754 return false;
756 /* For arrays at the end of the structure, we are not guaranteed that they
757 do not really extend over their declared size. However, for arrays of
758 size greater than one, this is unlikely to be intended. */
759 if (array_at_struct_end_p (ref))
760 return false;
762 ev = analyze_scalar_evolution (loop, *idx);
763 ev = instantiate_parameters (loop, ev);
764 init = initial_condition (ev);
765 step = evolution_part_in_loop_num (ev, loop->num);
767 if (!init || TREE_CODE (init) != INTEGER_CST
768 || (step && TREE_CODE (step) != INTEGER_CST))
769 return false;
771 low = array_ref_low_bound (ref);
772 high = array_ref_up_bound (ref);
774 /* The case of nonconstant bounds could be handled, but it would be
775 complicated. */
776 if (TREE_CODE (low) != INTEGER_CST
777 || !high || TREE_CODE (high) != INTEGER_CST)
778 return false;
780 /* Check if the initial idx is within bound. */
781 if (wi::to_widest (init) < wi::to_widest (low)
782 || wi::to_widest (init) > wi::to_widest (high))
783 return false;
785 /* The idx is always within bound. */
786 if (!step || integer_zerop (step))
787 return true;
789 if (!max_loop_iterations (loop, &niter))
790 return false;
792 if (wi::to_widest (step) < 0)
794 delta = wi::to_widest (init) - wi::to_widest (low);
795 wi_step = -wi::to_widest (step);
797 else
799 delta = wi::to_widest (high) - wi::to_widest (init);
800 wi_step = wi::to_widest (step);
803 valid_niter = wi::div_floor (delta, wi_step, SIGNED, &overflow);
804 /* The iteration space of idx is within array bound. */
805 if (!overflow && niter <= valid_niter)
806 return true;
808 return false;
811 /* Return TRUE if REF is a within-bound array reference. */
813 static bool
814 ref_within_array_bound (gimple *stmt, tree ref)
816 struct loop *loop = loop_containing_stmt (stmt);
818 gcc_assert (loop != NULL);
819 return for_each_index (&ref, idx_within_array_bound, loop);
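
/* For illustration (a sketch with assumed declarations): given

   | int a[16];
   | for (i = 0; i < 16; i++)
   |   if (c[i])
   |     a[i] = 0;

   the index i has a constant initial value and step, the loop iterates at
   most 16 times, and the whole iteration space of i stays inside the
   declared bound of a, so the conditional store may be treated as if it
   were unconditional even though no unconditional access to a[i] exists.  */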
823 /* Given a memory reference expression T, return TRUE if base object
824 it refers to is writable. The base object of a memory reference
825 is the main object being referenced, which is returned by function
826 get_base_address. */
828 static bool
829 base_object_writable (tree ref)
831 tree base_tree = get_base_address (ref);
833 return (base_tree
834 && DECL_P (base_tree)
835 && decl_binds_to_current_def_p (base_tree)
836 && !TREE_READONLY (base_tree));
839 /* Return true when the memory references of STMT won't trap in the
840 if-converted code. There are two things that we have to check for:
842 - writes to memory occur to writable memory: if-conversion of
843 memory writes transforms the conditional memory writes into
844 unconditional writes, i.e. "if (cond) A[i] = foo" is transformed
845 into "A[i] = cond ? foo : A[i]", and as the write to memory may not
846 be executed at all in the original code, it may be a readonly
847 memory. To check that A is not const-qualified, we check that
848 there exists at least an unconditional write to A in the current
849 function.
851 - reads or writes to memory are valid memory accesses for every
852 iteration. To check that the memory accesses are correctly formed
853 and that we are allowed to read and write in these locations, we
854 check that the memory accesses to be if-converted occur at every
855 iteration unconditionally.
857 Returns true for the memory reference in STMT if the same memory
858 reference is read or written unconditionally at least once and the base
859 memory reference is written unconditionally once. This checks that the
860 reference will not fault on write. Also returns true if the memory
861 reference is unconditionally read once and we are conditionally writing
862 to memory that is defined as read-and-write and is bound to the
863 definition we are seeing. */
864 static bool
865 ifcvt_memrefs_wont_trap (gimple *stmt, vec<data_reference_p> drs)
867 /* If DR didn't see a reference here we can't use it to tell
868 whether the ref traps or not. */
869 if (gimple_uid (stmt) == 0)
870 return false;
872 data_reference_p *master_dr, *base_master_dr;
873 data_reference_p a = drs[gimple_uid (stmt) - 1];
875 tree base = DR_BASE_OBJECT (a);
876 innermost_loop_behavior *innermost = &DR_INNERMOST (a);
878 gcc_assert (DR_STMT (a) == stmt);
879 gcc_assert (DR_BASE_ADDRESS (a) || DR_OFFSET (a)
880 || DR_INIT (a) || DR_STEP (a));
882 master_dr = innermost_DR_map->get (innermost);
883 gcc_assert (master_dr != NULL);
885 base_master_dr = baseref_DR_map->get (base);
887 /* If A is unconditionally written to, it doesn't trap. */
888 if (DR_W_UNCONDITIONALLY (*master_dr))
889 return true;
891 /* If A is unconditionally accessed then ...
893 Even if A is a conditional access, we can treat it as an unconditional
894 one if it's an array reference and all its indices are within the array
895 bound. */
896 if (DR_RW_UNCONDITIONALLY (*master_dr)
897 || ref_within_array_bound (stmt, DR_REF (a)))
899 /* an unconditional read won't trap. */
900 if (DR_IS_READ (a))
901 return true;
903 /* An unconditional write won't trap if the base is written
904 to unconditionally. */
905 if (base_master_dr
906 && DR_BASE_W_UNCONDITIONALLY (*base_master_dr))
907 return PARAM_VALUE (PARAM_ALLOW_STORE_DATA_RACES);
908 /* or the base is known to be not readonly. */
909 else if (base_object_writable (DR_REF (a)))
910 return PARAM_VALUE (PARAM_ALLOW_STORE_DATA_RACES);
913 return false;
916 /* Return true if STMT could be converted into a masked load or store
917 (conditional load or store based on a mask computed from bb predicate). */
919 static bool
920 ifcvt_can_use_mask_load_store (gimple *stmt)
922 tree lhs, ref;
923 machine_mode mode;
924 basic_block bb = gimple_bb (stmt);
925 bool is_load;
927 if (!(flag_tree_loop_vectorize || bb->loop_father->force_vectorize)
928 || bb->loop_father->dont_vectorize
929 || !gimple_assign_single_p (stmt)
930 || gimple_has_volatile_ops (stmt))
931 return false;
933 /* Check whether this is a load or store. */
934 lhs = gimple_assign_lhs (stmt);
935 if (gimple_store_p (stmt))
937 if (!is_gimple_val (gimple_assign_rhs1 (stmt)))
938 return false;
939 is_load = false;
940 ref = lhs;
942 else if (gimple_assign_load_p (stmt))
944 is_load = true;
945 ref = gimple_assign_rhs1 (stmt);
947 else
948 return false;
950 if (may_be_nonaddressable_p (ref))
951 return false;
953 /* Mask should be integer mode of the same size as the load/store
954 mode. */
955 mode = TYPE_MODE (TREE_TYPE (lhs));
956 if (!int_mode_for_mode (mode).exists () || VECTOR_MODE_P (mode))
957 return false;
959 if (can_vec_mask_load_store_p (mode, VOIDmode, is_load))
960 return true;
962 return false;
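
/* For illustration (a sketch; the actual rewriting is done later by
   predicate_mem_writes): a conditional store such as

   | if (cond)
   |   A[i] = x;

   that passes the check above is flagged as a predicated access and can
   be emitted through the target's masked load/store support, instead of
   the unconditional read-modify-write form A[i] = cond ? x : A[i], so no
   speculative access or store data race is introduced for the iterations
   where COND is false.  */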
965 /* Return true when STMT is if-convertible.
967 A GIMPLE_ASSIGN statement is not if-convertible if:
968 - it is not movable,
969 - it could trap,
970 - its LHS is not a var decl. */
972 static bool
973 if_convertible_gimple_assign_stmt_p (gimple *stmt,
974 vec<data_reference_p> refs)
976 tree lhs = gimple_assign_lhs (stmt);
978 if (dump_file && (dump_flags & TDF_DETAILS))
980 fprintf (dump_file, "-------------------------\n");
981 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
984 if (!is_gimple_reg_type (TREE_TYPE (lhs)))
985 return false;
987 /* Some of these constraints might be too conservative. */
988 if (stmt_ends_bb_p (stmt)
989 || gimple_has_volatile_ops (stmt)
990 || (TREE_CODE (lhs) == SSA_NAME
991 && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
992 || gimple_has_side_effects (stmt))
994 if (dump_file && (dump_flags & TDF_DETAILS))
995 fprintf (dump_file, "stmt not suitable for ifcvt\n");
996 return false;
999 /* tree-into-ssa.c uses GF_PLF_1, so avoid it, because
1000 in between if_convertible_loop_p and combine_blocks
1001 we can perform loop versioning. */
1002 gimple_set_plf (stmt, GF_PLF_2, false);
1004 if ((! gimple_vuse (stmt)
1005 || gimple_could_trap_p_1 (stmt, false, false)
1006 || ! ifcvt_memrefs_wont_trap (stmt, refs))
1007 && gimple_could_trap_p (stmt))
1009 if (ifcvt_can_use_mask_load_store (stmt))
1011 gimple_set_plf (stmt, GF_PLF_2, true);
1012 any_pred_load_store = true;
1013 return true;
1015 if (dump_file && (dump_flags & TDF_DETAILS))
1016 fprintf (dump_file, "tree could trap...\n");
1017 return false;
1020 /* When if-converting stores force versioning, likewise if we
1021 ended up generating store data races. */
1022 if (gimple_vdef (stmt))
1023 any_pred_load_store = true;
1025 return true;
1028 /* Return true when STMT is if-convertible.
1030 A statement is if-convertible if:
1031 - it is an if-convertible GIMPLE_ASSIGN,
1032 - it is a GIMPLE_LABEL or a GIMPLE_COND,
1033 - it is a builtin call. */
1035 static bool
1036 if_convertible_stmt_p (gimple *stmt, vec<data_reference_p> refs)
1038 switch (gimple_code (stmt))
1040 case GIMPLE_LABEL:
1041 case GIMPLE_DEBUG:
1042 case GIMPLE_COND:
1043 return true;
1045 case GIMPLE_ASSIGN:
1046 return if_convertible_gimple_assign_stmt_p (stmt, refs);
1048 case GIMPLE_CALL:
1050 tree fndecl = gimple_call_fndecl (stmt);
1051 if (fndecl)
1053 int flags = gimple_call_flags (stmt);
1054 if ((flags & ECF_CONST)
1055 && !(flags & ECF_LOOPING_CONST_OR_PURE)
1056 /* We can only vectorize some builtins at the moment,
1057 so restrict if-conversion to those. */
1058 && DECL_BUILT_IN (fndecl))
1059 return true;
1061 return false;
1064 default:
1065 /* Don't know what to do with 'em so don't do anything. */
1066 if (dump_file && (dump_flags & TDF_DETAILS))
1068 fprintf (dump_file, "don't know what to do\n");
1069 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
1071 return false;
1074 return true;
1077 /* Assumes that BB has more than 1 predecessor.
1078 Returns false if at least one predecessor edge is not a critical edge,
1079 and true otherwise. */
1081 static inline bool
1082 all_preds_critical_p (basic_block bb)
1084 edge e;
1085 edge_iterator ei;
1087 FOR_EACH_EDGE (e, ei, bb->preds)
1088 if (EDGE_COUNT (e->src->succs) == 1)
1089 return false;
1090 return true;
1093 /* Returns true if at least one predecessor of BB is on a critical edge. */
1094 static inline bool
1095 has_pred_critical_p (basic_block bb)
1097 edge e;
1098 edge_iterator ei;
1100 FOR_EACH_EDGE (e, ei, bb->preds)
1101 if (EDGE_COUNT (e->src->succs) > 1)
1102 return true;
1103 return false;
1106 /* Return true when BB is if-convertible. This routine does not check
1107 basic block's statements and phis.
1109 A basic block is not if-convertible if:
1110 - it is non-empty and it is after the exit block (in BFS order),
1111 - it is after the exit block but before the latch,
1112 - its edges are not normal.
1114 EXIT_BB is the basic block containing the exit of the LOOP. BB is
1115 inside LOOP. */
1117 static bool
1118 if_convertible_bb_p (struct loop *loop, basic_block bb, basic_block exit_bb)
1120 edge e;
1121 edge_iterator ei;
1123 if (dump_file && (dump_flags & TDF_DETAILS))
1124 fprintf (dump_file, "----------[%d]-------------\n", bb->index);
1126 if (EDGE_COUNT (bb->succs) > 2)
1127 return false;
1129 if (exit_bb)
1131 if (bb != loop->latch)
1133 if (dump_file && (dump_flags & TDF_DETAILS))
1134 fprintf (dump_file, "basic block after exit bb but before latch\n");
1135 return false;
1137 else if (!empty_block_p (bb))
1139 if (dump_file && (dump_flags & TDF_DETAILS))
1140 fprintf (dump_file, "non empty basic block after exit bb\n");
1141 return false;
1143 else if (bb == loop->latch
1144 && bb != exit_bb
1145 && !dominated_by_p (CDI_DOMINATORS, bb, exit_bb))
1147 if (dump_file && (dump_flags & TDF_DETAILS))
1148 fprintf (dump_file, "latch is not dominated by exit_block\n");
1149 return false;
1153 /* Be less adventurous and handle only normal edges. */
1154 FOR_EACH_EDGE (e, ei, bb->succs)
1155 if (e->flags & (EDGE_EH | EDGE_ABNORMAL | EDGE_IRREDUCIBLE_LOOP))
1157 if (dump_file && (dump_flags & TDF_DETAILS))
1158 fprintf (dump_file, "Difficult to handle edges\n");
1159 return false;
1162 return true;
1165 /* Return true when all predecessor blocks of BB are visited. The
1166 VISITED bitmap keeps track of the visited blocks. */
1168 static bool
1169 pred_blocks_visited_p (basic_block bb, bitmap *visited)
1171 edge e;
1172 edge_iterator ei;
1173 FOR_EACH_EDGE (e, ei, bb->preds)
1174 if (!bitmap_bit_p (*visited, e->src->index))
1175 return false;
1177 return true;
1180 /* Get the body of LOOP in an order suitable for if-conversion. It is the
1181 caller's responsibility to deallocate the basic block list.
1182 The if-conversion-suitable order is breadth-first (BFS) order
1183 with an additional constraint: select a block only if all its
1184 predecessors have already been selected.
1186 static basic_block *
1187 get_loop_body_in_if_conv_order (const struct loop *loop)
1189 basic_block *blocks, *blocks_in_bfs_order;
1190 basic_block bb;
1191 bitmap visited;
1192 unsigned int index = 0;
1193 unsigned int visited_count = 0;
1195 gcc_assert (loop->num_nodes);
1196 gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun));
1198 blocks = XCNEWVEC (basic_block, loop->num_nodes);
1199 visited = BITMAP_ALLOC (NULL);
1201 blocks_in_bfs_order = get_loop_body_in_bfs_order (loop);
1203 index = 0;
1204 while (index < loop->num_nodes)
1206 bb = blocks_in_bfs_order [index];
1208 if (bb->flags & BB_IRREDUCIBLE_LOOP)
1210 free (blocks_in_bfs_order);
1211 BITMAP_FREE (visited);
1212 free (blocks);
1213 return NULL;
1216 if (!bitmap_bit_p (visited, bb->index))
1218 if (pred_blocks_visited_p (bb, &visited)
1219 || bb == loop->header)
1221 /* This block is now visited. */
1222 bitmap_set_bit (visited, bb->index);
1223 blocks[visited_count++] = bb;
1227 index++;
1229 if (index == loop->num_nodes
1230 && visited_count != loop->num_nodes)
1231 /* Not done yet. */
1232 index = 0;
1234 free (blocks_in_bfs_order);
1235 BITMAP_FREE (visited);
1236 return blocks;
1239 /* Returns true when the analysis of the predicates for all the basic
1240 blocks in LOOP succeeded.
1242 predicate_bbs first allocates the predicates of the basic blocks.
1243 These fields are then initialized with the tree expressions
1244 representing the predicates under which a basic block is executed
1245 in the LOOP. As the loop->header is executed at each iteration, it
1246 has the "true" predicate. Other statements executed under a
1247 condition are predicated with that condition, for example
1249 | if (x)
1250 | S1;
1251 | else
1252 | S2;
1254 S1 will be predicated with "x", and
1255 S2 will be predicated with "!x". */
1257 static void
1258 predicate_bbs (loop_p loop)
1260 unsigned int i;
1262 for (i = 0; i < loop->num_nodes; i++)
1263 init_bb_predicate (ifc_bbs[i]);
1265 for (i = 0; i < loop->num_nodes; i++)
1267 basic_block bb = ifc_bbs[i];
1268 tree cond;
1269 gimple *stmt;
1271 /* The loop latch and loop exit block are always executed and
1272 have no extra conditions to be processed: skip them. */
1273 if (bb == loop->latch
1274 || bb_with_exit_edge_p (loop, bb))
1276 reset_bb_predicate (bb);
1277 continue;
1280 cond = bb_predicate (bb);
1281 stmt = last_stmt (bb);
1282 if (stmt && gimple_code (stmt) == GIMPLE_COND)
1284 tree c2;
1285 edge true_edge, false_edge;
1286 location_t loc = gimple_location (stmt);
1287 tree c = build2_loc (loc, gimple_cond_code (stmt),
1288 boolean_type_node,
1289 gimple_cond_lhs (stmt),
1290 gimple_cond_rhs (stmt));
1292 /* Add new condition into destination's predicate list. */
1293 extract_true_false_edges_from_block (gimple_bb (stmt),
1294 &true_edge, &false_edge);
1296 /* If C is true, then TRUE_EDGE is taken. */
1297 add_to_dst_predicate_list (loop, true_edge, unshare_expr (cond),
1298 unshare_expr (c));
1300 /* If C is false, then FALSE_EDGE is taken. */
1301 c2 = build1_loc (loc, TRUTH_NOT_EXPR, boolean_type_node,
1302 unshare_expr (c));
1303 add_to_dst_predicate_list (loop, false_edge,
1304 unshare_expr (cond), c2);
1306 cond = NULL_TREE;
1309 /* If current bb has only one successor, then consider it as an
1310 unconditional goto. */
1311 if (single_succ_p (bb))
1313 basic_block bb_n = single_succ (bb);
1315 /* The successor bb inherits the predicate of its
1316 predecessor. If there is no predicate in the predecessor
1317 bb, then consider the successor bb as always executed. */
1318 if (cond == NULL_TREE)
1319 cond = boolean_true_node;
1321 add_to_predicate_list (loop, bb_n, cond);
1325 /* The loop header is always executed. */
1326 reset_bb_predicate (loop->header);
1327 gcc_assert (bb_predicate_gimplified_stmts (loop->header) == NULL
1328 && bb_predicate_gimplified_stmts (loop->latch) == NULL);
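
/* For illustration (a sketch): for the sample loop at the top of this
   file, the block reached when j_15 > 41 is predicated with j_15 > 41,
   the block reached through the else edge is predicated with
   !(j_15 > 41), and the join block performing the store
   A[i_23] = iftmp.2_4 is always executed, so it keeps the true
   predicate.  */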
1331 /* Build region by adding loop pre-header and post-header blocks. */
1333 static vec<basic_block>
1334 build_region (struct loop *loop)
1336 vec<basic_block> region = vNULL;
1337 basic_block exit_bb = NULL;
1339 gcc_assert (ifc_bbs);
1340 /* The first element is loop pre-header. */
1341 region.safe_push (loop_preheader_edge (loop)->src);
1343 for (unsigned int i = 0; i < loop->num_nodes; i++)
1345 basic_block bb = ifc_bbs[i];
1346 region.safe_push (bb);
1347 /* Find loop postheader. */
1348 edge e;
1349 edge_iterator ei;
1350 FOR_EACH_EDGE (e, ei, bb->succs)
1351 if (loop_exit_edge_p (loop, e))
1353 exit_bb = e->dest;
1354 break;
1357 /* The last element is loop post-header. */
1358 gcc_assert (exit_bb);
1359 region.safe_push (exit_bb);
1360 return region;
1363 /* Return true when LOOP is if-convertible. This is a helper function
1364 for if_convertible_loop_p. REFS and DDRS are initialized and freed
1365 in if_convertible_loop_p. */
1367 static bool
1368 if_convertible_loop_p_1 (struct loop *loop, vec<data_reference_p> *refs)
1370 unsigned int i;
1371 basic_block exit_bb = NULL;
1372 vec<basic_block> region;
1374 if (find_data_references_in_loop (loop, refs) == chrec_dont_know)
1375 return false;
1377 calculate_dominance_info (CDI_DOMINATORS);
1379 /* Allow statements that can be handled during if-conversion. */
1380 ifc_bbs = get_loop_body_in_if_conv_order (loop);
1381 if (!ifc_bbs)
1383 if (dump_file && (dump_flags & TDF_DETAILS))
1384 fprintf (dump_file, "Irreducible loop\n");
1385 return false;
1388 for (i = 0; i < loop->num_nodes; i++)
1390 basic_block bb = ifc_bbs[i];
1392 if (!if_convertible_bb_p (loop, bb, exit_bb))
1393 return false;
1395 if (bb_with_exit_edge_p (loop, bb))
1396 exit_bb = bb;
1399 for (i = 0; i < loop->num_nodes; i++)
1401 basic_block bb = ifc_bbs[i];
1402 gimple_stmt_iterator gsi;
1404 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1405 switch (gimple_code (gsi_stmt (gsi)))
1407 case GIMPLE_LABEL:
1408 case GIMPLE_ASSIGN:
1409 case GIMPLE_CALL:
1410 case GIMPLE_DEBUG:
1411 case GIMPLE_COND:
1412 gimple_set_uid (gsi_stmt (gsi), 0);
1413 break;
1414 default:
1415 return false;
1419 data_reference_p dr;
1421 innermost_DR_map
1422 = new hash_map<innermost_loop_behavior_hash, data_reference_p>;
1423 baseref_DR_map = new hash_map<tree_operand_hash, data_reference_p>;
1425 /* Compute post-dominator tree locally. */
1426 region = build_region (loop);
1427 calculate_dominance_info_for_region (CDI_POST_DOMINATORS, region);
1429 predicate_bbs (loop);
1431 /* Free post-dominator tree since it is not used after predication. */
1432 free_dominance_info_for_region (cfun, CDI_POST_DOMINATORS, region);
1433 region.release ();
1435 for (i = 0; refs->iterate (i, &dr); i++)
1437 tree ref = DR_REF (dr);
1439 dr->aux = XNEW (struct ifc_dr);
1440 DR_BASE_W_UNCONDITIONALLY (dr) = false;
1441 DR_RW_UNCONDITIONALLY (dr) = false;
1442 DR_W_UNCONDITIONALLY (dr) = false;
1443 IFC_DR (dr)->rw_predicate = boolean_false_node;
1444 IFC_DR (dr)->w_predicate = boolean_false_node;
1445 IFC_DR (dr)->base_w_predicate = boolean_false_node;
1446 if (gimple_uid (DR_STMT (dr)) == 0)
1447 gimple_set_uid (DR_STMT (dr), i + 1);
1449 /* If DR doesn't have innermost loop behavior or it's a compound
1450 memory reference, we synthesize its innermost loop behavior
1451 for hashing. */
1452 if (TREE_CODE (ref) == COMPONENT_REF
1453 || TREE_CODE (ref) == IMAGPART_EXPR
1454 || TREE_CODE (ref) == REALPART_EXPR
1455 || !(DR_BASE_ADDRESS (dr) || DR_OFFSET (dr)
1456 || DR_INIT (dr) || DR_STEP (dr)))
1458 while (TREE_CODE (ref) == COMPONENT_REF
1459 || TREE_CODE (ref) == IMAGPART_EXPR
1460 || TREE_CODE (ref) == REALPART_EXPR)
1461 ref = TREE_OPERAND (ref, 0);
1463 memset (&DR_INNERMOST (dr), 0, sizeof (DR_INNERMOST (dr)));
1464 DR_BASE_ADDRESS (dr) = ref;
1466 hash_memrefs_baserefs_and_store_DRs_read_written_info (dr);
1469 for (i = 0; i < loop->num_nodes; i++)
1471 basic_block bb = ifc_bbs[i];
1472 gimple_stmt_iterator itr;
1474 /* Check the if-convertibility of statements in predicated BBs. */
1475 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
1476 for (itr = gsi_start_bb (bb); !gsi_end_p (itr); gsi_next (&itr))
1477 if (!if_convertible_stmt_p (gsi_stmt (itr), *refs))
1478 return false;
1481 /* Checking PHIs needs to be done after stmts, as whether there are any
1482 masked loads or stores affects the tests. */
1483 for (i = 0; i < loop->num_nodes; i++)
1485 basic_block bb = ifc_bbs[i];
1486 gphi_iterator itr;
1488 for (itr = gsi_start_phis (bb); !gsi_end_p (itr); gsi_next (&itr))
1489 if (!if_convertible_phi_p (loop, bb, itr.phi ()))
1490 return false;
1493 if (dump_file)
1494 fprintf (dump_file, "Applying if-conversion\n");
1496 return true;
1499 /* Return true when LOOP is if-convertible.
1500 LOOP is if-convertible if:
1501 - it is innermost,
1502 - it has two or more basic blocks,
1503 - it has only one exit,
1504 - loop header is not the exit edge,
1505 - if its basic blocks and phi nodes are if convertible. */
1507 static bool
1508 if_convertible_loop_p (struct loop *loop)
1510 edge e;
1511 edge_iterator ei;
1512 bool res = false;
1513 vec<data_reference_p> refs;
1515 /* Handle only innermost loop. */
1516 if (!loop || loop->inner)
1518 if (dump_file && (dump_flags & TDF_DETAILS))
1519 fprintf (dump_file, "not innermost loop\n");
1520 return false;
1523 /* If only one block, no need for if-conversion. */
1524 if (loop->num_nodes <= 2)
1526 if (dump_file && (dump_flags & TDF_DETAILS))
1527 fprintf (dump_file, "less than 2 basic blocks\n");
1528 return false;
1531 /* More than one loop exit is too much to handle. */
1532 if (!single_exit (loop))
1534 if (dump_file && (dump_flags & TDF_DETAILS))
1535 fprintf (dump_file, "multiple exits\n");
1536 return false;
1539 /* If one of the loop header's edges is an exit edge then do not
1540 apply if-conversion. */
1541 FOR_EACH_EDGE (e, ei, loop->header->succs)
1542 if (loop_exit_edge_p (loop, e))
1543 return false;
1545 refs.create (5);
1546 res = if_convertible_loop_p_1 (loop, &refs);
1548 data_reference_p dr;
1549 unsigned int i;
1550 for (i = 0; refs.iterate (i, &dr); i++)
1551 free (dr->aux);
1553 free_data_refs (refs);
1555 delete innermost_DR_map;
1556 innermost_DR_map = NULL;
1558 delete baseref_DR_map;
1559 baseref_DR_map = NULL;
1561 return res;
1564 /* Returns true if the def-stmt for a phi argument is a simple increment or
1565 decrement that is in a predicated basic block.
1566 In fact, we search for the following PHI pattern:
1567 loop-header:
1568 reduc_1 = PHI <..., reduc_2>
1570 if (...)
1571 reduc_3 = ...
1572 reduc_2 = PHI <reduc_1, reduc_3>
1574 ARG_0 and ARG_1 are the corresponding PHI arguments.
1575 REDUC, OP0 and OP1 contain reduction stmt and its operands.
1576 EXTENDED is true if PHI has > 2 arguments. */
1578 static bool
1579 is_cond_scalar_reduction (gimple *phi, gimple **reduc, tree arg_0, tree arg_1,
1580 tree *op0, tree *op1, bool extended)
1582 tree lhs, r_op1, r_op2;
1583 gimple *stmt;
1584 gimple *header_phi = NULL;
1585 enum tree_code reduction_op;
1586 basic_block bb = gimple_bb (phi);
1587 struct loop *loop = bb->loop_father;
1588 edge latch_e = loop_latch_edge (loop);
1589 imm_use_iterator imm_iter;
1590 use_operand_p use_p;
1591 edge e;
1592 edge_iterator ei;
1593 bool result = false;
1594 if (TREE_CODE (arg_0) != SSA_NAME || TREE_CODE (arg_1) != SSA_NAME)
1595 return false;
1597 if (!extended && gimple_code (SSA_NAME_DEF_STMT (arg_0)) == GIMPLE_PHI)
1599 lhs = arg_1;
1600 header_phi = SSA_NAME_DEF_STMT (arg_0);
1601 stmt = SSA_NAME_DEF_STMT (arg_1);
1603 else if (gimple_code (SSA_NAME_DEF_STMT (arg_1)) == GIMPLE_PHI)
1605 lhs = arg_0;
1606 header_phi = SSA_NAME_DEF_STMT (arg_1);
1607 stmt = SSA_NAME_DEF_STMT (arg_0);
1609 else
1610 return false;
1611 if (gimple_bb (header_phi) != loop->header)
1612 return false;
1614 if (PHI_ARG_DEF_FROM_EDGE (header_phi, latch_e) != PHI_RESULT (phi))
1615 return false;
1617 if (gimple_code (stmt) != GIMPLE_ASSIGN
1618 || gimple_has_volatile_ops (stmt))
1619 return false;
1621 if (!flow_bb_inside_loop_p (loop, gimple_bb (stmt)))
1622 return false;
1624 if (!is_predicated (gimple_bb (stmt)))
1625 return false;
1627 /* Check that stmt-block is predecessor of phi-block. */
1628 FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
1629 if (e->dest == bb)
1631 result = true;
1632 break;
1634 if (!result)
1635 return false;
1637 if (!has_single_use (lhs))
1638 return false;
1640 reduction_op = gimple_assign_rhs_code (stmt);
1641 if (reduction_op != PLUS_EXPR && reduction_op != MINUS_EXPR)
1642 return false;
1643 r_op1 = gimple_assign_rhs1 (stmt);
1644 r_op2 = gimple_assign_rhs2 (stmt);
1646 /* Make R_OP1 hold the reduction variable. */
1647 if (r_op2 == PHI_RESULT (header_phi)
1648 && reduction_op == PLUS_EXPR)
1649 std::swap (r_op1, r_op2);
1650 else if (r_op1 != PHI_RESULT (header_phi))
1651 return false;
1653 /* Check that R_OP1 is used in reduction stmt or in PHI only. */
1654 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, r_op1)
1656 gimple *use_stmt = USE_STMT (use_p);
1657 if (is_gimple_debug (use_stmt))
1658 continue;
1659 if (use_stmt == stmt)
1660 continue;
1661 if (gimple_code (use_stmt) != GIMPLE_PHI)
1662 return false;
1665 *op0 = r_op1; *op1 = r_op2;
1666 *reduc = stmt;
1667 return true;
1670 /* Converts conditional scalar reduction into unconditional form, e.g.
1671 bb_4
1672 if (_5 != 0) goto bb_5 else goto bb_6
1673 end_bb_4
1674 bb_5
1675 res_6 = res_13 + 1;
1676 end_bb_5
1677 bb_6
1678 # res_2 = PHI <res_13(4), res_6(5)>
1679 end_bb_6
1681 will be converted into sequence
1682 _ifc__1 = _5 != 0 ? 1 : 0;
1683 res_2 = res_13 + _ifc__1;
1684 Argument SWAP tells whether the arguments of the conditional expression
1685 should be swapped.
1686 Returns the rhs of the resulting PHI assignment. */
1688 static tree
1689 convert_scalar_cond_reduction (gimple *reduc, gimple_stmt_iterator *gsi,
1690 tree cond, tree op0, tree op1, bool swap)
1692 gimple_stmt_iterator stmt_it;
1693 gimple *new_assign;
1694 tree rhs;
1695 tree rhs1 = gimple_assign_rhs1 (reduc);
1696 tree tmp = make_temp_ssa_name (TREE_TYPE (rhs1), NULL, "_ifc_");
1697 tree c;
1698 tree zero = build_zero_cst (TREE_TYPE (rhs1));
1700 if (dump_file && (dump_flags & TDF_DETAILS))
1702 fprintf (dump_file, "Found cond scalar reduction.\n");
1703 print_gimple_stmt (dump_file, reduc, 0, TDF_SLIM);
1706 /* Build cond expression using COND and constant operand
1707 of reduction rhs. */
1708 c = fold_build_cond_expr (TREE_TYPE (rhs1),
1709 unshare_expr (cond),
1710 swap ? zero : op1,
1711 swap ? op1 : zero);
1713 /* Create assignment stmt and insert it at GSI. */
1714 new_assign = gimple_build_assign (tmp, c);
1715 gsi_insert_before (gsi, new_assign, GSI_SAME_STMT);
1716 /* Build rhs for unconditional increment/decrement. */
1717 rhs = fold_build2 (gimple_assign_rhs_code (reduc),
1718 TREE_TYPE (rhs1), op0, tmp);
1720 /* Delete original reduction stmt. */
1721 stmt_it = gsi_for_stmt (reduc);
1722 gsi_remove (&stmt_it, true);
1723 release_defs (reduc);
1724 return rhs;
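
/* For illustration (a source-level sketch using assumed names): the
   transformation above turns

   | for (i = 0; i < n; i++)
   |   if (b[i])
   |     res = res + 1;

   into the unconditional reduction

   | for (i = 0; i < n; i++)
   |   res = res + (b[i] ? 1 : 0);

   which the vectorizer can then recognize as an ordinary sum reduction.  */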
1727 /* Produce condition for all occurrences of ARG in PHI node. */
1729 static tree
1730 gen_phi_arg_condition (gphi *phi, vec<int> *occur,
1731 gimple_stmt_iterator *gsi)
1733 int len;
1734 int i;
1735 tree cond = NULL_TREE;
1736 tree c;
1737 edge e;
1739 len = occur->length ();
1740 gcc_assert (len > 0);
1741 for (i = 0; i < len; i++)
1743 e = gimple_phi_arg_edge (phi, (*occur)[i]);
1744 c = bb_predicate (e->src);
1745 if (is_true_predicate (c))
1747 cond = c;
1748 break;
1750 c = force_gimple_operand_gsi_1 (gsi, unshare_expr (c),
1751 is_gimple_condexpr, NULL_TREE,
1752 true, GSI_SAME_STMT);
1753 if (cond != NULL_TREE)
1755 /* Must build OR expression. */
1756 cond = fold_or_predicates (EXPR_LOCATION (c), c, cond);
1757 cond = force_gimple_operand_gsi_1 (gsi, unshare_expr (cond),
1758 is_gimple_condexpr, NULL_TREE,
1759 true, GSI_SAME_STMT);
1761 else
1762 cond = c;
1764 gcc_assert (cond != NULL_TREE);
1765 return cond;
1768 /* Local valueization callback that follows all-use SSA edges. */
1770 static tree
1771 ifcvt_follow_ssa_use_edges (tree val)
1773 return val;
1776 /* Replace a scalar PHI node with a COND_EXPR using COND as condition.
1777 This routine can handle PHI nodes with more than two arguments.
1779 For example,
1780 S1: A = PHI <x1(1), x2(5)>
1781 is converted into,
1782 S2: A = cond ? x1 : x2;
1784 The generated code is inserted at GSI that points to the top of
1785 basic block's statement list.
1786 If the PHI node has more than two arguments, a chain of conditional
1787 expressions is produced. */
1790 static void
1791 predicate_scalar_phi (gphi *phi, gimple_stmt_iterator *gsi)
1793 gimple *new_stmt = NULL, *reduc;
1794 tree rhs, res, arg0, arg1, op0, op1, scev;
1795 tree cond;
1796 unsigned int index0;
1797 unsigned int max, args_len;
1798 edge e;
1799 basic_block bb;
1800 unsigned int i;
1802 res = gimple_phi_result (phi);
1803 if (virtual_operand_p (res))
1804 return;
1806 if ((rhs = degenerate_phi_result (phi))
1807 || ((scev = analyze_scalar_evolution (gimple_bb (phi)->loop_father,
1808 res))
1809 && !chrec_contains_undetermined (scev)
1810 && scev != res
1811 && (rhs = gimple_phi_arg_def (phi, 0))))
1813 if (dump_file && (dump_flags & TDF_DETAILS))
1815 fprintf (dump_file, "Degenerate phi!\n");
1816 print_gimple_stmt (dump_file, phi, 0, TDF_SLIM);
1818 new_stmt = gimple_build_assign (res, rhs);
1819 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
1820 update_stmt (new_stmt);
1821 return;
1824 bb = gimple_bb (phi);
1825 if (EDGE_COUNT (bb->preds) == 2)
1827 /* Predicate ordinary PHI node with 2 arguments. */
1828 edge first_edge, second_edge;
1829 basic_block true_bb;
1830 first_edge = EDGE_PRED (bb, 0);
1831 second_edge = EDGE_PRED (bb, 1);
1832 cond = bb_predicate (first_edge->src);
1833 if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
1834 std::swap (first_edge, second_edge);
1835 if (EDGE_COUNT (first_edge->src->succs) > 1)
1837 cond = bb_predicate (second_edge->src);
1838 if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
1839 cond = TREE_OPERAND (cond, 0);
1840 else
1841 first_edge = second_edge;
1843 else
1844 cond = bb_predicate (first_edge->src);
1845 /* Gimplify the condition to a valid cond-expr conditional operand. */
1846 cond = force_gimple_operand_gsi_1 (gsi, unshare_expr (cond),
1847 is_gimple_condexpr, NULL_TREE,
1848 true, GSI_SAME_STMT);
1849 true_bb = first_edge->src;
1850 if (EDGE_PRED (bb, 1)->src == true_bb)
1852 arg0 = gimple_phi_arg_def (phi, 1);
1853 arg1 = gimple_phi_arg_def (phi, 0);
1855 else
1857 arg0 = gimple_phi_arg_def (phi, 0);
1858 arg1 = gimple_phi_arg_def (phi, 1);
1860 if (is_cond_scalar_reduction (phi, &reduc, arg0, arg1,
1861 &op0, &op1, false))
1862 /* Convert reduction stmt into vectorizable form. */
1863 rhs = convert_scalar_cond_reduction (reduc, gsi, cond, op0, op1,
1864 true_bb != gimple_bb (reduc));
1865 else
1866 /* Build new RHS using selected condition and arguments. */
1867 rhs = fold_build_cond_expr (TREE_TYPE (res), unshare_expr (cond),
1868 arg0, arg1);
1869 new_stmt = gimple_build_assign (res, rhs);
1870 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
1871 gimple_stmt_iterator new_gsi = gsi_for_stmt (new_stmt);
1872 if (fold_stmt (&new_gsi, ifcvt_follow_ssa_use_edges))
1874 new_stmt = gsi_stmt (new_gsi);
1875 update_stmt (new_stmt);
1878 if (dump_file && (dump_flags & TDF_DETAILS))
1880 fprintf (dump_file, "new phi replacement stmt\n");
1881 print_gimple_stmt (dump_file, new_stmt, 0, TDF_SLIM);
1883 return;
1886 /* Create a hashmap for the PHI node which maps each argument value to the
1887 vector of argument indexes having that value. */
1888 bool swap = false;
1889 hash_map<tree_operand_hash, auto_vec<int> > phi_arg_map;
1890 unsigned int num_args = gimple_phi_num_args (phi);
1891 int max_ind = -1;
1892 /* Vector of different PHI argument values. */
1893 auto_vec<tree> args (num_args);
1895 /* Compute phi_arg_map. */
1896 for (i = 0; i < num_args; i++)
1898 tree arg;
1900 arg = gimple_phi_arg_def (phi, i);
1901 if (!phi_arg_map.get (arg))
1902 args.quick_push (arg);
1903 phi_arg_map.get_or_insert (arg).safe_push (i);
1906 /* Determine element with max number of occurrences. */
1907 max_ind = -1;
1908 max = 1;
1909 args_len = args.length ();
1910 for (i = 0; i < args_len; i++)
1912 unsigned int len;
1913 if ((len = phi_arg_map.get (args[i])->length ()) > max)
1915 max_ind = (int) i;
1916 max = len;
1920 /* Put the element with the max number of occurrences at the end of ARGS. */
1921 if (max_ind != -1 && max_ind +1 != (int) args_len)
1922 std::swap (args[args_len - 1], args[max_ind]);
1924 /* Handle the special case when the number of arguments with different
1925 values is 2 and one argument occurs only once. Such a PHI can be
1926 handled as if it had only 2 arguments. */
1927 if (args_len == 2 && phi_arg_map.get (args[0])->length () == 1)
1929 vec<int> *indexes;
1930 indexes = phi_arg_map.get (args[0]);
1931 index0 = (*indexes)[0];
1932 arg0 = args[0];
1933 arg1 = args[1];
1934 e = gimple_phi_arg_edge (phi, index0);
1935 cond = bb_predicate (e->src);
1936 if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
1938 swap = true;
1939 cond = TREE_OPERAND (cond, 0);
1941 /* Gimplify the condition to a valid cond-expr conditional operand. */
1942 cond = force_gimple_operand_gsi_1 (gsi, unshare_expr (cond),
1943 is_gimple_condexpr, NULL_TREE,
1944 true, GSI_SAME_STMT);
1945 if (!(is_cond_scalar_reduction (phi, &reduc, arg0 , arg1,
1946 &op0, &op1, true)))
1947 rhs = fold_build_cond_expr (TREE_TYPE (res), unshare_expr (cond),
1948 swap? arg1 : arg0,
1949 swap? arg0 : arg1);
1950 else
1951 /* Convert reduction stmt into vectorizable form. */
1952 rhs = convert_scalar_cond_reduction (reduc, gsi, cond, op0, op1,
1953 swap);
1954 new_stmt = gimple_build_assign (res, rhs);
1955 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
1956 update_stmt (new_stmt);
1958 else
1960 /* Common case. */
1961 vec<int> *indexes;
1962 tree type = TREE_TYPE (gimple_phi_result (phi));
1963 tree lhs;
1964 arg1 = args[1];
1965 for (i = 0; i < args_len; i++)
1967 arg0 = args[i];
1968 indexes = phi_arg_map.get (args[i]);
1969 if (i != args_len - 1)
1970 lhs = make_temp_ssa_name (type, NULL, "_ifc_");
1971 else
1972 lhs = res;
1973 cond = gen_phi_arg_condition (phi, indexes, gsi);
1974 rhs = fold_build_cond_expr (type, unshare_expr (cond),
1975 arg0, arg1);
1976 new_stmt = gimple_build_assign (lhs, rhs);
1977 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
1978 update_stmt (new_stmt);
1979 arg1 = lhs;
1983 if (dump_file && (dump_flags & TDF_DETAILS))
1985 fprintf (dump_file, "new extended phi replacement stmt\n");
1986 print_gimple_stmt (dump_file, new_stmt, 0, TDF_SLIM);
1990 /* Replaces in LOOP all the scalar phi nodes other than those in the
1991 LOOP->header block with conditional modify expressions. */
1993 static void
1994 predicate_all_scalar_phis (struct loop *loop)
1996 basic_block bb;
1997 unsigned int orig_loop_num_nodes = loop->num_nodes;
1998 unsigned int i;
2000 for (i = 1; i < orig_loop_num_nodes; i++)
2002 gphi *phi;
2003 gimple_stmt_iterator gsi;
2004 gphi_iterator phi_gsi;
2005 bb = ifc_bbs[i];
2007 if (bb == loop->header)
2008 continue;
2010 phi_gsi = gsi_start_phis (bb);
2011 if (gsi_end_p (phi_gsi))
2012 continue;
2014 gsi = gsi_after_labels (bb);
2015 while (!gsi_end_p (phi_gsi))
2017 phi = phi_gsi.phi ();
2018 if (virtual_operand_p (gimple_phi_result (phi)))
2019 gsi_next (&phi_gsi);
2020 else
2022 predicate_scalar_phi (phi, &gsi);
2023 remove_phi_node (&phi_gsi, false);
2029 /* Insert in each basic block of LOOP the statements produced by the
2030 gimplification of the predicates. */
2032 static void
2033 insert_gimplified_predicates (loop_p loop)
2035 unsigned int i;
2037 for (i = 0; i < loop->num_nodes; i++)
2039 basic_block bb = ifc_bbs[i];
2040 gimple_seq stmts;
2041 if (!is_predicated (bb))
2042 gcc_assert (bb_predicate_gimplified_stmts (bb) == NULL);
2043 if (!is_predicated (bb))
2045 /* Do not insert statements for a basic block that is not
2046 predicated. Also make sure that the predicate of the
2047 basic block is set to true. */
2048 reset_bb_predicate (bb);
2049 continue;
2052 stmts = bb_predicate_gimplified_stmts (bb);
2053 if (stmts)
2055 if (any_pred_load_store)
2057 /* Insert the predicate of the BB just after the label,
2058 as the if-conversion of memory writes will use this
2059 predicate. */
2060 gimple_stmt_iterator gsi = gsi_after_labels (bb);
2061 gsi_insert_seq_before (&gsi, stmts, GSI_SAME_STMT);
2063 else
2065 /* Insert the predicate of the BB at the end of the BB,
2066 as this reduces register pressure: the only
2067 use of this predicate will be in successor BBs. */
2068 gimple_stmt_iterator gsi = gsi_last_bb (bb);
2070 if (gsi_end_p (gsi)
2071 || stmt_ends_bb_p (gsi_stmt (gsi)))
2072 gsi_insert_seq_before (&gsi, stmts, GSI_SAME_STMT);
2073 else
2074 gsi_insert_seq_after (&gsi, stmts, GSI_SAME_STMT);
2077 /* Once the sequence is code generated, set it to NULL. */
2078 set_bb_predicate_gimplified_stmts (bb, NULL);
2083 /* Helper function for predicate_mem_writes. Returns the index of an
2084 existing mask if one was created for the given SIZE, and -1 otherwise. */
2086 static int
2087 mask_exists (int size, vec<int> vec)
2089 unsigned int ix;
2090 int v;
2091 FOR_EACH_VEC_ELT (vec, ix, v)
2092 if (v == size)
2093 return (int) ix;
2094 return -1;
2097 /* Predicate each write to memory in LOOP.
2099 This function transforms control flow constructs containing memory
2100 writes of the form:
2102 | for (i = 0; i < N; i++)
2103 | if (cond)
2104 | A[i] = expr;
2106 into the following form that does not contain control flow:
2108 | for (i = 0; i < N; i++)
2109 | A[i] = cond ? expr : A[i];
2111 The original CFG looks like this:
2113 | bb_0
2114 | i = 0
2115 | end_bb_0
2117 | bb_1
2118 | if (i < N) goto bb_5 else goto bb_2
2119 | end_bb_1
2121 | bb_2
2122 | cond = some_computation;
2123 | if (cond) goto bb_3 else goto bb_4
2124 | end_bb_2
2126 | bb_3
2127 | A[i] = expr;
2128 | goto bb_4
2129 | end_bb_3
2131 | bb_4
2132 | goto bb_1
2133 | end_bb_4
2135 insert_gimplified_predicates inserts the computation of the COND
2136 expression at the beginning of the destination basic block:
2138 | bb_0
2139 | i = 0
2140 | end_bb_0
2142 | bb_1
2143 | if (i < N) goto bb_5 else goto bb_2
2144 | end_bb_1
2146 | bb_2
2147 | cond = some_computation;
2148 | if (cond) goto bb_3 else goto bb_4
2149 | end_bb_2
2151 | bb_3
2152 | cond = some_computation;
2153 | A[i] = expr;
2154 | goto bb_4
2155 | end_bb_3
2157 | bb_4
2158 | goto bb_1
2159 | end_bb_4
2161 predicate_mem_writes then predicates the memory write as follows:
2163 | bb_0
2164 | i = 0
2165 | end_bb_0
2167 | bb_1
2168 | if (i < N) goto bb_5 else goto bb_2
2169 | end_bb_1
2171 | bb_2
2172 | if (cond) goto bb_3 else goto bb_4
2173 | end_bb_2
2175 | bb_3
2176 | cond = some_computation;
2177 | A[i] = cond ? expr : A[i];
2178 | goto bb_4
2179 | end_bb_3
2181 | bb_4
2182 | goto bb_1
2183 | end_bb_4
2185 and finally combine_blocks removes the basic block boundaries making
2186 the loop vectorizable:
2188 | bb_0
2189 | i = 0
2190 | if (i < N) goto bb_5 else goto bb_1
2191 | end_bb_0
2193 | bb_1
2194 | cond = some_computation;
2195 | A[i] = cond ? expr : A[i];
2196 | if (i < N) goto bb_5 else goto bb_4
2197 | end_bb_1
2199 | bb_4
2200 | goto bb_1
2201 | end_bb_4
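/* When the access was flagged with GF_PLF_2 during analysis (the target
   can use a masked access for it), the write is not rewritten as a
   COND_EXPR but as an internal function call, roughly:

     A[i] = expr;   ->   IFN_MASK_STORE (addr, align, mask, expr)
     x = A[i];      ->   x = IFN_MASK_LOAD (addr, align, mask)

   where MASK is built from the gimplified block predicate.  */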
2204 static void
2205 predicate_mem_writes (loop_p loop)
2207 unsigned int i, orig_loop_num_nodes = loop->num_nodes;
2208 auto_vec<int, 1> vect_sizes;
2209 auto_vec<tree, 1> vect_masks;
2211 for (i = 1; i < orig_loop_num_nodes; i++)
2213 gimple_stmt_iterator gsi;
2214 basic_block bb = ifc_bbs[i];
2215 tree cond = bb_predicate (bb);
2216 bool swap;
2217 gimple *stmt;
2218 int index;
2220 if (is_true_predicate (cond))
2221 continue;
2223 swap = false;
2224 if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
2226 swap = true;
2227 cond = TREE_OPERAND (cond, 0);
2230 vect_sizes.truncate (0);
2231 vect_masks.truncate (0);
2233 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi);)
2235 if (!gimple_assign_single_p (stmt = gsi_stmt (gsi)))
2237 else if (is_false_predicate (cond)
2238 && gimple_vdef (stmt))
2240 unlink_stmt_vdef (stmt);
2241 gsi_remove (&gsi, true);
2242 release_defs (stmt);
2243 continue;
2245 else if (gimple_plf (stmt, GF_PLF_2))
2247 tree lhs = gimple_assign_lhs (stmt);
2248 tree rhs = gimple_assign_rhs1 (stmt);
2249 tree ref, addr, ptr, mask;
2250 gcall *new_stmt;
2251 gimple_seq stmts = NULL;
2252 machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
2253 /* We checked before setting GF_PLF_2 that an equivalent
2254 integer mode exists. */
2255 int bitsize = GET_MODE_BITSIZE (mode).to_constant ();
2256 ref = TREE_CODE (lhs) == SSA_NAME ? rhs : lhs;
2257 mark_addressable (ref);
2258 addr = force_gimple_operand_gsi (&gsi, build_fold_addr_expr (ref),
2259 true, NULL_TREE, true,
2260 GSI_SAME_STMT);
2261 if (!vect_sizes.is_empty ()
2262 && (index = mask_exists (bitsize, vect_sizes)) != -1)
2263 /* Use created mask. */
2264 mask = vect_masks[index];
2265 else
2267 if (COMPARISON_CLASS_P (cond))
2268 mask = gimple_build (&stmts, TREE_CODE (cond),
2269 boolean_type_node,
2270 TREE_OPERAND (cond, 0),
2271 TREE_OPERAND (cond, 1));
2272 else
2273 mask = cond;
2275 if (swap)
2277 tree true_val
2278 = constant_boolean_node (true, TREE_TYPE (mask));
2279 mask = gimple_build (&stmts, BIT_XOR_EXPR,
2280 TREE_TYPE (mask), mask, true_val);
2282 gsi_insert_seq_before (&gsi, stmts, GSI_SAME_STMT);
2284 /* Save mask and its size for further use. */
2285 vect_sizes.safe_push (bitsize);
2286 vect_masks.safe_push (mask);
2288 ptr = build_int_cst (reference_alias_ptr_type (ref),
2289 get_object_alignment (ref));
2290 /* Copy points-to info if possible. */
2291 if (TREE_CODE (addr) == SSA_NAME && !SSA_NAME_PTR_INFO (addr))
2292 copy_ref_info (build2 (MEM_REF, TREE_TYPE (ref), addr, ptr),
2293 ref);
2294 if (TREE_CODE (lhs) == SSA_NAME)
2296 new_stmt
2297 = gimple_build_call_internal (IFN_MASK_LOAD, 3, addr,
2298 ptr, mask);
2299 gimple_call_set_lhs (new_stmt, lhs);
2300 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
2302 else
2304 new_stmt
2305 = gimple_build_call_internal (IFN_MASK_STORE, 4, addr, ptr,
2306 mask, rhs);
2307 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
2308 gimple_set_vdef (new_stmt, gimple_vdef (stmt));
2309 SSA_NAME_DEF_STMT (gimple_vdef (new_stmt)) = new_stmt;
2311 gimple_call_set_nothrow (new_stmt, true);
2313 gsi_replace (&gsi, new_stmt, true);
2315 else if (gimple_vdef (stmt))
2317 tree lhs = gimple_assign_lhs (stmt);
2318 tree rhs = gimple_assign_rhs1 (stmt);
2319 tree type = TREE_TYPE (lhs);
2321 lhs = ifc_temp_var (type, unshare_expr (lhs), &gsi);
2322 rhs = ifc_temp_var (type, unshare_expr (rhs), &gsi);
2323 if (swap)
2324 std::swap (lhs, rhs);
2325 cond = force_gimple_operand_gsi_1 (&gsi, unshare_expr (cond),
2326 is_gimple_condexpr, NULL_TREE,
2327 true, GSI_SAME_STMT);
2328 rhs = fold_build_cond_expr (type, unshare_expr (cond), rhs, lhs);
2329 gimple_assign_set_rhs1 (stmt, ifc_temp_var (type, rhs, &gsi));
2330 update_stmt (stmt);
2332 gsi_next (&gsi);
2337 /* Remove all GIMPLE_CONDs and GIMPLE_LABELs of all the basic blocks
2338 other than the exit and latch of the LOOP. Also reset the
2339 GIMPLE_DEBUG information. */
2341 static void
2342 remove_conditions_and_labels (loop_p loop)
2344 gimple_stmt_iterator gsi;
2345 unsigned int i;
2347 for (i = 0; i < loop->num_nodes; i++)
2349 basic_block bb = ifc_bbs[i];
2351 if (bb_with_exit_edge_p (loop, bb)
2352 || bb == loop->latch)
2353 continue;
2355 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
2356 switch (gimple_code (gsi_stmt (gsi)))
2358 case GIMPLE_COND:
2359 case GIMPLE_LABEL:
2360 gsi_remove (&gsi, true);
2361 break;
2363 case GIMPLE_DEBUG:
2364 /* ??? Should there be conditional GIMPLE_DEBUG_BINDs? */
2365 if (gimple_debug_bind_p (gsi_stmt (gsi)))
2367 gimple_debug_bind_reset_value (gsi_stmt (gsi));
2368 update_stmt (gsi_stmt (gsi));
2370 gsi_next (&gsi);
2371 break;
2373 default:
2374 gsi_next (&gsi);
2379 /* Combine all the basic blocks from LOOP into one or two super basic
2380 blocks. Replace PHI nodes with conditional modify expressions. */
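/* The steps are: remove the GIMPLE_CONDs and labels, insert the gimplified
   predicates, predicate the scalar PHIs and (when needed) the memory
   writes, then delete the internal edges and append each block's statement
   list to the header, finally merging the header with the block containing
   the exit edge when possible.  */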
2382 static void
2383 combine_blocks (struct loop *loop)
2385 basic_block bb, exit_bb, merge_target_bb;
2386 unsigned int orig_loop_num_nodes = loop->num_nodes;
2387 unsigned int i;
2388 edge e;
2389 edge_iterator ei;
2391 remove_conditions_and_labels (loop);
2392 insert_gimplified_predicates (loop);
2393 predicate_all_scalar_phis (loop);
2395 if (any_pred_load_store)
2396 predicate_mem_writes (loop);
2398 /* Merge basic blocks: first remove all the edges in the loop,
2399 except for those from the exit block. */
2400 exit_bb = NULL;
2401 bool *predicated = XNEWVEC (bool, orig_loop_num_nodes);
2402 for (i = 0; i < orig_loop_num_nodes; i++)
2404 bb = ifc_bbs[i];
2405 predicated[i] = !is_true_predicate (bb_predicate (bb));
2406 free_bb_predicate (bb);
2407 if (bb_with_exit_edge_p (loop, bb))
2409 gcc_assert (exit_bb == NULL);
2410 exit_bb = bb;
2413 gcc_assert (exit_bb != loop->latch);
2415 for (i = 1; i < orig_loop_num_nodes; i++)
2417 bb = ifc_bbs[i];
2419 for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei));)
2421 if (e->src == exit_bb)
2422 ei_next (&ei);
2423 else
2424 remove_edge (e);
2428 if (exit_bb != NULL)
2430 if (exit_bb != loop->header)
2432 /* Connect this node to loop header. */
2433 make_single_succ_edge (loop->header, exit_bb, EDGE_FALLTHRU);
2434 set_immediate_dominator (CDI_DOMINATORS, exit_bb, loop->header);
2437 /* Redirect non-exit edges to loop->latch. */
2438 FOR_EACH_EDGE (e, ei, exit_bb->succs)
2440 if (!loop_exit_edge_p (loop, e))
2441 redirect_edge_and_branch (e, loop->latch);
2443 set_immediate_dominator (CDI_DOMINATORS, loop->latch, exit_bb);
2445 else
2447 /* If the loop does not have an exit, reconnect header and latch. */
2448 make_edge (loop->header, loop->latch, EDGE_FALLTHRU);
2449 set_immediate_dominator (CDI_DOMINATORS, loop->latch, loop->header);
2452 merge_target_bb = loop->header;
2454 /* Get at the virtual def valid for uses starting at the first block
2455 we merge into the header. Without a virtual PHI the loop has the
2456 same virtual use on all stmts. */
2457 gphi *vphi = get_virtual_phi (loop->header);
2458 tree last_vdef = NULL_TREE;
2459 if (vphi)
2461 last_vdef = gimple_phi_result (vphi);
2462 for (gimple_stmt_iterator gsi = gsi_start_bb (loop->header);
2463 ! gsi_end_p (gsi); gsi_next (&gsi))
2464 if (gimple_vdef (gsi_stmt (gsi)))
2465 last_vdef = gimple_vdef (gsi_stmt (gsi));
2467 for (i = 1; i < orig_loop_num_nodes; i++)
2469 gimple_stmt_iterator gsi;
2470 gimple_stmt_iterator last;
2472 bb = ifc_bbs[i];
2474 if (bb == exit_bb || bb == loop->latch)
2475 continue;
2477 /* We release virtual PHIs late because we have to propagate them
2478 out using the current VUSE. The def might be the one used
2479 after the loop. */
2480 vphi = get_virtual_phi (bb);
2481 if (vphi)
2483 imm_use_iterator iter;
2484 use_operand_p use_p;
2485 gimple *use_stmt;
2486 FOR_EACH_IMM_USE_STMT (use_stmt, iter, gimple_phi_result (vphi))
2488 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2489 SET_USE (use_p, last_vdef);
2491 gsi = gsi_for_stmt (vphi);
2492 remove_phi_node (&gsi, true);
2495 /* Make the stmts members of loop->header and clear range info from all
2496 stmts in BB, which is no longer executed conditionally on a predicate
2497 that the range info could have been derived from. */
2498 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2500 gimple *stmt = gsi_stmt (gsi);
2501 gimple_set_bb (stmt, merge_target_bb);
2502 /* Update virtual operands. */
2503 if (last_vdef)
2505 use_operand_p use_p = ssa_vuse_operand (stmt);
2506 if (use_p
2507 && USE_FROM_PTR (use_p) != last_vdef)
2508 SET_USE (use_p, last_vdef);
2509 if (gimple_vdef (stmt))
2510 last_vdef = gimple_vdef (stmt);
2512 if (predicated[i])
2514 ssa_op_iter i;
2515 tree op;
2516 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF)
2517 reset_flow_sensitive_info (op);
2521 /* Update stmt list. */
2522 last = gsi_last_bb (merge_target_bb);
2523 gsi_insert_seq_after_without_update (&last, bb_seq (bb), GSI_NEW_STMT);
2524 set_bb_seq (bb, NULL);
2526 delete_basic_block (bb);
2529 /* If possible, merge loop header to the block with the exit edge.
2530 This reduces the number of basic blocks to two, to please the
2531 vectorizer that handles only loops with two nodes. */
2532 if (exit_bb
2533 && exit_bb != loop->header)
2535 /* We release virtual PHIs late because we have to propagate them
2536 out using the current VUSE. The def might be the one used
2537 after the loop. */
2538 vphi = get_virtual_phi (exit_bb);
2539 if (vphi)
2541 imm_use_iterator iter;
2542 use_operand_p use_p;
2543 gimple *use_stmt;
2544 FOR_EACH_IMM_USE_STMT (use_stmt, iter, gimple_phi_result (vphi))
2546 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2547 SET_USE (use_p, last_vdef);
2549 gimple_stmt_iterator gsi = gsi_for_stmt (vphi);
2550 remove_phi_node (&gsi, true);
2553 if (can_merge_blocks_p (loop->header, exit_bb))
2554 merge_blocks (loop->header, exit_bb);
2557 free (ifc_bbs);
2558 ifc_bbs = NULL;
2559 free (predicated);
2562 /* Version LOOP before if-converting it; the original loop
2563 will be if-converted, the new copy of the loop will not,
2564 and the LOOP_VECTORIZED internal call will guard which
2565 loop to execute. The vectorizer pass will fold this
2566 internal call into either true or false.
2568 Note that this function intentionally invalidates profile. Both edges
2569 out of LOOP_VECTORIZED must have 100% probability so the profile remains
2570 consistent after the condition is folded in the vectorizer. */
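/* Schematically (loop numbers made up), for an original loop 1 the result
   of the versioning is

     if (LOOP_VECTORIZED (1, 2))
       loop1   <- original loop, to be if-converted and later vectorized
     else
       loop2   <- copy of loop1, marked dont_vectorize and left untouched  */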
2572 static struct loop *
2573 version_loop_for_if_conversion (struct loop *loop)
2575 basic_block cond_bb;
2576 tree cond = make_ssa_name (boolean_type_node);
2577 struct loop *new_loop;
2578 gimple *g;
2579 gimple_stmt_iterator gsi;
2580 unsigned int save_length;
2582 g = gimple_build_call_internal (IFN_LOOP_VECTORIZED, 2,
2583 build_int_cst (integer_type_node, loop->num),
2584 integer_zero_node);
2585 gimple_call_set_lhs (g, cond);
2587 /* Save BB->aux around loop_version as that uses the same field. */
2588 save_length = loop->inner ? loop->inner->num_nodes : loop->num_nodes;
2589 void **saved_preds = XALLOCAVEC (void *, save_length);
2590 for (unsigned i = 0; i < save_length; i++)
2591 saved_preds[i] = ifc_bbs[i]->aux;
2593 initialize_original_copy_tables ();
2594 /* At this point we invalidate profile consistency until IFN_LOOP_VECTORIZED
2595 is re-merged in the vectorizer. */
2596 new_loop = loop_version (loop, cond, &cond_bb,
2597 profile_probability::always (),
2598 profile_probability::always (),
2599 profile_probability::always (),
2600 profile_probability::always (), true);
2601 free_original_copy_tables ();
2603 for (unsigned i = 0; i < save_length; i++)
2604 ifc_bbs[i]->aux = saved_preds[i];
2606 if (new_loop == NULL)
2607 return NULL;
2609 new_loop->dont_vectorize = true;
2610 new_loop->force_vectorize = false;
2611 gsi = gsi_last_bb (cond_bb);
2612 gimple_call_set_arg (g, 1, build_int_cst (integer_type_node, new_loop->num));
2613 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
2614 update_ssa (TODO_update_ssa);
2615 return new_loop;
2618 /* Return true when LOOP satisfies the following conditions that will
2619 allow it to be recognized by the vectorizer for outer-loop
2620 vectorization:
2621 - The loop is not the root node of the loop tree.
2622 - The loop has exactly one inner loop.
2623 - The loop has a single exit.
2624 - The loop header has a single successor, which is the inner
2625 loop header.
2626 - Each of the inner and outer loop latches has a single
2627 predecessor.
2628 - The loop exit block has a single predecessor, which is the
2629 inner loop's exit block. */
2631 static bool
2632 versionable_outer_loop_p (struct loop *loop)
2634 if (!loop_outer (loop)
2635 || loop->dont_vectorize
2636 || !loop->inner
2637 || loop->inner->next
2638 || !single_exit (loop)
2639 || !single_succ_p (loop->header)
2640 || single_succ (loop->header) != loop->inner->header
2641 || !single_pred_p (loop->latch)
2642 || !single_pred_p (loop->inner->latch))
2643 return false;
2645 basic_block outer_exit = single_pred (loop->latch);
2646 basic_block inner_exit = single_pred (loop->inner->latch);
2648 if (!single_pred_p (outer_exit) || single_pred (outer_exit) != inner_exit)
2649 return false;
2651 if (dump_file)
2652 fprintf (dump_file, "Found vectorizable outer loop for versioning\n");
2654 return true;
2657 /* Perform splitting of critical edges. Skip splitting and return false
2658 if LOOP will not be converted because:
2660 - LOOP is not well formed.
2661 - LOOP has a PHI with more than MAX_PHI_ARG_NUM arguments.
2663 The last restriction applies only if AGGRESSIVE_IF_CONV is false. */
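/* A critical edge is an edge whose source block has more than one
   successor and whose destination block has more than one predecessor;
   splitting such an edge inserts a new empty block on it.  */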
2665 static bool
2666 ifcvt_split_critical_edges (struct loop *loop, bool aggressive_if_conv)
2668 basic_block *body;
2669 basic_block bb;
2670 unsigned int num = loop->num_nodes;
2671 unsigned int i;
2672 gimple *stmt;
2673 edge e;
2674 edge_iterator ei;
2675 auto_vec<edge> critical_edges;
2677 /* Loop is not well formed. */
2678 if (num <= 2 || loop->inner || !single_exit (loop))
2679 return false;
2681 body = get_loop_body (loop);
2682 for (i = 0; i < num; i++)
2684 bb = body[i];
2685 if (!aggressive_if_conv
2686 && phi_nodes (bb)
2687 && EDGE_COUNT (bb->preds) > MAX_PHI_ARG_NUM)
2689 if (dump_file && (dump_flags & TDF_DETAILS))
2690 fprintf (dump_file,
2691 "BB %d has complicated PHI with more than %u args.\n",
2692 bb->index, MAX_PHI_ARG_NUM);
2694 free (body);
2695 return false;
2697 if (bb == loop->latch || bb_with_exit_edge_p (loop, bb))
2698 continue;
2700 stmt = last_stmt (bb);
2701 /* Skip basic blocks not ending with a conditional branch. */
2702 if (!stmt || gimple_code (stmt) != GIMPLE_COND)
2703 continue;
2705 FOR_EACH_EDGE (e, ei, bb->succs)
2706 if (EDGE_CRITICAL_P (e) && e->dest->loop_father == loop)
2707 critical_edges.safe_push (e);
2709 free (body);
2711 while (critical_edges.length () > 0)
2713 e = critical_edges.pop ();
2714 /* Don't split if the destination BB can be predicated along a non-critical edge. */
2715 if (EDGE_COUNT (e->dest->preds) > 2 || all_preds_critical_p (e->dest))
2716 split_edge (e);
2719 return true;
2722 /* Delete redundant statements produced by predication which would otherwise
2723 prevent loop vectorization. */
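/* This is a simple local mark-and-sweep restricted to BB: PHIs,
   loads/stores, calls, conditions, debug stmts and values used outside BB
   form the initial live set, liveness is then propagated backwards through
   SSA uses, and every statement left unmarked is removed.  */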
2725 static void
2726 ifcvt_local_dce (basic_block bb)
2728 gimple *stmt;
2729 gimple *stmt1;
2730 gimple *phi;
2731 gimple_stmt_iterator gsi;
2732 auto_vec<gimple *> worklist;
2733 enum gimple_code code;
2734 use_operand_p use_p;
2735 imm_use_iterator imm_iter;
2737 worklist.create (64);
2738 /* Consider all PHIs as live statements. */
2739 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2741 phi = gsi_stmt (gsi);
2742 gimple_set_plf (phi, GF_PLF_2, true);
2743 worklist.safe_push (phi);
2745 /* Consider load/store statements, CALL and COND as live. */
2746 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2748 stmt = gsi_stmt (gsi);
2749 if (gimple_store_p (stmt)
2750 || gimple_assign_load_p (stmt)
2751 || is_gimple_debug (stmt))
2753 gimple_set_plf (stmt, GF_PLF_2, true);
2754 worklist.safe_push (stmt);
2755 continue;
2757 code = gimple_code (stmt);
2758 if (code == GIMPLE_COND || code == GIMPLE_CALL)
2760 gimple_set_plf (stmt, GF_PLF_2, true);
2761 worklist.safe_push (stmt);
2762 continue;
2764 gimple_set_plf (stmt, GF_PLF_2, false);
2766 if (code == GIMPLE_ASSIGN)
2768 tree lhs = gimple_assign_lhs (stmt);
2769 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
2771 stmt1 = USE_STMT (use_p);
2772 if (gimple_bb (stmt1) != bb)
2774 gimple_set_plf (stmt, GF_PLF_2, true);
2775 worklist.safe_push (stmt);
2776 break;
2781 /* Propagate liveness through arguments of live stmt. */
2782 while (worklist.length () > 0)
2784 ssa_op_iter iter;
2785 use_operand_p use_p;
2786 tree use;
2788 stmt = worklist.pop ();
2789 FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
2791 use = USE_FROM_PTR (use_p);
2792 if (TREE_CODE (use) != SSA_NAME)
2793 continue;
2794 stmt1 = SSA_NAME_DEF_STMT (use);
2795 if (gimple_bb (stmt1) != bb
2796 || gimple_plf (stmt1, GF_PLF_2))
2797 continue;
2798 gimple_set_plf (stmt1, GF_PLF_2, true);
2799 worklist.safe_push (stmt1);
2802 /* Delete dead statements. */
2803 gsi = gsi_start_bb (bb);
2804 while (!gsi_end_p (gsi))
2806 stmt = gsi_stmt (gsi);
2807 if (gimple_plf (stmt, GF_PLF_2))
2809 gsi_next (&gsi);
2810 continue;
2812 if (dump_file && (dump_flags & TDF_DETAILS))
2814 fprintf (dump_file, "Delete dead stmt in bb#%d\n", bb->index);
2815 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2817 gsi_remove (&gsi, true);
2818 release_defs (stmt);
2822 /* If-convert LOOP when it is legal. For the moment this pass has no
2823 profitability analysis. Returns non-zero todo flags when something
2824 changed. */
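/* The overall flow is: split critical edges, check that the loop is
   if-convertible, version the loop (or a versionable outer loop) under an
   IFN_LOOP_VECTORIZED guard when needed, then combine all blocks into the
   header and run a local DCE over it.  */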
2826 unsigned int
2827 tree_if_conversion (struct loop *loop)
2829 unsigned int todo = 0;
2830 bool aggressive_if_conv;
2831 struct loop *rloop;
2833 again:
2834 rloop = NULL;
2835 ifc_bbs = NULL;
2836 any_pred_load_store = false;
2837 any_complicated_phi = false;
2839 /* Apply more aggressive if-conversion when the loop or its outer loop was
2840 marked with the simd pragma. When that's the case, we try to if-convert
2841 loops containing PHIs with more than MAX_PHI_ARG_NUM arguments. */
2842 aggressive_if_conv = loop->force_vectorize;
2843 if (!aggressive_if_conv)
2845 struct loop *outer_loop = loop_outer (loop);
2846 if (outer_loop && outer_loop->force_vectorize)
2847 aggressive_if_conv = true;
2850 if (!ifcvt_split_critical_edges (loop, aggressive_if_conv))
2851 goto cleanup;
2853 if (!if_convertible_loop_p (loop)
2854 || !dbg_cnt (if_conversion_tree))
2855 goto cleanup;
2857 if ((any_pred_load_store || any_complicated_phi)
2858 && ((!flag_tree_loop_vectorize && !loop->force_vectorize)
2859 || loop->dont_vectorize))
2860 goto cleanup;
2862 /* Since we have no cost model, always version loops unless the user
2863 specified -ftree-loop-if-convert and versioning is not required.
2864 Either version this loop, or if the pattern is right for outer-loop
2865 vectorization, version the outer loop. In the latter case we will
2866 still if-convert the original inner loop. */
2867 if (any_pred_load_store
2868 || any_complicated_phi
2869 || flag_tree_loop_if_convert != 1)
2871 struct loop *vloop
2872 = (versionable_outer_loop_p (loop_outer (loop))
2873 ? loop_outer (loop) : loop);
2874 struct loop *nloop = version_loop_for_if_conversion (vloop);
2875 if (nloop == NULL)
2876 goto cleanup;
2877 if (vloop != loop)
2879 /* If versionable_outer_loop_p decided to version the
2880 outer loop, version also the inner loop of the non-vectorized
2881 loop copy. So we transform:
2882 loop1
2883 loop2
2884 into:
2885 if (LOOP_VECTORIZED (1, 3))
2887 loop1
2888 loop2
2890 else
2891 loop3 (copy of loop1)
2892 if (LOOP_VECTORIZED (4, 5))
2893 loop4 (copy of loop2)
2894 else
2895 loop5 (copy of loop4) */
2896 gcc_assert (nloop->inner && nloop->inner->next == NULL);
2897 rloop = nloop->inner;
2901 /* Now all statements are if-convertible. Combine all the basic
2902 blocks into one huge basic block doing the if-conversion
2903 on-the-fly. */
2904 combine_blocks (loop);
2906 /* Delete dead predicate computations. */
2907 ifcvt_local_dce (loop->header);
2909 todo |= TODO_cleanup_cfg;
2911 cleanup:
2912 if (ifc_bbs)
2914 unsigned int i;
2916 for (i = 0; i < loop->num_nodes; i++)
2917 free_bb_predicate (ifc_bbs[i]);
2919 free (ifc_bbs);
2920 ifc_bbs = NULL;
2922 if (rloop != NULL)
2924 loop = rloop;
2925 goto again;
2928 return todo;
2931 /* Tree if-conversion pass management. */
2933 namespace {
2935 const pass_data pass_data_if_conversion =
2937 GIMPLE_PASS, /* type */
2938 "ifcvt", /* name */
2939 OPTGROUP_NONE, /* optinfo_flags */
2940 TV_TREE_LOOP_IFCVT, /* tv_id */
2941 ( PROP_cfg | PROP_ssa ), /* properties_required */
2942 0, /* properties_provided */
2943 0, /* properties_destroyed */
2944 0, /* todo_flags_start */
2945 0, /* todo_flags_finish */
2948 class pass_if_conversion : public gimple_opt_pass
2950 public:
2951 pass_if_conversion (gcc::context *ctxt)
2952 : gimple_opt_pass (pass_data_if_conversion, ctxt)
2955 /* opt_pass methods: */
2956 virtual bool gate (function *);
2957 virtual unsigned int execute (function *);
2959 }; // class pass_if_conversion
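/* Gate: run the pass when loop vectorization is enabled (or the function
   has force-vectorize loops) and if-conversion was not explicitly
   disabled, or when -ftree-loop-if-convert was given explicitly.  */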
2961 bool
2962 pass_if_conversion::gate (function *fun)
2964 return (((flag_tree_loop_vectorize || fun->has_force_vectorize_loops)
2965 && flag_tree_loop_if_convert != 0)
2966 || flag_tree_loop_if_convert == 1);
2969 unsigned int
2970 pass_if_conversion::execute (function *fun)
2972 struct loop *loop;
2973 unsigned todo = 0;
2975 if (number_of_loops (fun) <= 1)
2976 return 0;
2978 FOR_EACH_LOOP (loop, 0)
2979 if (flag_tree_loop_if_convert == 1
2980 || ((flag_tree_loop_vectorize || loop->force_vectorize)
2981 && !loop->dont_vectorize))
2982 todo |= tree_if_conversion (loop);
2984 if (todo)
2986 free_numbers_of_iterations_estimates (fun);
2987 scev_reset ();
2990 if (flag_checking)
2992 basic_block bb;
2993 FOR_EACH_BB_FN (bb, fun)
2994 gcc_assert (!bb->aux);
2997 return todo;
3000 } // anon namespace
3002 gimple_opt_pass *
3003 make_pass_if_conversion (gcc::context *ctxt)
3005 return new pass_if_conversion (ctxt);