gcc/tree-if-conv.c
1 /* If-conversion for vectorizer.
2 Copyright (C) 2004-2016 Free Software Foundation, Inc.
3 Contributed by Devang Patel <dpatel@apple.com>
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 /* This pass implements a tree level if-conversion of loops. Its
22 initial goal is to help the vectorizer to vectorize loops with
23 conditions.
25 A short description of if-conversion:
27 o Decide if a loop is if-convertible or not.
28 o Walk all loop basic blocks in breadth first order (BFS order).
29 o Remove conditional statements (at the end of basic block)
30 and propagate condition into destination basic blocks'
31 predicate list.
32 o Replace modify expression with conditional modify expression
33 using current basic block's condition.
34 o Merge all basic blocks
35 o Replace phi nodes with conditional modify expr
36 o Merge all basic blocks into header
38 Sample transformation:
40 INPUT
41 -----
43 # i_23 = PHI <0(0), i_18(10)>;
44 <L0>:;
45 j_15 = A[i_23];
46 if (j_15 > 41) goto <L1>; else goto <L17>;
48 <L17>:;
49 goto <bb 3> (<L3>);
51 <L1>:;
53 # iftmp.2_4 = PHI <0(8), 42(2)>;
54 <L3>:;
55 A[i_23] = iftmp.2_4;
56 i_18 = i_23 + 1;
57 if (i_18 <= 15) goto <L19>; else goto <L18>;
59 <L19>:;
60 goto <bb 1> (<L0>);
62 <L18>:;
64 OUTPUT
65 ------
67 # i_23 = PHI <0(0), i_18(10)>;
68 <L0>:;
69 j_15 = A[i_23];
71 <L3>:;
72 iftmp.2_4 = j_15 > 41 ? 42 : 0;
73 A[i_23] = iftmp.2_4;
74 i_18 = i_23 + 1;
75 if (i_18 <= 15) goto <L19>; else goto <L18>;
77 <L19>:;
78 goto <bb 1> (<L0>);
80 <L18>:;
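     At the C source level this corresponds roughly to rewriting
     (illustrative code, not taken from an actual testcase)

       for (i = 0; i <= 15; i++)
         if (A[i] > 41)
           A[i] = 42;
         else
           A[i] = 0;

     into the branch-free form

       for (i = 0; i <= 15; i++)
         A[i] = A[i] > 41 ? 42 : 0;

     which the loop vectorizer can then handle.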
83 #include "config.h"
84 #include "system.h"
85 #include "coretypes.h"
86 #include "backend.h"
87 #include "rtl.h"
88 #include "tree.h"
89 #include "gimple.h"
90 #include "cfghooks.h"
91 #include "tree-pass.h"
92 #include "ssa.h"
93 #include "expmed.h"
94 #include "optabs-query.h"
95 #include "gimple-pretty-print.h"
96 #include "alias.h"
97 #include "fold-const.h"
98 #include "stor-layout.h"
99 #include "gimple-fold.h"
100 #include "gimplify.h"
101 #include "gimple-iterator.h"
102 #include "gimplify-me.h"
103 #include "tree-cfg.h"
104 #include "tree-into-ssa.h"
105 #include "tree-ssa.h"
106 #include "cfgloop.h"
107 #include "tree-data-ref.h"
108 #include "tree-scalar-evolution.h"
109 #include "tree-ssa-loop.h"
110 #include "tree-ssa-loop-niter.h"
111 #include "tree-ssa-loop-ivopts.h"
112 #include "tree-ssa-address.h"
113 #include "dbgcnt.h"
114 #include "tree-hash-traits.h"
115 #include "varasm.h"
116 #include "builtins.h"
117 #include "params.h"
118 #include "cfganal.h"
120 /* Only handle PHIs with no more than MAX_PHI_ARG_NUM arguments, unless
121 we are asked to by the simd pragma. */
122 #define MAX_PHI_ARG_NUM \
123 ((unsigned) PARAM_VALUE (PARAM_MAX_TREE_IF_CONVERSION_PHI_ARGS))
125 /* Indicate whether a new load/store that needs to be predicated is
126 introduced during if-conversion. */
127 static bool any_pred_load_store;
129 /* Indicate whether there are any complicated PHIs that need to be handled
130 in if-conversion. A complicated PHI has more than two arguments and
131 can't be degenerated to a two-argument PHI. See the comment before
132 phi_convertible_by_degenerating_args for more information. */
133 static bool any_complicated_phi;
135 /* Hash for struct innermost_loop_behavior. It is the user's
136 responsibility to free the memory. */
138 struct innermost_loop_behavior_hash : nofree_ptr_hash <innermost_loop_behavior>
140 static inline hashval_t hash (const value_type &);
141 static inline bool equal (const value_type &,
142 const compare_type &);
145 inline hashval_t
146 innermost_loop_behavior_hash::hash (const value_type &e)
148 hashval_t hash;
150 hash = iterative_hash_expr (e->base_address, 0);
151 hash = iterative_hash_expr (e->offset, hash);
152 hash = iterative_hash_expr (e->init, hash);
153 return iterative_hash_expr (e->step, hash);
156 inline bool
157 innermost_loop_behavior_hash::equal (const value_type &e1,
158 const compare_type &e2)
160 if ((e1->base_address && !e2->base_address)
161 || (!e1->base_address && e2->base_address)
162 || (!e1->offset && e2->offset)
163 || (e1->offset && !e2->offset)
164 || (!e1->init && e2->init)
165 || (e1->init && !e2->init)
166 || (!e1->step && e2->step)
167 || (e1->step && !e2->step))
168 return false;
170 if (e1->base_address && e2->base_address
171 && !operand_equal_p (e1->base_address, e2->base_address, 0))
172 return false;
173 if (e1->offset && e2->offset
174 && !operand_equal_p (e1->offset, e2->offset, 0))
175 return false;
176 if (e1->init && e2->init
177 && !operand_equal_p (e1->init, e2->init, 0))
178 return false;
179 if (e1->step && e2->step
180 && !operand_equal_p (e1->step, e2->step, 0))
181 return false;
183 return true;
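/* As an illustration (hypothetical loop, names invented): in

     for (int i = 0; i < n; i++)
       if (p[i])
         a[i] = a[i] + 1;

   the load from a[i] and the store to a[i] have the same base address,
   offset, init and step, so they hash and compare equal here and end up
   sharing a single "master" data reference in innermost_DR_map.  */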
186 /* List of basic blocks in if-conversion-suitable order. */
187 static basic_block *ifc_bbs;
189 /* Hash table to store <DR's innermost loop behavior, DR> pairs. */
190 static hash_map<innermost_loop_behavior_hash,
191 data_reference_p> *innermost_DR_map;
193 /* Hash table to store <base reference, DR> pairs. */
194 static hash_map<tree_operand_hash, data_reference_p> *baseref_DR_map;
196 /* Structure used to predicate basic blocks. This is attached to the
197 ->aux field of the BBs in the loop to be if-converted. */
198 struct bb_predicate {
200 /* The condition under which this basic block is executed. */
201 tree predicate;
203 /* PREDICATE is gimplified, and the sequence of statements is
204 recorded here, in order to avoid the duplication of computations
205 that occur in previous conditions. See PR44483. */
206 gimple_seq predicate_gimplified_stmts;
209 /* Returns true when the basic block BB has a predicate. */
211 static inline bool
212 bb_has_predicate (basic_block bb)
214 return bb->aux != NULL;
217 /* Returns the gimplified predicate for basic block BB. */
219 static inline tree
220 bb_predicate (basic_block bb)
222 return ((struct bb_predicate *) bb->aux)->predicate;
225 /* Sets the gimplified predicate COND for basic block BB. */
227 static inline void
228 set_bb_predicate (basic_block bb, tree cond)
230 gcc_assert ((TREE_CODE (cond) == TRUTH_NOT_EXPR
231 && is_gimple_condexpr (TREE_OPERAND (cond, 0)))
232 || is_gimple_condexpr (cond));
233 ((struct bb_predicate *) bb->aux)->predicate = cond;
236 /* Returns the sequence of statements of the gimplification of the
237 predicate for basic block BB. */
239 static inline gimple_seq
240 bb_predicate_gimplified_stmts (basic_block bb)
242 return ((struct bb_predicate *) bb->aux)->predicate_gimplified_stmts;
245 /* Sets the sequence of statements STMTS of the gimplification of the
246 predicate for basic block BB. */
248 static inline void
249 set_bb_predicate_gimplified_stmts (basic_block bb, gimple_seq stmts)
251 ((struct bb_predicate *) bb->aux)->predicate_gimplified_stmts = stmts;
254 /* Adds the sequence of statements STMTS to the sequence of statements
255 of the predicate for basic block BB. */
257 static inline void
258 add_bb_predicate_gimplified_stmts (basic_block bb, gimple_seq stmts)
260 gimple_seq_add_seq_without_update
261 (&(((struct bb_predicate *) bb->aux)->predicate_gimplified_stmts), stmts);
264 /* Initializes to TRUE the predicate of basic block BB. */
266 static inline void
267 init_bb_predicate (basic_block bb)
269 bb->aux = XNEW (struct bb_predicate);
270 set_bb_predicate_gimplified_stmts (bb, NULL);
271 set_bb_predicate (bb, boolean_true_node);
274 /* Release the SSA_NAMEs associated with the predicate of basic block BB,
275 but don't actually free it. */
277 static inline void
278 release_bb_predicate (basic_block bb)
280 gimple_seq stmts = bb_predicate_gimplified_stmts (bb);
281 if (stmts)
283 if (flag_checking)
284 for (gimple_stmt_iterator i = gsi_start (stmts);
285 !gsi_end_p (i); gsi_next (&i))
286 gcc_assert (! gimple_use_ops (gsi_stmt (i)));
288 set_bb_predicate_gimplified_stmts (bb, NULL);
292 /* Free the predicate of basic block BB. */
294 static inline void
295 free_bb_predicate (basic_block bb)
297 if (!bb_has_predicate (bb))
298 return;
300 release_bb_predicate (bb);
301 free (bb->aux);
302 bb->aux = NULL;
305 /* Reinitialize predicate of BB with the true predicate. */
307 static inline void
308 reset_bb_predicate (basic_block bb)
310 if (!bb_has_predicate (bb))
311 init_bb_predicate (bb);
312 else
314 release_bb_predicate (bb);
315 set_bb_predicate (bb, boolean_true_node);
319 /* Returns a new SSA_NAME of type TYPE that is assigned the value of
320 the expression EXPR. Inserts the statement created for this
321 computation before GSI and leaves the iterator GSI at the same
322 statement. */
324 static tree
325 ifc_temp_var (tree type, tree expr, gimple_stmt_iterator *gsi)
327 tree new_name = make_temp_ssa_name (type, NULL, "_ifc_");
328 gimple *stmt = gimple_build_assign (new_name, expr);
329 gimple_set_vuse (stmt, gimple_vuse (gsi_stmt (*gsi)));
330 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
331 return new_name;
334 /* Return true when COND is a false predicate. */
336 static inline bool
337 is_false_predicate (tree cond)
339 return (cond != NULL_TREE
340 && (cond == boolean_false_node
341 || integer_zerop (cond)));
344 /* Return true when COND is a true predicate. */
346 static inline bool
347 is_true_predicate (tree cond)
349 return (cond == NULL_TREE
350 || cond == boolean_true_node
351 || integer_onep (cond));
354 /* Returns true when BB has a predicate that is not trivial: true or
355 NULL_TREE. */
357 static inline bool
358 is_predicated (basic_block bb)
360 return !is_true_predicate (bb_predicate (bb));
363 /* Parses the predicate COND and returns its comparison code and
364 operands OP0 and OP1. */
366 static enum tree_code
367 parse_predicate (tree cond, tree *op0, tree *op1)
369 gimple *s;
371 if (TREE_CODE (cond) == SSA_NAME
372 && is_gimple_assign (s = SSA_NAME_DEF_STMT (cond)))
374 if (TREE_CODE_CLASS (gimple_assign_rhs_code (s)) == tcc_comparison)
376 *op0 = gimple_assign_rhs1 (s);
377 *op1 = gimple_assign_rhs2 (s);
378 return gimple_assign_rhs_code (s);
381 else if (gimple_assign_rhs_code (s) == TRUTH_NOT_EXPR)
383 tree op = gimple_assign_rhs1 (s);
384 tree type = TREE_TYPE (op);
385 enum tree_code code = parse_predicate (op, op0, op1);
387 return code == ERROR_MARK ? ERROR_MARK
388 : invert_tree_comparison (code, HONOR_NANS (type));
391 return ERROR_MARK;
394 if (COMPARISON_CLASS_P (cond))
396 *op0 = TREE_OPERAND (cond, 0);
397 *op1 = TREE_OPERAND (cond, 1);
398 return TREE_CODE (cond);
401 return ERROR_MARK;
404 /* Returns the fold of predicate C1 OR C2 at location LOC. */
406 static tree
407 fold_or_predicates (location_t loc, tree c1, tree c2)
409 tree op1a, op1b, op2a, op2b;
410 enum tree_code code1 = parse_predicate (c1, &op1a, &op1b);
411 enum tree_code code2 = parse_predicate (c2, &op2a, &op2b);
413 if (code1 != ERROR_MARK && code2 != ERROR_MARK)
415 tree t = maybe_fold_or_comparisons (code1, op1a, op1b,
416 code2, op2a, op2b);
417 if (t)
418 return t;
421 return fold_build2_loc (loc, TRUTH_OR_EXPR, boolean_type_node, c1, c2);
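/* As an illustration (hypothetical SSA names): for two complementary
   comparisons such as

     c1 = i_5 < 16
     c2 = i_5 >= 16

   maybe_fold_or_comparisons folds c1 | c2 to true, which later lets
   add_to_predicate_list reset the predicate of a join block to the
   trivial true predicate.  */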
424 /* Returns either a COND_EXPR or the folded expression if the folded
425 expression is a MIN_EXPR, a MAX_EXPR, an ABS_EXPR,
426 a constant or a SSA_NAME. */
428 static tree
429 fold_build_cond_expr (tree type, tree cond, tree rhs, tree lhs)
431 tree rhs1, lhs1, cond_expr;
433 /* If COND is the comparison r != 0 and r has boolean type, convert COND
434 to the SSA_NAME r so that it is accepted by the vect bool pattern. */
435 if (TREE_CODE (cond) == NE_EXPR)
437 tree op0 = TREE_OPERAND (cond, 0);
438 tree op1 = TREE_OPERAND (cond, 1);
439 if (TREE_CODE (op0) == SSA_NAME
440 && TREE_CODE (TREE_TYPE (op0)) == BOOLEAN_TYPE
441 && (integer_zerop (op1)))
442 cond = op0;
444 cond_expr = fold_ternary (COND_EXPR, type, cond, rhs, lhs);
446 if (cond_expr == NULL_TREE)
447 return build3 (COND_EXPR, type, cond, rhs, lhs);
449 STRIP_USELESS_TYPE_CONVERSION (cond_expr);
451 if (is_gimple_val (cond_expr))
452 return cond_expr;
454 if (TREE_CODE (cond_expr) == ABS_EXPR)
456 rhs1 = TREE_OPERAND (cond_expr, 1);
457 STRIP_USELESS_TYPE_CONVERSION (rhs1);
458 if (is_gimple_val (rhs1))
459 return build1 (ABS_EXPR, type, rhs1);
462 if (TREE_CODE (cond_expr) == MIN_EXPR
463 || TREE_CODE (cond_expr) == MAX_EXPR)
465 lhs1 = TREE_OPERAND (cond_expr, 0);
466 STRIP_USELESS_TYPE_CONVERSION (lhs1);
467 rhs1 = TREE_OPERAND (cond_expr, 1);
468 STRIP_USELESS_TYPE_CONVERSION (rhs1);
469 if (is_gimple_val (rhs1) && is_gimple_val (lhs1))
470 return build2 (TREE_CODE (cond_expr), type, lhs1, rhs1);
472 return build3 (COND_EXPR, type, cond, rhs, lhs);
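/* As an illustration (hypothetical operands): for the conditional

     x_1 > y_2 ? x_1 : y_2

   fold_ternary is expected to return MAX_EXPR <x_1, y_2>, so the function
   above returns the simpler binary MAX_EXPR instead of a COND_EXPR.  */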
475 /* Add condition NC to the predicate list of basic block BB. LOOP is
476 the loop to be if-converted. Use predicate of cd-equivalent block
477 for join bb if it exists: we call basic blocks bb1 and bb2
478 cd-equivalent if they are executed under the same condition. */
480 static inline void
481 add_to_predicate_list (struct loop *loop, basic_block bb, tree nc)
483 tree bc, *tp;
484 basic_block dom_bb;
486 if (is_true_predicate (nc))
487 return;
489 /* If dominance tells us this basic block is always executed,
490 don't record any predicates for it. */
491 if (dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
492 return;
494 dom_bb = get_immediate_dominator (CDI_DOMINATORS, bb);
495 /* We use notion of cd equivalence to get simpler predicate for
496 join block, e.g. if join block has 2 predecessors with predicates
497 p1 & p2 and p1 & !p2, we'd like to get p1 for it instead of
498 p1 & p2 | p1 & !p2. */
499 if (dom_bb != loop->header
500 && get_immediate_dominator (CDI_POST_DOMINATORS, dom_bb) == bb)
502 gcc_assert (flow_bb_inside_loop_p (loop, dom_bb));
503 bc = bb_predicate (dom_bb);
504 if (!is_true_predicate (bc))
505 set_bb_predicate (bb, bc);
506 else
507 gcc_assert (is_true_predicate (bb_predicate (bb)));
508 if (dump_file && (dump_flags & TDF_DETAILS))
509 fprintf (dump_file, "Use predicate of bb#%d for bb#%d\n",
510 dom_bb->index, bb->index);
511 return;
514 if (!is_predicated (bb))
515 bc = nc;
516 else
518 bc = bb_predicate (bb);
519 bc = fold_or_predicates (EXPR_LOCATION (bc), nc, bc);
520 if (is_true_predicate (bc))
522 reset_bb_predicate (bb);
523 return;
527 /* Allow a TRUTH_NOT_EXPR around the main predicate. */
528 if (TREE_CODE (bc) == TRUTH_NOT_EXPR)
529 tp = &TREE_OPERAND (bc, 0);
530 else
531 tp = &bc;
532 if (!is_gimple_condexpr (*tp))
534 gimple_seq stmts;
535 *tp = force_gimple_operand_1 (*tp, &stmts, is_gimple_condexpr, NULL_TREE);
536 add_bb_predicate_gimplified_stmts (bb, stmts);
538 set_bb_predicate (bb, bc);
541 /* Add the condition COND to the previous condition PREV_COND, and add
542 this to the predicate list of the destination of edge E. LOOP is
543 the loop to be if-converted. */
545 static void
546 add_to_dst_predicate_list (struct loop *loop, edge e,
547 tree prev_cond, tree cond)
549 if (!flow_bb_inside_loop_p (loop, e->dest))
550 return;
552 if (!is_true_predicate (prev_cond))
553 cond = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
554 prev_cond, cond);
556 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, e->dest))
557 add_to_predicate_list (loop, e->dest, cond);
560 /* Return true if one of the successor edges of BB exits LOOP. */
562 static bool
563 bb_with_exit_edge_p (struct loop *loop, basic_block bb)
565 edge e;
566 edge_iterator ei;
568 FOR_EACH_EDGE (e, ei, bb->succs)
569 if (loop_exit_edge_p (loop, e))
570 return true;
572 return false;
575 /* Given PHI which has more than two arguments, this function checks if
576 it's if-convertible by degenerating its arguments. Specifically, it is
577 if-convertible when the two conditions below are satisfied:
579 1) The number of distinct PHI argument values equals 2 and one of the
580 arguments has only a single occurrence.
581 2) The edge corresponding to the unique argument isn't a critical edge.
583 Such a PHI can be handled as if it had only two arguments. For example,
584 the PHI below:
586 res = PHI <A_1(e1), A_1(e2), A_2(e3)>;
588 can be transformed into:
590 res = (predicate of e3) ? A_2 : A_1;
592 Return TRUE if it is the case, FALSE otherwise. */
594 static bool
595 phi_convertible_by_degenerating_args (gphi *phi)
597 edge e;
598 tree arg, t1 = NULL, t2 = NULL;
599 unsigned int i, i1 = 0, i2 = 0, n1 = 0, n2 = 0;
600 unsigned int num_args = gimple_phi_num_args (phi);
602 gcc_assert (num_args > 2);
604 for (i = 0; i < num_args; i++)
606 arg = gimple_phi_arg_def (phi, i);
607 if (t1 == NULL || operand_equal_p (t1, arg, 0))
609 n1++;
610 i1 = i;
611 t1 = arg;
613 else if (t2 == NULL || operand_equal_p (t2, arg, 0))
615 n2++;
616 i2 = i;
617 t2 = arg;
619 else
620 return false;
623 if (n1 != 1 && n2 != 1)
624 return false;
626 /* Check if the edge corresponding to the unique arg is critical. */
627 e = gimple_phi_arg_edge (phi, (n1 == 1) ? i1 : i2);
628 if (EDGE_COUNT (e->src->succs) > 1)
629 return false;
631 return true;
634 /* Return true when PHI is if-convertible. PHI is part of loop LOOP
635 and it belongs to basic block BB. Note that at this point PHI is
636 already known to be if-convertible; this function only updates the
637 global variable ANY_COMPLICATED_PHI if PHI is complicated. */
639 static bool
640 if_convertible_phi_p (struct loop *loop, basic_block bb, gphi *phi)
642 if (dump_file && (dump_flags & TDF_DETAILS))
644 fprintf (dump_file, "-------------------------\n");
645 print_gimple_stmt (dump_file, phi, 0, TDF_SLIM);
648 if (bb != loop->header
649 && gimple_phi_num_args (phi) > 2
650 && !phi_convertible_by_degenerating_args (phi))
651 any_complicated_phi = true;
653 return true;
656 /* Records the status of a data reference. This struct is attached to
657 each DR->aux field. */
659 struct ifc_dr {
660 bool rw_unconditionally;
661 bool w_unconditionally;
662 bool written_at_least_once;
664 tree rw_predicate;
665 tree w_predicate;
666 tree base_w_predicate;
669 #define IFC_DR(DR) ((struct ifc_dr *) (DR)->aux)
670 #define DR_BASE_W_UNCONDITIONALLY(DR) (IFC_DR (DR)->written_at_least_once)
671 #define DR_RW_UNCONDITIONALLY(DR) (IFC_DR (DR)->rw_unconditionally)
672 #define DR_W_UNCONDITIONALLY(DR) (IFC_DR (DR)->w_unconditionally)
674 /* Iterates over DR's and stores <ref, DR> and <base ref, DR> pairs in
675 HASH tables. While storing them in the HASH tables, it checks if the
676 reference is unconditionally read or written and stores that as flag
677 information. For the base reference it checks if it is written at least
678 once unconditionally and stores that as flag information along with the DR.
679 In other words, for every data reference A in STMT there exist other
680 accesses to a data reference with the same base whose predicates
681 add up (OR-up) to the true predicate: this ensures that the data
682 reference A is touched (read or written) on every iteration of the
683 if-converted loop. */
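/* As an illustration (hypothetical loop): in

     for (int i = 0; i < n; i++)
       if (p[i])
         a[i] = 1;
       else
         a[i] = 2;

   both stores to a[i] share one master DR; their predicates p and !p
   OR-up to true, so DR_W_UNCONDITIONALLY is set for the master DR and
   ifcvt_memrefs_wont_trap will accept the reference.  */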
684 static void
685 hash_memrefs_baserefs_and_store_DRs_read_written_info (data_reference_p a)
688 data_reference_p *master_dr, *base_master_dr;
689 tree base_ref = DR_BASE_OBJECT (a);
690 innermost_loop_behavior *innermost = &DR_INNERMOST (a);
691 tree ca = bb_predicate (gimple_bb (DR_STMT (a)));
692 bool exist1, exist2;
694 master_dr = &innermost_DR_map->get_or_insert (innermost, &exist1);
695 if (!exist1)
696 *master_dr = a;
698 if (DR_IS_WRITE (a))
700 IFC_DR (*master_dr)->w_predicate
701 = fold_or_predicates (UNKNOWN_LOCATION, ca,
702 IFC_DR (*master_dr)->w_predicate);
703 if (is_true_predicate (IFC_DR (*master_dr)->w_predicate))
704 DR_W_UNCONDITIONALLY (*master_dr) = true;
706 IFC_DR (*master_dr)->rw_predicate
707 = fold_or_predicates (UNKNOWN_LOCATION, ca,
708 IFC_DR (*master_dr)->rw_predicate);
709 if (is_true_predicate (IFC_DR (*master_dr)->rw_predicate))
710 DR_RW_UNCONDITIONALLY (*master_dr) = true;
712 if (DR_IS_WRITE (a))
714 base_master_dr = &baseref_DR_map->get_or_insert (base_ref, &exist2);
715 if (!exist2)
716 *base_master_dr = a;
717 IFC_DR (*base_master_dr)->base_w_predicate
718 = fold_or_predicates (UNKNOWN_LOCATION, ca,
719 IFC_DR (*base_master_dr)->base_w_predicate);
720 if (is_true_predicate (IFC_DR (*base_master_dr)->base_w_predicate))
721 DR_BASE_W_UNCONDITIONALLY (*base_master_dr) = true;
725 /* Return TRUE if we can prove that the index IDX of an array reference
726 REF is within the array bounds. Return FALSE otherwise. */
728 static bool
729 idx_within_array_bound (tree ref, tree *idx, void *dta)
731 bool overflow;
732 widest_int niter, valid_niter, delta, wi_step;
733 tree ev, init, step;
734 tree low, high;
735 struct loop *loop = (struct loop*) dta;
737 /* Only support within-bound access for array references. */
738 if (TREE_CODE (ref) != ARRAY_REF)
739 return false;
741 /* For arrays at the end of the structure, we are not guaranteed that they
742 do not really extend over their declared size. However, for arrays of
743 size greater than one, this is unlikely to be intended. */
744 if (array_at_struct_end_p (ref))
745 return false;
747 ev = analyze_scalar_evolution (loop, *idx);
748 ev = instantiate_parameters (loop, ev);
749 init = initial_condition (ev);
750 step = evolution_part_in_loop_num (ev, loop->num);
752 if (!init || TREE_CODE (init) != INTEGER_CST
753 || (step && TREE_CODE (step) != INTEGER_CST))
754 return false;
756 low = array_ref_low_bound (ref);
757 high = array_ref_up_bound (ref);
759 /* The case of nonconstant bounds could be handled, but it would be
760 complicated. */
761 if (TREE_CODE (low) != INTEGER_CST
762 || !high || TREE_CODE (high) != INTEGER_CST)
763 return false;
765 /* Check if the initial idx is within bound. */
766 if (wi::to_widest (init) < wi::to_widest (low)
767 || wi::to_widest (init) > wi::to_widest (high))
768 return false;
770 /* The idx is always within bound. */
771 if (!step || integer_zerop (step))
772 return true;
774 if (!max_loop_iterations (loop, &niter))
775 return false;
777 if (wi::to_widest (step) < 0)
779 delta = wi::to_widest (init) - wi::to_widest (low);
780 wi_step = -wi::to_widest (step);
782 else
784 delta = wi::to_widest (high) - wi::to_widest (init);
785 wi_step = wi::to_widest (step);
788 valid_niter = wi::div_floor (delta, wi_step, SIGNED, &overflow);
789 /* The iteration space of idx is within array bound. */
790 if (!overflow && niter <= valid_niter)
791 return true;
793 return false;
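/* As an illustration (hypothetical numbers): for an access A[i] into
   int A[16] with init 0, step 1, low bound 0 and up bound 15, we get
   delta = 15 - 0 = 15 and valid_niter = 15 / 1 = 15, so the access is
   accepted whenever max_loop_iterations proves a bound of at most 15
   iterations.  */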
798 /* Return TRUE if REF is a within-bound array reference. */
798 static bool
799 ref_within_array_bound (gimple *stmt, tree ref)
801 struct loop *loop = loop_containing_stmt (stmt);
803 gcc_assert (loop != NULL);
804 return for_each_index (&ref, idx_within_array_bound, loop);
808 /* Given a memory reference expression REF, return TRUE if the base
809 object it refers to is writable. The base object of a memory reference
810 is the main object being referenced, which is returned by function
811 get_base_address. */
813 static bool
814 base_object_writable (tree ref)
816 tree base_tree = get_base_address (ref);
818 return (base_tree
819 && DECL_P (base_tree)
820 && decl_binds_to_current_def_p (base_tree)
821 && !TREE_READONLY (base_tree));
824 /* Return true when the memory references of STMT won't trap in the
825 if-converted code. There are two things that we have to check for:
827 - writes to memory occur to writable memory: if-conversion of
828 memory writes transforms the conditional memory writes into
829 unconditional writes, i.e. "if (cond) A[i] = foo" is transformed
830 into "A[i] = cond ? foo : A[i]", and as the write to memory may not
831 be executed at all in the original code, it may be a readonly
832 memory. To check that A is not const-qualified, we check that
833 there exists at least one unconditional write to A in the current
834 function.
836 - reads or writes to memory are valid memory accesses for every
837 iteration. To check that the memory accesses are correctly formed
838 and that we are allowed to read and write in these locations, we
839 check that the memory accesses to be if-converted occur at every
840 iteration unconditionally.
842 Returns true for the memory reference in STMT if the same memory
843 reference is read or written unconditionally at least once and the base
844 memory reference is written unconditionally once; this checks that the
845 reference will not fault on write. Also returns true if the memory
846 reference is unconditionally read at least once while we are
847 conditionally writing to memory that is known to be both read and
848 written and is bound to the definition we are seeing. */
849 static bool
850 ifcvt_memrefs_wont_trap (gimple *stmt, vec<data_reference_p> drs)
852 data_reference_p *master_dr, *base_master_dr;
853 data_reference_p a = drs[gimple_uid (stmt) - 1];
855 tree base = DR_BASE_OBJECT (a);
856 innermost_loop_behavior *innermost = &DR_INNERMOST (a);
858 gcc_assert (DR_STMT (a) == stmt);
859 gcc_assert (DR_BASE_ADDRESS (a) || DR_OFFSET (a)
860 || DR_INIT (a) || DR_STEP (a));
862 master_dr = innermost_DR_map->get (innermost);
863 gcc_assert (master_dr != NULL);
865 base_master_dr = baseref_DR_map->get (base);
867 /* If a is unconditionally written to it doesn't trap. */
868 if (DR_W_UNCONDITIONALLY (*master_dr))
869 return true;
871 /* If a is unconditionally accessed then ...
873 Even if a is accessed conditionally, we can treat it as an
874 unconditional access if it's an array reference and all its indexes
875 are within the array bounds. */
876 if (DR_RW_UNCONDITIONALLY (*master_dr)
877 || ref_within_array_bound (stmt, DR_REF (a)))
879 /* an unconditional read won't trap. */
880 if (DR_IS_READ (a))
881 return true;
883 /* an unconditional write won't trap if the base is written
884 to unconditionally. */
885 if (base_master_dr
886 && DR_BASE_W_UNCONDITIONALLY (*base_master_dr))
887 return PARAM_VALUE (PARAM_ALLOW_STORE_DATA_RACES);
888 /* or the base is known to be not readonly. */
889 else if (base_object_writable (DR_REF (a)))
890 return PARAM_VALUE (PARAM_ALLOW_STORE_DATA_RACES);
893 return false;
896 /* Return true if STMT could be converted into a masked load or store
897 (conditional load or store based on a mask computed from bb predicate). */
899 static bool
900 ifcvt_can_use_mask_load_store (gimple *stmt)
902 tree lhs, ref;
903 machine_mode mode;
904 basic_block bb = gimple_bb (stmt);
905 bool is_load;
907 if (!(flag_tree_loop_vectorize || bb->loop_father->force_vectorize)
908 || bb->loop_father->dont_vectorize
909 || !gimple_assign_single_p (stmt)
910 || gimple_has_volatile_ops (stmt))
911 return false;
913 /* Check whether this is a load or store. */
914 lhs = gimple_assign_lhs (stmt);
915 if (gimple_store_p (stmt))
917 if (!is_gimple_val (gimple_assign_rhs1 (stmt)))
918 return false;
919 is_load = false;
920 ref = lhs;
922 else if (gimple_assign_load_p (stmt))
924 is_load = true;
925 ref = gimple_assign_rhs1 (stmt);
927 else
928 return false;
930 if (may_be_nonaddressable_p (ref))
931 return false;
933 /* Mask should be integer mode of the same size as the load/store
934 mode. */
935 mode = TYPE_MODE (TREE_TYPE (lhs));
936 if (int_mode_for_mode (mode) == BLKmode
937 || VECTOR_MODE_P (mode))
938 return false;
940 if (can_vec_mask_load_store_p (mode, VOIDmode, is_load))
941 return true;
943 return false;
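/* As an illustration (hypothetical store): a conditional store such as

     if (p[i])
       a[i] = x;

   for which this function returns true is flagged with GF_PLF_2 and is
   later emitted as a masked store (an IFN_MASK_STORE internal call)
   rather than being rewritten as an unconditional read-modify-write
   of a[i].  */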
946 /* Return true when STMT is if-convertible.
948 A GIMPLE_ASSIGN statement is not if-convertible if:
949 - it is not movable,
950 - it could trap,
951 - its LHS is not a var decl. */
953 static bool
954 if_convertible_gimple_assign_stmt_p (gimple *stmt,
955 vec<data_reference_p> refs)
957 tree lhs = gimple_assign_lhs (stmt);
959 if (dump_file && (dump_flags & TDF_DETAILS))
961 fprintf (dump_file, "-------------------------\n");
962 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
965 if (!is_gimple_reg_type (TREE_TYPE (lhs)))
966 return false;
968 /* Some of these constraints might be too conservative. */
969 if (stmt_ends_bb_p (stmt)
970 || gimple_has_volatile_ops (stmt)
971 || (TREE_CODE (lhs) == SSA_NAME
972 && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
973 || gimple_has_side_effects (stmt))
975 if (dump_file && (dump_flags & TDF_DETAILS))
976 fprintf (dump_file, "stmt not suitable for ifcvt\n");
977 return false;
980 /* tree-into-ssa.c uses GF_PLF_1, so avoid it, because
981 in between if_convertible_loop_p and combine_blocks
982 we can perform loop versioning. */
983 gimple_set_plf (stmt, GF_PLF_2, false);
985 if ((! gimple_vuse (stmt)
986 || gimple_could_trap_p_1 (stmt, false, false)
987 || ! ifcvt_memrefs_wont_trap (stmt, refs))
988 && gimple_could_trap_p (stmt))
990 if (ifcvt_can_use_mask_load_store (stmt))
992 gimple_set_plf (stmt, GF_PLF_2, true);
993 any_pred_load_store = true;
994 return true;
996 if (dump_file && (dump_flags & TDF_DETAILS))
997 fprintf (dump_file, "tree could trap...\n");
998 return false;
1001 /* When if-converting stores, force versioning; likewise if we
1002 ended up generating store data races. */
1003 if (gimple_vdef (stmt))
1004 any_pred_load_store = true;
1006 return true;
1009 /* Return true when STMT is if-convertible.
1011 A statement is if-convertible if:
1012 - it is an if-convertible GIMPLE_ASSIGN,
1013 - it is a GIMPLE_LABEL, a GIMPLE_DEBUG or a GIMPLE_COND,
1014 - it is a builtin call. */
1016 static bool
1017 if_convertible_stmt_p (gimple *stmt, vec<data_reference_p> refs)
1019 switch (gimple_code (stmt))
1021 case GIMPLE_LABEL:
1022 case GIMPLE_DEBUG:
1023 case GIMPLE_COND:
1024 return true;
1026 case GIMPLE_ASSIGN:
1027 return if_convertible_gimple_assign_stmt_p (stmt, refs);
1029 case GIMPLE_CALL:
1031 tree fndecl = gimple_call_fndecl (stmt);
1032 if (fndecl)
1034 int flags = gimple_call_flags (stmt);
1035 if ((flags & ECF_CONST)
1036 && !(flags & ECF_LOOPING_CONST_OR_PURE)
1037 /* We can only vectorize some builtins at the moment,
1038 so restrict if-conversion to those. */
1039 && DECL_BUILT_IN (fndecl))
1040 return true;
1042 return false;
1045 default:
1046 /* Don't know what to do with 'em so don't do anything. */
1047 if (dump_file && (dump_flags & TDF_DETAILS))
1049 fprintf (dump_file, "don't know what to do\n");
1050 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
1052 return false;
1055 return true;
1058 /* Assumes that BB has more than 1 predecessor.
1059 Returns false if at least one predecessor edge is not critical,
1060 and true otherwise. */
1062 static inline bool
1063 all_preds_critical_p (basic_block bb)
1065 edge e;
1066 edge_iterator ei;
1068 FOR_EACH_EDGE (e, ei, bb->preds)
1069 if (EDGE_COUNT (e->src->succs) == 1)
1070 return false;
1071 return true;
1074 /* Returns true if at least one predecessor edge of BB is critical. */
1075 static inline bool
1076 has_pred_critical_p (basic_block bb)
1078 edge e;
1079 edge_iterator ei;
1081 FOR_EACH_EDGE (e, ei, bb->preds)
1082 if (EDGE_COUNT (e->src->succs) > 1)
1083 return true;
1084 return false;
1087 /* Return true when BB is if-convertible. This routine does not check
1088 basic block's statements and phis.
1090 A basic block is not if-convertible if:
1091 - it is non-empty and it is after the exit block (in BFS order),
1092 - it is after the exit block but before the latch,
1093 - its edges are not normal.
1095 EXIT_BB is the basic block containing the exit of the LOOP. BB is
1096 inside LOOP. */
1098 static bool
1099 if_convertible_bb_p (struct loop *loop, basic_block bb, basic_block exit_bb)
1101 edge e;
1102 edge_iterator ei;
1104 if (dump_file && (dump_flags & TDF_DETAILS))
1105 fprintf (dump_file, "----------[%d]-------------\n", bb->index);
1107 if (EDGE_COUNT (bb->succs) > 2)
1108 return false;
1110 if (exit_bb)
1112 if (bb != loop->latch)
1114 if (dump_file && (dump_flags & TDF_DETAILS))
1115 fprintf (dump_file, "basic block after exit bb but before latch\n");
1116 return false;
1118 else if (!empty_block_p (bb))
1120 if (dump_file && (dump_flags & TDF_DETAILS))
1121 fprintf (dump_file, "non empty basic block after exit bb\n");
1122 return false;
1124 else if (bb == loop->latch
1125 && bb != exit_bb
1126 && !dominated_by_p (CDI_DOMINATORS, bb, exit_bb))
1128 if (dump_file && (dump_flags & TDF_DETAILS))
1129 fprintf (dump_file, "latch is not dominated by exit_block\n");
1130 return false;
1134 /* Be less adventurous and handle only normal edges. */
1135 FOR_EACH_EDGE (e, ei, bb->succs)
1136 if (e->flags & (EDGE_EH | EDGE_ABNORMAL | EDGE_IRREDUCIBLE_LOOP))
1138 if (dump_file && (dump_flags & TDF_DETAILS))
1139 fprintf (dump_file, "Difficult to handle edges\n");
1140 return false;
1143 return true;
1146 /* Return true when all predecessor blocks of BB are visited. The
1147 VISITED bitmap keeps track of the visited blocks. */
1149 static bool
1150 pred_blocks_visited_p (basic_block bb, bitmap *visited)
1152 edge e;
1153 edge_iterator ei;
1154 FOR_EACH_EDGE (e, ei, bb->preds)
1155 if (!bitmap_bit_p (*visited, e->src->index))
1156 return false;
1158 return true;
1161 /* Get the body of a LOOP in an order suitable for if-conversion. It is
1162 the caller's responsibility to deallocate the basic block list.
1163 The if-conversion-suitable order is breadth-first search (BFS) order
1164 with an additional constraint: select a block only if all its
1165 predecessors are already selected. */
1167 static basic_block *
1168 get_loop_body_in_if_conv_order (const struct loop *loop)
1170 basic_block *blocks, *blocks_in_bfs_order;
1171 basic_block bb;
1172 bitmap visited;
1173 unsigned int index = 0;
1174 unsigned int visited_count = 0;
1176 gcc_assert (loop->num_nodes);
1177 gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun));
1179 blocks = XCNEWVEC (basic_block, loop->num_nodes);
1180 visited = BITMAP_ALLOC (NULL);
1182 blocks_in_bfs_order = get_loop_body_in_bfs_order (loop);
1184 index = 0;
1185 while (index < loop->num_nodes)
1187 bb = blocks_in_bfs_order [index];
1189 if (bb->flags & BB_IRREDUCIBLE_LOOP)
1191 free (blocks_in_bfs_order);
1192 BITMAP_FREE (visited);
1193 free (blocks);
1194 return NULL;
1197 if (!bitmap_bit_p (visited, bb->index))
1199 if (pred_blocks_visited_p (bb, &visited)
1200 || bb == loop->header)
1202 /* This block is now visited. */
1203 bitmap_set_bit (visited, bb->index);
1204 blocks[visited_count++] = bb;
1208 index++;
1210 if (index == loop->num_nodes
1211 && visited_count != loop->num_nodes)
1212 /* Not done yet. */
1213 index = 0;
1215 free (blocks_in_bfs_order);
1216 BITMAP_FREE (visited);
1217 return blocks;
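/* As an illustration (hypothetical CFG): for a diamond inside a loop

     header -> then, else
     then   -> join
     else   -> join
     join   -> latch

   the returned order is header, then, else, join, latch: join is only
   selected once both of its predecessors have already been selected.  */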
1220 /* Returns true when the analysis of the predicates for all the basic
1221 blocks in LOOP succeeded.
1223 predicate_bbs first allocates the predicates of the basic blocks.
1224 These fields are then initialized with the tree expressions
1225 representing the predicates under which a basic block is executed
1226 in the LOOP. As the loop->header is executed at each iteration, it
1227 has the "true" predicate. Other statements executed under a
1228 condition are predicated with that condition, for example
1230 | if (x)
1231 | S1;
1232 | else
1233 | S2;
1235 S1 will be predicated with "x", and
1236 S2 will be predicated with "!x". */
1238 static void
1239 predicate_bbs (loop_p loop)
1241 unsigned int i;
1243 for (i = 0; i < loop->num_nodes; i++)
1244 init_bb_predicate (ifc_bbs[i]);
1246 for (i = 0; i < loop->num_nodes; i++)
1248 basic_block bb = ifc_bbs[i];
1249 tree cond;
1250 gimple *stmt;
1252 /* The loop latch and loop exit block are always executed and
1253 have no extra conditions to be processed: skip them. */
1254 if (bb == loop->latch
1255 || bb_with_exit_edge_p (loop, bb))
1257 reset_bb_predicate (bb);
1258 continue;
1261 cond = bb_predicate (bb);
1262 stmt = last_stmt (bb);
1263 if (stmt && gimple_code (stmt) == GIMPLE_COND)
1265 tree c2;
1266 edge true_edge, false_edge;
1267 location_t loc = gimple_location (stmt);
1268 tree c = build2_loc (loc, gimple_cond_code (stmt),
1269 boolean_type_node,
1270 gimple_cond_lhs (stmt),
1271 gimple_cond_rhs (stmt));
1273 /* Add new condition into destination's predicate list. */
1274 extract_true_false_edges_from_block (gimple_bb (stmt),
1275 &true_edge, &false_edge);
1277 /* If C is true, then TRUE_EDGE is taken. */
1278 add_to_dst_predicate_list (loop, true_edge, unshare_expr (cond),
1279 unshare_expr (c));
1281 /* If C is false, then FALSE_EDGE is taken. */
1282 c2 = build1_loc (loc, TRUTH_NOT_EXPR, boolean_type_node,
1283 unshare_expr (c));
1284 add_to_dst_predicate_list (loop, false_edge,
1285 unshare_expr (cond), c2);
1287 cond = NULL_TREE;
1290 /* If current bb has only one successor, then consider it as an
1291 unconditional goto. */
1292 if (single_succ_p (bb))
1294 basic_block bb_n = single_succ (bb);
1296 /* The successor bb inherits the predicate of its
1297 predecessor. If there is no predicate in the predecessor
1298 bb, then consider the successor bb as always executed. */
1299 if (cond == NULL_TREE)
1300 cond = boolean_true_node;
1302 add_to_predicate_list (loop, bb_n, cond);
1306 /* The loop header is always executed. */
1307 reset_bb_predicate (loop->header);
1308 gcc_assert (bb_predicate_gimplified_stmts (loop->header) == NULL
1309 && bb_predicate_gimplified_stmts (loop->latch) == NULL);
1312 /* Build region by adding loop pre-header and post-header blocks. */
1314 static vec<basic_block>
1315 build_region (struct loop *loop)
1317 vec<basic_block> region = vNULL;
1318 basic_block exit_bb = NULL;
1320 gcc_assert (ifc_bbs);
1321 /* The first element is loop pre-header. */
1322 region.safe_push (loop_preheader_edge (loop)->src);
1324 for (unsigned int i = 0; i < loop->num_nodes; i++)
1326 basic_block bb = ifc_bbs[i];
1327 region.safe_push (bb);
1328 /* Find loop postheader. */
1329 edge e;
1330 edge_iterator ei;
1331 FOR_EACH_EDGE (e, ei, bb->succs)
1332 if (loop_exit_edge_p (loop, e))
1334 exit_bb = e->dest;
1335 break;
1338 /* The last element is loop post-header. */
1339 gcc_assert (exit_bb);
1340 region.safe_push (exit_bb);
1341 return region;
1344 /* Return true when LOOP is if-convertible. This is a helper function
1345 for if_convertible_loop_p. REFS and DDRS are initialized and freed
1346 in if_convertible_loop_p. */
1348 static bool
1349 if_convertible_loop_p_1 (struct loop *loop, vec<data_reference_p> *refs)
1351 unsigned int i;
1352 basic_block exit_bb = NULL;
1353 vec<basic_block> region;
1355 if (find_data_references_in_loop (loop, refs) == chrec_dont_know)
1356 return false;
1358 calculate_dominance_info (CDI_DOMINATORS);
1360 /* Allow statements that can be handled during if-conversion. */
1361 ifc_bbs = get_loop_body_in_if_conv_order (loop);
1362 if (!ifc_bbs)
1364 if (dump_file && (dump_flags & TDF_DETAILS))
1365 fprintf (dump_file, "Irreducible loop\n");
1366 return false;
1369 for (i = 0; i < loop->num_nodes; i++)
1371 basic_block bb = ifc_bbs[i];
1373 if (!if_convertible_bb_p (loop, bb, exit_bb))
1374 return false;
1376 if (bb_with_exit_edge_p (loop, bb))
1377 exit_bb = bb;
1380 for (i = 0; i < loop->num_nodes; i++)
1382 basic_block bb = ifc_bbs[i];
1383 gimple_stmt_iterator gsi;
1385 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1386 switch (gimple_code (gsi_stmt (gsi)))
1388 case GIMPLE_LABEL:
1389 case GIMPLE_ASSIGN:
1390 case GIMPLE_CALL:
1391 case GIMPLE_DEBUG:
1392 case GIMPLE_COND:
1393 gimple_set_uid (gsi_stmt (gsi), 0);
1394 break;
1395 default:
1396 return false;
1400 data_reference_p dr;
1402 innermost_DR_map
1403 = new hash_map<innermost_loop_behavior_hash, data_reference_p>;
1404 baseref_DR_map = new hash_map<tree_operand_hash, data_reference_p>;
1406 /* Compute post-dominator tree locally. */
1407 region = build_region (loop);
1408 calculate_dominance_info_for_region (CDI_POST_DOMINATORS, region);
1410 predicate_bbs (loop);
1412 /* Free post-dominator tree since it is not used after predication. */
1413 free_dominance_info_for_region (cfun, CDI_POST_DOMINATORS, region);
1414 region.release ();
1416 for (i = 0; refs->iterate (i, &dr); i++)
1418 tree ref = DR_REF (dr);
1420 dr->aux = XNEW (struct ifc_dr);
1421 DR_BASE_W_UNCONDITIONALLY (dr) = false;
1422 DR_RW_UNCONDITIONALLY (dr) = false;
1423 DR_W_UNCONDITIONALLY (dr) = false;
1424 IFC_DR (dr)->rw_predicate = boolean_false_node;
1425 IFC_DR (dr)->w_predicate = boolean_false_node;
1426 IFC_DR (dr)->base_w_predicate = boolean_false_node;
1427 if (gimple_uid (DR_STMT (dr)) == 0)
1428 gimple_set_uid (DR_STMT (dr), i + 1);
1430 /* If DR doesn't have innermost loop behavior or it's a compound
1431 memory reference, we synthesize its innermost loop behavior
1432 for hashing. */
1433 if (TREE_CODE (ref) == COMPONENT_REF
1434 || TREE_CODE (ref) == IMAGPART_EXPR
1435 || TREE_CODE (ref) == REALPART_EXPR
1436 || !(DR_BASE_ADDRESS (dr) || DR_OFFSET (dr)
1437 || DR_INIT (dr) || DR_STEP (dr)))
1439 while (TREE_CODE (ref) == COMPONENT_REF
1440 || TREE_CODE (ref) == IMAGPART_EXPR
1441 || TREE_CODE (ref) == REALPART_EXPR)
1442 ref = TREE_OPERAND (ref, 0);
1444 DR_BASE_ADDRESS (dr) = ref;
1445 DR_OFFSET (dr) = NULL;
1446 DR_INIT (dr) = NULL;
1447 DR_STEP (dr) = NULL;
1448 DR_ALIGNED_TO (dr) = NULL;
1450 hash_memrefs_baserefs_and_store_DRs_read_written_info (dr);
1453 for (i = 0; i < loop->num_nodes; i++)
1455 basic_block bb = ifc_bbs[i];
1456 gimple_stmt_iterator itr;
1458 /* Check the if-convertibility of statements in predicated BBs. */
1459 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
1460 for (itr = gsi_start_bb (bb); !gsi_end_p (itr); gsi_next (&itr))
1461 if (!if_convertible_stmt_p (gsi_stmt (itr), *refs))
1462 return false;
1465 /* Checking PHIs needs to be done after stmts, as whether there are
1466 any masked loads or stores affects the tests. */
1467 for (i = 0; i < loop->num_nodes; i++)
1469 basic_block bb = ifc_bbs[i];
1470 gphi_iterator itr;
1472 for (itr = gsi_start_phis (bb); !gsi_end_p (itr); gsi_next (&itr))
1473 if (!if_convertible_phi_p (loop, bb, itr.phi ()))
1474 return false;
1477 if (dump_file)
1478 fprintf (dump_file, "Applying if-conversion\n");
1480 return true;
1483 /* Return true when LOOP is if-convertible.
1484 LOOP is if-convertible if:
1485 - it is innermost,
1486 - it has two or more basic blocks,
1487 - it has only one exit,
1488 - none of the loop header's successor edges is an exit edge,
1489 - its basic blocks and phi nodes are if-convertible. */
1491 static bool
1492 if_convertible_loop_p (struct loop *loop)
1494 edge e;
1495 edge_iterator ei;
1496 bool res = false;
1497 vec<data_reference_p> refs;
1499 /* Handle only innermost loop. */
1500 if (!loop || loop->inner)
1502 if (dump_file && (dump_flags & TDF_DETAILS))
1503 fprintf (dump_file, "not innermost loop\n");
1504 return false;
1507 /* If only one block, no need for if-conversion. */
1508 if (loop->num_nodes <= 2)
1510 if (dump_file && (dump_flags & TDF_DETAILS))
1511 fprintf (dump_file, "less than 2 basic blocks\n");
1512 return false;
1515 /* More than one loop exit is too much to handle. */
1516 if (!single_exit (loop))
1518 if (dump_file && (dump_flags & TDF_DETAILS))
1519 fprintf (dump_file, "multiple exits\n");
1520 return false;
1523 /* If one of the loop header's edges is an exit edge then do not
1524 apply if-conversion. */
1525 FOR_EACH_EDGE (e, ei, loop->header->succs)
1526 if (loop_exit_edge_p (loop, e))
1527 return false;
1529 refs.create (5);
1530 res = if_convertible_loop_p_1 (loop, &refs);
1532 data_reference_p dr;
1533 unsigned int i;
1534 for (i = 0; refs.iterate (i, &dr); i++)
1535 free (dr->aux);
1537 free_data_refs (refs);
1539 delete innermost_DR_map;
1540 innermost_DR_map = NULL;
1542 delete baseref_DR_map;
1543 baseref_DR_map = NULL;
1545 return res;
1548 /* Returns true if the def-stmt for a phi argument is a simple increment
1549 or decrement which is in a predicated basic block.
1550 In fact, the following PHI pattern is searched for:
1551 loop-header:
1552 reduc_1 = PHI <..., reduc_2>
1554 if (...)
1555 reduc_3 = ...
1556 reduc_2 = PHI <reduc_1, reduc_3>
1558 ARG_0 and ARG_1 are the corresponding PHI arguments.
1559 REDUC, OP0 and OP1 contain the reduction stmt and its operands.
1560 EXTENDED is true if PHI has > 2 arguments. */
1562 static bool
1563 is_cond_scalar_reduction (gimple *phi, gimple **reduc, tree arg_0, tree arg_1,
1564 tree *op0, tree *op1, bool extended)
1566 tree lhs, r_op1, r_op2;
1567 gimple *stmt;
1568 gimple *header_phi = NULL;
1569 enum tree_code reduction_op;
1570 basic_block bb = gimple_bb (phi);
1571 struct loop *loop = bb->loop_father;
1572 edge latch_e = loop_latch_edge (loop);
1573 imm_use_iterator imm_iter;
1574 use_operand_p use_p;
1575 edge e;
1576 edge_iterator ei;
1577 bool result = false;
1578 if (TREE_CODE (arg_0) != SSA_NAME || TREE_CODE (arg_1) != SSA_NAME)
1579 return false;
1581 if (!extended && gimple_code (SSA_NAME_DEF_STMT (arg_0)) == GIMPLE_PHI)
1583 lhs = arg_1;
1584 header_phi = SSA_NAME_DEF_STMT (arg_0);
1585 stmt = SSA_NAME_DEF_STMT (arg_1);
1587 else if (gimple_code (SSA_NAME_DEF_STMT (arg_1)) == GIMPLE_PHI)
1589 lhs = arg_0;
1590 header_phi = SSA_NAME_DEF_STMT (arg_1);
1591 stmt = SSA_NAME_DEF_STMT (arg_0);
1593 else
1594 return false;
1595 if (gimple_bb (header_phi) != loop->header)
1596 return false;
1598 if (PHI_ARG_DEF_FROM_EDGE (header_phi, latch_e) != PHI_RESULT (phi))
1599 return false;
1601 if (gimple_code (stmt) != GIMPLE_ASSIGN
1602 || gimple_has_volatile_ops (stmt))
1603 return false;
1605 if (!flow_bb_inside_loop_p (loop, gimple_bb (stmt)))
1606 return false;
1608 if (!is_predicated (gimple_bb (stmt)))
1609 return false;
1611 /* Check that stmt-block is predecessor of phi-block. */
1612 FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
1613 if (e->dest == bb)
1615 result = true;
1616 break;
1618 if (!result)
1619 return false;
1621 if (!has_single_use (lhs))
1622 return false;
1624 reduction_op = gimple_assign_rhs_code (stmt);
1625 if (reduction_op != PLUS_EXPR && reduction_op != MINUS_EXPR)
1626 return false;
1627 r_op1 = gimple_assign_rhs1 (stmt);
1628 r_op2 = gimple_assign_rhs2 (stmt);
1630 /* Make R_OP1 hold the reduction variable. */
1631 if (r_op2 == PHI_RESULT (header_phi)
1632 && reduction_op == PLUS_EXPR)
1633 std::swap (r_op1, r_op2);
1634 else if (r_op1 != PHI_RESULT (header_phi))
1635 return false;
1637 /* Check that R_OP1 is used in reduction stmt or in PHI only. */
1638 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, r_op1)
1640 gimple *use_stmt = USE_STMT (use_p);
1641 if (is_gimple_debug (use_stmt))
1642 continue;
1643 if (use_stmt == stmt)
1644 continue;
1645 if (gimple_code (use_stmt) != GIMPLE_PHI)
1646 return false;
1649 *op0 = r_op1; *op1 = r_op2;
1650 *reduc = stmt;
1651 return true;
1654 /* Converts conditional scalar reduction into unconditional form, e.g.
1655 bb_4
1656 if (_5 != 0) goto bb_5 else goto bb_6
1657 end_bb_4
1658 bb_5
1659 res_6 = res_13 + 1;
1660 end_bb_5
1661 bb_6
1662 # res_2 = PHI <res_13(4), res_6(5)>
1663 end_bb_6
1665 will be converted into sequence
1666 _ifc__1 = _5 != 0 ? 1 : 0;
1667 res_2 = res_13 + _ifc__1;
1668 Argument SWAP tells whether the arguments of the conditional expression
1669 should be swapped.
1670 Returns the rhs of the resulting PHI assignment. */
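/* As an illustration (hypothetical source): a conditional reduction like

     for (int i = 0; i < n; i++)
       if (p[i])
         res += 1;

   becomes, after this conversion,

     _ifc__1 = p[i] != 0 ? 1 : 0;
     res = res + _ifc__1;

   i.e. the addition itself is executed unconditionally on every
   iteration.  */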
1672 static tree
1673 convert_scalar_cond_reduction (gimple *reduc, gimple_stmt_iterator *gsi,
1674 tree cond, tree op0, tree op1, bool swap)
1676 gimple_stmt_iterator stmt_it;
1677 gimple *new_assign;
1678 tree rhs;
1679 tree rhs1 = gimple_assign_rhs1 (reduc);
1680 tree tmp = make_temp_ssa_name (TREE_TYPE (rhs1), NULL, "_ifc_");
1681 tree c;
1682 tree zero = build_zero_cst (TREE_TYPE (rhs1));
1684 if (dump_file && (dump_flags & TDF_DETAILS))
1686 fprintf (dump_file, "Found cond scalar reduction.\n");
1687 print_gimple_stmt (dump_file, reduc, 0, TDF_SLIM);
1690 /* Build cond expression using COND and constant operand
1691 of reduction rhs. */
1692 c = fold_build_cond_expr (TREE_TYPE (rhs1),
1693 unshare_expr (cond),
1694 swap ? zero : op1,
1695 swap ? op1 : zero);
1697 /* Create assignment stmt and insert it at GSI. */
1698 new_assign = gimple_build_assign (tmp, c);
1699 gsi_insert_before (gsi, new_assign, GSI_SAME_STMT);
1700 /* Build rhs for unconditional increment/decrement. */
1701 rhs = fold_build2 (gimple_assign_rhs_code (reduc),
1702 TREE_TYPE (rhs1), op0, tmp);
1704 /* Delete original reduction stmt. */
1705 stmt_it = gsi_for_stmt (reduc);
1706 gsi_remove (&stmt_it, true);
1707 release_defs (reduc);
1708 return rhs;
1711 /* Produce condition for all occurrences of ARG in PHI node. */
1713 static tree
1714 gen_phi_arg_condition (gphi *phi, vec<int> *occur,
1715 gimple_stmt_iterator *gsi)
1717 int len;
1718 int i;
1719 tree cond = NULL_TREE;
1720 tree c;
1721 edge e;
1723 len = occur->length ();
1724 gcc_assert (len > 0);
1725 for (i = 0; i < len; i++)
1727 e = gimple_phi_arg_edge (phi, (*occur)[i]);
1728 c = bb_predicate (e->src);
1729 if (is_true_predicate (c))
1731 cond = c;
1732 break;
1734 c = force_gimple_operand_gsi_1 (gsi, unshare_expr (c),
1735 is_gimple_condexpr, NULL_TREE,
1736 true, GSI_SAME_STMT);
1737 if (cond != NULL_TREE)
1739 /* Must build OR expression. */
1740 cond = fold_or_predicates (EXPR_LOCATION (c), c, cond);
1741 cond = force_gimple_operand_gsi_1 (gsi, unshare_expr (cond),
1742 is_gimple_condexpr, NULL_TREE,
1743 true, GSI_SAME_STMT);
1745 else
1746 cond = c;
1748 gcc_assert (cond != NULL_TREE);
1749 return cond;
1752 /* Replace a scalar PHI node with a COND_EXPR using COND as condition.
1753 This routine can handle PHI nodes with more than two arguments.
1755 For example,
1756 S1: A = PHI <x1(1), x2(5)>
1757 is converted into,
1758 S2: A = cond ? x1 : x2;
1760 The generated code is inserted at GSI that points to the top of
1761 basic block's statement list.
1762 If the PHI node has more than two arguments, a chain of conditional
1763 expressions is produced. */
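/* As an illustration (hypothetical arguments): for a PHI with three
   distinct argument values

     A = PHI <x0(e0), x1(e1), x2(e2)>

   the code below emits roughly

     _ifc_0 = cond_x0 ? x0 : x1;
     _ifc_1 = cond_x1 ? x1 : _ifc_0;
     A = cond_x2 ? x2 : _ifc_1;

   where cond_xN is the ORed predicate of the edges that carry xN.  */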
1766 static void
1767 predicate_scalar_phi (gphi *phi, gimple_stmt_iterator *gsi)
1769 gimple *new_stmt = NULL, *reduc;
1770 tree rhs, res, arg0, arg1, op0, op1, scev;
1771 tree cond;
1772 unsigned int index0;
1773 unsigned int max, args_len;
1774 edge e;
1775 basic_block bb;
1776 unsigned int i;
1778 res = gimple_phi_result (phi);
1779 if (virtual_operand_p (res))
1780 return;
1782 if ((rhs = degenerate_phi_result (phi))
1783 || ((scev = analyze_scalar_evolution (gimple_bb (phi)->loop_father,
1784 res))
1785 && !chrec_contains_undetermined (scev)
1786 && scev != res
1787 && (rhs = gimple_phi_arg_def (phi, 0))))
1789 if (dump_file && (dump_flags & TDF_DETAILS))
1791 fprintf (dump_file, "Degenerate phi!\n");
1792 print_gimple_stmt (dump_file, phi, 0, TDF_SLIM);
1794 new_stmt = gimple_build_assign (res, rhs);
1795 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
1796 update_stmt (new_stmt);
1797 return;
1800 bb = gimple_bb (phi);
1801 if (EDGE_COUNT (bb->preds) == 2)
1803 /* Predicate ordinary PHI node with 2 arguments. */
1804 edge first_edge, second_edge;
1805 basic_block true_bb;
1806 first_edge = EDGE_PRED (bb, 0);
1807 second_edge = EDGE_PRED (bb, 1);
1808 cond = bb_predicate (first_edge->src);
1809 if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
1810 std::swap (first_edge, second_edge);
1811 if (EDGE_COUNT (first_edge->src->succs) > 1)
1813 cond = bb_predicate (second_edge->src);
1814 if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
1815 cond = TREE_OPERAND (cond, 0);
1816 else
1817 first_edge = second_edge;
1819 else
1820 cond = bb_predicate (first_edge->src);
1821 /* Gimplify the condition to a valid cond-expr conditional operand. */
1822 cond = force_gimple_operand_gsi_1 (gsi, unshare_expr (cond),
1823 is_gimple_condexpr, NULL_TREE,
1824 true, GSI_SAME_STMT);
1825 true_bb = first_edge->src;
1826 if (EDGE_PRED (bb, 1)->src == true_bb)
1828 arg0 = gimple_phi_arg_def (phi, 1);
1829 arg1 = gimple_phi_arg_def (phi, 0);
1831 else
1833 arg0 = gimple_phi_arg_def (phi, 0);
1834 arg1 = gimple_phi_arg_def (phi, 1);
1836 if (is_cond_scalar_reduction (phi, &reduc, arg0, arg1,
1837 &op0, &op1, false))
1838 /* Convert reduction stmt into vectorizable form. */
1839 rhs = convert_scalar_cond_reduction (reduc, gsi, cond, op0, op1,
1840 true_bb != gimple_bb (reduc));
1841 else
1842 /* Build new RHS using selected condition and arguments. */
1843 rhs = fold_build_cond_expr (TREE_TYPE (res), unshare_expr (cond),
1844 arg0, arg1);
1845 new_stmt = gimple_build_assign (res, rhs);
1846 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
1847 update_stmt (new_stmt);
1849 if (dump_file && (dump_flags & TDF_DETAILS))
1851 fprintf (dump_file, "new phi replacement stmt\n");
1852 print_gimple_stmt (dump_file, new_stmt, 0, TDF_SLIM);
1854 return;
1857 /* Create a hash map for the PHI node which maps each argument value to
1858 the vector of argument indexes having that value. */
1859 bool swap = false;
1860 hash_map<tree_operand_hash, auto_vec<int> > phi_arg_map;
1861 unsigned int num_args = gimple_phi_num_args (phi);
1862 int max_ind = -1;
1863 /* Vector of different PHI argument values. */
1864 auto_vec<tree> args (num_args);
1866 /* Compute phi_arg_map. */
1867 for (i = 0; i < num_args; i++)
1869 tree arg;
1871 arg = gimple_phi_arg_def (phi, i);
1872 if (!phi_arg_map.get (arg))
1873 args.quick_push (arg);
1874 phi_arg_map.get_or_insert (arg).safe_push (i);
1877 /* Determine element with max number of occurrences. */
1878 max_ind = -1;
1879 max = 1;
1880 args_len = args.length ();
1881 for (i = 0; i < args_len; i++)
1883 unsigned int len;
1884 if ((len = phi_arg_map.get (args[i])->length ()) > max)
1886 max_ind = (int) i;
1887 max = len;
1891 /* Put the element with the max number of occurrences at the end of ARGS. */
1892 if (max_ind != -1 && max_ind +1 != (int) args_len)
1893 std::swap (args[args_len - 1], args[max_ind]);
1895 /* Handle one special case when the number of arguments with different
1896 values is equal to 2 and one argument has the only occurrence. Such a
1897 PHI can be handled as if it had only 2 arguments. */
1898 if (args_len == 2 && phi_arg_map.get (args[0])->length () == 1)
1900 vec<int> *indexes;
1901 indexes = phi_arg_map.get (args[0]);
1902 index0 = (*indexes)[0];
1903 arg0 = args[0];
1904 arg1 = args[1];
1905 e = gimple_phi_arg_edge (phi, index0);
1906 cond = bb_predicate (e->src);
1907 if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
1909 swap = true;
1910 cond = TREE_OPERAND (cond, 0);
1912 /* Gimplify the condition to a valid cond-expr conditional operand. */
1913 cond = force_gimple_operand_gsi_1 (gsi, unshare_expr (cond),
1914 is_gimple_condexpr, NULL_TREE,
1915 true, GSI_SAME_STMT);
1916 if (!(is_cond_scalar_reduction (phi, &reduc, arg0 , arg1,
1917 &op0, &op1, true)))
1918 rhs = fold_build_cond_expr (TREE_TYPE (res), unshare_expr (cond),
1919 swap? arg1 : arg0,
1920 swap? arg0 : arg1);
1921 else
1922 /* Convert reduction stmt into vectorizable form. */
1923 rhs = convert_scalar_cond_reduction (reduc, gsi, cond, op0, op1,
1924 swap);
1925 new_stmt = gimple_build_assign (res, rhs);
1926 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
1927 update_stmt (new_stmt);
1929 else
1931 /* Common case. */
1932 vec<int> *indexes;
1933 tree type = TREE_TYPE (gimple_phi_result (phi));
1934 tree lhs;
1935 arg1 = args[1];
1936 for (i = 0; i < args_len; i++)
1938 arg0 = args[i];
1939 indexes = phi_arg_map.get (args[i]);
1940 if (i != args_len - 1)
1941 lhs = make_temp_ssa_name (type, NULL, "_ifc_");
1942 else
1943 lhs = res;
1944 cond = gen_phi_arg_condition (phi, indexes, gsi);
1945 rhs = fold_build_cond_expr (type, unshare_expr (cond),
1946 arg0, arg1);
1947 new_stmt = gimple_build_assign (lhs, rhs);
1948 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
1949 update_stmt (new_stmt);
1950 arg1 = lhs;
1954 if (dump_file && (dump_flags & TDF_DETAILS))
1956 fprintf (dump_file, "new extended phi replacement stmt\n");
1957 print_gimple_stmt (dump_file, new_stmt, 0, TDF_SLIM);
1961 /* Replaces in LOOP all the scalar phi nodes other than those in the
1962 LOOP->header block with conditional modify expressions. */
1964 static void
1965 predicate_all_scalar_phis (struct loop *loop)
1967 basic_block bb;
1968 unsigned int orig_loop_num_nodes = loop->num_nodes;
1969 unsigned int i;
1971 for (i = 1; i < orig_loop_num_nodes; i++)
1973 gphi *phi;
1974 gimple_stmt_iterator gsi;
1975 gphi_iterator phi_gsi;
1976 bb = ifc_bbs[i];
1978 if (bb == loop->header)
1979 continue;
1981 phi_gsi = gsi_start_phis (bb);
1982 if (gsi_end_p (phi_gsi))
1983 continue;
1985 gsi = gsi_after_labels (bb);
1986 while (!gsi_end_p (phi_gsi))
1988 phi = phi_gsi.phi ();
1989 if (virtual_operand_p (gimple_phi_result (phi)))
1990 gsi_next (&phi_gsi);
1991 else
1993 predicate_scalar_phi (phi, &gsi);
1994 remove_phi_node (&phi_gsi, false);
2000 /* Insert in each basic block of LOOP the statements produced by the
2001 gimplification of the predicates. */
2003 static void
2004 insert_gimplified_predicates (loop_p loop)
2006 unsigned int i;
2008 for (i = 0; i < loop->num_nodes; i++)
2010 basic_block bb = ifc_bbs[i];
2011 gimple_seq stmts;
2012 if (!is_predicated (bb))
2013 gcc_assert (bb_predicate_gimplified_stmts (bb) == NULL);
2014 if (!is_predicated (bb))
2016 /* Do not insert statements for a basic block that is not
2017 predicated. Also make sure that the predicate of the
2018 basic block is set to true. */
2019 reset_bb_predicate (bb);
2020 continue;
2023 stmts = bb_predicate_gimplified_stmts (bb);
2024 if (stmts)
2026 if (any_pred_load_store)
2028 /* Insert the predicate of the BB just after the label,
2029 as the if-conversion of memory writes will use this
2030 predicate. */
2031 gimple_stmt_iterator gsi = gsi_after_labels (bb);
2032 gsi_insert_seq_before (&gsi, stmts, GSI_SAME_STMT);
2034 else
2036 /* Insert the predicate of the BB at the end of the BB
2037 as this would reduce the register pressure: the only
2038 use of this predicate will be in successor BBs. */
2039 gimple_stmt_iterator gsi = gsi_last_bb (bb);
2041 if (gsi_end_p (gsi)
2042 || stmt_ends_bb_p (gsi_stmt (gsi)))
2043 gsi_insert_seq_before (&gsi, stmts, GSI_SAME_STMT);
2044 else
2045 gsi_insert_seq_after (&gsi, stmts, GSI_SAME_STMT);
2048 /* Once the sequence is code generated, set it to NULL. */
2049 set_bb_predicate_gimplified_stmts (bb, NULL);
2054 /* Helper function for predicate_mem_writes. Returns the index of an
2055 existing mask if one was already created for the given SIZE, and -1 otherwise. */
2057 static int
2058 mask_exists (int size, vec<int> vec)
2060 unsigned int ix;
2061 int v;
2062 FOR_EACH_VEC_ELT (vec, ix, v)
2063 if (v == size)
2064 return (int) ix;
2065 return -1;
2068 /* Predicate each write to memory in LOOP.
2070 This function transforms control flow constructs containing memory
2071 writes of the form:
2073 | for (i = 0; i < N; i++)
2074 | if (cond)
2075 | A[i] = expr;
2077 into the following form that does not contain control flow:
2079 | for (i = 0; i < N; i++)
2080 | A[i] = cond ? expr : A[i];
2082 The original CFG looks like this:
2084 | bb_0
2085 | i = 0
2086 | end_bb_0
2088 | bb_1
2089 | if (i < N) goto bb_5 else goto bb_2
2090 | end_bb_1
2092 | bb_2
2093 | cond = some_computation;
2094 | if (cond) goto bb_3 else goto bb_4
2095 | end_bb_2
2097 | bb_3
2098 | A[i] = expr;
2099 | goto bb_4
2100 | end_bb_3
2102 | bb_4
2103 | goto bb_1
2104 | end_bb_4
2106 insert_gimplified_predicates inserts the computation of the COND
2107 expression at the beginning of the destination basic block:
2109 | bb_0
2110 | i = 0
2111 | end_bb_0
2113 | bb_1
2114 | if (i < N) goto bb_5 else goto bb_2
2115 | end_bb_1
2117 | bb_2
2118 | cond = some_computation;
2119 | if (cond) goto bb_3 else goto bb_4
2120 | end_bb_2
2122 | bb_3
2123 | cond = some_computation;
2124 | A[i] = expr;
2125 | goto bb_4
2126 | end_bb_3
2128 | bb_4
2129 | goto bb_1
2130 | end_bb_4
2132 predicate_mem_writes then predicates the memory write as follows:
2134 | bb_0
2135 | i = 0
2136 | end_bb_0
2138 | bb_1
2139 | if (i < N) goto bb_5 else goto bb_2
2140 | end_bb_1
2142 | bb_2
2143 | if (cond) goto bb_3 else goto bb_4
2144 | end_bb_2
2146 | bb_3
2147 | cond = some_computation;
2148 | A[i] = cond ? expr : A[i];
2149 | goto bb_4
2150 | end_bb_3
2152 | bb_4
2153 | goto bb_1
2154 | end_bb_4
2156 and finally combine_blocks removes the basic block boundaries making
2157 the loop vectorizable:
2159 | bb_0
2160 | i = 0
2161 | if (i < N) goto bb_5 else goto bb_1
2162 | end_bb_0
2164 | bb_1
2165 | cond = some_computation;
2166 | A[i] = cond ? expr : A[i];
2167 | if (i < N) goto bb_5 else goto bb_4
2168 | end_bb_1
2170 | bb_4
2171 | goto bb_1
2172 | end_bb_4
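/* When the target supports masked loads and stores, the predicated access is
   instead emitted as an internal-function call; as a rough sketch (operand
   spelling and SSA names vary),
     .MASK_STORE (&A[i], <alignment>, _mask, expr);
     x_1 = .MASK_LOAD (&A[i], <alignment>, _mask);
   where _mask is built from the block's predicate. This is what the GF_PLF_2
   branch of the function below generates; the read-modify-write form shown
   above is the fallback. */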
2175 static void
2176 predicate_mem_writes (loop_p loop)
2178 unsigned int i, orig_loop_num_nodes = loop->num_nodes;
2179 auto_vec<int, 1> vect_sizes;
2180 auto_vec<tree, 1> vect_masks;
2182 for (i = 1; i < orig_loop_num_nodes; i++)
2184 gimple_stmt_iterator gsi;
2185 basic_block bb = ifc_bbs[i];
2186 tree cond = bb_predicate (bb);
2187 bool swap;
2188 gimple *stmt;
2189 int index;
2191 if (is_true_predicate (cond) || is_false_predicate (cond))
2192 continue;
2194 swap = false;
2195 if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
2197 swap = true;
2198 cond = TREE_OPERAND (cond, 0);
2201 vect_sizes.truncate (0);
2202 vect_masks.truncate (0);
2204 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2205 if (!gimple_assign_single_p (stmt = gsi_stmt (gsi)))
2206 continue;
2207 else if (gimple_plf (stmt, GF_PLF_2))
2209 tree lhs = gimple_assign_lhs (stmt);
2210 tree rhs = gimple_assign_rhs1 (stmt);
2211 tree ref, addr, ptr, mask;
2212 gimple *new_stmt;
2213 gimple_seq stmts = NULL;
2214 int bitsize = GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (lhs)));
2215 ref = TREE_CODE (lhs) == SSA_NAME ? rhs : lhs;
2216 mark_addressable (ref);
2217 addr = force_gimple_operand_gsi (&gsi, build_fold_addr_expr (ref),
2218 true, NULL_TREE, true,
2219 GSI_SAME_STMT);
2220 if (!vect_sizes.is_empty ()
2221 && (index = mask_exists (bitsize, vect_sizes)) != -1)
2222 /* Use created mask. */
2223 mask = vect_masks[index];
2224 else
2226 if (COMPARISON_CLASS_P (cond))
2227 mask = gimple_build (&stmts, TREE_CODE (cond),
2228 boolean_type_node,
2229 TREE_OPERAND (cond, 0),
2230 TREE_OPERAND (cond, 1));
2231 else
2233 gcc_assert (TREE_CODE (cond) == SSA_NAME);
2234 mask = cond;
2237 if (swap)
2239 tree true_val
2240 = constant_boolean_node (true, TREE_TYPE (mask));
2241 mask = gimple_build (&stmts, BIT_XOR_EXPR,
2242 TREE_TYPE (mask), mask, true_val);
2244 gsi_insert_seq_before (&gsi, stmts, GSI_SAME_STMT);
2246 mask = ifc_temp_var (TREE_TYPE (mask), mask, &gsi);
2247 /* Save mask and its size for further use. */
2248 vect_sizes.safe_push (bitsize);
2249 vect_masks.safe_push (mask);
2251 ptr = build_int_cst (reference_alias_ptr_type (ref),
2252 get_object_alignment (ref));
2253 /* Copy points-to info if possible. */
2254 if (TREE_CODE (addr) == SSA_NAME && !SSA_NAME_PTR_INFO (addr))
2255 copy_ref_info (build2 (MEM_REF, TREE_TYPE (ref), addr, ptr),
2256 ref);
2257 if (TREE_CODE (lhs) == SSA_NAME)
2259 new_stmt
2260 = gimple_build_call_internal (IFN_MASK_LOAD, 3, addr,
2261 ptr, mask);
2262 gimple_call_set_lhs (new_stmt, lhs);
2263 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
2265 else
2267 new_stmt
2268 = gimple_build_call_internal (IFN_MASK_STORE, 4, addr, ptr,
2269 mask, rhs);
2270 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
2271 gimple_set_vdef (new_stmt, gimple_vdef (stmt));
2272 SSA_NAME_DEF_STMT (gimple_vdef (new_stmt)) = new_stmt;
2275 gsi_replace (&gsi, new_stmt, true);
2277 else if (gimple_vdef (stmt))
2279 tree lhs = gimple_assign_lhs (stmt);
2280 tree rhs = gimple_assign_rhs1 (stmt);
2281 tree type = TREE_TYPE (lhs);
2283 lhs = ifc_temp_var (type, unshare_expr (lhs), &gsi);
2284 rhs = ifc_temp_var (type, unshare_expr (rhs), &gsi);
2285 if (swap)
2286 std::swap (lhs, rhs);
2287 cond = force_gimple_operand_gsi_1 (&gsi, unshare_expr (cond),
2288 is_gimple_condexpr, NULL_TREE,
2289 true, GSI_SAME_STMT);
2290 rhs = fold_build_cond_expr (type, unshare_expr (cond), rhs, lhs);
2291 gimple_assign_set_rhs1 (stmt, ifc_temp_var (type, rhs, &gsi));
2292 update_stmt (stmt);
2297 /* Remove all GIMPLE_CONDs and GIMPLE_LABELs from all the basic blocks
2298 other than the exit and latch of LOOP. Also reset the
2299 GIMPLE_DEBUG information. */
2301 static void
2302 remove_conditions_and_labels (loop_p loop)
2304 gimple_stmt_iterator gsi;
2305 unsigned int i;
2307 for (i = 0; i < loop->num_nodes; i++)
2309 basic_block bb = ifc_bbs[i];
2311 if (bb_with_exit_edge_p (loop, bb)
2312 || bb == loop->latch)
2313 continue;
2315 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
2316 switch (gimple_code (gsi_stmt (gsi)))
2318 case GIMPLE_COND:
2319 case GIMPLE_LABEL:
2320 gsi_remove (&gsi, true);
2321 break;
2323 case GIMPLE_DEBUG:
2324 /* ??? Should there be conditional GIMPLE_DEBUG_BINDs? */
2325 if (gimple_debug_bind_p (gsi_stmt (gsi)))
2327 gimple_debug_bind_reset_value (gsi_stmt (gsi));
2328 update_stmt (gsi_stmt (gsi));
2330 gsi_next (&gsi);
2331 break;
2333 default:
2334 gsi_next (&gsi);
2339 /* Combine all the basic blocks from LOOP into one or two super basic
2340 blocks. Replace PHI nodes with conditional modify expressions. */
2342 static void
2343 combine_blocks (struct loop *loop)
2345 basic_block bb, exit_bb, merge_target_bb;
2346 unsigned int orig_loop_num_nodes = loop->num_nodes;
2347 unsigned int i;
2348 edge e;
2349 edge_iterator ei;
2351 remove_conditions_and_labels (loop);
2352 insert_gimplified_predicates (loop);
2353 predicate_all_scalar_phis (loop);
2355 if (any_pred_load_store)
2356 predicate_mem_writes (loop);
2358 /* Merge basic blocks: first remove all the edges in the loop,
2359 except for those from the exit block. */
2360 exit_bb = NULL;
2361 bool *predicated = XNEWVEC (bool, orig_loop_num_nodes);
2362 for (i = 0; i < orig_loop_num_nodes; i++)
2364 bb = ifc_bbs[i];
2365 predicated[i] = !is_true_predicate (bb_predicate (bb));
2366 free_bb_predicate (bb);
2367 if (bb_with_exit_edge_p (loop, bb))
2369 gcc_assert (exit_bb == NULL);
2370 exit_bb = bb;
2373 gcc_assert (exit_bb != loop->latch);
2375 for (i = 1; i < orig_loop_num_nodes; i++)
2377 bb = ifc_bbs[i];
2379 for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei));)
2381 if (e->src == exit_bb)
2382 ei_next (&ei);
2383 else
2384 remove_edge (e);
2388 if (exit_bb != NULL)
2390 if (exit_bb != loop->header)
2392 /* Connect this node to loop header. */
2393 make_edge (loop->header, exit_bb, EDGE_FALLTHRU);
2394 set_immediate_dominator (CDI_DOMINATORS, exit_bb, loop->header);
2397 /* Redirect non-exit edges to loop->latch. */
2398 FOR_EACH_EDGE (e, ei, exit_bb->succs)
2400 if (!loop_exit_edge_p (loop, e))
2401 redirect_edge_and_branch (e, loop->latch);
2403 set_immediate_dominator (CDI_DOMINATORS, loop->latch, exit_bb);
2405 else
2407 /* If the loop does not have an exit, reconnect header and latch. */
2408 make_edge (loop->header, loop->latch, EDGE_FALLTHRU);
2409 set_immediate_dominator (CDI_DOMINATORS, loop->latch, loop->header);
2412 merge_target_bb = loop->header;
2414 /* Get at the virtual def valid for uses starting at the first block
2415 we merge into the header. Without a virtual PHI the loop has the
2416 same virtual use on all stmts. */
2417 gphi *vphi = get_virtual_phi (loop->header);
2418 tree last_vdef = NULL_TREE;
2419 if (vphi)
2421 last_vdef = gimple_phi_result (vphi);
2422 for (gimple_stmt_iterator gsi = gsi_start_bb (loop->header);
2423 ! gsi_end_p (gsi); gsi_next (&gsi))
2424 if (gimple_vdef (gsi_stmt (gsi)))
2425 last_vdef = gimple_vdef (gsi_stmt (gsi));
2427 for (i = 1; i < orig_loop_num_nodes; i++)
2429 gimple_stmt_iterator gsi;
2430 gimple_stmt_iterator last;
2432 bb = ifc_bbs[i];
2434 if (bb == exit_bb || bb == loop->latch)
2435 continue;
2437 /* We release virtual PHIs late because we have to propagate them
2438 out using the current VUSE. The def might be the one used
2439 after the loop. */
2440 vphi = get_virtual_phi (bb);
2441 if (vphi)
2443 imm_use_iterator iter;
2444 use_operand_p use_p;
2445 gimple *use_stmt;
2446 FOR_EACH_IMM_USE_STMT (use_stmt, iter, gimple_phi_result (vphi))
2448 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2449 SET_USE (use_p, last_vdef);
2451 gsi = gsi_for_stmt (vphi);
2452 remove_phi_node (&gsi, true);
2455 /* Make the stmts members of loop->header and clear range info from all
2456 stmts in BB, since they are no longer executed conditionally on the
2457 predicate from which that info could have been derived. */
2458 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2460 gimple *stmt = gsi_stmt (gsi);
2461 gimple_set_bb (stmt, merge_target_bb);
2462 /* Update virtual operands. */
2463 if (last_vdef)
2465 use_operand_p use_p = ssa_vuse_operand (stmt);
2466 if (use_p
2467 && USE_FROM_PTR (use_p) != last_vdef)
2468 SET_USE (use_p, last_vdef);
2469 if (gimple_vdef (stmt))
2470 last_vdef = gimple_vdef (stmt);
2472 if (predicated[i])
2474 ssa_op_iter i;
2475 tree op;
2476 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF)
2477 reset_flow_sensitive_info (op);
2481 /* Update stmt list. */
2482 last = gsi_last_bb (merge_target_bb);
2483 gsi_insert_seq_after_without_update (&last, bb_seq (bb), GSI_NEW_STMT);
2484 set_bb_seq (bb, NULL);
2486 delete_basic_block (bb);
2489 /* If possible, merge the loop header into the block with the exit edge.
2490 This reduces the number of basic blocks to two, to please the
2491 vectorizer, which handles only loops with two nodes. */
2492 if (exit_bb
2493 && exit_bb != loop->header)
2495 /* We release virtual PHIs late because we have to propagate them
2496 out using the current VUSE. The def might be the one used
2497 after the loop. */
2498 vphi = get_virtual_phi (exit_bb);
2499 if (vphi)
2501 imm_use_iterator iter;
2502 use_operand_p use_p;
2503 gimple *use_stmt;
2504 FOR_EACH_IMM_USE_STMT (use_stmt, iter, gimple_phi_result (vphi))
2506 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2507 SET_USE (use_p, last_vdef);
2509 gimple_stmt_iterator gsi = gsi_for_stmt (vphi);
2510 remove_phi_node (&gsi, true);
2513 if (can_merge_blocks_p (loop->header, exit_bb))
2514 merge_blocks (loop->header, exit_bb);
2517 free (ifc_bbs);
2518 ifc_bbs = NULL;
2519 free (predicated);
2522 /* Version LOOP before if-converting it; the original loop
2523 will be if-converted, the new copy of the loop will not,
2524 and the LOOP_VECTORIZED internal call will guard which
2525 loop to execute. The vectorizer pass will fold this
2526 internal call into either true or false. */
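/* Schematically (a sketch of the resulting CFG, not literal pass output):
     if (.LOOP_VECTORIZED (<original loop num>, <copy loop num>))
       <original loop, subsequently if-converted>
     else
       <loop copy, marked dont_vectorize, kept in its scalar form>
   The two arguments are the loop numbers filled in by the code below. */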
2528 static bool
2529 version_loop_for_if_conversion (struct loop *loop)
2531 basic_block cond_bb;
2532 tree cond = make_ssa_name (boolean_type_node);
2533 struct loop *new_loop;
2534 gimple *g;
2535 gimple_stmt_iterator gsi;
2536 unsigned int save_length;
2538 g = gimple_build_call_internal (IFN_LOOP_VECTORIZED, 2,
2539 build_int_cst (integer_type_node, loop->num),
2540 integer_zero_node);
2541 gimple_call_set_lhs (g, cond);
2543 /* Save BB->aux around loop_version as that uses the same field. */
2544 save_length = loop->inner ? loop->inner->num_nodes : loop->num_nodes;
2545 void **saved_preds = XALLOCAVEC (void *, save_length);
2546 for (unsigned i = 0; i < save_length; i++)
2547 saved_preds[i] = ifc_bbs[i]->aux;
2549 initialize_original_copy_tables ();
2550 new_loop = loop_version (loop, cond, &cond_bb,
2551 REG_BR_PROB_BASE, REG_BR_PROB_BASE,
2552 REG_BR_PROB_BASE, true);
2553 free_original_copy_tables ();
2555 for (unsigned i = 0; i < save_length; i++)
2556 ifc_bbs[i]->aux = saved_preds[i];
2558 if (new_loop == NULL)
2559 return false;
2561 new_loop->dont_vectorize = true;
2562 new_loop->force_vectorize = false;
2563 gsi = gsi_last_bb (cond_bb);
2564 gimple_call_set_arg (g, 1, build_int_cst (integer_type_node, new_loop->num));
2565 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
2566 update_ssa (TODO_update_ssa);
2567 return true;
2570 /* Return true when LOOP satisfies the following conditions that will
2571 allow it to be recognized by the vectorizer for outer-loop
2572 vectorization:
2573 - The loop is not the root node of the loop tree.
2574 - The loop has exactly one inner loop.
2575 - The loop has a single exit.
2576 - The loop header has a single successor, which is the inner
2577 loop header.
2578 - The loop exit block has a single predecessor, which is the
2579 inner loop's exit block. */
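/* For instance (names invented for illustration), a nest of the shape
     for (i = 0; i < n; i++)
       for (j = 0; j < m; j++)
         a[i][j] = c[i][j] ? x : y;
   where the outer header falls straight into the inner loop header and the
   outer exit block is reached only from the inner loop's exit block,
   satisfies these conditions. */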
2581 static bool
2582 versionable_outer_loop_p (struct loop *loop)
2584 if (!loop_outer (loop)
2585 || !loop->inner
2586 || loop->inner->next
2587 || !single_exit (loop)
2588 || !single_succ_p (loop->header)
2589 || single_succ (loop->header) != loop->inner->header)
2590 return false;
2592 basic_block outer_exit = single_pred (loop->latch);
2593 basic_block inner_exit = single_pred (loop->inner->latch);
2595 if (!single_pred_p (outer_exit) || single_pred (outer_exit) != inner_exit)
2596 return false;
2598 if (dump_file)
2599 fprintf (dump_file, "Found vectorizable outer loop for versioning\n");
2601 return true;
2604 /* Perform splitting of critical edges. Skip splitting and return false
2605 if LOOP will not be converted because:
2607 - LOOP is not well formed.
2608 - LOOP has a PHI with more than MAX_PHI_ARG_NUM arguments.
2610 The last restriction applies only if AGGRESSIVE_IF_CONV is false. */
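/* Recall that a critical edge is an edge whose source has more than one
   successor and whose destination has more than one predecessor, e.g. the
   edge bb_2 -> bb_4 in this illustrative CFG:
     bb_2: if (c_1) goto bb_3; else goto bb_4;
     bb_3: ...; goto bb_4;
     bb_4: # x_5 = PHI <...>
   Splitting such an edge inserts a forwarding block on it, giving the later
   predication a dedicated edge to attach a predicate to. */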
2612 static bool
2613 ifcvt_split_critical_edges (struct loop *loop, bool aggressive_if_conv)
2615 basic_block *body;
2616 basic_block bb;
2617 unsigned int num = loop->num_nodes;
2618 unsigned int i;
2619 gimple *stmt;
2620 edge e;
2621 edge_iterator ei;
2622 auto_vec<edge> critical_edges;
2624 /* Loop is not well formed. */
2625 if (num <= 2 || loop->inner || !single_exit (loop))
2626 return false;
2628 body = get_loop_body (loop);
2629 for (i = 0; i < num; i++)
2631 bb = body[i];
2632 if (!aggressive_if_conv
2633 && phi_nodes (bb)
2634 && EDGE_COUNT (bb->preds) > MAX_PHI_ARG_NUM)
2636 if (dump_file && (dump_flags & TDF_DETAILS))
2637 fprintf (dump_file,
2638 "BB %d has complicated PHI with more than %u args.\n",
2639 bb->index, MAX_PHI_ARG_NUM);
2641 free (body);
2642 return false;
2644 if (bb == loop->latch || bb_with_exit_edge_p (loop, bb))
2645 continue;
2647 stmt = last_stmt (bb);
2648 /* Skip basic blocks not ending with conditional branch. */
2649 if (!stmt || gimple_code (stmt) != GIMPLE_COND)
2650 continue;
2652 FOR_EACH_EDGE (e, ei, bb->succs)
2653 if (EDGE_CRITICAL_P (e) && e->dest->loop_father == loop)
2654 critical_edges.safe_push (e);
2656 free (body);
2658 while (critical_edges.length () > 0)
2660 e = critical_edges.pop ();
2661 /* Don't split the edge if its destination can be predicated along a non-critical edge. */
2662 if (EDGE_COUNT (e->dest->preds) > 2 || all_preds_critical_p (e->dest))
2663 split_edge (e);
2666 return true;
2669 /* Delete redundant statements produced by predication which would
2670 otherwise prevent loop vectorization. */
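/* A typical candidate (illustrative SSA names) is a gimplified predicate
   temporary whose users were all folded away during predication, e.g.
     _p1 = a_1 > 0;
     _p2 = ~_p1;
     res_3 = a_1 > 0 ? b_4 : c_5;
   where _p2 (and possibly _p1) has no remaining users in the merged block
   and is removed here so it does not get in the way of the vectorizer. */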
2672 static void
2673 ifcvt_local_dce (basic_block bb)
2675 gimple *stmt;
2676 gimple *stmt1;
2677 gimple *phi;
2678 gimple_stmt_iterator gsi;
2679 auto_vec<gimple *> worklist;
2680 enum gimple_code code;
2681 use_operand_p use_p;
2682 imm_use_iterator imm_iter;
2684 worklist.create (64);
2685 /* Consider all PHIs as live statements. */
2686 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2688 phi = gsi_stmt (gsi);
2689 gimple_set_plf (phi, GF_PLF_2, true);
2690 worklist.safe_push (phi);
2692 /* Consider load/store statements, CALLs and CONDs as live. */
2693 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2695 stmt = gsi_stmt (gsi);
2696 if (gimple_store_p (stmt)
2697 || gimple_assign_load_p (stmt)
2698 || is_gimple_debug (stmt))
2700 gimple_set_plf (stmt, GF_PLF_2, true);
2701 worklist.safe_push (stmt);
2702 continue;
2704 code = gimple_code (stmt);
2705 if (code == GIMPLE_COND || code == GIMPLE_CALL)
2707 gimple_set_plf (stmt, GF_PLF_2, true);
2708 worklist.safe_push (stmt);
2709 continue;
2711 gimple_set_plf (stmt, GF_PLF_2, false);
2713 if (code == GIMPLE_ASSIGN)
2715 tree lhs = gimple_assign_lhs (stmt);
2716 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
2718 stmt1 = USE_STMT (use_p);
2719 if (gimple_bb (stmt1) != bb)
2721 gimple_set_plf (stmt, GF_PLF_2, true);
2722 worklist.safe_push (stmt);
2723 break;
2728 /* Propagate liveness through the arguments of live stmts. */
2729 while (worklist.length () > 0)
2731 ssa_op_iter iter;
2732 use_operand_p use_p;
2733 tree use;
2735 stmt = worklist.pop ();
2736 FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
2738 use = USE_FROM_PTR (use_p);
2739 if (TREE_CODE (use) != SSA_NAME)
2740 continue;
2741 stmt1 = SSA_NAME_DEF_STMT (use);
2742 if (gimple_bb (stmt1) != bb
2743 || gimple_plf (stmt1, GF_PLF_2))
2744 continue;
2745 gimple_set_plf (stmt1, GF_PLF_2, true);
2746 worklist.safe_push (stmt1);
2749 /* Delete dead statements. */
2750 gsi = gsi_start_bb (bb);
2751 while (!gsi_end_p (gsi))
2753 stmt = gsi_stmt (gsi);
2754 if (gimple_plf (stmt, GF_PLF_2))
2756 gsi_next (&gsi);
2757 continue;
2759 if (dump_file && (dump_flags & TDF_DETAILS))
2761 fprintf (dump_file, "Delete dead stmt in bb#%d\n", bb->index);
2762 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2764 gsi_remove (&gsi, true);
2765 release_defs (stmt);
2769 /* If-convert LOOP when it is legal. For the moment this pass has no
2770 profitability analysis. Returns non-zero todo flags when something
2771 changed. */
2773 unsigned int
2774 tree_if_conversion (struct loop *loop)
2776 unsigned int todo = 0;
2777 bool aggressive_if_conv;
2779 ifc_bbs = NULL;
2780 any_pred_load_store = false;
2781 any_complicated_phi = false;
2783 /* Apply more aggressive if-conversion when the loop or its outer loop was
2784 marked with the simd pragma. When that's the case, we try to if-convert
2785 loops containing PHIs with more than MAX_PHI_ARG_NUM arguments. */
2786 aggressive_if_conv = loop->force_vectorize;
2787 if (!aggressive_if_conv)
2789 struct loop *outer_loop = loop_outer (loop);
2790 if (outer_loop && outer_loop->force_vectorize)
2791 aggressive_if_conv = true;
2794 if (!ifcvt_split_critical_edges (loop, aggressive_if_conv))
2795 goto cleanup;
2797 if (!if_convertible_loop_p (loop)
2798 || !dbg_cnt (if_conversion_tree))
2799 goto cleanup;
2801 if ((any_pred_load_store || any_complicated_phi)
2802 && ((!flag_tree_loop_vectorize && !loop->force_vectorize)
2803 || loop->dont_vectorize))
2804 goto cleanup;
2806 /* Since we have no cost model, always version loops unless the user
2807 specified -ftree-loop-if-convert. Either version this loop, or if
2808 the pattern is right for outer-loop vectorization, version the
2809 outer loop. In the latter case we will still if-convert the
2810 original inner loop. */
2811 if (flag_tree_loop_if_convert != 1
2812 && !version_loop_for_if_conversion
2813 (versionable_outer_loop_p (loop_outer (loop))
2814 ? loop_outer (loop) : loop))
2815 goto cleanup;
2817 /* Now all statements are if-convertible. Combine all the basic
2818 blocks into one huge basic block doing the if-conversion
2819 on-the-fly. */
2820 combine_blocks (loop);
2822 /* Delete dead predicate computations. */
2823 ifcvt_local_dce (loop->header);
2825 todo |= TODO_cleanup_cfg;
2827 cleanup:
2828 if (ifc_bbs)
2830 unsigned int i;
2832 for (i = 0; i < loop->num_nodes; i++)
2833 free_bb_predicate (ifc_bbs[i]);
2835 free (ifc_bbs);
2836 ifc_bbs = NULL;
2839 return todo;
2842 /* Tree if-conversion pass management. */
2844 namespace {
2846 const pass_data pass_data_if_conversion =
2848 GIMPLE_PASS, /* type */
2849 "ifcvt", /* name */
2850 OPTGROUP_NONE, /* optinfo_flags */
2851 TV_TREE_LOOP_IFCVT, /* tv_id */
2852 ( PROP_cfg | PROP_ssa ), /* properties_required */
2853 0, /* properties_provided */
2854 0, /* properties_destroyed */
2855 0, /* todo_flags_start */
2856 0, /* todo_flags_finish */
2859 class pass_if_conversion : public gimple_opt_pass
2861 public:
2862 pass_if_conversion (gcc::context *ctxt)
2863 : gimple_opt_pass (pass_data_if_conversion, ctxt)
2866 /* opt_pass methods: */
2867 virtual bool gate (function *);
2868 virtual unsigned int execute (function *);
2870 }; // class pass_if_conversion
2872 bool
2873 pass_if_conversion::gate (function *fun)
2875 return (((flag_tree_loop_vectorize || fun->has_force_vectorize_loops)
2876 && flag_tree_loop_if_convert != 0)
2877 || flag_tree_loop_if_convert == 1);
2880 unsigned int
2881 pass_if_conversion::execute (function *fun)
2883 struct loop *loop;
2884 unsigned todo = 0;
2886 if (number_of_loops (fun) <= 1)
2887 return 0;
2889 FOR_EACH_LOOP (loop, 0)
2890 if (flag_tree_loop_if_convert == 1
2891 || ((flag_tree_loop_vectorize || loop->force_vectorize)
2892 && !loop->dont_vectorize))
2893 todo |= tree_if_conversion (loop);
2895 if (flag_checking)
2897 basic_block bb;
2898 FOR_EACH_BB_FN (bb, fun)
2899 gcc_assert (!bb->aux);
2902 return todo;
2905 } // anon namespace
2907 gimple_opt_pass *
2908 make_pass_if_conversion (gcc::context *ctxt)
2910 return new pass_if_conversion (ctxt);