gcc/tree-if-conv.c
1 /* If-conversion for vectorizer.
2 Copyright (C) 2004-2016 Free Software Foundation, Inc.
3 Contributed by Devang Patel <dpatel@apple.com>
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 /* This pass implements a tree level if-conversion of loops. Its
22 initial goal is to help the vectorizer to vectorize loops with
23 conditions.
25 A short description of if-conversion:
27 o Decide if a loop is if-convertible or not.
 28 o Walk all loop basic blocks in breadth first order (BFS order).
 29   o Remove conditional statements (at the end of basic block)
 30     and propagate condition into destination basic blocks'
 31     predicate list.
 32   o Replace modify expression with conditional modify expression
 33     using current basic block's condition.
 34 o Merge all basic blocks
 35   o Replace phi nodes with conditional modify expr
 36   o Merge all basic blocks into header
38 Sample transformation:
40 INPUT
41 -----
43 # i_23 = PHI <0(0), i_18(10)>;
44 <L0>:;
45 j_15 = A[i_23];
46 if (j_15 > 41) goto <L1>; else goto <L17>;
48 <L17>:;
49 goto <bb 3> (<L3>);
51 <L1>:;
53 # iftmp.2_4 = PHI <0(8), 42(2)>;
54 <L3>:;
55 A[i_23] = iftmp.2_4;
56 i_18 = i_23 + 1;
57 if (i_18 <= 15) goto <L19>; else goto <L18>;
59 <L19>:;
60 goto <bb 1> (<L0>);
62 <L18>:;
64 OUTPUT
65 ------
67 # i_23 = PHI <0(0), i_18(10)>;
68 <L0>:;
69 j_15 = A[i_23];
71 <L3>:;
72 iftmp.2_4 = j_15 > 41 ? 42 : 0;
73 A[i_23] = iftmp.2_4;
74 i_18 = i_23 + 1;
75 if (i_18 <= 15) goto <L19>; else goto <L18>;
77 <L19>:;
78 goto <bb 1> (<L0>);
80 <L18>:;
83 #include "config.h"
84 #include "system.h"
85 #include "coretypes.h"
86 #include "backend.h"
87 #include "rtl.h"
88 #include "tree.h"
89 #include "gimple.h"
90 #include "cfghooks.h"
91 #include "tree-pass.h"
92 #include "ssa.h"
93 #include "expmed.h"
94 #include "optabs-query.h"
95 #include "gimple-pretty-print.h"
96 #include "alias.h"
97 #include "fold-const.h"
98 #include "stor-layout.h"
99 #include "gimple-fold.h"
100 #include "gimplify.h"
101 #include "gimple-iterator.h"
102 #include "gimplify-me.h"
103 #include "tree-cfg.h"
104 #include "tree-into-ssa.h"
105 #include "tree-ssa.h"
106 #include "cfgloop.h"
107 #include "tree-data-ref.h"
108 #include "tree-scalar-evolution.h"
109 #include "tree-ssa-loop.h"
110 #include "tree-ssa-loop-niter.h"
111 #include "tree-ssa-loop-ivopts.h"
112 #include "tree-ssa-address.h"
113 #include "dbgcnt.h"
114 #include "tree-hash-traits.h"
115 #include "varasm.h"
116 #include "builtins.h"
117 #include "params.h"
118 #include "cfganal.h"
 120 /* Only handle PHIs with no more than this many arguments, unless we are
 121 asked to by the simd pragma. */
122 #define MAX_PHI_ARG_NUM \
123 ((unsigned) PARAM_VALUE (PARAM_MAX_TREE_IF_CONVERSION_PHI_ARGS))
 125 /* Indicates whether any new load/store that needs to be predicated is
 126 introduced during if-conversion. */
127 static bool any_pred_load_store;
 129 /* Indicates whether there are any complicated PHIs that need to be handled
 130 in if-conversion. A complicated PHI has more than two arguments and can't
 131 be degenerated to a two-argument PHI. See the comment before
 132 phi_convertible_by_degenerating_args for more information. */
133 static bool any_complicated_phi;
 135 /* Hash for struct innermost_loop_behavior. It is up to the user to
 136 free the memory. */
138 struct innermost_loop_behavior_hash : nofree_ptr_hash <innermost_loop_behavior>
140 static inline hashval_t hash (const value_type &);
141 static inline bool equal (const value_type &,
142 const compare_type &);
145 inline hashval_t
146 innermost_loop_behavior_hash::hash (const value_type &e)
148 hashval_t hash;
150 hash = iterative_hash_expr (e->base_address, 0);
151 hash = iterative_hash_expr (e->offset, hash);
152 hash = iterative_hash_expr (e->init, hash);
153 return iterative_hash_expr (e->step, hash);
156 inline bool
157 innermost_loop_behavior_hash::equal (const value_type &e1,
158 const compare_type &e2)
160 if ((e1->base_address && !e2->base_address)
161 || (!e1->base_address && e2->base_address)
162 || (!e1->offset && e2->offset)
163 || (e1->offset && !e2->offset)
164 || (!e1->init && e2->init)
165 || (e1->init && !e2->init)
166 || (!e1->step && e2->step)
167 || (e1->step && !e2->step))
168 return false;
170 if (e1->base_address && e2->base_address
171 && !operand_equal_p (e1->base_address, e2->base_address, 0))
172 return false;
173 if (e1->offset && e2->offset
174 && !operand_equal_p (e1->offset, e2->offset, 0))
175 return false;
176 if (e1->init && e2->init
177 && !operand_equal_p (e1->init, e2->init, 0))
178 return false;
179 if (e1->step && e2->step
180 && !operand_equal_p (e1->step, e2->step, 0))
181 return false;
183 return true;
186 /* List of basic blocks in if-conversion-suitable order. */
187 static basic_block *ifc_bbs;
189 /* Hash table to store <DR's innermost loop behavior, DR> pairs. */
190 static hash_map<innermost_loop_behavior_hash,
191 data_reference_p> *innermost_DR_map;
193 /* Hash table to store <base reference, DR> pairs. */
194 static hash_map<tree_operand_hash, data_reference_p> *baseref_DR_map;
196 /* Structure used to predicate basic blocks. This is attached to the
197 ->aux field of the BBs in the loop to be if-converted. */
198 struct bb_predicate {
200 /* The condition under which this basic block is executed. */
201 tree predicate;
203 /* PREDICATE is gimplified, and the sequence of statements is
204 recorded here, in order to avoid the duplication of computations
205 that occur in previous conditions. See PR44483. */
206 gimple_seq predicate_gimplified_stmts;
209 /* Returns true when the basic block BB has a predicate. */
211 static inline bool
212 bb_has_predicate (basic_block bb)
214 return bb->aux != NULL;
217 /* Returns the gimplified predicate for basic block BB. */
219 static inline tree
220 bb_predicate (basic_block bb)
222 return ((struct bb_predicate *) bb->aux)->predicate;
225 /* Sets the gimplified predicate COND for basic block BB. */
227 static inline void
228 set_bb_predicate (basic_block bb, tree cond)
230 gcc_assert ((TREE_CODE (cond) == TRUTH_NOT_EXPR
231 && is_gimple_condexpr (TREE_OPERAND (cond, 0)))
232 || is_gimple_condexpr (cond));
233 ((struct bb_predicate *) bb->aux)->predicate = cond;
236 /* Returns the sequence of statements of the gimplification of the
237 predicate for basic block BB. */
239 static inline gimple_seq
240 bb_predicate_gimplified_stmts (basic_block bb)
242 return ((struct bb_predicate *) bb->aux)->predicate_gimplified_stmts;
245 /* Sets the sequence of statements STMTS of the gimplification of the
246 predicate for basic block BB. */
248 static inline void
249 set_bb_predicate_gimplified_stmts (basic_block bb, gimple_seq stmts)
251 ((struct bb_predicate *) bb->aux)->predicate_gimplified_stmts = stmts;
254 /* Adds the sequence of statements STMTS to the sequence of statements
255 of the predicate for basic block BB. */
257 static inline void
258 add_bb_predicate_gimplified_stmts (basic_block bb, gimple_seq stmts)
260 gimple_seq_add_seq_without_update
261 (&(((struct bb_predicate *) bb->aux)->predicate_gimplified_stmts), stmts);
264 /* Initializes to TRUE the predicate of basic block BB. */
266 static inline void
267 init_bb_predicate (basic_block bb)
269 bb->aux = XNEW (struct bb_predicate);
270 set_bb_predicate_gimplified_stmts (bb, NULL);
271 set_bb_predicate (bb, boolean_true_node);
274 /* Release the SSA_NAMEs associated with the predicate of basic block BB,
275 but don't actually free it. */
277 static inline void
278 release_bb_predicate (basic_block bb)
280 gimple_seq stmts = bb_predicate_gimplified_stmts (bb);
281 if (stmts)
283 if (flag_checking)
284 for (gimple_stmt_iterator i = gsi_start (stmts);
285 !gsi_end_p (i); gsi_next (&i))
286 gcc_assert (! gimple_use_ops (gsi_stmt (i)));
288 set_bb_predicate_gimplified_stmts (bb, NULL);
292 /* Free the predicate of basic block BB. */
294 static inline void
295 free_bb_predicate (basic_block bb)
297 if (!bb_has_predicate (bb))
298 return;
300 release_bb_predicate (bb);
301 free (bb->aux);
302 bb->aux = NULL;
305 /* Reinitialize predicate of BB with the true predicate. */
307 static inline void
308 reset_bb_predicate (basic_block bb)
310 if (!bb_has_predicate (bb))
311 init_bb_predicate (bb);
312 else
314 release_bb_predicate (bb);
315 set_bb_predicate (bb, boolean_true_node);
319 /* Returns a new SSA_NAME of type TYPE that is assigned the value of
320 the expression EXPR. Inserts the statement created for this
321 computation before GSI and leaves the iterator GSI at the same
322 statement. */
324 static tree
325 ifc_temp_var (tree type, tree expr, gimple_stmt_iterator *gsi)
327 tree new_name = make_temp_ssa_name (type, NULL, "_ifc_");
328 gimple *stmt = gimple_build_assign (new_name, expr);
329 gimple_set_vuse (stmt, gimple_vuse (gsi_stmt (*gsi)));
330 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
331 return new_name;
334 /* Return true when COND is a false predicate. */
336 static inline bool
337 is_false_predicate (tree cond)
339 return (cond != NULL_TREE
340 && (cond == boolean_false_node
341 || integer_zerop (cond)));
344 /* Return true when COND is a true predicate. */
346 static inline bool
347 is_true_predicate (tree cond)
349 return (cond == NULL_TREE
350 || cond == boolean_true_node
351 || integer_onep (cond));
354 /* Returns true when BB has a predicate that is not trivial: true or
355 NULL_TREE. */
357 static inline bool
358 is_predicated (basic_block bb)
360 return !is_true_predicate (bb_predicate (bb));
363 /* Parses the predicate COND and returns its comparison code and
364 operands OP0 and OP1. */
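/* For illustration (an assumed example, names invented): if COND is the
   SSA_NAME _1 defined by "_1 = a_2 < b_3", then *OP0 is set to a_2, *OP1 to
   b_3, and LT_EXPR is returned; a TRUTH_NOT_EXPR around such a name yields
   the inverted comparison code instead. */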
366 static enum tree_code
367 parse_predicate (tree cond, tree *op0, tree *op1)
369 gimple *s;
371 if (TREE_CODE (cond) == SSA_NAME
372 && is_gimple_assign (s = SSA_NAME_DEF_STMT (cond)))
374 if (TREE_CODE_CLASS (gimple_assign_rhs_code (s)) == tcc_comparison)
376 *op0 = gimple_assign_rhs1 (s);
377 *op1 = gimple_assign_rhs2 (s);
378 return gimple_assign_rhs_code (s);
381 else if (gimple_assign_rhs_code (s) == TRUTH_NOT_EXPR)
383 tree op = gimple_assign_rhs1 (s);
384 tree type = TREE_TYPE (op);
385 enum tree_code code = parse_predicate (op, op0, op1);
387 return code == ERROR_MARK ? ERROR_MARK
388 : invert_tree_comparison (code, HONOR_NANS (type));
391 return ERROR_MARK;
394 if (COMPARISON_CLASS_P (cond))
396 *op0 = TREE_OPERAND (cond, 0);
397 *op1 = TREE_OPERAND (cond, 1);
398 return TREE_CODE (cond);
401 return ERROR_MARK;
404 /* Returns the fold of predicate C1 OR C2 at location LOC. */
406 static tree
407 fold_or_predicates (location_t loc, tree c1, tree c2)
409 tree op1a, op1b, op2a, op2b;
410 enum tree_code code1 = parse_predicate (c1, &op1a, &op1b);
411 enum tree_code code2 = parse_predicate (c2, &op2a, &op2b);
413 if (code1 != ERROR_MARK && code2 != ERROR_MARK)
415 tree t = maybe_fold_or_comparisons (code1, op1a, op1b,
416 code2, op2a, op2b);
417 if (t)
418 return t;
421 return fold_build2_loc (loc, TRUTH_OR_EXPR, boolean_type_node, c1, c2);
424 /* Returns either a COND_EXPR or the folded expression if the folded
425 expression is a MIN_EXPR, a MAX_EXPR, an ABS_EXPR,
426 a constant or a SSA_NAME. */
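/* For example (illustrative only, assuming signed integer operands):

   | x_1 < y_2 ? x_1 : y_2    folds to   MIN_EXPR <x_1, y_2>
   | x_1 > 0   ? x_1 : -x_1   folds to   ABS_EXPR <x_1>

   while expressions that do not fold are rebuilt as a COND_EXPR. */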
428 static tree
429 fold_build_cond_expr (tree type, tree cond, tree rhs, tree lhs)
431 tree rhs1, lhs1, cond_expr;
 433 /* If COND is the comparison r != 0 and r has boolean type, convert COND
 434 to the SSA_NAME r so that it is accepted by the vectorizer's bool pattern. */
435 if (TREE_CODE (cond) == NE_EXPR)
437 tree op0 = TREE_OPERAND (cond, 0);
438 tree op1 = TREE_OPERAND (cond, 1);
439 if (TREE_CODE (op0) == SSA_NAME
440 && TREE_CODE (TREE_TYPE (op0)) == BOOLEAN_TYPE
441 && (integer_zerop (op1)))
442 cond = op0;
444 cond_expr = fold_ternary (COND_EXPR, type, cond, rhs, lhs);
446 if (cond_expr == NULL_TREE)
447 return build3 (COND_EXPR, type, cond, rhs, lhs);
449 STRIP_USELESS_TYPE_CONVERSION (cond_expr);
451 if (is_gimple_val (cond_expr))
452 return cond_expr;
454 if (TREE_CODE (cond_expr) == ABS_EXPR)
456 rhs1 = TREE_OPERAND (cond_expr, 1);
457 STRIP_USELESS_TYPE_CONVERSION (rhs1);
458 if (is_gimple_val (rhs1))
459 return build1 (ABS_EXPR, type, rhs1);
462 if (TREE_CODE (cond_expr) == MIN_EXPR
463 || TREE_CODE (cond_expr) == MAX_EXPR)
465 lhs1 = TREE_OPERAND (cond_expr, 0);
466 STRIP_USELESS_TYPE_CONVERSION (lhs1);
467 rhs1 = TREE_OPERAND (cond_expr, 1);
468 STRIP_USELESS_TYPE_CONVERSION (rhs1);
469 if (is_gimple_val (rhs1) && is_gimple_val (lhs1))
470 return build2 (TREE_CODE (cond_expr), type, lhs1, rhs1);
472 return build3 (COND_EXPR, type, cond, rhs, lhs);
475 /* Add condition NC to the predicate list of basic block BB. LOOP is
476 the loop to be if-converted. Use predicate of cd-equivalent block
477 for join bb if it exists: we call basic blocks bb1 and bb2
478 cd-equivalent if they are executed under the same condition. */
480 static inline void
481 add_to_predicate_list (struct loop *loop, basic_block bb, tree nc)
483 tree bc, *tp;
484 basic_block dom_bb;
486 if (is_true_predicate (nc))
487 return;
489 /* If dominance tells us this basic block is always executed,
490 don't record any predicates for it. */
491 if (dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
492 return;
494 dom_bb = get_immediate_dominator (CDI_DOMINATORS, bb);
 495 /* We use the notion of cd equivalence to get a simpler predicate for the
 496 join block, e.g. if the join block has 2 predecessors with predicates
497 p1 & p2 and p1 & !p2, we'd like to get p1 for it instead of
498 p1 & p2 | p1 & !p2. */
499 if (dom_bb != loop->header
500 && get_immediate_dominator (CDI_POST_DOMINATORS, dom_bb) == bb)
502 gcc_assert (flow_bb_inside_loop_p (loop, dom_bb));
503 bc = bb_predicate (dom_bb);
504 if (!is_true_predicate (bc))
505 set_bb_predicate (bb, bc);
506 else
507 gcc_assert (is_true_predicate (bb_predicate (bb)));
508 if (dump_file && (dump_flags & TDF_DETAILS))
509 fprintf (dump_file, "Use predicate of bb#%d for bb#%d\n",
510 dom_bb->index, bb->index);
511 return;
514 if (!is_predicated (bb))
515 bc = nc;
516 else
518 bc = bb_predicate (bb);
519 bc = fold_or_predicates (EXPR_LOCATION (bc), nc, bc);
520 if (is_true_predicate (bc))
522 reset_bb_predicate (bb);
523 return;
527 /* Allow a TRUTH_NOT_EXPR around the main predicate. */
528 if (TREE_CODE (bc) == TRUTH_NOT_EXPR)
529 tp = &TREE_OPERAND (bc, 0);
530 else
531 tp = &bc;
532 if (!is_gimple_condexpr (*tp))
534 gimple_seq stmts;
535 *tp = force_gimple_operand_1 (*tp, &stmts, is_gimple_condexpr, NULL_TREE);
536 add_bb_predicate_gimplified_stmts (bb, stmts);
538 set_bb_predicate (bb, bc);
541 /* Add the condition COND to the previous condition PREV_COND, and add
542 this to the predicate list of the destination of edge E. LOOP is
543 the loop to be if-converted. */
545 static void
546 add_to_dst_predicate_list (struct loop *loop, edge e,
547 tree prev_cond, tree cond)
549 if (!flow_bb_inside_loop_p (loop, e->dest))
550 return;
552 if (!is_true_predicate (prev_cond))
553 cond = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
554 prev_cond, cond);
556 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, e->dest))
557 add_to_predicate_list (loop, e->dest, cond);
560 /* Return true if one of the successor edges of BB exits LOOP. */
562 static bool
563 bb_with_exit_edge_p (struct loop *loop, basic_block bb)
565 edge e;
566 edge_iterator ei;
568 FOR_EACH_EDGE (e, ei, bb->succs)
569 if (loop_exit_edge_p (loop, e))
570 return true;
572 return false;
575 /* Given PHI which has more than two arguments, this function checks if
576 it's if-convertible by degenerating its arguments. Specifically, if
 577 the two conditions below are satisfied:
 579 1) The number of distinct PHI argument values equals 2 and one
 580 argument value has only a single occurrence.
 581 2) The edge corresponding to the unique argument isn't a critical edge.
 583 Such a PHI can be handled as if it had only two arguments. For example,
 584 the PHI below:
586 res = PHI <A_1(e1), A_1(e2), A_2(e3)>;
588 can be transformed into:
590 res = (predicate of e3) ? A_2 : A_1;
592 Return TRUE if it is the case, FALSE otherwise. */
594 static bool
595 phi_convertible_by_degenerating_args (gphi *phi)
597 edge e;
598 tree arg, t1 = NULL, t2 = NULL;
599 unsigned int i, i1 = 0, i2 = 0, n1 = 0, n2 = 0;
600 unsigned int num_args = gimple_phi_num_args (phi);
602 gcc_assert (num_args > 2);
604 for (i = 0; i < num_args; i++)
606 arg = gimple_phi_arg_def (phi, i);
607 if (t1 == NULL || operand_equal_p (t1, arg, 0))
609 n1++;
610 i1 = i;
611 t1 = arg;
613 else if (t2 == NULL || operand_equal_p (t2, arg, 0))
615 n2++;
616 i2 = i;
617 t2 = arg;
619 else
620 return false;
623 if (n1 != 1 && n2 != 1)
624 return false;
626 /* Check if the edge corresponding to the unique arg is critical. */
627 e = gimple_phi_arg_edge (phi, (n1 == 1) ? i1 : i2);
628 if (EDGE_COUNT (e->src->succs) > 1)
629 return false;
631 return true;
 634 /* Return true when PHI is if-convertible. PHI is part of loop LOOP
 635 and it belongs to basic block BB. Note that at this point it is already
 636 known that PHI is if-convertible. This function updates the global
 637 variable ANY_COMPLICATED_PHI if PHI is complicated. */
639 static bool
640 if_convertible_phi_p (struct loop *loop, basic_block bb, gphi *phi)
642 if (dump_file && (dump_flags & TDF_DETAILS))
644 fprintf (dump_file, "-------------------------\n");
645 print_gimple_stmt (dump_file, phi, 0, TDF_SLIM);
648 if (bb != loop->header
649 && gimple_phi_num_args (phi) > 2
650 && !phi_convertible_by_degenerating_args (phi))
651 any_complicated_phi = true;
653 return true;
656 /* Records the status of a data reference. This struct is attached to
657 each DR->aux field. */
659 struct ifc_dr {
660 bool rw_unconditionally;
661 bool w_unconditionally;
662 bool written_at_least_once;
664 tree rw_predicate;
665 tree w_predicate;
666 tree base_w_predicate;
669 #define IFC_DR(DR) ((struct ifc_dr *) (DR)->aux)
670 #define DR_BASE_W_UNCONDITIONALLY(DR) (IFC_DR (DR)->written_at_least_once)
671 #define DR_RW_UNCONDITIONALLY(DR) (IFC_DR (DR)->rw_unconditionally)
672 #define DR_W_UNCONDITIONALLY(DR) (IFC_DR (DR)->w_unconditionally)
 674 /* Iterates over DRs and stores <ref, DR> and <base ref, DR> pairs in
 675 HASH tables. While storing them, it checks whether the
 676 reference is unconditionally read or written and stores that as flag
 677 information. For the base reference it checks whether it is written at least
 678 once unconditionally and stores that as flag information along with the DR.
 679 In other words, for every data reference A in STMT there exist other
680 accesses to a data reference with the same base with predicates that
681 add up (OR-up) to the true predicate: this ensures that the data
682 reference A is touched (read or written) on every iteration of the
683 if-converted loop. */
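/* As an illustration (assumed example, names invented): for

   | if (c_1) A[i_2] = x_3;
   | else     y_4 = A[i_2];

   the write predicate c_1 and the read predicate !c_1 OR up to the true
   predicate, so the reference A[i_2] is recorded as read or written
   unconditionally on every iteration. */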
684 static void
685 hash_memrefs_baserefs_and_store_DRs_read_written_info (data_reference_p a)
688 data_reference_p *master_dr, *base_master_dr;
689 tree base_ref = DR_BASE_OBJECT (a);
690 innermost_loop_behavior *innermost = &DR_INNERMOST (a);
691 tree ca = bb_predicate (gimple_bb (DR_STMT (a)));
692 bool exist1, exist2;
694 master_dr = &innermost_DR_map->get_or_insert (innermost, &exist1);
695 if (!exist1)
696 *master_dr = a;
698 if (DR_IS_WRITE (a))
700 IFC_DR (*master_dr)->w_predicate
701 = fold_or_predicates (UNKNOWN_LOCATION, ca,
702 IFC_DR (*master_dr)->w_predicate);
703 if (is_true_predicate (IFC_DR (*master_dr)->w_predicate))
704 DR_W_UNCONDITIONALLY (*master_dr) = true;
706 IFC_DR (*master_dr)->rw_predicate
707 = fold_or_predicates (UNKNOWN_LOCATION, ca,
708 IFC_DR (*master_dr)->rw_predicate);
709 if (is_true_predicate (IFC_DR (*master_dr)->rw_predicate))
710 DR_RW_UNCONDITIONALLY (*master_dr) = true;
712 if (DR_IS_WRITE (a))
714 base_master_dr = &baseref_DR_map->get_or_insert (base_ref, &exist2);
715 if (!exist2)
716 *base_master_dr = a;
717 IFC_DR (*base_master_dr)->base_w_predicate
718 = fold_or_predicates (UNKNOWN_LOCATION, ca,
719 IFC_DR (*base_master_dr)->base_w_predicate);
720 if (is_true_predicate (IFC_DR (*base_master_dr)->base_w_predicate))
721 DR_BASE_W_UNCONDITIONALLY (*base_master_dr) = true;
 725 /* Return TRUE if we can prove that the index IDX of an array reference REF
 726 is within the array bounds. Return FALSE otherwise. */
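/* For instance (assumed example): with "int A[16];" and a loop
   "for (i_1 = 0; i_1 < 16; i_1++) ... A[i_1] ...", the constant initial
   index 0, the step 1 and the bounded iteration count let the analysis
   prove that i_1 stays within the array bounds. */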
728 static bool
729 idx_within_array_bound (tree ref, tree *idx, void *dta)
731 bool overflow;
732 widest_int niter, valid_niter, delta, wi_step;
733 tree ev, init, step;
734 tree low, high;
735 struct loop *loop = (struct loop*) dta;
737 /* Only support within-bound access for array references. */
738 if (TREE_CODE (ref) != ARRAY_REF)
739 return false;
741 /* For arrays at the end of the structure, we are not guaranteed that they
742 do not really extend over their declared size. However, for arrays of
743 size greater than one, this is unlikely to be intended. */
744 if (array_at_struct_end_p (ref))
745 return false;
747 ev = analyze_scalar_evolution (loop, *idx);
748 ev = instantiate_parameters (loop, ev);
749 init = initial_condition (ev);
750 step = evolution_part_in_loop_num (ev, loop->num);
752 if (!init || TREE_CODE (init) != INTEGER_CST
753 || (step && TREE_CODE (step) != INTEGER_CST))
754 return false;
756 low = array_ref_low_bound (ref);
757 high = array_ref_up_bound (ref);
759 /* The case of nonconstant bounds could be handled, but it would be
760 complicated. */
761 if (TREE_CODE (low) != INTEGER_CST
762 || !high || TREE_CODE (high) != INTEGER_CST)
763 return false;
 765 /* Check if the initial idx is within bound. */
766 if (wi::to_widest (init) < wi::to_widest (low)
767 || wi::to_widest (init) > wi::to_widest (high))
768 return false;
770 /* The idx is always within bound. */
771 if (!step || integer_zerop (step))
772 return true;
774 if (!max_loop_iterations (loop, &niter))
775 return false;
777 if (wi::to_widest (step) < 0)
779 delta = wi::to_widest (init) - wi::to_widest (low);
780 wi_step = -wi::to_widest (step);
782 else
784 delta = wi::to_widest (high) - wi::to_widest (init);
785 wi_step = wi::to_widest (step);
788 valid_niter = wi::div_floor (delta, wi_step, SIGNED, &overflow);
789 /* The iteration space of idx is within array bound. */
790 if (!overflow && niter <= valid_niter)
791 return true;
793 return false;
 796 /* Return TRUE if REF is an array reference that stays within its bounds. */
798 static bool
799 ref_within_array_bound (gimple *stmt, tree ref)
801 struct loop *loop = loop_containing_stmt (stmt);
803 gcc_assert (loop != NULL);
804 return for_each_index (&ref, idx_within_array_bound, loop);
 808 /* Given a memory reference expression REF, return TRUE if the base object
 809 it refers to is writable. The base object of a memory reference
 810 is the main object being referenced, which is returned by the function
 811 get_base_address. */
813 static bool
814 base_object_writable (tree ref)
816 tree base_tree = get_base_address (ref);
818 return (base_tree
819 && DECL_P (base_tree)
820 && decl_binds_to_current_def_p (base_tree)
821 && !TREE_READONLY (base_tree));
824 /* Return true when the memory references of STMT won't trap in the
825 if-converted code. There are two things that we have to check for:
827 - writes to memory occur to writable memory: if-conversion of
828 memory writes transforms the conditional memory writes into
829 unconditional writes, i.e. "if (cond) A[i] = foo" is transformed
830 into "A[i] = cond ? foo : A[i]", and as the write to memory may not
 831 be executed at all in the original code, A may be read-only
 832 memory. To check that A is not const-qualified, we check that
 833 there exists at least one unconditional write to A in the current
834 function.
836 - reads or writes to memory are valid memory accesses for every
837 iteration. To check that the memory accesses are correctly formed
838 and that we are allowed to read and write in these locations, we
839 check that the memory accesses to be if-converted occur at every
840 iteration unconditionally.
 842 Returns true for the memory reference in STMT if the same memory reference
 843 is read or written unconditionally at least once and the base memory
 844 reference is written unconditionally at least once; this checks that the
 845 reference will not cause a write fault. Also returns true if the memory
 846 reference is unconditionally read at least once and we are conditionally
 847 writing to memory that is known to be both read and written and is bound
 848 to the definition we are seeing. */
849 static bool
850 ifcvt_memrefs_wont_trap (gimple *stmt, vec<data_reference_p> drs)
852 data_reference_p *master_dr, *base_master_dr;
853 data_reference_p a = drs[gimple_uid (stmt) - 1];
855 tree base = DR_BASE_OBJECT (a);
856 innermost_loop_behavior *innermost = &DR_INNERMOST (a);
858 gcc_assert (DR_STMT (a) == stmt);
859 gcc_assert (DR_BASE_ADDRESS (a) || DR_OFFSET (a)
860 || DR_INIT (a) || DR_STEP (a));
862 master_dr = innermost_DR_map->get (innermost);
863 gcc_assert (master_dr != NULL);
865 base_master_dr = baseref_DR_map->get (base);
867 /* If a is unconditionally written to it doesn't trap. */
868 if (DR_W_UNCONDITIONALLY (*master_dr))
869 return true;
 871 /* If A is unconditionally accessed then ...
 873 Even if A is accessed conditionally, we can treat it as an unconditional
 874 access if it's an array reference and all its indices are within the
 875 array bounds. */
876 if (DR_RW_UNCONDITIONALLY (*master_dr)
877 || ref_within_array_bound (stmt, DR_REF (a)))
879 /* an unconditional read won't trap. */
880 if (DR_IS_READ (a))
881 return true;
 883 /* An unconditional write won't trap if the base is written
884 to unconditionally. */
885 if (base_master_dr
886 && DR_BASE_W_UNCONDITIONALLY (*base_master_dr))
887 return PARAM_VALUE (PARAM_ALLOW_STORE_DATA_RACES);
888 /* or the base is known to be not readonly. */
889 else if (base_object_writable (DR_REF (a)))
890 return PARAM_VALUE (PARAM_ALLOW_STORE_DATA_RACES);
893 return false;
896 /* Return true if STMT could be converted into a masked load or store
897 (conditional load or store based on a mask computed from bb predicate). */
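/* E.g. (illustrative, assumed example): a predicated load
   "if (c_1) x_2 = A[i_3];" may later be turned by predicate_mem_writes into
   a MASK_LOAD internal call whose mask is derived from the block predicate,
   provided the target supports masked loads for the access mode. */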
899 static bool
900 ifcvt_can_use_mask_load_store (gimple *stmt)
902 tree lhs, ref;
903 machine_mode mode;
904 basic_block bb = gimple_bb (stmt);
905 bool is_load;
907 if (!(flag_tree_loop_vectorize || bb->loop_father->force_vectorize)
908 || bb->loop_father->dont_vectorize
909 || !gimple_assign_single_p (stmt)
910 || gimple_has_volatile_ops (stmt))
911 return false;
913 /* Check whether this is a load or store. */
914 lhs = gimple_assign_lhs (stmt);
915 if (gimple_store_p (stmt))
917 if (!is_gimple_val (gimple_assign_rhs1 (stmt)))
918 return false;
919 is_load = false;
920 ref = lhs;
922 else if (gimple_assign_load_p (stmt))
924 is_load = true;
925 ref = gimple_assign_rhs1 (stmt);
927 else
928 return false;
930 if (may_be_nonaddressable_p (ref))
931 return false;
933 /* Mask should be integer mode of the same size as the load/store
934 mode. */
935 mode = TYPE_MODE (TREE_TYPE (lhs));
936 if (int_mode_for_mode (mode) == BLKmode
937 || VECTOR_MODE_P (mode))
938 return false;
940 if (can_vec_mask_load_store_p (mode, VOIDmode, is_load))
941 return true;
943 return false;
946 /* Return true when STMT is if-convertible.
 948 A GIMPLE_ASSIGN statement is not if-convertible if
 949 - it is not movable,
 950 - it could trap, or
 951 - its LHS does not have a register (scalar) type. */
953 static bool
954 if_convertible_gimple_assign_stmt_p (gimple *stmt,
955 vec<data_reference_p> refs)
957 tree lhs = gimple_assign_lhs (stmt);
959 if (dump_file && (dump_flags & TDF_DETAILS))
961 fprintf (dump_file, "-------------------------\n");
962 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
965 if (!is_gimple_reg_type (TREE_TYPE (lhs)))
966 return false;
 968 /* Some of these constraints might be too conservative. */
969 if (stmt_ends_bb_p (stmt)
970 || gimple_has_volatile_ops (stmt)
971 || (TREE_CODE (lhs) == SSA_NAME
972 && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
973 || gimple_has_side_effects (stmt))
975 if (dump_file && (dump_flags & TDF_DETAILS))
976 fprintf (dump_file, "stmt not suitable for ifcvt\n");
977 return false;
980 /* tree-into-ssa.c uses GF_PLF_1, so avoid it, because
981 in between if_convertible_loop_p and combine_blocks
982 we can perform loop versioning. */
983 gimple_set_plf (stmt, GF_PLF_2, false);
985 if ((! gimple_vuse (stmt)
986 || gimple_could_trap_p_1 (stmt, false, false)
987 || ! ifcvt_memrefs_wont_trap (stmt, refs))
988 && gimple_could_trap_p (stmt))
990 if (ifcvt_can_use_mask_load_store (stmt))
992 gimple_set_plf (stmt, GF_PLF_2, true);
993 any_pred_load_store = true;
994 return true;
996 if (dump_file && (dump_flags & TDF_DETAILS))
997 fprintf (dump_file, "tree could trap...\n");
998 return false;
1001 /* When if-converting stores force versioning, likewise if we
1002 ended up generating store data races. */
1003 if (gimple_vdef (stmt))
1004 any_pred_load_store = true;
1006 return true;
1009 /* Return true when STMT is if-convertible.
1011 A statement is if-convertible if:
1012 - it is an if-convertible GIMPLE_ASSIGN,
1013 - it is a GIMPLE_LABEL or a GIMPLE_COND,
 1014 - it is a builtin call. */
1016 static bool
1017 if_convertible_stmt_p (gimple *stmt, vec<data_reference_p> refs)
1019 switch (gimple_code (stmt))
1021 case GIMPLE_LABEL:
1022 case GIMPLE_DEBUG:
1023 case GIMPLE_COND:
1024 return true;
1026 case GIMPLE_ASSIGN:
1027 return if_convertible_gimple_assign_stmt_p (stmt, refs);
1029 case GIMPLE_CALL:
1031 tree fndecl = gimple_call_fndecl (stmt);
1032 if (fndecl)
1034 int flags = gimple_call_flags (stmt);
1035 if ((flags & ECF_CONST)
1036 && !(flags & ECF_LOOPING_CONST_OR_PURE)
1037 /* We can only vectorize some builtins at the moment,
1038 so restrict if-conversion to those. */
1039 && DECL_BUILT_IN (fndecl))
1040 return true;
1042 return false;
1045 default:
1046 /* Don't know what to do with 'em so don't do anything. */
1047 if (dump_file && (dump_flags & TDF_DETAILS))
1049 fprintf (dump_file, "don't know what to do\n");
1050 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
1052 return false;
1053 break;
1056 return true;
 1059 /* Assumes that BB has more than one predecessor.
 1060 Returns false if at least one predecessor edge is not critical,
 1061 and true otherwise. */
1063 static inline bool
1064 all_preds_critical_p (basic_block bb)
1066 edge e;
1067 edge_iterator ei;
1069 FOR_EACH_EDGE (e, ei, bb->preds)
1070 if (EDGE_COUNT (e->src->succs) == 1)
1071 return false;
1072 return true;
 1075 /* Returns true if at least one predecessor edge is critical. */
1076 static inline bool
1077 has_pred_critical_p (basic_block bb)
1079 edge e;
1080 edge_iterator ei;
1082 FOR_EACH_EDGE (e, ei, bb->preds)
1083 if (EDGE_COUNT (e->src->succs) > 1)
1084 return true;
1085 return false;
1088 /* Return true when BB is if-convertible. This routine does not check
1089 basic block's statements and phis.
1091 A basic block is not if-convertible if:
1092 - it is non-empty and it is after the exit block (in BFS order),
1093 - it is after the exit block but before the latch,
1094 - its edges are not normal.
1096 EXIT_BB is the basic block containing the exit of the LOOP. BB is
1097 inside LOOP. */
1099 static bool
1100 if_convertible_bb_p (struct loop *loop, basic_block bb, basic_block exit_bb)
1102 edge e;
1103 edge_iterator ei;
1105 if (dump_file && (dump_flags & TDF_DETAILS))
1106 fprintf (dump_file, "----------[%d]-------------\n", bb->index);
1108 if (EDGE_COUNT (bb->succs) > 2)
1109 return false;
1111 if (exit_bb)
1113 if (bb != loop->latch)
1115 if (dump_file && (dump_flags & TDF_DETAILS))
1116 fprintf (dump_file, "basic block after exit bb but before latch\n");
1117 return false;
1119 else if (!empty_block_p (bb))
1121 if (dump_file && (dump_flags & TDF_DETAILS))
1122 fprintf (dump_file, "non empty basic block after exit bb\n");
1123 return false;
1125 else if (bb == loop->latch
1126 && bb != exit_bb
1127 && !dominated_by_p (CDI_DOMINATORS, bb, exit_bb))
1129 if (dump_file && (dump_flags & TDF_DETAILS))
1130 fprintf (dump_file, "latch is not dominated by exit_block\n");
1131 return false;
1135 /* Be less adventurous and handle only normal edges. */
1136 FOR_EACH_EDGE (e, ei, bb->succs)
1137 if (e->flags & (EDGE_EH | EDGE_ABNORMAL | EDGE_IRREDUCIBLE_LOOP))
1139 if (dump_file && (dump_flags & TDF_DETAILS))
1140 fprintf (dump_file, "Difficult to handle edges\n");
1141 return false;
1144 return true;
1147 /* Return true when all predecessor blocks of BB are visited. The
1148 VISITED bitmap keeps track of the visited blocks. */
1150 static bool
1151 pred_blocks_visited_p (basic_block bb, bitmap *visited)
1153 edge e;
1154 edge_iterator ei;
1155 FOR_EACH_EDGE (e, ei, bb->preds)
1156 if (!bitmap_bit_p (*visited, e->src->index))
1157 return false;
1159 return true;
 1162 /* Get the body of LOOP in an order suitable for if-conversion. It is the
 1163 caller's responsibility to deallocate the basic block list.
 1164 The if-conversion-suitable order is breadth-first search (BFS) order
1165 with an additional constraint: select a block only if all its
1166 predecessors are already selected. */
1168 static basic_block *
1169 get_loop_body_in_if_conv_order (const struct loop *loop)
1171 basic_block *blocks, *blocks_in_bfs_order;
1172 basic_block bb;
1173 bitmap visited;
1174 unsigned int index = 0;
1175 unsigned int visited_count = 0;
1177 gcc_assert (loop->num_nodes);
1178 gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun));
1180 blocks = XCNEWVEC (basic_block, loop->num_nodes);
1181 visited = BITMAP_ALLOC (NULL);
1183 blocks_in_bfs_order = get_loop_body_in_bfs_order (loop);
1185 index = 0;
1186 while (index < loop->num_nodes)
1188 bb = blocks_in_bfs_order [index];
1190 if (bb->flags & BB_IRREDUCIBLE_LOOP)
1192 free (blocks_in_bfs_order);
1193 BITMAP_FREE (visited);
1194 free (blocks);
1195 return NULL;
1198 if (!bitmap_bit_p (visited, bb->index))
1200 if (pred_blocks_visited_p (bb, &visited)
1201 || bb == loop->header)
1203 /* This block is now visited. */
1204 bitmap_set_bit (visited, bb->index);
1205 blocks[visited_count++] = bb;
1209 index++;
1211 if (index == loop->num_nodes
1212 && visited_count != loop->num_nodes)
1213 /* Not done yet. */
1214 index = 0;
1216 free (blocks_in_bfs_order);
1217 BITMAP_FREE (visited);
1218 return blocks;
1221 /* Returns true when the analysis of the predicates for all the basic
1222 blocks in LOOP succeeded.
1224 predicate_bbs first allocates the predicates of the basic blocks.
1225 These fields are then initialized with the tree expressions
1226 representing the predicates under which a basic block is executed
1227 in the LOOP. As the loop->header is executed at each iteration, it
1228 has the "true" predicate. Other statements executed under a
1229 condition are predicated with that condition, for example
1231 | if (x)
1232 | S1;
1233 | else
1234 | S2;
1236 S1 will be predicated with "x", and
1237 S2 will be predicated with "!x". */
1239 static void
1240 predicate_bbs (loop_p loop)
1242 unsigned int i;
1244 for (i = 0; i < loop->num_nodes; i++)
1245 init_bb_predicate (ifc_bbs[i]);
1247 for (i = 0; i < loop->num_nodes; i++)
1249 basic_block bb = ifc_bbs[i];
1250 tree cond;
1251 gimple *stmt;
1253 /* The loop latch and loop exit block are always executed and
1254 have no extra conditions to be processed: skip them. */
1255 if (bb == loop->latch
1256 || bb_with_exit_edge_p (loop, bb))
1258 reset_bb_predicate (bb);
1259 continue;
1262 cond = bb_predicate (bb);
1263 stmt = last_stmt (bb);
1264 if (stmt && gimple_code (stmt) == GIMPLE_COND)
1266 tree c2;
1267 edge true_edge, false_edge;
1268 location_t loc = gimple_location (stmt);
1269 tree c = build2_loc (loc, gimple_cond_code (stmt),
1270 boolean_type_node,
1271 gimple_cond_lhs (stmt),
1272 gimple_cond_rhs (stmt));
1274 /* Add new condition into destination's predicate list. */
1275 extract_true_false_edges_from_block (gimple_bb (stmt),
1276 &true_edge, &false_edge);
1278 /* If C is true, then TRUE_EDGE is taken. */
1279 add_to_dst_predicate_list (loop, true_edge, unshare_expr (cond),
1280 unshare_expr (c));
1282 /* If C is false, then FALSE_EDGE is taken. */
1283 c2 = build1_loc (loc, TRUTH_NOT_EXPR, boolean_type_node,
1284 unshare_expr (c));
1285 add_to_dst_predicate_list (loop, false_edge,
1286 unshare_expr (cond), c2);
1288 cond = NULL_TREE;
1291 /* If current bb has only one successor, then consider it as an
1292 unconditional goto. */
1293 if (single_succ_p (bb))
1295 basic_block bb_n = single_succ (bb);
1297 /* The successor bb inherits the predicate of its
1298 predecessor. If there is no predicate in the predecessor
1299 bb, then consider the successor bb as always executed. */
1300 if (cond == NULL_TREE)
1301 cond = boolean_true_node;
1303 add_to_predicate_list (loop, bb_n, cond);
1307 /* The loop header is always executed. */
1308 reset_bb_predicate (loop->header);
1309 gcc_assert (bb_predicate_gimplified_stmts (loop->header) == NULL
1310 && bb_predicate_gimplified_stmts (loop->latch) == NULL);
1313 /* Return true when LOOP is if-convertible. This is a helper function
1314 for if_convertible_loop_p. REFS and DDRS are initialized and freed
1315 in if_convertible_loop_p. */
1317 static bool
1318 if_convertible_loop_p_1 (struct loop *loop, vec<data_reference_p> *refs)
1320 unsigned int i;
1321 basic_block exit_bb = NULL;
1323 if (find_data_references_in_loop (loop, refs) == chrec_dont_know)
1324 return false;
1326 calculate_dominance_info (CDI_DOMINATORS);
1328 /* Allow statements that can be handled during if-conversion. */
1329 ifc_bbs = get_loop_body_in_if_conv_order (loop);
1330 if (!ifc_bbs)
1332 if (dump_file && (dump_flags & TDF_DETAILS))
1333 fprintf (dump_file, "Irreducible loop\n");
1334 return false;
1337 for (i = 0; i < loop->num_nodes; i++)
1339 basic_block bb = ifc_bbs[i];
1341 if (!if_convertible_bb_p (loop, bb, exit_bb))
1342 return false;
1344 if (bb_with_exit_edge_p (loop, bb))
1345 exit_bb = bb;
1348 for (i = 0; i < loop->num_nodes; i++)
1350 basic_block bb = ifc_bbs[i];
1351 gimple_stmt_iterator gsi;
1353 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1354 switch (gimple_code (gsi_stmt (gsi)))
1356 case GIMPLE_LABEL:
1357 case GIMPLE_ASSIGN:
1358 case GIMPLE_CALL:
1359 case GIMPLE_DEBUG:
1360 case GIMPLE_COND:
1361 gimple_set_uid (gsi_stmt (gsi), 0);
1362 break;
1363 default:
1364 return false;
1368 data_reference_p dr;
1370 innermost_DR_map
1371 = new hash_map<innermost_loop_behavior_hash, data_reference_p>;
1372 baseref_DR_map = new hash_map<tree_operand_hash, data_reference_p>;
1374 calculate_dominance_info (CDI_POST_DOMINATORS);
1375 predicate_bbs (loop);
1377 for (i = 0; refs->iterate (i, &dr); i++)
1379 tree ref = DR_REF (dr);
1381 dr->aux = XNEW (struct ifc_dr);
1382 DR_BASE_W_UNCONDITIONALLY (dr) = false;
1383 DR_RW_UNCONDITIONALLY (dr) = false;
1384 DR_W_UNCONDITIONALLY (dr) = false;
1385 IFC_DR (dr)->rw_predicate = boolean_false_node;
1386 IFC_DR (dr)->w_predicate = boolean_false_node;
1387 IFC_DR (dr)->base_w_predicate = boolean_false_node;
1388 if (gimple_uid (DR_STMT (dr)) == 0)
1389 gimple_set_uid (DR_STMT (dr), i + 1);
1391 /* If DR doesn't have innermost loop behavior or it's a compound
1392 memory reference, we synthesize its innermost loop behavior
1393 for hashing. */
1394 if (TREE_CODE (ref) == COMPONENT_REF
1395 || TREE_CODE (ref) == IMAGPART_EXPR
1396 || TREE_CODE (ref) == REALPART_EXPR
1397 || !(DR_BASE_ADDRESS (dr) || DR_OFFSET (dr)
1398 || DR_INIT (dr) || DR_STEP (dr)))
1400 while (TREE_CODE (ref) == COMPONENT_REF
1401 || TREE_CODE (ref) == IMAGPART_EXPR
1402 || TREE_CODE (ref) == REALPART_EXPR)
1403 ref = TREE_OPERAND (ref, 0);
1405 DR_BASE_ADDRESS (dr) = ref;
1406 DR_OFFSET (dr) = NULL;
1407 DR_INIT (dr) = NULL;
1408 DR_STEP (dr) = NULL;
1409 DR_ALIGNED_TO (dr) = NULL;
1411 hash_memrefs_baserefs_and_store_DRs_read_written_info (dr);
1414 for (i = 0; i < loop->num_nodes; i++)
1416 basic_block bb = ifc_bbs[i];
1417 gimple_stmt_iterator itr;
1419 /* Check the if-convertibility of statements in predicated BBs. */
1420 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
1421 for (itr = gsi_start_bb (bb); !gsi_end_p (itr); gsi_next (&itr))
1422 if (!if_convertible_stmt_p (gsi_stmt (itr), *refs))
1423 return false;
 1426 /* Checking PHIs needs to be done after stmts, because whether there
 1427 are any masked loads or stores affects the tests. */
1428 for (i = 0; i < loop->num_nodes; i++)
1430 basic_block bb = ifc_bbs[i];
1431 gphi_iterator itr;
1433 for (itr = gsi_start_phis (bb); !gsi_end_p (itr); gsi_next (&itr))
1434 if (!if_convertible_phi_p (loop, bb, itr.phi ()))
1435 return false;
1438 if (dump_file)
1439 fprintf (dump_file, "Applying if-conversion\n");
1441 return true;
1444 /* Return true when LOOP is if-convertible.
1445 LOOP is if-convertible if:
1446 - it is innermost,
1447 - it has two or more basic blocks,
1448 - it has only one exit,
 1449 - no successor edge of the loop header is an exit edge,
 1450 - its basic blocks and phi nodes are if-convertible. */
1452 static bool
1453 if_convertible_loop_p (struct loop *loop)
1455 edge e;
1456 edge_iterator ei;
1457 bool res = false;
1458 vec<data_reference_p> refs;
1460 /* Handle only innermost loop. */
1461 if (!loop || loop->inner)
1463 if (dump_file && (dump_flags & TDF_DETAILS))
1464 fprintf (dump_file, "not innermost loop\n");
1465 return false;
1468 /* If only one block, no need for if-conversion. */
1469 if (loop->num_nodes <= 2)
1471 if (dump_file && (dump_flags & TDF_DETAILS))
1472 fprintf (dump_file, "less than 2 basic blocks\n");
1473 return false;
1476 /* More than one loop exit is too much to handle. */
1477 if (!single_exit (loop))
1479 if (dump_file && (dump_flags & TDF_DETAILS))
1480 fprintf (dump_file, "multiple exits\n");
1481 return false;
 1484 /* If one of the loop header's edges is an exit edge then do not
 1485 apply if-conversion. */
1486 FOR_EACH_EDGE (e, ei, loop->header->succs)
1487 if (loop_exit_edge_p (loop, e))
1488 return false;
1490 refs.create (5);
1491 res = if_convertible_loop_p_1 (loop, &refs);
1493 data_reference_p dr;
1494 unsigned int i;
1495 for (i = 0; refs.iterate (i, &dr); i++)
1496 free (dr->aux);
1498 free_data_refs (refs);
1500 delete innermost_DR_map;
1501 innermost_DR_map = NULL;
1503 delete baseref_DR_map;
1504 baseref_DR_map = NULL;
1506 return res;
1509 /* Returns true if def-stmt for phi argument ARG is simple increment/decrement
1510 which is in predicated basic block.
 1511 In fact, we search for the following PHI pattern:
1512 loop-header:
1513 reduc_1 = PHI <..., reduc_2>
1515 if (...)
1516 reduc_3 = ...
1517 reduc_2 = PHI <reduc_1, reduc_3>
 1519 ARG_0 and ARG_1 are the corresponding PHI arguments.
1520 REDUC, OP0 and OP1 contain reduction stmt and its operands.
1521 EXTENDED is true if PHI has > 2 arguments. */
1523 static bool
1524 is_cond_scalar_reduction (gimple *phi, gimple **reduc, tree arg_0, tree arg_1,
1525 tree *op0, tree *op1, bool extended)
1527 tree lhs, r_op1, r_op2;
1528 gimple *stmt;
1529 gimple *header_phi = NULL;
1530 enum tree_code reduction_op;
1531 basic_block bb = gimple_bb (phi);
1532 struct loop *loop = bb->loop_father;
1533 edge latch_e = loop_latch_edge (loop);
1534 imm_use_iterator imm_iter;
1535 use_operand_p use_p;
1536 edge e;
1537 edge_iterator ei;
1538 bool result = false;
1539 if (TREE_CODE (arg_0) != SSA_NAME || TREE_CODE (arg_1) != SSA_NAME)
1540 return false;
1542 if (!extended && gimple_code (SSA_NAME_DEF_STMT (arg_0)) == GIMPLE_PHI)
1544 lhs = arg_1;
1545 header_phi = SSA_NAME_DEF_STMT (arg_0);
1546 stmt = SSA_NAME_DEF_STMT (arg_1);
1548 else if (gimple_code (SSA_NAME_DEF_STMT (arg_1)) == GIMPLE_PHI)
1550 lhs = arg_0;
1551 header_phi = SSA_NAME_DEF_STMT (arg_1);
1552 stmt = SSA_NAME_DEF_STMT (arg_0);
1554 else
1555 return false;
1556 if (gimple_bb (header_phi) != loop->header)
1557 return false;
1559 if (PHI_ARG_DEF_FROM_EDGE (header_phi, latch_e) != PHI_RESULT (phi))
1560 return false;
1562 if (gimple_code (stmt) != GIMPLE_ASSIGN
1563 || gimple_has_volatile_ops (stmt))
1564 return false;
1566 if (!flow_bb_inside_loop_p (loop, gimple_bb (stmt)))
1567 return false;
1569 if (!is_predicated (gimple_bb (stmt)))
1570 return false;
1572 /* Check that stmt-block is predecessor of phi-block. */
1573 FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
1574 if (e->dest == bb)
1576 result = true;
1577 break;
1579 if (!result)
1580 return false;
1582 if (!has_single_use (lhs))
1583 return false;
1585 reduction_op = gimple_assign_rhs_code (stmt);
1586 if (reduction_op != PLUS_EXPR && reduction_op != MINUS_EXPR)
1587 return false;
1588 r_op1 = gimple_assign_rhs1 (stmt);
1589 r_op2 = gimple_assign_rhs2 (stmt);
 1591 /* Make R_OP1 hold the reduction variable. */
1592 if (r_op2 == PHI_RESULT (header_phi)
1593 && reduction_op == PLUS_EXPR)
1594 std::swap (r_op1, r_op2);
1595 else if (r_op1 != PHI_RESULT (header_phi))
1596 return false;
1598 /* Check that R_OP1 is used in reduction stmt or in PHI only. */
1599 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, r_op1)
1601 gimple *use_stmt = USE_STMT (use_p);
1602 if (is_gimple_debug (use_stmt))
1603 continue;
1604 if (use_stmt == stmt)
1605 continue;
1606 if (gimple_code (use_stmt) != GIMPLE_PHI)
1607 return false;
1610 *op0 = r_op1; *op1 = r_op2;
1611 *reduc = stmt;
1612 return true;
1615 /* Converts conditional scalar reduction into unconditional form, e.g.
1616 bb_4
1617 if (_5 != 0) goto bb_5 else goto bb_6
1618 end_bb_4
1619 bb_5
1620 res_6 = res_13 + 1;
1621 end_bb_5
1622 bb_6
1623 # res_2 = PHI <res_13(4), res_6(5)>
1624 end_bb_6
1626 will be converted into sequence
1627 _ifc__1 = _5 != 0 ? 1 : 0;
1628 res_2 = res_13 + _ifc__1;
 1629 Argument SWAP tells whether the arguments of the conditional expression
 1630 should be swapped.
1631 Returns rhs of resulting PHI assignment. */
1633 static tree
1634 convert_scalar_cond_reduction (gimple *reduc, gimple_stmt_iterator *gsi,
1635 tree cond, tree op0, tree op1, bool swap)
1637 gimple_stmt_iterator stmt_it;
1638 gimple *new_assign;
1639 tree rhs;
1640 tree rhs1 = gimple_assign_rhs1 (reduc);
1641 tree tmp = make_temp_ssa_name (TREE_TYPE (rhs1), NULL, "_ifc_");
1642 tree c;
1643 tree zero = build_zero_cst (TREE_TYPE (rhs1));
1645 if (dump_file && (dump_flags & TDF_DETAILS))
1647 fprintf (dump_file, "Found cond scalar reduction.\n");
1648 print_gimple_stmt (dump_file, reduc, 0, TDF_SLIM);
1651 /* Build cond expression using COND and constant operand
1652 of reduction rhs. */
1653 c = fold_build_cond_expr (TREE_TYPE (rhs1),
1654 unshare_expr (cond),
1655 swap ? zero : op1,
1656 swap ? op1 : zero);
1658 /* Create assignment stmt and insert it at GSI. */
1659 new_assign = gimple_build_assign (tmp, c);
1660 gsi_insert_before (gsi, new_assign, GSI_SAME_STMT);
1661 /* Build rhs for unconditional increment/decrement. */
1662 rhs = fold_build2 (gimple_assign_rhs_code (reduc),
1663 TREE_TYPE (rhs1), op0, tmp);
1665 /* Delete original reduction stmt. */
1666 stmt_it = gsi_for_stmt (reduc);
1667 gsi_remove (&stmt_it, true);
1668 release_defs (reduc);
1669 return rhs;
1672 /* Produce condition for all occurrences of ARG in PHI node. */
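/* E.g. (assumed example): if ARG flows in through two edges whose source
   blocks have predicates p_1 and p_2, the generated condition is the
   gimplified form of p_1 | p_2; a true predicate on any such edge short
   circuits the result to true. */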
1674 static tree
1675 gen_phi_arg_condition (gphi *phi, vec<int> *occur,
1676 gimple_stmt_iterator *gsi)
1678 int len;
1679 int i;
1680 tree cond = NULL_TREE;
1681 tree c;
1682 edge e;
1684 len = occur->length ();
1685 gcc_assert (len > 0);
1686 for (i = 0; i < len; i++)
1688 e = gimple_phi_arg_edge (phi, (*occur)[i]);
1689 c = bb_predicate (e->src);
1690 if (is_true_predicate (c))
1692 cond = c;
1693 break;
1695 c = force_gimple_operand_gsi_1 (gsi, unshare_expr (c),
1696 is_gimple_condexpr, NULL_TREE,
1697 true, GSI_SAME_STMT);
1698 if (cond != NULL_TREE)
1700 /* Must build OR expression. */
1701 cond = fold_or_predicates (EXPR_LOCATION (c), c, cond);
1702 cond = force_gimple_operand_gsi_1 (gsi, unshare_expr (cond),
1703 is_gimple_condexpr, NULL_TREE,
1704 true, GSI_SAME_STMT);
1706 else
1707 cond = c;
1709 gcc_assert (cond != NULL_TREE);
1710 return cond;
1713 /* Replace a scalar PHI node with a COND_EXPR using COND as condition.
1714 This routine can handle PHI nodes with more than two arguments.
1716 For example,
1717 S1: A = PHI <x1(1), x2(5)>
1718 is converted into,
1719 S2: A = cond ? x1 : x2;
1721 The generated code is inserted at GSI that points to the top of
1722 basic block's statement list.
 1723 If the PHI node has more than two arguments, a chain of conditional
 1724 expressions is produced, as sketched in the example below. */
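/* Illustrative sketch only (names invented, not the exact lowering): a PHI

   | A_1 = PHI <x1(1), x2(5), x3(7)>

   is replaced by a chain of COND_EXPR assignments guarded by the predicates
   of the incoming edges, e.g.

   | _ifc_1 = p5 ? x2 : x3;
   | A_1 = p1 ? x1 : _ifc_1;  */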
1727 static void
1728 predicate_scalar_phi (gphi *phi, gimple_stmt_iterator *gsi)
1730 gimple *new_stmt = NULL, *reduc;
1731 tree rhs, res, arg0, arg1, op0, op1, scev;
1732 tree cond;
1733 unsigned int index0;
1734 unsigned int max, args_len;
1735 edge e;
1736 basic_block bb;
1737 unsigned int i;
1739 res = gimple_phi_result (phi);
1740 if (virtual_operand_p (res))
1741 return;
1743 if ((rhs = degenerate_phi_result (phi))
1744 || ((scev = analyze_scalar_evolution (gimple_bb (phi)->loop_father,
1745 res))
1746 && !chrec_contains_undetermined (scev)
1747 && scev != res
1748 && (rhs = gimple_phi_arg_def (phi, 0))))
1750 if (dump_file && (dump_flags & TDF_DETAILS))
1752 fprintf (dump_file, "Degenerate phi!\n");
1753 print_gimple_stmt (dump_file, phi, 0, TDF_SLIM);
1755 new_stmt = gimple_build_assign (res, rhs);
1756 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
1757 update_stmt (new_stmt);
1758 return;
1761 bb = gimple_bb (phi);
1762 if (EDGE_COUNT (bb->preds) == 2)
1764 /* Predicate ordinary PHI node with 2 arguments. */
1765 edge first_edge, second_edge;
1766 basic_block true_bb;
1767 first_edge = EDGE_PRED (bb, 0);
1768 second_edge = EDGE_PRED (bb, 1);
1769 cond = bb_predicate (first_edge->src);
1770 if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
1771 std::swap (first_edge, second_edge);
1772 if (EDGE_COUNT (first_edge->src->succs) > 1)
1774 cond = bb_predicate (second_edge->src);
1775 if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
1776 cond = TREE_OPERAND (cond, 0);
1777 else
1778 first_edge = second_edge;
1780 else
1781 cond = bb_predicate (first_edge->src);
 1782 /* Gimplify the condition to a valid cond-expr conditional operand. */
1783 cond = force_gimple_operand_gsi_1 (gsi, unshare_expr (cond),
1784 is_gimple_condexpr, NULL_TREE,
1785 true, GSI_SAME_STMT);
1786 true_bb = first_edge->src;
1787 if (EDGE_PRED (bb, 1)->src == true_bb)
1789 arg0 = gimple_phi_arg_def (phi, 1);
1790 arg1 = gimple_phi_arg_def (phi, 0);
1792 else
1794 arg0 = gimple_phi_arg_def (phi, 0);
1795 arg1 = gimple_phi_arg_def (phi, 1);
1797 if (is_cond_scalar_reduction (phi, &reduc, arg0, arg1,
1798 &op0, &op1, false))
1799 /* Convert reduction stmt into vectorizable form. */
1800 rhs = convert_scalar_cond_reduction (reduc, gsi, cond, op0, op1,
1801 true_bb != gimple_bb (reduc));
1802 else
1803 /* Build new RHS using selected condition and arguments. */
1804 rhs = fold_build_cond_expr (TREE_TYPE (res), unshare_expr (cond),
1805 arg0, arg1);
1806 new_stmt = gimple_build_assign (res, rhs);
1807 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
1808 update_stmt (new_stmt);
1810 if (dump_file && (dump_flags & TDF_DETAILS))
1812 fprintf (dump_file, "new phi replacement stmt\n");
1813 print_gimple_stmt (dump_file, new_stmt, 0, TDF_SLIM);
1815 return;
 1818 /* Create a hash map for the PHI node, mapping each argument value to the
 1819 vector of indexes of the arguments having that value. */
1820 bool swap = false;
1821 hash_map<tree_operand_hash, auto_vec<int> > phi_arg_map;
1822 unsigned int num_args = gimple_phi_num_args (phi);
1823 int max_ind = -1;
1824 /* Vector of different PHI argument values. */
1825 auto_vec<tree> args (num_args);
1827 /* Compute phi_arg_map. */
1828 for (i = 0; i < num_args; i++)
1830 tree arg;
1832 arg = gimple_phi_arg_def (phi, i);
1833 if (!phi_arg_map.get (arg))
1834 args.quick_push (arg);
1835 phi_arg_map.get_or_insert (arg).safe_push (i);
1838 /* Determine element with max number of occurrences. */
1839 max_ind = -1;
1840 max = 1;
1841 args_len = args.length ();
1842 for (i = 0; i < args_len; i++)
1844 unsigned int len;
1845 if ((len = phi_arg_map.get (args[i])->length ()) > max)
1847 max_ind = (int) i;
1848 max = len;
 1852 /* Put the element with the max number of occurrences at the end of ARGS. */
1853 if (max_ind != -1 && max_ind +1 != (int) args_len)
1854 std::swap (args[args_len - 1], args[max_ind]);
 1856 /* Handle the special case when the number of distinct argument values
 1857 equals 2 and one argument value occurs only once. Such a PHI can be
 1858 handled as if it had only 2 arguments. */
1859 if (args_len == 2 && phi_arg_map.get (args[0])->length () == 1)
1861 vec<int> *indexes;
1862 indexes = phi_arg_map.get (args[0]);
1863 index0 = (*indexes)[0];
1864 arg0 = args[0];
1865 arg1 = args[1];
1866 e = gimple_phi_arg_edge (phi, index0);
1867 cond = bb_predicate (e->src);
1868 if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
1870 swap = true;
1871 cond = TREE_OPERAND (cond, 0);
 1873 /* Gimplify the condition to a valid cond-expr conditional operand. */
1874 cond = force_gimple_operand_gsi_1 (gsi, unshare_expr (cond),
1875 is_gimple_condexpr, NULL_TREE,
1876 true, GSI_SAME_STMT);
1877 if (!(is_cond_scalar_reduction (phi, &reduc, arg0 , arg1,
1878 &op0, &op1, true)))
1879 rhs = fold_build_cond_expr (TREE_TYPE (res), unshare_expr (cond),
1880 swap? arg1 : arg0,
1881 swap? arg0 : arg1);
1882 else
1883 /* Convert reduction stmt into vectorizable form. */
1884 rhs = convert_scalar_cond_reduction (reduc, gsi, cond, op0, op1,
1885 swap);
1886 new_stmt = gimple_build_assign (res, rhs);
1887 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
1888 update_stmt (new_stmt);
1890 else
1892 /* Common case. */
1893 vec<int> *indexes;
1894 tree type = TREE_TYPE (gimple_phi_result (phi));
1895 tree lhs;
1896 arg1 = args[1];
1897 for (i = 0; i < args_len; i++)
1899 arg0 = args[i];
1900 indexes = phi_arg_map.get (args[i]);
1901 if (i != args_len - 1)
1902 lhs = make_temp_ssa_name (type, NULL, "_ifc_");
1903 else
1904 lhs = res;
1905 cond = gen_phi_arg_condition (phi, indexes, gsi);
1906 rhs = fold_build_cond_expr (type, unshare_expr (cond),
1907 arg0, arg1);
1908 new_stmt = gimple_build_assign (lhs, rhs);
1909 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
1910 update_stmt (new_stmt);
1911 arg1 = lhs;
1915 if (dump_file && (dump_flags & TDF_DETAILS))
1917 fprintf (dump_file, "new extended phi replacement stmt\n");
1918 print_gimple_stmt (dump_file, new_stmt, 0, TDF_SLIM);
1922 /* Replaces in LOOP all the scalar phi nodes other than those in the
1923 LOOP->header block with conditional modify expressions. */
1925 static void
1926 predicate_all_scalar_phis (struct loop *loop)
1928 basic_block bb;
1929 unsigned int orig_loop_num_nodes = loop->num_nodes;
1930 unsigned int i;
1932 for (i = 1; i < orig_loop_num_nodes; i++)
1934 gphi *phi;
1935 gimple_stmt_iterator gsi;
1936 gphi_iterator phi_gsi;
1937 bb = ifc_bbs[i];
1939 if (bb == loop->header)
1940 continue;
1942 phi_gsi = gsi_start_phis (bb);
1943 if (gsi_end_p (phi_gsi))
1944 continue;
1946 gsi = gsi_after_labels (bb);
1947 while (!gsi_end_p (phi_gsi))
1949 phi = phi_gsi.phi ();
1950 if (virtual_operand_p (gimple_phi_result (phi)))
1951 gsi_next (&phi_gsi);
1952 else
1954 predicate_scalar_phi (phi, &gsi);
1955 remove_phi_node (&phi_gsi, false);
1961 /* Insert in each basic block of LOOP the statements produced by the
1962 gimplification of the predicates. */
1964 static void
1965 insert_gimplified_predicates (loop_p loop)
1967 unsigned int i;
1969 for (i = 0; i < loop->num_nodes; i++)
1971 basic_block bb = ifc_bbs[i];
1972 gimple_seq stmts;
1973 if (!is_predicated (bb))
1974 gcc_assert (bb_predicate_gimplified_stmts (bb) == NULL);
1975 if (!is_predicated (bb))
1977 /* Do not insert statements for a basic block that is not
1978 predicated. Also make sure that the predicate of the
1979 basic block is set to true. */
1980 reset_bb_predicate (bb);
1981 continue;
1984 stmts = bb_predicate_gimplified_stmts (bb);
1985 if (stmts)
1987 if (any_pred_load_store)
1989 /* Insert the predicate of the BB just after the label,
1990 as the if-conversion of memory writes will use this
1991 predicate. */
1992 gimple_stmt_iterator gsi = gsi_after_labels (bb);
1993 gsi_insert_seq_before (&gsi, stmts, GSI_SAME_STMT);
1995 else
1997 /* Insert the predicate of the BB at the end of the BB
1998 as this would reduce the register pressure: the only
1999 use of this predicate will be in successor BBs. */
2000 gimple_stmt_iterator gsi = gsi_last_bb (bb);
2002 if (gsi_end_p (gsi)
2003 || stmt_ends_bb_p (gsi_stmt (gsi)))
2004 gsi_insert_seq_before (&gsi, stmts, GSI_SAME_STMT);
2005 else
2006 gsi_insert_seq_after (&gsi, stmts, GSI_SAME_STMT);
2009 /* Once the sequence is code generated, set it to NULL. */
2010 set_bb_predicate_gimplified_stmts (bb, NULL);
2015 /* Helper function for predicate_mem_writes.  Returns the index of an
2016 existing mask if one was already created for the given SIZE, and -1 otherwise. */
2018 static int
2019 mask_exists (int size, vec<int> vec)
2021 unsigned int ix;
2022 int v;
2023 FOR_EACH_VEC_ELT (vec, ix, v)
2024 if (v == size)
2025 return (int) ix;
2026 return -1;
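/* For instance, if VEC currently holds the sizes {32, 64}, then
   mask_exists (64, vec) returns 1 while mask_exists (16, vec) returns -1
   (illustrative values only).  */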
2029 /* Predicate each write to memory in LOOP.
2031 This function transforms control flow constructs containing memory
2032 writes of the form:
2034 | for (i = 0; i < N; i++)
2035 | if (cond)
2036 | A[i] = expr;
2038 into the following form that does not contain control flow:
2040 | for (i = 0; i < N; i++)
2041 | A[i] = cond ? expr : A[i];
2043 The original CFG looks like this:
2045 | bb_0
2046 | i = 0
2047 | end_bb_0
2049 | bb_1
2050 | if (i < N) goto bb_5 else goto bb_2
2051 | end_bb_1
2053 | bb_2
2054 | cond = some_computation;
2055 | if (cond) goto bb_3 else goto bb_4
2056 | end_bb_2
2058 | bb_3
2059 | A[i] = expr;
2060 | goto bb_4
2061 | end_bb_3
2063 | bb_4
2064 | goto bb_1
2065 | end_bb_4
2067 insert_gimplified_predicates inserts the computation of the COND
2068 expression at the beginning of the destination basic block:
2070 | bb_0
2071 | i = 0
2072 | end_bb_0
2074 | bb_1
2075 | if (i < N) goto bb_5 else goto bb_2
2076 | end_bb_1
2078 | bb_2
2079 | cond = some_computation;
2080 | if (cond) goto bb_3 else goto bb_4
2081 | end_bb_2
2083 | bb_3
2084 | cond = some_computation;
2085 | A[i] = expr;
2086 | goto bb_4
2087 | end_bb_3
2089 | bb_4
2090 | goto bb_1
2091 | end_bb_4
2093 predicate_mem_writes then predicates the memory write as follows:
2095 | bb_0
2096 | i = 0
2097 | end_bb_0
2099 | bb_1
2100 | if (i < N) goto bb_5 else goto bb_2
2101 | end_bb_1
2103 | bb_2
2104 | if (cond) goto bb_3 else goto bb_4
2105 | end_bb_2
2107 | bb_3
2108 | cond = some_computation;
2109 | A[i] = cond ? expr : A[i];
2110 | goto bb_4
2111 | end_bb_3
2113 | bb_4
2114 | goto bb_1
2115 | end_bb_4
2117 and finally combine_blocks removes the basic block boundaries, making
2118 the loop vectorizable:
2120 | bb_0
2121 | i = 0
2122 | if (i < N) goto bb_5 else goto bb_1
2123 | end_bb_0
2125 | bb_1
2126 | cond = some_computation;
2127 | A[i] = cond ? expr : A[i];
2128 | if (i < N) goto bb_5 else goto bb_4
2129 | end_bb_1
2131 | bb_4
2132 | goto bb_1
2133 | end_bb_4
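/* As a concrete source-level illustration (hypothetical function, not
   part of this file), with -O3 -ftree-loop-if-convert a loop like

     void
     clamp_above (int *a, int n)
     {
       for (int i = 0; i < n; i++)
         if (a[i] > 41)
           a[i] = 42;
     }

   is if-converted so that the store executes unconditionally:

     for (int i = 0; i < n; i++)
       a[i] = a[i] > 41 ? 42 : a[i];

   or, when the target provides masked stores, into a MASK_STORE guarded
   by the result of the comparison.  */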
2136 static void
2137 predicate_mem_writes (loop_p loop)
2139 unsigned int i, orig_loop_num_nodes = loop->num_nodes;
2140 auto_vec<int, 1> vect_sizes;
2141 auto_vec<tree, 1> vect_masks;
2143 for (i = 1; i < orig_loop_num_nodes; i++)
2145 gimple_stmt_iterator gsi;
2146 basic_block bb = ifc_bbs[i];
2147 tree cond = bb_predicate (bb);
2148 bool swap;
2149 gimple *stmt;
2150 int index;
2152 if (is_true_predicate (cond) || is_false_predicate (cond))
2153 continue;
2155 swap = false;
2156 if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
2158 swap = true;
2159 cond = TREE_OPERAND (cond, 0);
2162 vect_sizes.truncate (0);
2163 vect_masks.truncate (0);
2165 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2166 if (!gimple_assign_single_p (stmt = gsi_stmt (gsi)))
2167 continue;
2168 else if (gimple_plf (stmt, GF_PLF_2))
2170 tree lhs = gimple_assign_lhs (stmt);
2171 tree rhs = gimple_assign_rhs1 (stmt);
2172 tree ref, addr, ptr, mask;
2173 gimple *new_stmt;
2174 gimple_seq stmts = NULL;
2175 int bitsize = GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (lhs)));
2176 ref = TREE_CODE (lhs) == SSA_NAME ? rhs : lhs;
2177 mark_addressable (ref);
2178 addr = force_gimple_operand_gsi (&gsi, build_fold_addr_expr (ref),
2179 true, NULL_TREE, true,
2180 GSI_SAME_STMT);
2181 if (!vect_sizes.is_empty ()
2182 && (index = mask_exists (bitsize, vect_sizes)) != -1)
2183 /* Use created mask. */
2184 mask = vect_masks[index];
2185 else
2187 if (COMPARISON_CLASS_P (cond))
2188 mask = gimple_build (&stmts, TREE_CODE (cond),
2189 boolean_type_node,
2190 TREE_OPERAND (cond, 0),
2191 TREE_OPERAND (cond, 1));
2192 else
2194 gcc_assert (TREE_CODE (cond) == SSA_NAME);
2195 mask = cond;
2198 if (swap)
2200 tree true_val
2201 = constant_boolean_node (true, TREE_TYPE (mask));
2202 mask = gimple_build (&stmts, BIT_XOR_EXPR,
2203 TREE_TYPE (mask), mask, true_val);
2205 gsi_insert_seq_before (&gsi, stmts, GSI_SAME_STMT);
2207 mask = ifc_temp_var (TREE_TYPE (mask), mask, &gsi);
2208 /* Save mask and its size for further use. */
2209 vect_sizes.safe_push (bitsize);
2210 vect_masks.safe_push (mask);
2212 ptr = build_int_cst (reference_alias_ptr_type (ref),
2213 get_object_alignment (ref));
2214 /* Copy points-to info if possible. */
2215 if (TREE_CODE (addr) == SSA_NAME && !SSA_NAME_PTR_INFO (addr))
2216 copy_ref_info (build2 (MEM_REF, TREE_TYPE (ref), addr, ptr),
2217 ref);
2218 if (TREE_CODE (lhs) == SSA_NAME)
2220 new_stmt
2221 = gimple_build_call_internal (IFN_MASK_LOAD, 3, addr,
2222 ptr, mask);
2223 gimple_call_set_lhs (new_stmt, lhs);
2224 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
2226 else
2228 new_stmt
2229 = gimple_build_call_internal (IFN_MASK_STORE, 4, addr, ptr,
2230 mask, rhs);
2231 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
2232 gimple_set_vdef (new_stmt, gimple_vdef (stmt));
2233 SSA_NAME_DEF_STMT (gimple_vdef (new_stmt)) = new_stmt;
2236 gsi_replace (&gsi, new_stmt, true);
2238 else if (gimple_vdef (stmt))
2240 tree lhs = gimple_assign_lhs (stmt);
2241 tree rhs = gimple_assign_rhs1 (stmt);
2242 tree type = TREE_TYPE (lhs);
2244 lhs = ifc_temp_var (type, unshare_expr (lhs), &gsi);
2245 rhs = ifc_temp_var (type, unshare_expr (rhs), &gsi);
2246 if (swap)
2247 std::swap (lhs, rhs);
2248 cond = force_gimple_operand_gsi_1 (&gsi, unshare_expr (cond),
2249 is_gimple_condexpr, NULL_TREE,
2250 true, GSI_SAME_STMT);
2251 rhs = fold_build_cond_expr (type, unshare_expr (cond), rhs, lhs);
2252 gimple_assign_set_rhs1 (stmt, ifc_temp_var (type, rhs, &gsi));
2253 update_stmt (stmt);
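/* Schematically (dump syntax approximate, argument details elided), the
   predicated accesses produced above take one of two shapes:

     _v = MASK_LOAD (addr, align, _mask);
     MASK_STORE (addr, align, _mask, rhs);

   for the IFN_MASK_LOAD/IFN_MASK_STORE path, while stores that are not
   turned into masked calls become unconditional, writing back either the
   new or the old value:

     lhs = cond ? rhs : lhs;  */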
2258 /* Remove all GIMPLE_CONDs and GIMPLE_LABELs from all the basic blocks
2259 other than the exit and latch of the LOOP.  Also reset the
2260 GIMPLE_DEBUG information. */
2262 static void
2263 remove_conditions_and_labels (loop_p loop)
2265 gimple_stmt_iterator gsi;
2266 unsigned int i;
2268 for (i = 0; i < loop->num_nodes; i++)
2270 basic_block bb = ifc_bbs[i];
2272 if (bb_with_exit_edge_p (loop, bb)
2273 || bb == loop->latch)
2274 continue;
2276 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
2277 switch (gimple_code (gsi_stmt (gsi)))
2279 case GIMPLE_COND:
2280 case GIMPLE_LABEL:
2281 gsi_remove (&gsi, true);
2282 break;
2284 case GIMPLE_DEBUG:
2285 /* ??? Should there be conditional GIMPLE_DEBUG_BINDs? */
2286 if (gimple_debug_bind_p (gsi_stmt (gsi)))
2288 gimple_debug_bind_reset_value (gsi_stmt (gsi));
2289 update_stmt (gsi_stmt (gsi));
2291 gsi_next (&gsi);
2292 break;
2294 default:
2295 gsi_next (&gsi);
2300 /* Combine all the basic blocks from LOOP into one or two super basic
2301 blocks. Replace PHI nodes with conditional modify expressions. */
2303 static void
2304 combine_blocks (struct loop *loop)
2306 basic_block bb, exit_bb, merge_target_bb;
2307 unsigned int orig_loop_num_nodes = loop->num_nodes;
2308 unsigned int i;
2309 edge e;
2310 edge_iterator ei;
2312 remove_conditions_and_labels (loop);
2313 insert_gimplified_predicates (loop);
2314 predicate_all_scalar_phis (loop);
2316 if (any_pred_load_store)
2317 predicate_mem_writes (loop);
2319 /* Merge basic blocks: first remove all the edges in the loop,
2320 except for those from the exit block. */
2321 exit_bb = NULL;
2322 bool *predicated = XNEWVEC (bool, orig_loop_num_nodes);
2323 for (i = 0; i < orig_loop_num_nodes; i++)
2325 bb = ifc_bbs[i];
2326 predicated[i] = !is_true_predicate (bb_predicate (bb));
2327 free_bb_predicate (bb);
2328 if (bb_with_exit_edge_p (loop, bb))
2330 gcc_assert (exit_bb == NULL);
2331 exit_bb = bb;
2334 gcc_assert (exit_bb != loop->latch);
2336 for (i = 1; i < orig_loop_num_nodes; i++)
2338 bb = ifc_bbs[i];
2340 for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei));)
2342 if (e->src == exit_bb)
2343 ei_next (&ei);
2344 else
2345 remove_edge (e);
2349 if (exit_bb != NULL)
2351 if (exit_bb != loop->header)
2353 /* Connect this node to loop header. */
2354 make_edge (loop->header, exit_bb, EDGE_FALLTHRU);
2355 set_immediate_dominator (CDI_DOMINATORS, exit_bb, loop->header);
2358 /* Redirect non-exit edges to loop->latch. */
2359 FOR_EACH_EDGE (e, ei, exit_bb->succs)
2361 if (!loop_exit_edge_p (loop, e))
2362 redirect_edge_and_branch (e, loop->latch);
2364 set_immediate_dominator (CDI_DOMINATORS, loop->latch, exit_bb);
2366 else
2368 /* If the loop does not have an exit, reconnect header and latch. */
2369 make_edge (loop->header, loop->latch, EDGE_FALLTHRU);
2370 set_immediate_dominator (CDI_DOMINATORS, loop->latch, loop->header);
2373 merge_target_bb = loop->header;
2375 /* Get at the virtual def valid for uses starting at the first block
2376 we merge into the header. Without a virtual PHI the loop has the
2377 same virtual use on all stmts. */
2378 gphi *vphi = get_virtual_phi (loop->header);
2379 tree last_vdef = NULL_TREE;
2380 if (vphi)
2382 last_vdef = gimple_phi_result (vphi);
2383 for (gimple_stmt_iterator gsi = gsi_start_bb (loop->header);
2384 ! gsi_end_p (gsi); gsi_next (&gsi))
2385 if (gimple_vdef (gsi_stmt (gsi)))
2386 last_vdef = gimple_vdef (gsi_stmt (gsi));
2388 for (i = 1; i < orig_loop_num_nodes; i++)
2390 gimple_stmt_iterator gsi;
2391 gimple_stmt_iterator last;
2393 bb = ifc_bbs[i];
2395 if (bb == exit_bb || bb == loop->latch)
2396 continue;
2398 /* We release virtual PHIs late because we have to propagate them
2399 out using the current VUSE. The def might be the one used
2400 after the loop. */
2401 vphi = get_virtual_phi (bb);
2402 if (vphi)
2404 imm_use_iterator iter;
2405 use_operand_p use_p;
2406 gimple *use_stmt;
2407 FOR_EACH_IMM_USE_STMT (use_stmt, iter, gimple_phi_result (vphi))
2409 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2410 SET_USE (use_p, last_vdef);
2412 gsi = gsi_for_stmt (vphi);
2413 remove_phi_node (&gsi, true);
2416 /* Make the stmts members of loop->header and clear range info from all
2417 stmts in BB, which is no longer executed conditionally on a predicate
2418 the range info could have been derived from. */
2419 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2421 gimple *stmt = gsi_stmt (gsi);
2422 gimple_set_bb (stmt, merge_target_bb);
2423 /* Update virtual operands. */
2424 if (last_vdef)
2426 use_operand_p use_p = ssa_vuse_operand (stmt);
2427 if (use_p
2428 && USE_FROM_PTR (use_p) != last_vdef)
2429 SET_USE (use_p, last_vdef);
2430 if (gimple_vdef (stmt))
2431 last_vdef = gimple_vdef (stmt);
2433 if (predicated[i])
2435 ssa_op_iter i;
2436 tree op;
2437 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF)
2438 reset_flow_sensitive_info (op);
2442 /* Update stmt list. */
2443 last = gsi_last_bb (merge_target_bb);
2444 gsi_insert_seq_after_without_update (&last, bb_seq (bb), GSI_NEW_STMT);
2445 set_bb_seq (bb, NULL);
2447 delete_basic_block (bb);
2450 /* If possible, merge the loop header into the block with the exit edge.
2451 This reduces the number of basic blocks to two, to please the
2452 vectorizer, which handles only loops with two nodes. */
2453 if (exit_bb
2454 && exit_bb != loop->header)
2456 /* We release virtual PHIs late because we have to propagate them
2457 out using the current VUSE. The def might be the one used
2458 after the loop. */
2459 vphi = get_virtual_phi (exit_bb);
2460 if (vphi)
2462 imm_use_iterator iter;
2463 use_operand_p use_p;
2464 gimple *use_stmt;
2465 FOR_EACH_IMM_USE_STMT (use_stmt, iter, gimple_phi_result (vphi))
2467 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2468 SET_USE (use_p, last_vdef);
2470 gimple_stmt_iterator gsi = gsi_for_stmt (vphi);
2471 remove_phi_node (&gsi, true);
2474 if (can_merge_blocks_p (loop->header, exit_bb))
2475 merge_blocks (loop->header, exit_bb);
2478 free (ifc_bbs);
2479 ifc_bbs = NULL;
2480 free (predicated);
2483 /* Version LOOP before if-converting it: the original loop
2484 will be if-converted, the new copy of the loop will not,
2485 and the LOOP_VECTORIZED internal call guards which of the two
2486 loops to execute. The vectorizer pass later folds this
2487 internal call to either true or false. */
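/* Schematically (illustrative pseudo-source, not an actual dump):

     if (LOOP_VECTORIZED (orig_loop_num, copy_loop_num))
       ... original loop, if-converted and later vectorized ...
     else
       ... scalar copy with dont_vectorize set, left untouched ...  */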
2489 static bool
2490 version_loop_for_if_conversion (struct loop *loop)
2492 basic_block cond_bb;
2493 tree cond = make_ssa_name (boolean_type_node);
2494 struct loop *new_loop;
2495 gimple *g;
2496 gimple_stmt_iterator gsi;
2498 g = gimple_build_call_internal (IFN_LOOP_VECTORIZED, 2,
2499 build_int_cst (integer_type_node, loop->num),
2500 integer_zero_node);
2501 gimple_call_set_lhs (g, cond);
2503 /* Save BB->aux around loop_version as that uses the same field. */
2504 void **saved_preds = XALLOCAVEC (void *, loop->num_nodes);
2505 for (unsigned i = 0; i < loop->num_nodes; i++)
2506 saved_preds[i] = ifc_bbs[i]->aux;
2508 initialize_original_copy_tables ();
2509 new_loop = loop_version (loop, cond, &cond_bb,
2510 REG_BR_PROB_BASE, REG_BR_PROB_BASE,
2511 REG_BR_PROB_BASE, true);
2512 free_original_copy_tables ();
2514 for (unsigned i = 0; i < loop->num_nodes; i++)
2515 ifc_bbs[i]->aux = saved_preds[i];
2517 if (new_loop == NULL)
2518 return false;
2520 new_loop->dont_vectorize = true;
2521 new_loop->force_vectorize = false;
2522 gsi = gsi_last_bb (cond_bb);
2523 gimple_call_set_arg (g, 1, build_int_cst (integer_type_node, new_loop->num));
2524 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
2525 update_ssa (TODO_update_ssa);
2526 return true;
2529 /* Perform splitting of critical edges.  Skip splitting and return false
2530 if LOOP will not be converted because:
2532 - LOOP is not well formed.
2533 - LOOP has a PHI with more than MAX_PHI_ARG_NUM arguments.
2535 The last restriction applies only when AGGRESSIVE_IF_CONV is false. */
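/* Reminder (illustrative CFG, not from a dump): an edge is critical when
   its source has more than one successor and its destination has more
   than one predecessor, e.g. the edge bb_2 -> bb_4 in

     bb_2: if (c) goto bb_3; else goto bb_4;
     bb_3: ...; goto bb_4;
     bb_4: ...

   split_edge inserts a new forwarder block on such an edge.  */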
2537 static bool
2538 ifcvt_split_critical_edges (struct loop *loop, bool aggressive_if_conv)
2540 basic_block *body;
2541 basic_block bb;
2542 unsigned int num = loop->num_nodes;
2543 unsigned int i;
2544 gimple *stmt;
2545 edge e;
2546 edge_iterator ei;
2547 auto_vec<edge> critical_edges;
2549 /* Loop is not well formed. */
2550 if (num <= 2 || loop->inner || !single_exit (loop))
2551 return false;
2553 body = get_loop_body (loop);
2554 for (i = 0; i < num; i++)
2556 bb = body[i];
2557 if (!aggressive_if_conv
2558 && phi_nodes (bb)
2559 && EDGE_COUNT (bb->preds) > MAX_PHI_ARG_NUM)
2561 if (dump_file && (dump_flags & TDF_DETAILS))
2562 fprintf (dump_file,
2563 "BB %d has complicated PHI with more than %u args.\n",
2564 bb->index, MAX_PHI_ARG_NUM);
2566 free (body);
2567 return false;
2569 if (bb == loop->latch || bb_with_exit_edge_p (loop, bb))
2570 continue;
2572 stmt = last_stmt (bb);
2573 /* Skip basic blocks not ending with conditional branch. */
2574 if (!stmt || gimple_code (stmt) != GIMPLE_COND)
2575 continue;
2577 FOR_EACH_EDGE (e, ei, bb->succs)
2578 if (EDGE_CRITICAL_P (e) && e->dest->loop_father == loop)
2579 critical_edges.safe_push (e);
2581 free (body);
2583 while (critical_edges.length () > 0)
2585 e = critical_edges.pop ();
2586 /* Don't split if bb can be predicated along non-critical edge. */
2587 if (EDGE_COUNT (e->dest->preds) > 2 || all_preds_critical_p (e->dest))
2588 split_edge (e);
2591 return true;
2594 /* Delete redundant statements produced by predication; left in place
2595 they would prevent loop vectorization. */
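/* Typical candidates are predicate computations whose only consumer was
   a GIMPLE_COND removed by remove_conditions_and_labels, e.g. (made-up
   names) a comparison like

     _p = i_2 > n_3;

   that has no remaining uses once the blocks are combined.
   tree_if_conversion calls this on the merged loop->header.  */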
2597 static void
2598 ifcvt_local_dce (basic_block bb)
2600 gimple *stmt;
2601 gimple *stmt1;
2602 gimple *phi;
2603 gimple_stmt_iterator gsi;
2604 auto_vec<gimple *> worklist;
2605 enum gimple_code code;
2606 use_operand_p use_p;
2607 imm_use_iterator imm_iter;
2609 worklist.create (64);
2610 /* Consider all PHIs as live statements. */
2611 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2613 phi = gsi_stmt (gsi);
2614 gimple_set_plf (phi, GF_PLF_2, true);
2615 worklist.safe_push (phi);
2617 /* Consider load/store statements, debug stmts, CALLs and CONDs as live. */
2618 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2620 stmt = gsi_stmt (gsi);
2621 if (gimple_store_p (stmt)
2622 || gimple_assign_load_p (stmt)
2623 || is_gimple_debug (stmt))
2625 gimple_set_plf (stmt, GF_PLF_2, true);
2626 worklist.safe_push (stmt);
2627 continue;
2629 code = gimple_code (stmt);
2630 if (code == GIMPLE_COND || code == GIMPLE_CALL)
2632 gimple_set_plf (stmt, GF_PLF_2, true);
2633 worklist.safe_push (stmt);
2634 continue;
2636 gimple_set_plf (stmt, GF_PLF_2, false);
2638 if (code == GIMPLE_ASSIGN)
2640 tree lhs = gimple_assign_lhs (stmt);
2641 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
2643 stmt1 = USE_STMT (use_p);
2644 if (gimple_bb (stmt1) != bb)
2646 gimple_set_plf (stmt, GF_PLF_2, true);
2647 worklist.safe_push (stmt);
2648 break;
2653 /* Propagate liveness through the uses of live stmts. */
2654 while (worklist.length () > 0)
2656 ssa_op_iter iter;
2657 use_operand_p use_p;
2658 tree use;
2660 stmt = worklist.pop ();
2661 FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
2663 use = USE_FROM_PTR (use_p);
2664 if (TREE_CODE (use) != SSA_NAME)
2665 continue;
2666 stmt1 = SSA_NAME_DEF_STMT (use);
2667 if (gimple_bb (stmt1) != bb
2668 || gimple_plf (stmt1, GF_PLF_2))
2669 continue;
2670 gimple_set_plf (stmt1, GF_PLF_2, true);
2671 worklist.safe_push (stmt1);
2674 /* Delete dead statements. */
2675 gsi = gsi_start_bb (bb);
2676 while (!gsi_end_p (gsi))
2678 stmt = gsi_stmt (gsi);
2679 if (gimple_plf (stmt, GF_PLF_2))
2681 gsi_next (&gsi);
2682 continue;
2684 if (dump_file && (dump_flags & TDF_DETAILS))
2686 fprintf (dump_file, "Delete dead stmt in bb#%d\n", bb->index);
2687 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2689 gsi_remove (&gsi, true);
2690 release_defs (stmt);
2694 /* If-convert LOOP when it is legal. For the moment this pass has no
2695 profitability analysis. Returns non-zero todo flags when something
2696 changed. */
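/* The driver below proceeds as follows: split critical edges, check that
   LOOP is if-convertible, version the loop when predicated memory
   accesses or complicated PHIs require it, combine the basic blocks, and
   finally run the block-local dead code elimination on the merged
   header.  */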
2698 static unsigned int
2699 tree_if_conversion (struct loop *loop)
2701 unsigned int todo = 0;
2702 bool aggressive_if_conv;
2704 ifc_bbs = NULL;
2705 any_pred_load_store = false;
2706 any_complicated_phi = false;
2708 /* Apply more aggressive if-conversion when the loop or its outer loop was
2709 marked with the simd pragma. When that's the case, we try to if-convert
2710 loops containing PHIs with more than MAX_PHI_ARG_NUM arguments. */
2711 aggressive_if_conv = loop->force_vectorize;
2712 if (!aggressive_if_conv)
2714 struct loop *outer_loop = loop_outer (loop);
2715 if (outer_loop && outer_loop->force_vectorize)
2716 aggressive_if_conv = true;
2719 if (!ifcvt_split_critical_edges (loop, aggressive_if_conv))
2720 goto cleanup;
2722 if (!if_convertible_loop_p (loop)
2723 || !dbg_cnt (if_conversion_tree))
2724 goto cleanup;
2726 if ((any_pred_load_store || any_complicated_phi)
2727 && ((!flag_tree_loop_vectorize && !loop->force_vectorize)
2728 || loop->dont_vectorize))
2729 goto cleanup;
2731 if ((any_pred_load_store || any_complicated_phi)
2732 && !version_loop_for_if_conversion (loop))
2733 goto cleanup;
2735 /* Now all statements are if-convertible. Combine all the basic
2736 blocks into one huge basic block doing the if-conversion
2737 on-the-fly. */
2738 combine_blocks (loop);
2740 /* Delete dead predicate computations. */
2741 ifcvt_local_dce (loop->header);
2743 todo |= TODO_cleanup_cfg;
2745 cleanup:
2746 if (ifc_bbs)
2748 unsigned int i;
2750 for (i = 0; i < loop->num_nodes; i++)
2751 free_bb_predicate (ifc_bbs[i]);
2753 free (ifc_bbs);
2754 ifc_bbs = NULL;
2756 free_dominance_info (CDI_POST_DOMINATORS);
2758 return todo;
2761 /* Tree if-conversion pass management. */
2763 namespace {
2765 const pass_data pass_data_if_conversion =
2767 GIMPLE_PASS, /* type */
2768 "ifcvt", /* name */
2769 OPTGROUP_NONE, /* optinfo_flags */
2770 TV_TREE_LOOP_IFCVT, /* tv_id */
2771 ( PROP_cfg | PROP_ssa ), /* properties_required */
2772 0, /* properties_provided */
2773 0, /* properties_destroyed */
2774 0, /* todo_flags_start */
2775 0, /* todo_flags_finish */
2778 class pass_if_conversion : public gimple_opt_pass
2780 public:
2781 pass_if_conversion (gcc::context *ctxt)
2782 : gimple_opt_pass (pass_data_if_conversion, ctxt)
2785 /* opt_pass methods: */
2786 virtual bool gate (function *);
2787 virtual unsigned int execute (function *);
2789 }; // class pass_if_conversion
2791 bool
2792 pass_if_conversion::gate (function *fun)
2794 return (((flag_tree_loop_vectorize || fun->has_force_vectorize_loops)
2795 && flag_tree_loop_if_convert != 0)
2796 || flag_tree_loop_if_convert == 1
2797 || flag_tree_loop_if_convert_stores == 1);
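/* In other words, the pass runs when loop vectorization is enabled (or
   the function contains force-vectorize loops) and -ftree-loop-if-convert
   was not explicitly disabled, or when -ftree-loop-if-convert or
   -ftree-loop-if-convert-stores was requested explicitly.  */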
2800 unsigned int
2801 pass_if_conversion::execute (function *fun)
2803 struct loop *loop;
2804 unsigned todo = 0;
2806 if (number_of_loops (fun) <= 1)
2807 return 0;
2809 /* If there are infinite loops, during CDI_POST_DOMINATORS computation
2810 we can pick a more or less random bb inside the infinite loop as the
2811 one that gets the fake edge. If we are unlucky enough, this can confuse
2812 the add_to_predicate_list post-dominator check into optimizing as if
2813 that bb or some other one were a join block when it actually is not.
2814 See PR70916. */
2815 connect_infinite_loops_to_exit ();
2817 FOR_EACH_LOOP (loop, 0)
2818 if (flag_tree_loop_if_convert == 1
2819 || flag_tree_loop_if_convert_stores == 1
2820 || ((flag_tree_loop_vectorize || loop->force_vectorize)
2821 && !loop->dont_vectorize))
2822 todo |= tree_if_conversion (loop);
2824 remove_fake_exit_edges ();
2826 if (flag_checking)
2828 basic_block bb;
2829 FOR_EACH_BB_FN (bb, fun)
2830 gcc_assert (!bb->aux);
2833 return todo;
2836 } // anon namespace
2838 gimple_opt_pass *
2839 make_pass_if_conversion (gcc::context *ctxt)
2841 return new pass_if_conversion (ctxt);