gcc/tree-if-conv.c
1 /* If-conversion for vectorizer.
2 Copyright (C) 2004-2016 Free Software Foundation, Inc.
3 Contributed by Devang Patel <dpatel@apple.com>
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 /* This pass implements a tree level if-conversion of loops. Its
22 initial goal is to help the vectorizer to vectorize loops with
23 conditions.
25 A short description of if-conversion:
27 o Decide if a loop is if-convertible or not.
28 o Walk all loop basic blocks in breadth first order (BFS order).
29 o Remove conditional statements (at the end of basic block)
30 and propagate condition into destination basic blocks'
31 predicate list.
32 o Replace modify expression with conditional modify expression
33 using current basic block's condition.
34 o Merge all basic blocks
35 o Replace phi nodes with conditional modify expr
36 o Merge all basic blocks into header
38 Sample transformation:
40 INPUT
41 -----
43 # i_23 = PHI <0(0), i_18(10)>;
44 <L0>:;
45 j_15 = A[i_23];
46 if (j_15 > 41) goto <L1>; else goto <L17>;
48 <L17>:;
49 goto <bb 3> (<L3>);
51 <L1>:;
53 # iftmp.2_4 = PHI <0(8), 42(2)>;
54 <L3>:;
55 A[i_23] = iftmp.2_4;
56 i_18 = i_23 + 1;
57 if (i_18 <= 15) goto <L19>; else goto <L18>;
59 <L19>:;
60 goto <bb 1> (<L0>);
62 <L18>:;
64 OUTPUT
65 ------
67 # i_23 = PHI <0(0), i_18(10)>;
68 <L0>:;
69 j_15 = A[i_23];
71 <L3>:;
72 iftmp.2_4 = j_15 > 41 ? 42 : 0;
73 A[i_23] = iftmp.2_4;
74 i_18 = i_23 + 1;
75 if (i_18 <= 15) goto <L19>; else goto <L18>;
77 <L19>:;
78 goto <bb 1> (<L0>);
80 <L18>:;
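/* At the source level, the sample transformation above corresponds
   roughly to rewriting (an illustrative sketch only; the array name and
   bounds are taken from the GIMPLE sample above):

   | for (i = 0; i <= 15; i++)
   |   if (A[i] > 41)
   |     A[i] = 42;
   |   else
   |     A[i] = 0;

   into the branch-free loop

   | for (i = 0; i <= 15; i++)
   |   A[i] = A[i] > 41 ? 42 : 0;

   which the vectorizer can then handle.  */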
83 #include "config.h"
84 #include "system.h"
85 #include "coretypes.h"
86 #include "backend.h"
87 #include "rtl.h"
88 #include "tree.h"
89 #include "gimple.h"
90 #include "cfghooks.h"
91 #include "tree-pass.h"
92 #include "ssa.h"
93 #include "expmed.h"
94 #include "optabs-query.h"
95 #include "gimple-pretty-print.h"
96 #include "alias.h"
97 #include "fold-const.h"
98 #include "stor-layout.h"
99 #include "gimple-fold.h"
100 #include "gimplify.h"
101 #include "gimple-iterator.h"
102 #include "gimplify-me.h"
103 #include "tree-cfg.h"
104 #include "tree-into-ssa.h"
105 #include "tree-ssa.h"
106 #include "cfgloop.h"
107 #include "tree-data-ref.h"
108 #include "tree-scalar-evolution.h"
109 #include "tree-ssa-loop-ivopts.h"
110 #include "tree-ssa-address.h"
111 #include "dbgcnt.h"
112 #include "tree-hash-traits.h"
113 #include "varasm.h"
114 #include "builtins.h"
115 #include "params.h"
117 /* Hash for struct innermost_loop_behavior. It is the caller's
118 responsibility to free the memory. */
120 struct innermost_loop_behavior_hash : nofree_ptr_hash <innermost_loop_behavior>
122 static inline hashval_t hash (const value_type &);
123 static inline bool equal (const value_type &,
124 const compare_type &);
127 inline hashval_t
128 innermost_loop_behavior_hash::hash (const value_type &e)
130 hashval_t hash;
132 hash = iterative_hash_expr (e->base_address, 0);
133 hash = iterative_hash_expr (e->offset, hash);
134 hash = iterative_hash_expr (e->init, hash);
135 return iterative_hash_expr (e->step, hash);
138 inline bool
139 innermost_loop_behavior_hash::equal (const value_type &e1,
140 const compare_type &e2)
142 if ((e1->base_address && !e2->base_address)
143 || (!e1->base_address && e2->base_address)
144 || (!e1->offset && e2->offset)
145 || (e1->offset && !e2->offset)
146 || (!e1->init && e2->init)
147 || (e1->init && !e2->init)
148 || (!e1->step && e2->step)
149 || (e1->step && !e2->step))
150 return false;
152 if (e1->base_address && e2->base_address
153 && !operand_equal_p (e1->base_address, e2->base_address, 0))
154 return false;
155 if (e1->offset && e2->offset
156 && !operand_equal_p (e1->offset, e2->offset, 0))
157 return false;
158 if (e1->init && e2->init
159 && !operand_equal_p (e1->init, e2->init, 0))
160 return false;
161 if (e1->step && e2->step
162 && !operand_equal_p (e1->step, e2->step, 0))
163 return false;
165 return true;
168 /* List of basic blocks in if-conversion-suitable order. */
169 static basic_block *ifc_bbs;
171 /* Apply more aggressive (extended) if-conversion if true. */
172 static bool aggressive_if_conv;
174 /* Hash table to store <DR's innermost loop behavior, DR> pairs. */
175 static hash_map<innermost_loop_behavior_hash,
176 data_reference_p> *innermost_DR_map;
178 /* Hash table to store <base reference, DR> pairs. */
179 static hash_map<tree_operand_hash, data_reference_p> *baseref_DR_map;
181 /* Structure used to predicate basic blocks. This is attached to the
182 ->aux field of the BBs in the loop to be if-converted. */
183 struct bb_predicate {
185 /* The condition under which this basic block is executed. */
186 tree predicate;
188 /* PREDICATE is gimplified, and the sequence of statements is
189 recorded here, in order to avoid the duplication of computations
190 that occur in previous conditions. See PR44483. */
191 gimple_seq predicate_gimplified_stmts;
194 /* Returns true when the basic block BB has a predicate. */
196 static inline bool
197 bb_has_predicate (basic_block bb)
199 return bb->aux != NULL;
202 /* Returns the gimplified predicate for basic block BB. */
204 static inline tree
205 bb_predicate (basic_block bb)
207 return ((struct bb_predicate *) bb->aux)->predicate;
210 /* Sets the gimplified predicate COND for basic block BB. */
212 static inline void
213 set_bb_predicate (basic_block bb, tree cond)
215 gcc_assert ((TREE_CODE (cond) == TRUTH_NOT_EXPR
216 && is_gimple_condexpr (TREE_OPERAND (cond, 0)))
217 || is_gimple_condexpr (cond));
218 ((struct bb_predicate *) bb->aux)->predicate = cond;
221 /* Returns the sequence of statements of the gimplification of the
222 predicate for basic block BB. */
224 static inline gimple_seq
225 bb_predicate_gimplified_stmts (basic_block bb)
227 return ((struct bb_predicate *) bb->aux)->predicate_gimplified_stmts;
230 /* Sets the sequence of statements STMTS of the gimplification of the
231 predicate for basic block BB. */
233 static inline void
234 set_bb_predicate_gimplified_stmts (basic_block bb, gimple_seq stmts)
236 ((struct bb_predicate *) bb->aux)->predicate_gimplified_stmts = stmts;
239 /* Adds the sequence of statements STMTS to the sequence of statements
240 of the predicate for basic block BB. */
242 static inline void
243 add_bb_predicate_gimplified_stmts (basic_block bb, gimple_seq stmts)
245 gimple_seq_add_seq
246 (&(((struct bb_predicate *) bb->aux)->predicate_gimplified_stmts), stmts);
249 /* Initializes to TRUE the predicate of basic block BB. */
251 static inline void
252 init_bb_predicate (basic_block bb)
254 bb->aux = XNEW (struct bb_predicate);
255 set_bb_predicate_gimplified_stmts (bb, NULL);
256 set_bb_predicate (bb, boolean_true_node);
259 /* Release the SSA_NAMEs associated with the predicate of basic block BB,
260 but don't actually free it. */
262 static inline void
263 release_bb_predicate (basic_block bb)
265 gimple_seq stmts = bb_predicate_gimplified_stmts (bb);
266 if (stmts)
268 gimple_stmt_iterator i;
270 for (i = gsi_start (stmts); !gsi_end_p (i); gsi_next (&i))
271 free_stmt_operands (cfun, gsi_stmt (i));
272 set_bb_predicate_gimplified_stmts (bb, NULL);
276 /* Free the predicate of basic block BB. */
278 static inline void
279 free_bb_predicate (basic_block bb)
281 if (!bb_has_predicate (bb))
282 return;
284 release_bb_predicate (bb);
285 free (bb->aux);
286 bb->aux = NULL;
289 /* Reinitialize predicate of BB with the true predicate. */
291 static inline void
292 reset_bb_predicate (basic_block bb)
294 if (!bb_has_predicate (bb))
295 init_bb_predicate (bb);
296 else
298 release_bb_predicate (bb);
299 set_bb_predicate (bb, boolean_true_node);
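/* A minimal sketch of how the predicate helpers above are used by this
   pass (an illustrative summary, not additional API):

   | init_bb_predicate (bb);               attach ->aux, predicate = true
   | add_to_predicate_list (loop, bb, c);  accumulate conditions reaching BB
   | ... bb_predicate (bb) ...             query the accumulated predicate
   | free_bb_predicate (bb);               release stmts and free ->aux

   as done below in predicate_bbs and if_convertible_loop_p_1.  */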
303 /* Returns a new SSA_NAME of type TYPE that is assigned the value of
304 the expression EXPR. Inserts the statement created for this
305 computation before GSI and leaves the iterator GSI at the same
306 statement. */
308 static tree
309 ifc_temp_var (tree type, tree expr, gimple_stmt_iterator *gsi)
311 tree new_name = make_temp_ssa_name (type, NULL, "_ifc_");
312 gimple *stmt = gimple_build_assign (new_name, expr);
313 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
314 return new_name;
317 /* Return true when COND is a false predicate. */
319 static inline bool
320 is_false_predicate (tree cond)
322 return (cond != NULL_TREE
323 && (cond == boolean_false_node
324 || integer_zerop (cond)));
327 /* Return true when COND is a true predicate. */
329 static inline bool
330 is_true_predicate (tree cond)
332 return (cond == NULL_TREE
333 || cond == boolean_true_node
334 || integer_onep (cond));
337 /* Returns true when BB has a predicate that is not trivial: true or
338 NULL_TREE. */
340 static inline bool
341 is_predicated (basic_block bb)
343 return !is_true_predicate (bb_predicate (bb));
346 /* Parses the predicate COND and returns its comparison code and
347 operands OP0 and OP1. */
349 static enum tree_code
350 parse_predicate (tree cond, tree *op0, tree *op1)
352 gimple *s;
354 if (TREE_CODE (cond) == SSA_NAME
355 && is_gimple_assign (s = SSA_NAME_DEF_STMT (cond)))
357 if (TREE_CODE_CLASS (gimple_assign_rhs_code (s)) == tcc_comparison)
359 *op0 = gimple_assign_rhs1 (s);
360 *op1 = gimple_assign_rhs2 (s);
361 return gimple_assign_rhs_code (s);
364 else if (gimple_assign_rhs_code (s) == TRUTH_NOT_EXPR)
366 tree op = gimple_assign_rhs1 (s);
367 tree type = TREE_TYPE (op);
368 enum tree_code code = parse_predicate (op, op0, op1);
370 return code == ERROR_MARK ? ERROR_MARK
371 : invert_tree_comparison (code, HONOR_NANS (type));
374 return ERROR_MARK;
377 if (COMPARISON_CLASS_P (cond))
379 *op0 = TREE_OPERAND (cond, 0);
380 *op1 = TREE_OPERAND (cond, 1);
381 return TREE_CODE (cond);
384 return ERROR_MARK;
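/* An illustrative example of the parsing above (SSA names are made up):
   given

   | x_1 = a_2 < b_3;

   parse_predicate (x_1, &op0, &op1) returns LT_EXPR with OP0 = a_2 and
   OP1 = b_3, and when COND is defined as a TRUTH_NOT_EXPR of such a
   name, the inverted comparison code is returned instead.  */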
387 /* Returns the fold of predicate C1 OR C2 at location LOC. */
389 static tree
390 fold_or_predicates (location_t loc, tree c1, tree c2)
392 tree op1a, op1b, op2a, op2b;
393 enum tree_code code1 = parse_predicate (c1, &op1a, &op1b);
394 enum tree_code code2 = parse_predicate (c2, &op2a, &op2b);
396 if (code1 != ERROR_MARK && code2 != ERROR_MARK)
398 tree t = maybe_fold_or_comparisons (code1, op1a, op1b,
399 code2, op2a, op2b);
400 if (t)
401 return t;
404 return fold_build2_loc (loc, TRUTH_OR_EXPR, boolean_type_node, c1, c2);
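/* For instance, ORing the predicates a_1 < 4 and a_1 < 8 may be folded
   by maybe_fold_or_comparisons into the single comparison a_1 < 8; when
   no folding applies, a TRUTH_OR_EXPR of the two predicates is built
   (an illustrative example, not an exhaustive description).  */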
407 /* Returns true if N is either a constant or a SSA_NAME. */
409 static bool
410 constant_or_ssa_name (tree n)
412 switch (TREE_CODE (n))
414 case SSA_NAME:
415 case INTEGER_CST:
416 case REAL_CST:
417 case COMPLEX_CST:
418 case VECTOR_CST:
419 return true;
420 default:
421 return false;
425 /* Returns either a COND_EXPR or the folded expression if the folded
426 expression is a MIN_EXPR, a MAX_EXPR, an ABS_EXPR,
427 a constant or a SSA_NAME. */
429 static tree
430 fold_build_cond_expr (tree type, tree cond, tree rhs, tree lhs)
432 tree rhs1, lhs1, cond_expr;
434 /* If COND is a comparison r != 0 and r has boolean type, convert COND
435 to an SSA_NAME so that it is accepted by the vect bool pattern. */
436 if (TREE_CODE (cond) == NE_EXPR)
438 tree op0 = TREE_OPERAND (cond, 0);
439 tree op1 = TREE_OPERAND (cond, 1);
440 if (TREE_CODE (op0) == SSA_NAME
441 && TREE_CODE (TREE_TYPE (op0)) == BOOLEAN_TYPE
442 && (integer_zerop (op1)))
443 cond = op0;
445 cond_expr = fold_ternary (COND_EXPR, type, cond,
446 rhs, lhs);
448 if (cond_expr == NULL_TREE)
449 return build3 (COND_EXPR, type, cond, rhs, lhs);
451 STRIP_USELESS_TYPE_CONVERSION (cond_expr);
453 if (constant_or_ssa_name (cond_expr))
454 return cond_expr;
456 if (TREE_CODE (cond_expr) == ABS_EXPR)
458 rhs1 = TREE_OPERAND (cond_expr, 1);
459 STRIP_USELESS_TYPE_CONVERSION (rhs1);
460 if (constant_or_ssa_name (rhs1))
461 return build1 (ABS_EXPR, type, rhs1);
464 if (TREE_CODE (cond_expr) == MIN_EXPR
465 || TREE_CODE (cond_expr) == MAX_EXPR)
467 lhs1 = TREE_OPERAND (cond_expr, 0);
468 STRIP_USELESS_TYPE_CONVERSION (lhs1);
469 rhs1 = TREE_OPERAND (cond_expr, 1);
470 STRIP_USELESS_TYPE_CONVERSION (rhs1);
471 if (constant_or_ssa_name (rhs1)
472 && constant_or_ssa_name (lhs1))
473 return build2 (TREE_CODE (cond_expr), type, lhs1, rhs1);
475 return build3 (COND_EXPR, type, cond, rhs, lhs);
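/* Illustrative examples of the folding above (types and SSA names are
   made up):

   | fold_build_cond_expr (type, a_1 < b_2, a_1, b_2)  may yield  MIN_EXPR <a_1, b_2>
   | fold_build_cond_expr (type, a_1 > b_2, a_1, b_2)  may yield  MAX_EXPR <a_1, b_2>

   and otherwise a plain COND_EXPR like c_3 ? x_4 : y_5 is returned.  */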
478 /* Add condition NC to the predicate list of basic block BB. LOOP is
479 the loop to be if-converted. Use predicate of cd-equivalent block
480 for join bb if it exists: we call basic blocks bb1 and bb2
481 cd-equivalent if they are executed under the same condition. */
483 static inline void
484 add_to_predicate_list (struct loop *loop, basic_block bb, tree nc)
486 tree bc, *tp;
487 basic_block dom_bb;
489 if (is_true_predicate (nc))
490 return;
492 /* If dominance tells us this basic block is always executed,
493 don't record any predicates for it. */
494 if (dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
495 return;
497 dom_bb = get_immediate_dominator (CDI_DOMINATORS, bb);
498 /* We use the notion of cd-equivalence to get a simpler predicate for
499 the join block, e.g. if the join block has 2 predecessors with predicates
500 p1 & p2 and p1 & !p2, we'd like to get p1 for it instead of
501 (p1 & p2) | (p1 & !p2). */
502 if (dom_bb != loop->header
503 && get_immediate_dominator (CDI_POST_DOMINATORS, dom_bb) == bb)
505 gcc_assert (flow_bb_inside_loop_p (loop, dom_bb));
506 bc = bb_predicate (dom_bb);
507 if (!is_true_predicate (bc))
508 set_bb_predicate (bb, bc);
509 else
510 gcc_assert (is_true_predicate (bb_predicate (bb)));
511 if (dump_file && (dump_flags & TDF_DETAILS))
512 fprintf (dump_file, "Use predicate of bb#%d for bb#%d\n",
513 dom_bb->index, bb->index);
514 return;
517 if (!is_predicated (bb))
518 bc = nc;
519 else
521 bc = bb_predicate (bb);
522 bc = fold_or_predicates (EXPR_LOCATION (bc), nc, bc);
523 if (is_true_predicate (bc))
525 reset_bb_predicate (bb);
526 return;
530 /* Allow a TRUTH_NOT_EXPR around the main predicate. */
531 if (TREE_CODE (bc) == TRUTH_NOT_EXPR)
532 tp = &TREE_OPERAND (bc, 0);
533 else
534 tp = &bc;
535 if (!is_gimple_condexpr (*tp))
537 gimple_seq stmts;
538 *tp = force_gimple_operand_1 (*tp, &stmts, is_gimple_condexpr, NULL_TREE);
539 add_bb_predicate_gimplified_stmts (bb, stmts);
541 set_bb_predicate (bb, bc);
544 /* Add the condition COND to the previous condition PREV_COND, and add
545 this to the predicate list of the destination of edge E. LOOP is
546 the loop to be if-converted. */
548 static void
549 add_to_dst_predicate_list (struct loop *loop, edge e,
550 tree prev_cond, tree cond)
552 if (!flow_bb_inside_loop_p (loop, e->dest))
553 return;
555 if (!is_true_predicate (prev_cond))
556 cond = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
557 prev_cond, cond);
559 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, e->dest))
560 add_to_predicate_list (loop, e->dest, cond);
563 /* Return true if one of the successor edges of BB exits LOOP. */
565 static bool
566 bb_with_exit_edge_p (struct loop *loop, basic_block bb)
568 edge e;
569 edge_iterator ei;
571 FOR_EACH_EDGE (e, ei, bb->succs)
572 if (loop_exit_edge_p (loop, e))
573 return true;
575 return false;
578 /* Given PHI which has more than two arguments, this function checks if
579 it's if-convertible by degenerating its arguments. Specifically, it
580 is if-convertible when the two conditions below are satisfied:
582 1) The number of distinct PHI argument values equals 2 and one of the
583 arguments occurs only once.
584 2) The edge corresponding to the unique argument is not a critical edge.
586 Such a PHI can be handled as if it had only two arguments. For example,
587 the PHI below:
589 res = PHI <A_1(e1), A_1(e2), A_2(e3)>;
591 can be transformed into:
593 res = (predicate of e3) ? A_2 : A_1;
595 Return TRUE if it is the case, FALSE otherwise. */
597 static bool
598 phi_convertible_by_degenerating_args (gphi *phi)
600 edge e;
601 tree arg, t1 = NULL, t2 = NULL;
602 unsigned int i, i1 = 0, i2 = 0, n1 = 0, n2 = 0;
603 unsigned int num_args = gimple_phi_num_args (phi);
605 gcc_assert (num_args > 2);
607 for (i = 0; i < num_args; i++)
609 arg = gimple_phi_arg_def (phi, i);
610 if (t1 == NULL || operand_equal_p (t1, arg, 0))
612 n1++;
613 i1 = i;
614 t1 = arg;
616 else if (t2 == NULL || operand_equal_p (t2, arg, 0))
618 n2++;
619 i2 = i;
620 t2 = arg;
622 else
623 return false;
626 if (n1 != 1 && n2 != 1)
627 return false;
629 /* Check if the edge corresponding to the unique arg is critical. */
630 e = gimple_phi_arg_edge (phi, (n1 == 1) ? i1 : i2);
631 if (EDGE_COUNT (e->src->succs) > 1)
632 return false;
634 return true;
637 /* Return true when PHI is if-convertible. PHI is part of loop LOOP
638 and it belongs to basic block BB.
640 PHI is not if-convertible if:
641 - it has more than 2 arguments.
643 When we didn't see if-convertible stores, PHI is not
644 if-convertible if:
645 - a virtual PHI is immediately used in another PHI node,
646 - there is a virtual PHI in a BB other than the loop->header.
647 When aggressive_if_conv is set, the PHI can have more than
648 two arguments. */
650 static bool
651 if_convertible_phi_p (struct loop *loop, basic_block bb, gphi *phi,
652 bool any_mask_load_store)
654 if (dump_file && (dump_flags & TDF_DETAILS))
656 fprintf (dump_file, "-------------------------\n");
657 print_gimple_stmt (dump_file, phi, 0, TDF_SLIM);
660 if (bb != loop->header)
662 if (gimple_phi_num_args (phi) != 2
663 && !aggressive_if_conv
664 && !phi_convertible_by_degenerating_args (phi))
666 if (dump_file && (dump_flags & TDF_DETAILS))
667 fprintf (dump_file, "Phi can't be predicated by single cond.\n");
668 return false;
672 if (any_mask_load_store)
673 return true;
675 /* When there were no if-convertible stores, check
676 that there are no memory writes in the branches of the loop to be
677 if-converted. */
678 if (virtual_operand_p (gimple_phi_result (phi)))
680 imm_use_iterator imm_iter;
681 use_operand_p use_p;
683 if (bb != loop->header)
685 if (dump_file && (dump_flags & TDF_DETAILS))
686 fprintf (dump_file, "Virtual phi not on loop->header.\n");
687 return false;
690 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, gimple_phi_result (phi))
692 if (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI
693 && USE_STMT (use_p) != phi)
695 if (dump_file && (dump_flags & TDF_DETAILS))
696 fprintf (dump_file, "Difficult to handle this virtual phi.\n");
697 return false;
702 return true;
705 /* Records the status of a data reference. This struct is attached to
706 each DR->aux field. */
708 struct ifc_dr {
709 bool rw_unconditionally;
710 bool w_unconditionally;
711 bool written_at_least_once;
713 tree rw_predicate;
714 tree w_predicate;
715 tree base_w_predicate;
718 #define IFC_DR(DR) ((struct ifc_dr *) (DR)->aux)
719 #define DR_BASE_W_UNCONDITIONALLY(DR) (IFC_DR (DR)->written_at_least_once)
720 #define DR_RW_UNCONDITIONALLY(DR) (IFC_DR (DR)->rw_unconditionally)
721 #define DR_W_UNCONDITIONALLY(DR) (IFC_DR (DR)->w_unconditionally)
723 /* Iterates over DRs and stores <ref, DR> and <base ref, DR> pairs in the
724 HASH tables. While storing them in the HASH tables, it checks if the
725 reference is unconditionally read or written and stores that as flag
726 information. For a base reference it checks if it is written at least
727 once unconditionally and stores that as flag information along with
728 the DR. In other words, for every data reference A in STMT there exist
729 other accesses to a data reference with the same base whose predicates
730 add up (OR up) to the true predicate: this ensures that the data
731 reference A is touched (read or written) on every iteration of the
732 if-converted loop. */
733 static void
734 hash_memrefs_baserefs_and_store_DRs_read_written_info (data_reference_p a)
737 data_reference_p *master_dr, *base_master_dr;
738 tree base_ref = DR_BASE_OBJECT (a);
739 innermost_loop_behavior *innermost = &DR_INNERMOST (a);
740 tree ca = bb_predicate (gimple_bb (DR_STMT (a)));
741 bool exist1, exist2;
743 master_dr = &innermost_DR_map->get_or_insert (innermost, &exist1);
744 if (!exist1)
745 *master_dr = a;
747 if (DR_IS_WRITE (a))
749 IFC_DR (*master_dr)->w_predicate
750 = fold_or_predicates (UNKNOWN_LOCATION, ca,
751 IFC_DR (*master_dr)->w_predicate);
752 if (is_true_predicate (IFC_DR (*master_dr)->w_predicate))
753 DR_W_UNCONDITIONALLY (*master_dr) = true;
755 IFC_DR (*master_dr)->rw_predicate
756 = fold_or_predicates (UNKNOWN_LOCATION, ca,
757 IFC_DR (*master_dr)->rw_predicate);
758 if (is_true_predicate (IFC_DR (*master_dr)->rw_predicate))
759 DR_RW_UNCONDITIONALLY (*master_dr) = true;
761 if (DR_IS_WRITE (a))
763 base_master_dr = &baseref_DR_map->get_or_insert (base_ref, &exist2);
764 if (!exist2)
765 *base_master_dr = a;
766 IFC_DR (*base_master_dr)->base_w_predicate
767 = fold_or_predicates (UNKNOWN_LOCATION, ca,
768 IFC_DR (*base_master_dr)->base_w_predicate);
769 if (is_true_predicate (IFC_DR (*base_master_dr)->base_w_predicate))
770 DR_BASE_W_UNCONDITIONALLY (*base_master_dr) = true;
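/* For example, if A[i] is written under predicate p in one branch and
   under !p in the other, the write predicate accumulated for the master
   DR is p | !p, which can fold to true, in which case
   DR_W_UNCONDITIONALLY is set for it (an illustrative sketch of the
   bookkeeping above).  */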
774 /* Return true when the memory references of STMT won't trap in the
775 if-converted code. There are two things that we have to check for:
777 - writes to memory occur to writable memory: if-conversion of
778 memory writes transforms the conditional memory writes into
779 unconditional writes, i.e. "if (cond) A[i] = foo" is transformed
780 into "A[i] = cond ? foo : A[i]", and as the write to memory may not
781 be executed at all in the original code, it may be a readonly
782 memory. To check that A is not const-qualified, we check that
783 there exists at least an unconditional write to A in the current
784 function.
786 - reads or writes to memory are valid memory accesses for every
787 iteration. To check that the memory accesses are correctly formed
788 and that we are allowed to read and write in these locations, we
789 check that the memory accesses to be if-converted occur at every
790 iteration unconditionally.
792 Returns true if the memory reference in STMT is read or written
793 unconditionally at least once and the base memory reference is
794 written unconditionally once. This checks that the reference
795 cannot fault on a write. Also returns true if the memory reference
796 is unconditionally read once and we are conditionally writing to
797 memory that is defined as read-write and is bound to the definition
798 we are seeing. */
799 static bool
800 ifcvt_memrefs_wont_trap (gimple *stmt, vec<data_reference_p> drs)
802 data_reference_p *master_dr, *base_master_dr;
803 data_reference_p a = drs[gimple_uid (stmt) - 1];
805 tree base = DR_BASE_OBJECT (a);
806 innermost_loop_behavior *innermost = &DR_INNERMOST (a);
808 gcc_assert (DR_STMT (a) == stmt);
809 gcc_assert (DR_BASE_ADDRESS (a) || DR_OFFSET (a)
810 || DR_INIT (a) || DR_STEP (a));
812 master_dr = innermost_DR_map->get (innermost);
813 gcc_assert (master_dr != NULL);
815 base_master_dr = baseref_DR_map->get (base);
817 /* If a is unconditionally written to it doesn't trap. */
818 if (DR_W_UNCONDITIONALLY (*master_dr))
819 return true;
821 /* If a is unconditionally accessed then ... */
822 if (DR_RW_UNCONDITIONALLY (*master_dr))
824 /* an unconditional read won't trap. */
825 if (DR_IS_READ (a))
826 return true;
828 /* an unconditional write won't trap if the base is written
829 to unconditionally. */
830 if (base_master_dr
831 && DR_BASE_W_UNCONDITIONALLY (*base_master_dr))
832 return PARAM_VALUE (PARAM_ALLOW_STORE_DATA_RACES);
833 else
835 /* or the base is known to be not readonly. */
836 tree base_tree = get_base_address (DR_REF (a));
837 if (DECL_P (base_tree)
838 && decl_binds_to_current_def_p (base_tree)
839 && ! TREE_READONLY (base_tree))
840 return PARAM_VALUE (PARAM_ALLOW_STORE_DATA_RACES);
843 return false;
846 /* Return true if STMT could be converted into a masked load or store
847 (conditional load or store based on a mask computed from bb predicate). */
849 static bool
850 ifcvt_can_use_mask_load_store (gimple *stmt)
852 tree lhs, ref;
853 machine_mode mode;
854 basic_block bb = gimple_bb (stmt);
855 bool is_load;
857 if (!(flag_tree_loop_vectorize || bb->loop_father->force_vectorize)
858 || bb->loop_father->dont_vectorize
859 || !gimple_assign_single_p (stmt)
860 || gimple_has_volatile_ops (stmt))
861 return false;
863 /* Check whether this is a load or store. */
864 lhs = gimple_assign_lhs (stmt);
865 if (gimple_store_p (stmt))
867 if (!is_gimple_val (gimple_assign_rhs1 (stmt)))
868 return false;
869 is_load = false;
870 ref = lhs;
872 else if (gimple_assign_load_p (stmt))
874 is_load = true;
875 ref = gimple_assign_rhs1 (stmt);
877 else
878 return false;
880 if (may_be_nonaddressable_p (ref))
881 return false;
883 /* Mask should be integer mode of the same size as the load/store
884 mode. */
885 mode = TYPE_MODE (TREE_TYPE (lhs));
886 if (int_mode_for_mode (mode) == BLKmode
887 || VECTOR_MODE_P (mode))
888 return false;
890 if (can_vec_mask_load_store_p (mode, VOIDmode, is_load))
891 return true;
893 return false;
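/* Sketch of what this check enables later in predicate_mem_writes
   (pseudo-GIMPLE; the address, alignment and mask operands shown are
   illustrative):

   | if (cond) x_1 = A[i];    becomes    x_1 = MASK_LOAD (&A[i], align, mask);
   | if (cond) A[i] = x_1;    becomes    MASK_STORE (&A[i], align, mask, x_1);

   where the mask is computed from the basic block's predicate.  */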
896 /* Return true when STMT is if-convertible.
898 A GIMPLE_ASSIGN statement is not if-convertible if:
899 - it is not movable,
900 - it could trap,
901 - LHS is not var decl. */
903 static bool
904 if_convertible_gimple_assign_stmt_p (gimple *stmt,
905 vec<data_reference_p> refs,
906 bool *any_mask_load_store)
908 tree lhs = gimple_assign_lhs (stmt);
910 if (dump_file && (dump_flags & TDF_DETAILS))
912 fprintf (dump_file, "-------------------------\n");
913 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
916 if (!is_gimple_reg_type (TREE_TYPE (lhs)))
917 return false;
919 /* Some of these constraints might be too conservative. */
920 if (stmt_ends_bb_p (stmt)
921 || gimple_has_volatile_ops (stmt)
922 || (TREE_CODE (lhs) == SSA_NAME
923 && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
924 || gimple_has_side_effects (stmt))
926 if (dump_file && (dump_flags & TDF_DETAILS))
927 fprintf (dump_file, "stmt not suitable for ifcvt\n");
928 return false;
931 /* tree-into-ssa.c uses GF_PLF_1, so avoid it, because
932 in between if_convertible_loop_p and combine_blocks
933 we can perform loop versioning. */
934 gimple_set_plf (stmt, GF_PLF_2, false);
936 if ((! gimple_vuse (stmt)
937 || gimple_could_trap_p_1 (stmt, false, false)
938 || ! ifcvt_memrefs_wont_trap (stmt, refs))
939 && gimple_could_trap_p (stmt))
941 if (ifcvt_can_use_mask_load_store (stmt))
943 gimple_set_plf (stmt, GF_PLF_2, true);
944 *any_mask_load_store = true;
945 return true;
947 if (dump_file && (dump_flags & TDF_DETAILS))
948 fprintf (dump_file, "tree could trap...\n");
949 return false;
952 /* When if-converting stores, force versioning; likewise if we
953 ended up generating store data races. */
954 if (gimple_vdef (stmt))
955 *any_mask_load_store = true;
957 return true;
960 /* Return true when STMT is if-convertible.
962 A statement is if-convertible if:
963 - it is an if-convertible GIMPLE_ASSIGN,
964 - it is a GIMPLE_LABEL or a GIMPLE_COND,
965 - it is a call to a builtin. */
967 static bool
968 if_convertible_stmt_p (gimple *stmt, vec<data_reference_p> refs,
969 bool *any_mask_load_store)
971 switch (gimple_code (stmt))
973 case GIMPLE_LABEL:
974 case GIMPLE_DEBUG:
975 case GIMPLE_COND:
976 return true;
978 case GIMPLE_ASSIGN:
979 return if_convertible_gimple_assign_stmt_p (stmt, refs,
980 any_mask_load_store);
982 case GIMPLE_CALL:
984 tree fndecl = gimple_call_fndecl (stmt);
985 if (fndecl)
987 int flags = gimple_call_flags (stmt);
988 if ((flags & ECF_CONST)
989 && !(flags & ECF_LOOPING_CONST_OR_PURE)
990 /* We can only vectorize some builtins at the moment,
991 so restrict if-conversion to those. */
992 && DECL_BUILT_IN (fndecl))
993 return true;
995 return false;
998 default:
999 /* Don't know what to do with 'em so don't do anything. */
1000 if (dump_file && (dump_flags & TDF_DETAILS))
1002 fprintf (dump_file, "don't know what to do\n");
1003 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
1005 return false;
1006 break;
1009 return true;
1012 /* Assumes that BB has more than 1 predecessor.
1013 Returns false if at least one predecessor edge of BB is not a critical
1014 edge, and true otherwise. */
1016 static inline bool
1017 all_preds_critical_p (basic_block bb)
1019 edge e;
1020 edge_iterator ei;
1022 FOR_EACH_EDGE (e, ei, bb->preds)
1023 if (EDGE_COUNT (e->src->succs) == 1)
1024 return false;
1025 return true;
1028 /* Returns true if at least one predecessor edge of BB is a critical edge. */
1029 static inline bool
1030 has_pred_critical_p (basic_block bb)
1032 edge e;
1033 edge_iterator ei;
1035 FOR_EACH_EDGE (e, ei, bb->preds)
1036 if (EDGE_COUNT (e->src->succs) > 1)
1037 return true;
1038 return false;
1041 /* Return true when BB is if-convertible. This routine does not check
1042 basic block's statements and phis.
1044 A basic block is not if-convertible if:
1045 - it is non-empty and it is after the exit block (in BFS order),
1046 - it is after the exit block but before the latch,
1047 - its edges are not normal.
1049 Last restriction is valid if aggressive_if_conv is false.
1051 EXIT_BB is the basic block containing the exit of the LOOP. BB is
1052 inside LOOP. */
1054 static bool
1055 if_convertible_bb_p (struct loop *loop, basic_block bb, basic_block exit_bb)
1057 edge e;
1058 edge_iterator ei;
1060 if (dump_file && (dump_flags & TDF_DETAILS))
1061 fprintf (dump_file, "----------[%d]-------------\n", bb->index);
1063 if (EDGE_COUNT (bb->succs) > 2)
1064 return false;
1066 if (exit_bb)
1068 if (bb != loop->latch)
1070 if (dump_file && (dump_flags & TDF_DETAILS))
1071 fprintf (dump_file, "basic block after exit bb but before latch\n");
1072 return false;
1074 else if (!empty_block_p (bb))
1076 if (dump_file && (dump_flags & TDF_DETAILS))
1077 fprintf (dump_file, "non empty basic block after exit bb\n");
1078 return false;
1080 else if (bb == loop->latch
1081 && bb != exit_bb
1082 && !dominated_by_p (CDI_DOMINATORS, bb, exit_bb))
1084 if (dump_file && (dump_flags & TDF_DETAILS))
1085 fprintf (dump_file, "latch is not dominated by exit_block\n");
1086 return false;
1090 /* Be less adventurous and handle only normal edges. */
1091 FOR_EACH_EDGE (e, ei, bb->succs)
1092 if (e->flags & (EDGE_EH | EDGE_ABNORMAL | EDGE_IRREDUCIBLE_LOOP))
1094 if (dump_file && (dump_flags & TDF_DETAILS))
1095 fprintf (dump_file, "Difficult to handle edges\n");
1096 return false;
1099 /* At least one incoming edge has to be non-critical as otherwise edge
1100 predicates are not equal to basic-block predicates of the edge
1101 source. This check is skipped if aggressive_if_conv is true. */
1102 if (!aggressive_if_conv
1103 && EDGE_COUNT (bb->preds) > 1
1104 && bb != loop->header
1105 && all_preds_critical_p (bb))
1107 if (dump_file && (dump_flags & TDF_DETAILS))
1108 fprintf (dump_file, "only critical predecessors\n");
1109 return false;
1112 return true;
1115 /* Return true when all predecessor blocks of BB are visited. The
1116 VISITED bitmap keeps track of the visited blocks. */
1118 static bool
1119 pred_blocks_visited_p (basic_block bb, bitmap *visited)
1121 edge e;
1122 edge_iterator ei;
1123 FOR_EACH_EDGE (e, ei, bb->preds)
1124 if (!bitmap_bit_p (*visited, e->src->index))
1125 return false;
1127 return true;
1130 /* Get the body of LOOP in an order suitable for if-conversion. It is
1131 the caller's responsibility to deallocate the basic block list.
1132 The if-conversion-suitable order is breadth-first (BFS) order
1133 with an additional constraint: select a block only if all its
1134 predecessors have already been selected. */
1136 static basic_block *
1137 get_loop_body_in_if_conv_order (const struct loop *loop)
1139 basic_block *blocks, *blocks_in_bfs_order;
1140 basic_block bb;
1141 bitmap visited;
1142 unsigned int index = 0;
1143 unsigned int visited_count = 0;
1145 gcc_assert (loop->num_nodes);
1146 gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun));
1148 blocks = XCNEWVEC (basic_block, loop->num_nodes);
1149 visited = BITMAP_ALLOC (NULL);
1151 blocks_in_bfs_order = get_loop_body_in_bfs_order (loop);
1153 index = 0;
1154 while (index < loop->num_nodes)
1156 bb = blocks_in_bfs_order [index];
1158 if (bb->flags & BB_IRREDUCIBLE_LOOP)
1160 free (blocks_in_bfs_order);
1161 BITMAP_FREE (visited);
1162 free (blocks);
1163 return NULL;
1166 if (!bitmap_bit_p (visited, bb->index))
1168 if (pred_blocks_visited_p (bb, &visited)
1169 || bb == loop->header)
1171 /* This block is now visited. */
1172 bitmap_set_bit (visited, bb->index);
1173 blocks[visited_count++] = bb;
1177 index++;
1179 if (index == loop->num_nodes
1180 && visited_count != loop->num_nodes)
1181 /* Not done yet. */
1182 index = 0;
1184 free (blocks_in_bfs_order);
1185 BITMAP_FREE (visited);
1186 return blocks;
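/* For example, for a simple diamond inside the loop (an illustrative
   sketch):

   |        header
   |        /    \
   |  then_bb    else_bb
   |        \    /
   |       join_bb
   |          |
   |        latch

   the returned order is header, then_bb, else_bb (in some order),
   join_bb, latch: join_bb is only selected once both of its
   predecessors have been selected.  */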
1189 /* Returns true when the analysis of the predicates for all the basic
1190 blocks in LOOP succeeded.
1192 predicate_bbs first allocates the predicates of the basic blocks.
1193 These fields are then initialized with the tree expressions
1194 representing the predicates under which a basic block is executed
1195 in the LOOP. As the loop->header is executed at each iteration, it
1196 has the "true" predicate. Other statements executed under a
1197 condition are predicated with that condition, for example
1199 | if (x)
1200 | S1;
1201 | else
1202 | S2;
1204 S1 will be predicated with "x", and
1205 S2 will be predicated with "!x". */
1207 static void
1208 predicate_bbs (loop_p loop)
1210 unsigned int i;
1212 for (i = 0; i < loop->num_nodes; i++)
1213 init_bb_predicate (ifc_bbs[i]);
1215 for (i = 0; i < loop->num_nodes; i++)
1217 basic_block bb = ifc_bbs[i];
1218 tree cond;
1219 gimple *stmt;
1221 /* The loop latch and loop exit block are always executed and
1222 have no extra conditions to be processed: skip them. */
1223 if (bb == loop->latch
1224 || bb_with_exit_edge_p (loop, bb))
1226 reset_bb_predicate (bb);
1227 continue;
1230 cond = bb_predicate (bb);
1231 stmt = last_stmt (bb);
1232 if (stmt && gimple_code (stmt) == GIMPLE_COND)
1234 tree c2;
1235 edge true_edge, false_edge;
1236 location_t loc = gimple_location (stmt);
1237 tree c = build2_loc (loc, gimple_cond_code (stmt),
1238 boolean_type_node,
1239 gimple_cond_lhs (stmt),
1240 gimple_cond_rhs (stmt));
1242 /* Add new condition into destination's predicate list. */
1243 extract_true_false_edges_from_block (gimple_bb (stmt),
1244 &true_edge, &false_edge);
1246 /* If C is true, then TRUE_EDGE is taken. */
1247 add_to_dst_predicate_list (loop, true_edge, unshare_expr (cond),
1248 unshare_expr (c));
1250 /* If C is false, then FALSE_EDGE is taken. */
1251 c2 = build1_loc (loc, TRUTH_NOT_EXPR, boolean_type_node,
1252 unshare_expr (c));
1253 add_to_dst_predicate_list (loop, false_edge,
1254 unshare_expr (cond), c2);
1256 cond = NULL_TREE;
1259 /* If current bb has only one successor, then consider it as an
1260 unconditional goto. */
1261 if (single_succ_p (bb))
1263 basic_block bb_n = single_succ (bb);
1265 /* The successor bb inherits the predicate of its
1266 predecessor. If there is no predicate in the predecessor
1267 bb, then consider the successor bb as always executed. */
1268 if (cond == NULL_TREE)
1269 cond = boolean_true_node;
1271 add_to_predicate_list (loop, bb_n, cond);
1275 /* The loop header is always executed. */
1276 reset_bb_predicate (loop->header);
1277 gcc_assert (bb_predicate_gimplified_stmts (loop->header) == NULL
1278 && bb_predicate_gimplified_stmts (loop->latch) == NULL);
1281 /* Return true when LOOP is if-convertible. This is a helper function
1282 for if_convertible_loop_p. REFS is initialized and freed in
1283 if_convertible_loop_p. */
1285 static bool
1286 if_convertible_loop_p_1 (struct loop *loop,
1287 vec<data_reference_p> *refs,
1288 bool *any_mask_load_store)
1290 unsigned int i;
1291 basic_block exit_bb = NULL;
1293 if (find_data_references_in_loop (loop, refs) == chrec_dont_know)
1294 return false;
1296 calculate_dominance_info (CDI_DOMINATORS);
1297 calculate_dominance_info (CDI_POST_DOMINATORS);
1299 /* Allow statements that can be handled during if-conversion. */
1300 ifc_bbs = get_loop_body_in_if_conv_order (loop);
1301 if (!ifc_bbs)
1303 if (dump_file && (dump_flags & TDF_DETAILS))
1304 fprintf (dump_file, "Irreducible loop\n");
1305 return false;
1308 for (i = 0; i < loop->num_nodes; i++)
1310 basic_block bb = ifc_bbs[i];
1312 if (!if_convertible_bb_p (loop, bb, exit_bb))
1313 return false;
1315 if (bb_with_exit_edge_p (loop, bb))
1316 exit_bb = bb;
1319 for (i = 0; i < loop->num_nodes; i++)
1321 basic_block bb = ifc_bbs[i];
1322 gimple_stmt_iterator gsi;
1324 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1325 switch (gimple_code (gsi_stmt (gsi)))
1327 case GIMPLE_LABEL:
1328 case GIMPLE_ASSIGN:
1329 case GIMPLE_CALL:
1330 case GIMPLE_DEBUG:
1331 case GIMPLE_COND:
1332 gimple_set_uid (gsi_stmt (gsi), 0);
1333 break;
1334 default:
1335 return false;
1339 data_reference_p dr;
1341 innermost_DR_map
1342 = new hash_map<innermost_loop_behavior_hash, data_reference_p>;
1343 baseref_DR_map = new hash_map<tree_operand_hash, data_reference_p>;
1345 predicate_bbs (loop);
1347 for (i = 0; refs->iterate (i, &dr); i++)
1349 tree ref = DR_REF (dr);
1351 dr->aux = XNEW (struct ifc_dr);
1352 DR_BASE_W_UNCONDITIONALLY (dr) = false;
1353 DR_RW_UNCONDITIONALLY (dr) = false;
1354 DR_W_UNCONDITIONALLY (dr) = false;
1355 IFC_DR (dr)->rw_predicate = boolean_false_node;
1356 IFC_DR (dr)->w_predicate = boolean_false_node;
1357 IFC_DR (dr)->base_w_predicate = boolean_false_node;
1358 if (gimple_uid (DR_STMT (dr)) == 0)
1359 gimple_set_uid (DR_STMT (dr), i + 1);
1361 /* If DR doesn't have innermost loop behavior or it's a compound
1362 memory reference, we synthesize its innermost loop behavior
1363 for hashing. */
1364 if (TREE_CODE (ref) == COMPONENT_REF
1365 || TREE_CODE (ref) == IMAGPART_EXPR
1366 || TREE_CODE (ref) == REALPART_EXPR
1367 || !(DR_BASE_ADDRESS (dr) || DR_OFFSET (dr)
1368 || DR_INIT (dr) || DR_STEP (dr)))
1370 while (TREE_CODE (ref) == COMPONENT_REF
1371 || TREE_CODE (ref) == IMAGPART_EXPR
1372 || TREE_CODE (ref) == REALPART_EXPR)
1373 ref = TREE_OPERAND (ref, 0);
1375 DR_BASE_ADDRESS (dr) = ref;
1376 DR_OFFSET (dr) = NULL;
1377 DR_INIT (dr) = NULL;
1378 DR_STEP (dr) = NULL;
1379 DR_ALIGNED_TO (dr) = NULL;
1381 hash_memrefs_baserefs_and_store_DRs_read_written_info (dr);
1384 for (i = 0; i < loop->num_nodes; i++)
1386 basic_block bb = ifc_bbs[i];
1387 gimple_stmt_iterator itr;
1389 /* Check the if-convertibility of statements in predicated BBs. */
1390 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
1391 for (itr = gsi_start_bb (bb); !gsi_end_p (itr); gsi_next (&itr))
1392 if (!if_convertible_stmt_p (gsi_stmt (itr), *refs,
1393 any_mask_load_store))
1394 return false;
1397 for (i = 0; i < loop->num_nodes; i++)
1398 free_bb_predicate (ifc_bbs[i]);
1400 /* Checking PHIs needs to be done after stmts, as whether there
1401 are any masked loads or stores affects the tests. */
1402 for (i = 0; i < loop->num_nodes; i++)
1404 basic_block bb = ifc_bbs[i];
1405 gphi_iterator itr;
1407 for (itr = gsi_start_phis (bb); !gsi_end_p (itr); gsi_next (&itr))
1408 if (!if_convertible_phi_p (loop, bb, itr.phi (),
1409 *any_mask_load_store))
1410 return false;
1413 if (dump_file)
1414 fprintf (dump_file, "Applying if-conversion\n");
1416 return true;
1419 /* Return true when LOOP is if-convertible.
1420 LOOP is if-convertible if:
1421 - it is innermost,
1422 - it has more than two basic blocks,
1423 - it has only one exit,
1424 - none of the loop header's outgoing edges is a loop exit edge,
1425 - its basic blocks and phi nodes are if-convertible. */
1427 static bool
1428 if_convertible_loop_p (struct loop *loop, bool *any_mask_load_store)
1430 edge e;
1431 edge_iterator ei;
1432 bool res = false;
1433 vec<data_reference_p> refs;
1435 /* Handle only innermost loop. */
1436 if (!loop || loop->inner)
1438 if (dump_file && (dump_flags & TDF_DETAILS))
1439 fprintf (dump_file, "not innermost loop\n");
1440 return false;
1443 /* If only one block, no need for if-conversion. */
1444 if (loop->num_nodes <= 2)
1446 if (dump_file && (dump_flags & TDF_DETAILS))
1447 fprintf (dump_file, "less than 2 basic blocks\n");
1448 return false;
1451 /* More than one loop exit is too much to handle. */
1452 if (!single_exit (loop))
1454 if (dump_file && (dump_flags & TDF_DETAILS))
1455 fprintf (dump_file, "multiple exits\n");
1456 return false;
1459 /* If one of the loop header's edges is an exit edge then do not
1460 apply if-conversion. */
1461 FOR_EACH_EDGE (e, ei, loop->header->succs)
1462 if (loop_exit_edge_p (loop, e))
1463 return false;
1465 refs.create (5);
1466 res = if_convertible_loop_p_1 (loop, &refs, any_mask_load_store);
1468 data_reference_p dr;
1469 unsigned int i;
1470 for (i = 0; refs.iterate (i, &dr); i++)
1471 free (dr->aux);
1473 free_data_refs (refs);
1475 delete innermost_DR_map;
1476 innermost_DR_map = NULL;
1478 delete baseref_DR_map;
1479 baseref_DR_map = NULL;
1481 return res;
1484 /* Returns true if the def-stmt for a phi argument is a simple increment
1485 or decrement which is in a predicated basic block.
1486 In fact, the following PHI pattern is searched for:
1487 loop-header:
1488 reduc_1 = PHI <..., reduc_2>
1490 if (...)
1491 reduc_3 = ...
1492 reduc_2 = PHI <reduc_1, reduc_3>
1494 ARG_0 and ARG_1 are the corresponding PHI arguments.
1495 REDUC, OP0 and OP1 contain the reduction stmt and its operands.
1496 EXTENDED is true if PHI has > 2 arguments. */
1498 static bool
1499 is_cond_scalar_reduction (gimple *phi, gimple **reduc, tree arg_0, tree arg_1,
1500 tree *op0, tree *op1, bool extended)
1502 tree lhs, r_op1, r_op2;
1503 gimple *stmt;
1504 gimple *header_phi = NULL;
1505 enum tree_code reduction_op;
1506 basic_block bb = gimple_bb (phi);
1507 struct loop *loop = bb->loop_father;
1508 edge latch_e = loop_latch_edge (loop);
1509 imm_use_iterator imm_iter;
1510 use_operand_p use_p;
1511 edge e;
1512 edge_iterator ei;
1513 bool result = false;
1514 if (TREE_CODE (arg_0) != SSA_NAME || TREE_CODE (arg_1) != SSA_NAME)
1515 return false;
1517 if (!extended && gimple_code (SSA_NAME_DEF_STMT (arg_0)) == GIMPLE_PHI)
1519 lhs = arg_1;
1520 header_phi = SSA_NAME_DEF_STMT (arg_0);
1521 stmt = SSA_NAME_DEF_STMT (arg_1);
1523 else if (gimple_code (SSA_NAME_DEF_STMT (arg_1)) == GIMPLE_PHI)
1525 lhs = arg_0;
1526 header_phi = SSA_NAME_DEF_STMT (arg_1);
1527 stmt = SSA_NAME_DEF_STMT (arg_0);
1529 else
1530 return false;
1531 if (gimple_bb (header_phi) != loop->header)
1532 return false;
1534 if (PHI_ARG_DEF_FROM_EDGE (header_phi, latch_e) != PHI_RESULT (phi))
1535 return false;
1537 if (gimple_code (stmt) != GIMPLE_ASSIGN
1538 || gimple_has_volatile_ops (stmt))
1539 return false;
1541 if (!flow_bb_inside_loop_p (loop, gimple_bb (stmt)))
1542 return false;
1544 if (!is_predicated (gimple_bb (stmt)))
1545 return false;
1547 /* Check that stmt-block is predecessor of phi-block. */
1548 FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
1549 if (e->dest == bb)
1551 result = true;
1552 break;
1554 if (!result)
1555 return false;
1557 if (!has_single_use (lhs))
1558 return false;
1560 reduction_op = gimple_assign_rhs_code (stmt);
1561 if (reduction_op != PLUS_EXPR && reduction_op != MINUS_EXPR)
1562 return false;
1563 r_op1 = gimple_assign_rhs1 (stmt);
1564 r_op2 = gimple_assign_rhs2 (stmt);
1566 /* Make R_OP1 hold the reduction variable. */
1567 if (r_op2 == PHI_RESULT (header_phi)
1568 && reduction_op == PLUS_EXPR)
1569 std::swap (r_op1, r_op2);
1570 else if (r_op1 != PHI_RESULT (header_phi))
1571 return false;
1573 /* Check that R_OP1 is used in reduction stmt or in PHI only. */
1574 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, r_op1)
1576 gimple *use_stmt = USE_STMT (use_p);
1577 if (is_gimple_debug (use_stmt))
1578 continue;
1579 if (use_stmt == stmt)
1580 continue;
1581 if (gimple_code (use_stmt) != GIMPLE_PHI)
1582 return false;
1585 *op0 = r_op1; *op1 = r_op2;
1586 *reduc = stmt;
1587 return true;
1590 /* Converts conditional scalar reduction into unconditional form, e.g.
1591 bb_4
1592 if (_5 != 0) goto bb_5 else goto bb_6
1593 end_bb_4
1594 bb_5
1595 res_6 = res_13 + 1;
1596 end_bb_5
1597 bb_6
1598 # res_2 = PHI <res_13(4), res_6(5)>
1599 end_bb_6
1601 will be converted into sequence
1602 _ifc__1 = _5 != 0 ? 1 : 0;
1603 res_2 = res_13 + _ifc__1;
1604 Argument SWAP tells whether the arguments of the conditional expression
1605 should be swapped.
1606 Returns the rhs of the resulting PHI assignment. */
1608 static tree
1609 convert_scalar_cond_reduction (gimple *reduc, gimple_stmt_iterator *gsi,
1610 tree cond, tree op0, tree op1, bool swap)
1612 gimple_stmt_iterator stmt_it;
1613 gimple *new_assign;
1614 tree rhs;
1615 tree rhs1 = gimple_assign_rhs1 (reduc);
1616 tree tmp = make_temp_ssa_name (TREE_TYPE (rhs1), NULL, "_ifc_");
1617 tree c;
1618 tree zero = build_zero_cst (TREE_TYPE (rhs1));
1620 if (dump_file && (dump_flags & TDF_DETAILS))
1622 fprintf (dump_file, "Found cond scalar reduction.\n");
1623 print_gimple_stmt (dump_file, reduc, 0, TDF_SLIM);
1626 /* Build cond expression using COND and constant operand
1627 of reduction rhs. */
1628 c = fold_build_cond_expr (TREE_TYPE (rhs1),
1629 unshare_expr (cond),
1630 swap ? zero : op1,
1631 swap ? op1 : zero);
1633 /* Create assignment stmt and insert it at GSI. */
1634 new_assign = gimple_build_assign (tmp, c);
1635 gsi_insert_before (gsi, new_assign, GSI_SAME_STMT);
1636 /* Build rhs for unconditional increment/decrement. */
1637 rhs = fold_build2 (gimple_assign_rhs_code (reduc),
1638 TREE_TYPE (rhs1), op0, tmp);
1640 /* Delete original reduction stmt. */
1641 stmt_it = gsi_for_stmt (reduc);
1642 gsi_remove (&stmt_it, true);
1643 release_defs (reduc);
1644 return rhs;
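/* At the source level, the transformation above corresponds roughly to
   rewriting (an illustrative sketch):

   | if (c)
   |   res += 1;

   into

   | _ifc_t = c ? 1 : 0;
   | res = res + _ifc_t;

   so the reduction becomes unconditional and thus vectorizable.  */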
1647 /* Produce condition for all occurrences of ARG in PHI node. */
1649 static tree
1650 gen_phi_arg_condition (gphi *phi, vec<int> *occur,
1651 gimple_stmt_iterator *gsi)
1653 int len;
1654 int i;
1655 tree cond = NULL_TREE;
1656 tree c;
1657 edge e;
1659 len = occur->length ();
1660 gcc_assert (len > 0);
1661 for (i = 0; i < len; i++)
1663 e = gimple_phi_arg_edge (phi, (*occur)[i]);
1664 c = bb_predicate (e->src);
1665 if (is_true_predicate (c))
1666 continue;
1667 c = force_gimple_operand_gsi_1 (gsi, unshare_expr (c),
1668 is_gimple_condexpr, NULL_TREE,
1669 true, GSI_SAME_STMT);
1670 if (cond != NULL_TREE)
1672 /* Must build OR expression. */
1673 cond = fold_or_predicates (EXPR_LOCATION (c), c, cond);
1674 cond = force_gimple_operand_gsi_1 (gsi, unshare_expr (cond),
1675 is_gimple_condexpr, NULL_TREE,
1676 true, GSI_SAME_STMT);
1678 else
1679 cond = c;
1681 gcc_assert (cond != NULL_TREE);
1682 return cond;
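/* For example, for res = PHI <a_1(e1), a_1(e2), b_2(e3)> and OCCUR
   listing the two occurrences of a_1, the generated condition is
   (predicate of e1->src) | (predicate of e2->src), gimplified into a
   valid cond-expr operand (an illustrative sketch).  */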
1685 /* Replace a scalar PHI node with a COND_EXPR using COND as condition.
1686 This routine can handle PHI nodes with more than two arguments.
1688 For example,
1689 S1: A = PHI <x1(1), x2(5)>
1690 is converted into,
1691 S2: A = cond ? x1 : x2;
1693 The generated code is inserted at GSI that points to the top of
1694 basic block's statement list.
1695 If the PHI node has more than two arguments, a chain of conditional
1696 expressions is produced. */
1699 static void
1700 predicate_scalar_phi (gphi *phi, gimple_stmt_iterator *gsi)
1702 gimple *new_stmt = NULL, *reduc;
1703 tree rhs, res, arg0, arg1, op0, op1, scev;
1704 tree cond;
1705 unsigned int index0;
1706 unsigned int max, args_len;
1707 edge e;
1708 basic_block bb;
1709 unsigned int i;
1711 res = gimple_phi_result (phi);
1712 if (virtual_operand_p (res))
1713 return;
1715 if ((rhs = degenerate_phi_result (phi))
1716 || ((scev = analyze_scalar_evolution (gimple_bb (phi)->loop_father,
1717 res))
1718 && !chrec_contains_undetermined (scev)
1719 && scev != res
1720 && (rhs = gimple_phi_arg_def (phi, 0))))
1722 if (dump_file && (dump_flags & TDF_DETAILS))
1724 fprintf (dump_file, "Degenerate phi!\n");
1725 print_gimple_stmt (dump_file, phi, 0, TDF_SLIM);
1727 new_stmt = gimple_build_assign (res, rhs);
1728 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
1729 update_stmt (new_stmt);
1730 return;
1733 bb = gimple_bb (phi);
1734 if (EDGE_COUNT (bb->preds) == 2)
1736 /* Predicate ordinary PHI node with 2 arguments. */
1737 edge first_edge, second_edge;
1738 basic_block true_bb;
1739 first_edge = EDGE_PRED (bb, 0);
1740 second_edge = EDGE_PRED (bb, 1);
1741 cond = bb_predicate (first_edge->src);
1742 if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
1743 std::swap (first_edge, second_edge);
1744 if (EDGE_COUNT (first_edge->src->succs) > 1)
1746 cond = bb_predicate (second_edge->src);
1747 if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
1748 cond = TREE_OPERAND (cond, 0);
1749 else
1750 first_edge = second_edge;
1752 else
1753 cond = bb_predicate (first_edge->src);
1754 /* Gimplify the condition to a valid cond-expr conditional operand. */
1755 cond = force_gimple_operand_gsi_1 (gsi, unshare_expr (cond),
1756 is_gimple_condexpr, NULL_TREE,
1757 true, GSI_SAME_STMT);
1758 true_bb = first_edge->src;
1759 if (EDGE_PRED (bb, 1)->src == true_bb)
1761 arg0 = gimple_phi_arg_def (phi, 1);
1762 arg1 = gimple_phi_arg_def (phi, 0);
1764 else
1766 arg0 = gimple_phi_arg_def (phi, 0);
1767 arg1 = gimple_phi_arg_def (phi, 1);
1769 if (is_cond_scalar_reduction (phi, &reduc, arg0, arg1,
1770 &op0, &op1, false))
1771 /* Convert reduction stmt into vectorizable form. */
1772 rhs = convert_scalar_cond_reduction (reduc, gsi, cond, op0, op1,
1773 true_bb != gimple_bb (reduc));
1774 else
1775 /* Build new RHS using selected condition and arguments. */
1776 rhs = fold_build_cond_expr (TREE_TYPE (res), unshare_expr (cond),
1777 arg0, arg1);
1778 new_stmt = gimple_build_assign (res, rhs);
1779 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
1780 update_stmt (new_stmt);
1782 if (dump_file && (dump_flags & TDF_DETAILS))
1784 fprintf (dump_file, "new phi replacement stmt\n");
1785 print_gimple_stmt (dump_file, new_stmt, 0, TDF_SLIM);
1787 return;
1790 /* Create a hash map for the PHI node which maps each argument value to
1791 the vector of argument indexes having that value. */
1792 bool swap = false;
1793 hash_map<tree_operand_hash, auto_vec<int> > phi_arg_map;
1794 unsigned int num_args = gimple_phi_num_args (phi);
1795 int max_ind = -1;
1796 /* Vector of different PHI argument values. */
1797 auto_vec<tree> args (num_args);
1799 /* Compute phi_arg_map. */
1800 for (i = 0; i < num_args; i++)
1802 tree arg;
1804 arg = gimple_phi_arg_def (phi, i);
1805 if (!phi_arg_map.get (arg))
1806 args.quick_push (arg);
1807 phi_arg_map.get_or_insert (arg).safe_push (i);
1810 /* Determine element with max number of occurrences. */
1811 max_ind = -1;
1812 max = 1;
1813 args_len = args.length ();
1814 for (i = 0; i < args_len; i++)
1816 unsigned int len;
1817 if ((len = phi_arg_map.get (args[i])->length ()) > max)
1819 max_ind = (int) i;
1820 max = len;
1824 /* Put the element with the max number of occurrences at the end of ARGS. */
1825 if (max_ind != -1 && max_ind +1 != (int) args_len)
1826 std::swap (args[args_len - 1], args[max_ind]);
1828 /* Handle one special case when the number of arguments with different
1829 values is equal to 2 and one argument occurs only once. Such a PHI
1830 can be handled as if it had only 2 arguments. */
1831 if (args_len == 2 && phi_arg_map.get (args[0])->length () == 1)
1833 vec<int> *indexes;
1834 indexes = phi_arg_map.get (args[0]);
1835 index0 = (*indexes)[0];
1836 arg0 = args[0];
1837 arg1 = args[1];
1838 e = gimple_phi_arg_edge (phi, index0);
1839 cond = bb_predicate (e->src);
1840 if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
1842 swap = true;
1843 cond = TREE_OPERAND (cond, 0);
1845 /* Gimplify the condition to a valid cond-expr conditional operand. */
1846 cond = force_gimple_operand_gsi_1 (gsi, unshare_expr (cond),
1847 is_gimple_condexpr, NULL_TREE,
1848 true, GSI_SAME_STMT);
1849 if (!(is_cond_scalar_reduction (phi, &reduc, arg0 , arg1,
1850 &op0, &op1, true)))
1851 rhs = fold_build_cond_expr (TREE_TYPE (res), unshare_expr (cond),
1852 swap? arg1 : arg0,
1853 swap? arg0 : arg1);
1854 else
1855 /* Convert reduction stmt into vectorizable form. */
1856 rhs = convert_scalar_cond_reduction (reduc, gsi, cond, op0, op1,
1857 swap);
1858 new_stmt = gimple_build_assign (res, rhs);
1859 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
1860 update_stmt (new_stmt);
1862 else
1864 /* Common case. */
1865 vec<int> *indexes;
1866 tree type = TREE_TYPE (gimple_phi_result (phi));
1867 tree lhs;
1868 arg1 = args[1];
1869 for (i = 0; i < args_len; i++)
1871 arg0 = args[i];
1872 indexes = phi_arg_map.get (args[i]);
1873 if (i != args_len - 1)
1874 lhs = make_temp_ssa_name (type, NULL, "_ifc_");
1875 else
1876 lhs = res;
1877 cond = gen_phi_arg_condition (phi, indexes, gsi);
1878 rhs = fold_build_cond_expr (type, unshare_expr (cond),
1879 arg0, arg1);
1880 new_stmt = gimple_build_assign (lhs, rhs);
1881 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
1882 update_stmt (new_stmt);
1883 arg1 = lhs;
1887 if (dump_file && (dump_flags & TDF_DETAILS))
1889 fprintf (dump_file, "new extended phi replacement stmt\n");
1890 print_gimple_stmt (dump_file, new_stmt, 0, TDF_SLIM);
1894 /* Replaces in LOOP all the scalar phi nodes other than those in the
1895 LOOP->header block with conditional modify expressions. */
1897 static void
1898 predicate_all_scalar_phis (struct loop *loop)
1900 basic_block bb;
1901 unsigned int orig_loop_num_nodes = loop->num_nodes;
1902 unsigned int i;
1904 for (i = 1; i < orig_loop_num_nodes; i++)
1906 gphi *phi;
1907 gimple_stmt_iterator gsi;
1908 gphi_iterator phi_gsi;
1909 bb = ifc_bbs[i];
1911 if (bb == loop->header)
1912 continue;
1914 if (EDGE_COUNT (bb->preds) == 1)
1915 continue;
1917 phi_gsi = gsi_start_phis (bb);
1918 if (gsi_end_p (phi_gsi))
1919 continue;
1921 gsi = gsi_after_labels (bb);
1922 while (!gsi_end_p (phi_gsi))
1924 phi = phi_gsi.phi ();
1925 predicate_scalar_phi (phi, &gsi);
1926 release_phi_node (phi);
1927 gsi_next (&phi_gsi);
1930 set_phi_nodes (bb, NULL);
1934 /* Insert in each basic block of LOOP the statements produced by the
1935 gimplification of the predicates. */
1937 static void
1938 insert_gimplified_predicates (loop_p loop, bool any_mask_load_store)
1940 unsigned int i;
1942 for (i = 0; i < loop->num_nodes; i++)
1944 basic_block bb = ifc_bbs[i];
1945 gimple_seq stmts;
1946 if (!is_predicated (bb))
1947 gcc_assert (bb_predicate_gimplified_stmts (bb) == NULL);
1948 if (!is_predicated (bb))
1950 /* Do not insert statements for a basic block that is not
1951 predicated. Also make sure that the predicate of the
1952 basic block is set to true. */
1953 reset_bb_predicate (bb);
1954 continue;
1957 stmts = bb_predicate_gimplified_stmts (bb);
1958 if (stmts)
1960 if (any_mask_load_store)
1962 /* Insert the predicate of the BB just after the label,
1963 as the if-conversion of memory writes will use this
1964 predicate. */
1965 gimple_stmt_iterator gsi = gsi_after_labels (bb);
1966 gsi_insert_seq_before (&gsi, stmts, GSI_SAME_STMT);
1968 else
1970 /* Insert the predicate of the BB at the end of the BB
1971 as this would reduce the register pressure: the only
1972 use of this predicate will be in successor BBs. */
1973 gimple_stmt_iterator gsi = gsi_last_bb (bb);
1975 if (gsi_end_p (gsi)
1976 || stmt_ends_bb_p (gsi_stmt (gsi)))
1977 gsi_insert_seq_before (&gsi, stmts, GSI_SAME_STMT);
1978 else
1979 gsi_insert_seq_after (&gsi, stmts, GSI_SAME_STMT);
1982 /* Once the sequence is code generated, set it to NULL. */
1983 set_bb_predicate_gimplified_stmts (bb, NULL);
1988 /* Helper function for predicate_mem_writes. Returns the index of an
1989 existing mask if one was created for the given SIZE, and -1 otherwise. */
1991 static int
1992 mask_exists (int size, vec<int> vec)
1994 unsigned int ix;
1995 int v;
1996 FOR_EACH_VEC_ELT (vec, ix, v)
1997 if (v == size)
1998 return (int) ix;
1999 return -1;
2002 /* Predicate each write to memory in LOOP.
2004 This function transforms control flow constructs containing memory
2005 writes of the form:
2007 | for (i = 0; i < N; i++)
2008 | if (cond)
2009 | A[i] = expr;
2011 into the following form that does not contain control flow:
2013 | for (i = 0; i < N; i++)
2014 | A[i] = cond ? expr : A[i];
2016 The original CFG looks like this:
2018 | bb_0
2019 | i = 0
2020 | end_bb_0
2022 | bb_1
2023 | if (i < N) goto bb_5 else goto bb_2
2024 | end_bb_1
2026 | bb_2
2027 | cond = some_computation;
2028 | if (cond) goto bb_3 else goto bb_4
2029 | end_bb_2
2031 | bb_3
2032 | A[i] = expr;
2033 | goto bb_4
2034 | end_bb_3
2036 | bb_4
2037 | goto bb_1
2038 | end_bb_4
2040 insert_gimplified_predicates inserts the computation of the COND
2041 expression at the beginning of the destination basic block:
2043 | bb_0
2044 | i = 0
2045 | end_bb_0
2047 | bb_1
2048 | if (i < N) goto bb_5 else goto bb_2
2049 | end_bb_1
2051 | bb_2
2052 | cond = some_computation;
2053 | if (cond) goto bb_3 else goto bb_4
2054 | end_bb_2
2056 | bb_3
2057 | cond = some_computation;
2058 | A[i] = expr;
2059 | goto bb_4
2060 | end_bb_3
2062 | bb_4
2063 | goto bb_1
2064 | end_bb_4
2066 predicate_mem_writes then predicates the memory write as follows:
2068 | bb_0
2069 | i = 0
2070 | end_bb_0
2072 | bb_1
2073 | if (i < N) goto bb_5 else goto bb_2
2074 | end_bb_1
2076 | bb_2
2077 | if (cond) goto bb_3 else goto bb_4
2078 | end_bb_2
2080 | bb_3
2081 | cond = some_computation;
2082 | A[i] = cond ? expr : A[i];
2083 | goto bb_4
2084 | end_bb_3
2086 | bb_4
2087 | goto bb_1
2088 | end_bb_4
2090 and finally combine_blocks removes the basic block boundaries making
2091 the loop vectorizable:
2093 | bb_0
2094 | i = 0
2095 | if (i < N) goto bb_5 else goto bb_1
2096 | end_bb_0
2098 | bb_1
2099 | cond = some_computation;
2100 | A[i] = cond ? expr : A[i];
2101 | if (i < N) goto bb_5 else goto bb_4
2102 | end_bb_1
2104 | bb_4
2105 | goto bb_1
2106 | end_bb_4  */
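/* Editor's illustration: for accesses flagged with GF_PLF_2 (see the check
   in the loop below), the predicated load or store is emitted as an
   internal function call rather than a scalar COND_EXPR.  This is only a
   rough sketch; the exact operands (address, alignment/alias pointer,
   mask, stored value) are built by the code that follows:

   | _mask = cond;
   | _tmp = MASK_LOAD (&A[i], align, _mask);
   | MASK_STORE (&A[i], align, _mask, expr);  */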
2109 static void
2110 predicate_mem_writes (loop_p loop)
2112 unsigned int i, orig_loop_num_nodes = loop->num_nodes;
2113 auto_vec<int, 1> vect_sizes;
2114 auto_vec<tree, 1> vect_masks;
2116 for (i = 1; i < orig_loop_num_nodes; i++)
2118 gimple_stmt_iterator gsi;
2119 basic_block bb = ifc_bbs[i];
2120 tree cond = bb_predicate (bb);
2121 bool swap;
2122 gimple *stmt;
2123 int index;
2125 if (is_true_predicate (cond) || is_false_predicate (cond))
2126 continue;
2128 swap = false;
2129 if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
2131 swap = true;
2132 cond = TREE_OPERAND (cond, 0);
2135 vect_sizes.truncate (0);
2136 vect_masks.truncate (0);
2138 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2139 if (!gimple_assign_single_p (stmt = gsi_stmt (gsi)))
2140 continue;
2141 else if (gimple_plf (stmt, GF_PLF_2))
2143 tree lhs = gimple_assign_lhs (stmt);
2144 tree rhs = gimple_assign_rhs1 (stmt);
2145 tree ref, addr, ptr, mask;
2146 gimple *new_stmt;
2147 gimple_seq stmts = NULL;
2148 int bitsize = GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (lhs)));
2149 ref = TREE_CODE (lhs) == SSA_NAME ? rhs : lhs;
2150 mark_addressable (ref);
2151 addr = force_gimple_operand_gsi (&gsi, build_fold_addr_expr (ref),
2152 true, NULL_TREE, true,
2153 GSI_SAME_STMT);
2154 if (!vect_sizes.is_empty ()
2155 && (index = mask_exists (bitsize, vect_sizes)) != -1)
2156 /* Use created mask. */
2157 mask = vect_masks[index];
2158 else
2160 if (COMPARISON_CLASS_P (cond))
2161 mask = gimple_build (&stmts, TREE_CODE (cond),
2162 boolean_type_node,
2163 TREE_OPERAND (cond, 0),
2164 TREE_OPERAND (cond, 1));
2165 else
2167 gcc_assert (TREE_CODE (cond) == SSA_NAME);
2168 mask = cond;
2171 if (swap)
2173 tree true_val
2174 = constant_boolean_node (true, TREE_TYPE (mask));
2175 mask = gimple_build (&stmts, BIT_XOR_EXPR,
2176 TREE_TYPE (mask), mask, true_val);
2178 gsi_insert_seq_before (&gsi, stmts, GSI_SAME_STMT);
2180 mask = ifc_temp_var (TREE_TYPE (mask), mask, &gsi);
2181 /* Save mask and its size for further use. */
2182 vect_sizes.safe_push (bitsize);
2183 vect_masks.safe_push (mask);
2185 ptr = build_int_cst (reference_alias_ptr_type (ref),
2186 get_object_alignment (ref));
2187 /* Copy points-to info if possible. */
2188 if (TREE_CODE (addr) == SSA_NAME && !SSA_NAME_PTR_INFO (addr))
2189 copy_ref_info (build2 (MEM_REF, TREE_TYPE (ref), addr, ptr),
2190 ref);
2191 if (TREE_CODE (lhs) == SSA_NAME)
2193 new_stmt
2194 = gimple_build_call_internal (IFN_MASK_LOAD, 3, addr,
2195 ptr, mask);
2196 gimple_call_set_lhs (new_stmt, lhs);
2198 else
2199 new_stmt
2200 = gimple_build_call_internal (IFN_MASK_STORE, 4, addr, ptr,
2201 mask, rhs);
2202 gsi_replace (&gsi, new_stmt, true);
2204 else if (gimple_vdef (stmt))
2206 tree lhs = gimple_assign_lhs (stmt);
2207 tree rhs = gimple_assign_rhs1 (stmt);
2208 tree type = TREE_TYPE (lhs);
2210 lhs = ifc_temp_var (type, unshare_expr (lhs), &gsi);
2211 rhs = ifc_temp_var (type, unshare_expr (rhs), &gsi);
2212 if (swap)
2213 std::swap (lhs, rhs);
2214 cond = force_gimple_operand_gsi_1 (&gsi, unshare_expr (cond),
2215 is_gimple_condexpr, NULL_TREE,
2216 true, GSI_SAME_STMT);
2217 rhs = fold_build_cond_expr (type, unshare_expr (cond), rhs, lhs);
2218 gimple_assign_set_rhs1 (stmt, ifc_temp_var (type, rhs, &gsi));
2219 update_stmt (stmt);
2224 /* Remove all GIMPLE_CONDs and GIMPLE_LABELs of all the basic blocks
2225 other than the exit and latch of the LOOP.  Also reset the
2226 GIMPLE_DEBUG information. */
2228 static void
2229 remove_conditions_and_labels (loop_p loop)
2231 gimple_stmt_iterator gsi;
2232 unsigned int i;
2234 for (i = 0; i < loop->num_nodes; i++)
2236 basic_block bb = ifc_bbs[i];
2238 if (bb_with_exit_edge_p (loop, bb)
2239 || bb == loop->latch)
2240 continue;
2242 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
2243 switch (gimple_code (gsi_stmt (gsi)))
2245 case GIMPLE_COND:
2246 case GIMPLE_LABEL:
2247 gsi_remove (&gsi, true);
2248 break;
2250 case GIMPLE_DEBUG:
2251 /* ??? Should there be conditional GIMPLE_DEBUG_BINDs? */
2252 if (gimple_debug_bind_p (gsi_stmt (gsi)))
2254 gimple_debug_bind_reset_value (gsi_stmt (gsi));
2255 update_stmt (gsi_stmt (gsi));
2257 gsi_next (&gsi);
2258 break;
2260 default:
2261 gsi_next (&gsi);
2266 /* Combine all the basic blocks from LOOP into one or two super basic
2267 blocks. Replace PHI nodes with conditional modify expressions. */
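/* Editor's illustration (a sketch; the SSA names and the predicate name
   _pred_7 are made up): a scalar PHI node merging two predicated values

   | x_3 = PHI <x_1 (bb_3), x_2 (bb_4)>

   is rewritten by predicate_all_scalar_phis into a conditional expression
   on the corresponding block predicate:

   | x_3 = _pred_7 ? x_1 : x_2;  */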
2269 static void
2270 combine_blocks (struct loop *loop, bool any_mask_load_store)
2272 basic_block bb, exit_bb, merge_target_bb;
2273 unsigned int orig_loop_num_nodes = loop->num_nodes;
2274 unsigned int i;
2275 edge e;
2276 edge_iterator ei;
2278 predicate_bbs (loop);
2279 remove_conditions_and_labels (loop);
2280 insert_gimplified_predicates (loop, any_mask_load_store);
2281 predicate_all_scalar_phis (loop);
2283 if (any_mask_load_store)
2284 predicate_mem_writes (loop);
2286 /* Merge basic blocks: first remove all the edges in the loop,
2287 except for those from the exit block. */
2288 exit_bb = NULL;
2289 bool *predicated = XNEWVEC (bool, orig_loop_num_nodes);
2290 for (i = 0; i < orig_loop_num_nodes; i++)
2292 bb = ifc_bbs[i];
2293 predicated[i] = !is_true_predicate (bb_predicate (bb));
2294 free_bb_predicate (bb);
2295 if (bb_with_exit_edge_p (loop, bb))
2297 gcc_assert (exit_bb == NULL);
2298 exit_bb = bb;
2301 gcc_assert (exit_bb != loop->latch);
2303 for (i = 1; i < orig_loop_num_nodes; i++)
2305 bb = ifc_bbs[i];
2307 for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei));)
2309 if (e->src == exit_bb)
2310 ei_next (&ei);
2311 else
2312 remove_edge (e);
2316 if (exit_bb != NULL)
2318 if (exit_bb != loop->header)
2320 /* Connect this node to loop header. */
2321 make_edge (loop->header, exit_bb, EDGE_FALLTHRU);
2322 set_immediate_dominator (CDI_DOMINATORS, exit_bb, loop->header);
2325 /* Redirect non-exit edges to loop->latch. */
2326 FOR_EACH_EDGE (e, ei, exit_bb->succs)
2328 if (!loop_exit_edge_p (loop, e))
2329 redirect_edge_and_branch (e, loop->latch);
2331 set_immediate_dominator (CDI_DOMINATORS, loop->latch, exit_bb);
2333 else
2335 /* If the loop does not have an exit, reconnect header and latch. */
2336 make_edge (loop->header, loop->latch, EDGE_FALLTHRU);
2337 set_immediate_dominator (CDI_DOMINATORS, loop->latch, loop->header);
2340 merge_target_bb = loop->header;
2341 for (i = 1; i < orig_loop_num_nodes; i++)
2343 gimple_stmt_iterator gsi;
2344 gimple_stmt_iterator last;
2346 bb = ifc_bbs[i];
2348 if (bb == exit_bb || bb == loop->latch)
2349 continue;
2351 /* Make the stmts members of loop->header and clear range info from all
2352 stmts in BB, which is no longer executed conditionally on a predicate
2353 we could have derived that info from.  */
2354 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2356 gimple *stmt = gsi_stmt (gsi);
2357 gimple_set_bb (stmt, merge_target_bb);
2358 if (predicated[i])
2360 ssa_op_iter i;
2361 tree op;
2362 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF)
2363 reset_flow_sensitive_info (op);
2367 /* Update stmt list. */
2368 last = gsi_last_bb (merge_target_bb);
2369 gsi_insert_seq_after (&last, bb_seq (bb), GSI_NEW_STMT);
2370 set_bb_seq (bb, NULL);
2372 delete_basic_block (bb);
2375 /* If possible, merge the loop header with the block containing the exit
2376 edge.  This reduces the number of basic blocks to two, to please the
2377 vectorizer, which handles only loops with two nodes.  */
2378 if (exit_bb
2379 && exit_bb != loop->header
2380 && can_merge_blocks_p (loop->header, exit_bb))
2381 merge_blocks (loop->header, exit_bb);
2383 free (ifc_bbs);
2384 ifc_bbs = NULL;
2385 free (predicated);
2388 /* Version LOOP before if-converting it; the original loop
2389 will be if-converted, the new copy of the loop will not,
2390 and the LOOP_VECTORIZED internal call will guard which
2391 loop to execute.  The vectorizer pass will later fold this
2392 internal call into either true or false.  */
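/* Editor's illustration (a sketch of the structure created below; the
   placeholders in angle brackets stand for the loop numbers passed to the
   internal call):

   | _cond = LOOP_VECTORIZED (<num of LOOP>, <num of the copy>);
   | if (_cond != 0)
   |   <LOOP: if-converted and considered for vectorization>
   | else
   |   <copy: dont_vectorize set, kept as scalar code>  */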
2394 static bool
2395 version_loop_for_if_conversion (struct loop *loop)
2397 basic_block cond_bb;
2398 tree cond = make_ssa_name (boolean_type_node);
2399 struct loop *new_loop;
2400 gimple *g;
2401 gimple_stmt_iterator gsi;
2403 g = gimple_build_call_internal (IFN_LOOP_VECTORIZED, 2,
2404 build_int_cst (integer_type_node, loop->num),
2405 integer_zero_node);
2406 gimple_call_set_lhs (g, cond);
2408 initialize_original_copy_tables ();
2409 new_loop = loop_version (loop, cond, &cond_bb,
2410 REG_BR_PROB_BASE, REG_BR_PROB_BASE,
2411 REG_BR_PROB_BASE, true);
2412 free_original_copy_tables ();
2413 if (new_loop == NULL)
2414 return false;
2415 new_loop->dont_vectorize = true;
2416 new_loop->force_vectorize = false;
2417 gsi = gsi_last_bb (cond_bb);
2418 gimple_call_set_arg (g, 1, build_int_cst (integer_type_node, new_loop->num));
2419 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
2420 update_ssa (TODO_update_ssa);
2421 return true;
2424 /* Split critical edges inside LOOP if aggressive_if_conv is true.
2425 Return false if the loop will not be if-converted and true otherwise.  */
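/* Editor's illustration: a critical edge is an edge whose source block has
   more than one successor and whose destination block has more than one
   predecessor; splitting it inserts an empty forwarder block on the edge.
   A sketch (block numbers are made up):

   | bb_2: if (c_1 != 0) goto bb_4; else goto bb_3;
   | bb_3: ...; goto bb_4;
   | bb_4: <two predecessors>

   becomes

   | bb_2: if (c_1 != 0) goto bb_5; else goto bb_3;
   | bb_3: ...; goto bb_4;
   | bb_5: goto bb_4;
   | bb_4: <two predecessors>  */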
2427 static bool
2428 ifcvt_split_critical_edges (struct loop *loop)
2430 basic_block *body;
2431 basic_block bb;
2432 unsigned int num = loop->num_nodes;
2433 unsigned int i;
2434 gimple *stmt;
2435 edge e;
2436 edge_iterator ei;
2438 if (num <= 2)
2439 return false;
2440 if (loop->inner)
2441 return false;
2442 if (!single_exit (loop))
2443 return false;
2445 body = get_loop_body (loop);
2446 for (i = 0; i < num; i++)
2448 bb = body[i];
2449 if (bb == loop->latch
2450 || bb_with_exit_edge_p (loop, bb))
2451 continue;
2452 stmt = last_stmt (bb);
2453 /* Skip basic blocks not ending with a conditional branch.  */
2454 if (!(stmt && gimple_code (stmt) == GIMPLE_COND))
2455 continue;
2456 FOR_EACH_EDGE (e, ei, bb->succs)
2457 if (EDGE_CRITICAL_P (e) && e->dest->loop_father == loop)
2458 split_edge (e);
2460 free (body);
2461 return true;
2464 /* Assumes that the lhs of DEF_STMT has multiple uses.
2465 Delete one use by (1) creating a copy of DEF_STMT with a
2466 unique lhs; (2) changing the original use of the lhs in one
2467 use statement to the newly created lhs.  */
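/* Editor's illustration (a sketch; the SSA names are made up and the copy's
   lhs is a fresh temporary created with the "_ifc_" prefix):

   | _b_1 = _x_2 & _y_3;            <- DEF_STMT, _b_1 has two uses
   | _z_4 = _b_1 ? _c_5 : _d_6;     <- USE_STMT
   | <some other use of _b_1>

   becomes

   | _b_1 = _x_2 & _y_3;
   | _ifc__7 = _x_2 & _y_3;         <- copy of DEF_STMT with a unique lhs
   | _z_4 = _ifc__7 ? _c_5 : _d_6;  <- USE_STMT now uses the copy
   | <some other use of _b_1>  */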
2469 static void
2470 ifcvt_split_def_stmt (gimple *def_stmt, gimple *use_stmt)
2472 tree var;
2473 tree lhs;
2474 gimple *copy_stmt;
2475 gimple_stmt_iterator gsi;
2476 use_operand_p use_p;
2477 imm_use_iterator imm_iter;
2479 var = gimple_assign_lhs (def_stmt);
2480 copy_stmt = gimple_copy (def_stmt);
2481 lhs = make_temp_ssa_name (TREE_TYPE (var), NULL, "_ifc_");
2482 gimple_assign_set_lhs (copy_stmt, lhs);
2483 SSA_NAME_DEF_STMT (lhs) = copy_stmt;
2484 /* Insert copy of DEF_STMT. */
2485 gsi = gsi_for_stmt (def_stmt);
2486 gsi_insert_after (&gsi, copy_stmt, GSI_SAME_STMT);
2487 /* Change use of var to lhs in use_stmt. */
2488 if (dump_file && (dump_flags & TDF_DETAILS))
2490 fprintf (dump_file, "Change use of var ");
2491 print_generic_expr (dump_file, var, TDF_SLIM);
2492 fprintf (dump_file, " to ");
2493 print_generic_expr (dump_file, lhs, TDF_SLIM);
2494 fprintf (dump_file, "\n");
2496 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, var)
2498 if (USE_STMT (use_p) != use_stmt)
2499 continue;
2500 SET_USE (use_p, lhs);
2501 break;
2505 /* Traverse the bool pattern recursively starting from VAR.
2506 Save its def and use statements to DEFUSE_LIST if VAR does
2507 not have a single use.  */
2509 static void
2510 ifcvt_walk_pattern_tree (tree var, vec<gimple *> *defuse_list,
2511 gimple *use_stmt)
2513 tree rhs1, rhs2;
2514 enum tree_code code;
2515 gimple *def_stmt;
2517 def_stmt = SSA_NAME_DEF_STMT (var);
2518 if (gimple_code (def_stmt) != GIMPLE_ASSIGN)
2519 return;
2520 if (!has_single_use (var))
2522 /* Put def and use stmts into defuse_list. */
2523 defuse_list->safe_push (def_stmt);
2524 defuse_list->safe_push (use_stmt);
2525 if (dump_file && (dump_flags & TDF_DETAILS))
2527 fprintf (dump_file, "Multiple lhs uses in stmt\n");
2528 print_gimple_stmt (dump_file, def_stmt, 0, TDF_SLIM);
2531 rhs1 = gimple_assign_rhs1 (def_stmt);
2532 code = gimple_assign_rhs_code (def_stmt);
2533 switch (code)
2535 case SSA_NAME:
2536 ifcvt_walk_pattern_tree (rhs1, defuse_list, def_stmt);
2537 break;
2538 CASE_CONVERT:
2539 if ((TYPE_PRECISION (TREE_TYPE (rhs1)) != 1
2540 || !TYPE_UNSIGNED (TREE_TYPE (rhs1)))
2541 && TREE_CODE (TREE_TYPE (rhs1)) != BOOLEAN_TYPE)
2542 break;
2543 ifcvt_walk_pattern_tree (rhs1, defuse_list, def_stmt);
2544 break;
2545 case BIT_NOT_EXPR:
2546 ifcvt_walk_pattern_tree (rhs1, defuse_list, def_stmt);
2547 break;
2548 case BIT_AND_EXPR:
2549 case BIT_IOR_EXPR:
2550 case BIT_XOR_EXPR:
2551 ifcvt_walk_pattern_tree (rhs1, defuse_list, def_stmt);
2552 rhs2 = gimple_assign_rhs2 (def_stmt);
2553 ifcvt_walk_pattern_tree (rhs2, defuse_list, def_stmt);
2554 break;
2555 default:
2556 break;
2558 return;
2561 /* Returns true if STMT can be the root of a bool pattern applied
2562 by the vectorizer.  */
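/* Editor's illustration (a sketch; the SSA names are made up): either of
   the following forms can act as a root of the pattern:

   | _i_1 = (int) _flag_2;           <- conversion out of a boolean value
   | _x_3 = _flag_4 ? _a_5 : _b_6;   <- COND_EXPR whose condition is an SSA_NAME  */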
2564 static bool
2565 stmt_is_root_of_bool_pattern (gimple *stmt)
2567 enum tree_code code;
2568 tree lhs, rhs;
2570 code = gimple_assign_rhs_code (stmt);
2571 if (CONVERT_EXPR_CODE_P (code))
2573 lhs = gimple_assign_lhs (stmt);
2574 rhs = gimple_assign_rhs1 (stmt);
2575 if (TREE_CODE (TREE_TYPE (rhs)) != BOOLEAN_TYPE)
2576 return false;
2577 if (TREE_CODE (TREE_TYPE (lhs)) == BOOLEAN_TYPE)
2578 return false;
2579 return true;
2581 else if (code == COND_EXPR)
2583 rhs = gimple_assign_rhs1 (stmt);
2584 if (TREE_CODE (rhs) != SSA_NAME)
2585 return false;
2586 return true;
2588 return false;
2591 /* Traverse all statements in BB, which corresponds to the loop header,
2592 to find all statements that can start a bool pattern applied by the
2593 vectorizer, and convert multiple uses in them to conform to the pattern
2594 restrictions.  Such a case can occur if the same predicate is used both
2595 for phi node conversion and for a load/store mask.  */
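/* Editor's illustration (a sketch of such a situation; SSA names made up):

   | _mask_1 = _a_2 < _b_3;
   | x_4 = _mask_1 ? y_5 : z_6;                  <- from phi node conversion
   | MASK_STORE (&A[i_7], align, _mask_1, x_4);  <- load/store mask

   Splitting the definition of _mask_1 gives each of the two consumers its
   own single-use copy.  */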
2597 static void
2598 ifcvt_repair_bool_pattern (basic_block bb)
2600 tree rhs;
2601 gimple *stmt;
2602 gimple_stmt_iterator gsi;
2603 vec<gimple *> defuse_list = vNULL;
2604 vec<gimple *> pattern_roots = vNULL;
2605 bool repeat = true;
2606 int niter = 0;
2607 unsigned int ix;
2609 /* Collect all root pattern statements. */
2610 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2612 stmt = gsi_stmt (gsi);
2613 if (gimple_code (stmt) != GIMPLE_ASSIGN)
2614 continue;
2615 if (!stmt_is_root_of_bool_pattern (stmt))
2616 continue;
2617 pattern_roots.safe_push (stmt);
2620 if (pattern_roots.is_empty ())
2621 return;
2623 /* Split all statements with multiple uses iteratively since splitting
2624 may create new multiple uses. */
2625 while (repeat)
2627 repeat = false;
2628 niter++;
2629 FOR_EACH_VEC_ELT (pattern_roots, ix, stmt)
2631 rhs = gimple_assign_rhs1 (stmt);
2632 ifcvt_walk_pattern_tree (rhs, &defuse_list, stmt);
2633 while (defuse_list.length () > 0)
2635 repeat = true;
2636 gimple *def_stmt, *use_stmt;
2637 use_stmt = defuse_list.pop ();
2638 def_stmt = defuse_list.pop ();
2639 ifcvt_split_def_stmt (def_stmt, use_stmt);
2644 if (dump_file && (dump_flags & TDF_DETAILS))
2645 fprintf (dump_file, "Repairing the bool pattern took %d iterations.\n",
2646 niter);
2649 /* Delete redundant statements produced by predication that would
2650 otherwise prevent loop vectorization.  */
2652 static void
2653 ifcvt_local_dce (basic_block bb)
2655 gimple *stmt;
2656 gimple *stmt1;
2657 gimple *phi;
2658 gimple_stmt_iterator gsi;
2659 auto_vec<gimple *> worklist;
2660 enum gimple_code code;
2661 use_operand_p use_p;
2662 imm_use_iterator imm_iter;
2664 worklist.create (64);
2665 /* Consider all phis as live statements.  */
2666 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2668 phi = gsi_stmt (gsi);
2669 gimple_set_plf (phi, GF_PLF_2, true);
2670 worklist.safe_push (phi);
2672 /* Consider load/store statements, CALLs and CONDs as live.  */
2673 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2675 stmt = gsi_stmt (gsi);
2676 if (gimple_store_p (stmt)
2677 || gimple_assign_load_p (stmt)
2678 || is_gimple_debug (stmt))
2680 gimple_set_plf (stmt, GF_PLF_2, true);
2681 worklist.safe_push (stmt);
2682 continue;
2684 code = gimple_code (stmt);
2685 if (code == GIMPLE_COND || code == GIMPLE_CALL)
2687 gimple_set_plf (stmt, GF_PLF_2, true);
2688 worklist.safe_push (stmt);
2689 continue;
2691 gimple_set_plf (stmt, GF_PLF_2, false);
2693 if (code == GIMPLE_ASSIGN)
2695 tree lhs = gimple_assign_lhs (stmt);
2696 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
2698 stmt1 = USE_STMT (use_p);
2699 if (gimple_bb (stmt1) != bb)
2701 gimple_set_plf (stmt, GF_PLF_2, true);
2702 worklist.safe_push (stmt);
2703 break;
2708 /* Propagate liveness through the arguments of live stmts.  */
2709 while (worklist.length () > 0)
2711 ssa_op_iter iter;
2712 use_operand_p use_p;
2713 tree use;
2715 stmt = worklist.pop ();
2716 FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
2718 use = USE_FROM_PTR (use_p);
2719 if (TREE_CODE (use) != SSA_NAME)
2720 continue;
2721 stmt1 = SSA_NAME_DEF_STMT (use);
2722 if (gimple_bb (stmt1) != bb
2723 || gimple_plf (stmt1, GF_PLF_2))
2724 continue;
2725 gimple_set_plf (stmt1, GF_PLF_2, true);
2726 worklist.safe_push (stmt1);
2729 /* Delete dead statements. */
2730 gsi = gsi_start_bb (bb);
2731 while (!gsi_end_p (gsi))
2733 stmt = gsi_stmt (gsi);
2734 if (gimple_plf (stmt, GF_PLF_2))
2736 gsi_next (&gsi);
2737 continue;
2739 if (dump_file && (dump_flags & TDF_DETAILS))
2741 fprintf (dump_file, "Delete dead stmt in bb#%d\n", bb->index);
2742 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2744 gsi_remove (&gsi, true);
2745 release_defs (stmt);
2749 /* If-convert LOOP when it is legal. For the moment this pass has no
2750 profitability analysis. Returns non-zero todo flags when something
2751 changed. */
2753 static unsigned int
2754 tree_if_conversion (struct loop *loop)
2756 unsigned int todo = 0;
2757 ifc_bbs = NULL;
2758 bool any_mask_load_store = false;
2760 /* Set up aggressive if-conversion for loops marked with simd pragma. */
2761 aggressive_if_conv = loop->force_vectorize;
2762 /* Check whether the outer loop was marked with the simd pragma.  */
2763 if (!aggressive_if_conv)
2765 struct loop *outer_loop = loop_outer (loop);
2766 if (outer_loop && outer_loop->force_vectorize)
2767 aggressive_if_conv = true;
2770 if (aggressive_if_conv)
2771 if (!ifcvt_split_critical_edges (loop))
2772 goto cleanup;
2774 if (!if_convertible_loop_p (loop, &any_mask_load_store)
2775 || !dbg_cnt (if_conversion_tree))
2776 goto cleanup;
2778 if (any_mask_load_store
2779 && ((!flag_tree_loop_vectorize && !loop->force_vectorize)
2780 || loop->dont_vectorize))
2781 goto cleanup;
2783 if (any_mask_load_store && !version_loop_for_if_conversion (loop))
2784 goto cleanup;
2786 /* Now all statements are if-convertible. Combine all the basic
2787 blocks into one huge basic block doing the if-conversion
2788 on-the-fly. */
2789 combine_blocks (loop, any_mask_load_store);
2791 /* Delete dead predicate computations and repair the tree corresponding
2792 to the bool pattern in order to delete multiple uses of predicates.  */
2793 if (aggressive_if_conv)
2795 ifcvt_local_dce (loop->header);
2796 ifcvt_repair_bool_pattern (loop->header);
2799 todo |= TODO_cleanup_cfg;
2800 if (any_mask_load_store)
2802 mark_virtual_operands_for_renaming (cfun);
2803 todo |= TODO_update_ssa_only_virtuals;
2806 cleanup:
2807 if (ifc_bbs)
2809 unsigned int i;
2811 for (i = 0; i < loop->num_nodes; i++)
2812 free_bb_predicate (ifc_bbs[i]);
2814 free (ifc_bbs);
2815 ifc_bbs = NULL;
2817 free_dominance_info (CDI_POST_DOMINATORS);
2819 return todo;
2822 /* Tree if-conversion pass management. */
2824 namespace {
2826 const pass_data pass_data_if_conversion =
2828 GIMPLE_PASS, /* type */
2829 "ifcvt", /* name */
2830 OPTGROUP_NONE, /* optinfo_flags */
2831 TV_NONE, /* tv_id */
2832 ( PROP_cfg | PROP_ssa ), /* properties_required */
2833 0, /* properties_provided */
2834 0, /* properties_destroyed */
2835 0, /* todo_flags_start */
2836 0, /* todo_flags_finish */
2839 class pass_if_conversion : public gimple_opt_pass
2841 public:
2842 pass_if_conversion (gcc::context *ctxt)
2843 : gimple_opt_pass (pass_data_if_conversion, ctxt)
2846 /* opt_pass methods: */
2847 virtual bool gate (function *);
2848 virtual unsigned int execute (function *);
2850 }; // class pass_if_conversion
2852 bool
2853 pass_if_conversion::gate (function *fun)
2855 return (((flag_tree_loop_vectorize || fun->has_force_vectorize_loops)
2856 && flag_tree_loop_if_convert != 0)
2857 || flag_tree_loop_if_convert == 1
2858 || flag_tree_loop_if_convert_stores == 1);
2861 unsigned int
2862 pass_if_conversion::execute (function *fun)
2864 struct loop *loop;
2865 unsigned todo = 0;
2867 if (number_of_loops (fun) <= 1)
2868 return 0;
2870 FOR_EACH_LOOP (loop, 0)
2871 if (flag_tree_loop_if_convert == 1
2872 || flag_tree_loop_if_convert_stores == 1
2873 || ((flag_tree_loop_vectorize || loop->force_vectorize)
2874 && !loop->dont_vectorize))
2875 todo |= tree_if_conversion (loop);
2877 if (flag_checking)
2879 basic_block bb;
2880 FOR_EACH_BB_FN (bb, fun)
2881 gcc_assert (!bb->aux);
2884 return todo;
2887 } // anon namespace
2889 gimple_opt_pass *
2890 make_pass_if_conversion (gcc::context *ctxt)
2892 return new pass_if_conversion (ctxt);