/* gcc/tree-vect-loop-manip.c — from the official GCC repository
   (merge of trunk r193672 into the gupc branch).  */
1 /* Vectorizer Specific Loop Manipulations
2 Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2012
3 Free Software Foundation, Inc.
4 Contributed by Dorit Naishlos <dorit@il.ibm.com>
5 and Ira Rosen <irar@il.ibm.com>
7 This file is part of GCC.
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "dumpfile.h"
27 #include "tm.h"
28 #include "ggc.h"
29 #include "tree.h"
30 #include "basic-block.h"
31 #include "gimple-pretty-print.h"
32 #include "tree-flow.h"
33 #include "tree-pass.h"
34 #include "cfgloop.h"
35 #include "diagnostic-core.h"
36 #include "tree-scalar-evolution.h"
37 #include "tree-vectorizer.h"
38 #include "langhooks.h"
40 /*************************************************************************
41 Simple Loop Peeling Utilities
43 Utilities to support loop peeling for vectorization purposes.
44 *************************************************************************/
47 /* Renames the use *OP_P. */
49 static void
50 rename_use_op (use_operand_p op_p)
52 tree new_name;
54 if (TREE_CODE (USE_FROM_PTR (op_p)) != SSA_NAME)
55 return;
57 new_name = get_current_def (USE_FROM_PTR (op_p));
59 /* Something defined outside of the loop. */
60 if (!new_name)
61 return;
63 /* An ordinary ssa name defined in the loop. */
65 SET_USE (op_p, new_name);
69 /* Renames the variables in basic block BB. */
71 void
72 rename_variables_in_bb (basic_block bb)
74 gimple_stmt_iterator gsi;
75 gimple stmt;
76 use_operand_p use_p;
77 ssa_op_iter iter;
78 edge e;
79 edge_iterator ei;
80 struct loop *loop = bb->loop_father;
82 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
84 stmt = gsi_stmt (gsi);
85 FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_ALL_USES)
86 rename_use_op (use_p);
89 FOR_EACH_EDGE (e, ei, bb->succs)
91 if (!flow_bb_inside_loop_p (loop, e->dest))
92 continue;
93 for (gsi = gsi_start_phis (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
94 rename_use_op (PHI_ARG_DEF_PTR_FROM_EDGE (gsi_stmt (gsi), e));
99 /* Renames variables in new generated LOOP. */
101 void
102 rename_variables_in_loop (struct loop *loop)
104 unsigned i;
105 basic_block *bbs;
107 bbs = get_loop_body (loop);
109 for (i = 0; i < loop->num_nodes; i++)
110 rename_variables_in_bb (bbs[i]);
112 free (bbs);
115 typedef struct
117 tree from, to;
118 basic_block bb;
119 } adjust_info;
121 /* A stack of values to be adjusted in debug stmts. We have to
122 process them LIFO, so that the closest substitution applies. If we
123 processed them FIFO, without the stack, we might substitute uses
124 with a PHI DEF that would soon become non-dominant, and when we got
125 to the suitable one, it wouldn't have anything to substitute any
126 more. */
127 static vec<adjust_info, va_stack> adjust_vec;
129 /* Adjust any debug stmts that referenced AI->from values to use the
130 loop-closed AI->to, if the references are dominated by AI->bb and
131 not by the definition of AI->from. */
133 static void
134 adjust_debug_stmts_now (adjust_info *ai)
136 basic_block bbphi = ai->bb;
137 tree orig_def = ai->from;
138 tree new_def = ai->to;
139 imm_use_iterator imm_iter;
140 gimple stmt;
141 basic_block bbdef = gimple_bb (SSA_NAME_DEF_STMT (orig_def));
143 gcc_assert (dom_info_available_p (CDI_DOMINATORS));
145 /* Adjust any debug stmts that held onto non-loop-closed
146 references. */
147 FOR_EACH_IMM_USE_STMT (stmt, imm_iter, orig_def)
149 use_operand_p use_p;
150 basic_block bbuse;
152 if (!is_gimple_debug (stmt))
153 continue;
155 gcc_assert (gimple_debug_bind_p (stmt));
157 bbuse = gimple_bb (stmt);
159 if ((bbuse == bbphi
160 || dominated_by_p (CDI_DOMINATORS, bbuse, bbphi))
161 && !(bbuse == bbdef
162 || dominated_by_p (CDI_DOMINATORS, bbuse, bbdef)))
164 if (new_def)
165 FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
166 SET_USE (use_p, new_def);
167 else
169 gimple_debug_bind_reset_value (stmt);
170 update_stmt (stmt);
176 /* Adjust debug stmts as scheduled before. */
178 static void
179 adjust_vec_debug_stmts (void)
181 if (!MAY_HAVE_DEBUG_STMTS)
182 return;
184 gcc_assert (adjust_vec.exists ());
186 while (!adjust_vec.is_empty ())
188 adjust_debug_stmts_now (&adjust_vec.last ());
189 adjust_vec.pop ();
192 adjust_vec.release ();
195 /* Adjust any debug stmts that referenced FROM values to use the
196 loop-closed TO, if the references are dominated by BB and not by
197 the definition of FROM. If adjust_vec is non-NULL, adjustments
198 will be postponed until adjust_vec_debug_stmts is called. */
200 static void
201 adjust_debug_stmts (tree from, tree to, basic_block bb)
203 adjust_info ai;
205 if (MAY_HAVE_DEBUG_STMTS
206 && TREE_CODE (from) == SSA_NAME
207 && ! virtual_operand_p (from))
209 ai.from = from;
210 ai.to = to;
211 ai.bb = bb;
213 if (adjust_vec.exists ())
214 adjust_vec.safe_push (ai);
215 else
216 adjust_debug_stmts_now (&ai);
220 /* Change E's phi arg in UPDATE_PHI to NEW_DEF, and record information
221 to adjust any debug stmts that referenced the old phi arg,
222 presumably non-loop-closed references left over from other
223 transformations. */
225 static void
226 adjust_phi_and_debug_stmts (gimple update_phi, edge e, tree new_def)
228 tree orig_def = PHI_ARG_DEF_FROM_EDGE (update_phi, e);
230 SET_PHI_ARG_DEF (update_phi, e->dest_idx, new_def);
232 if (MAY_HAVE_DEBUG_STMTS)
233 adjust_debug_stmts (orig_def, PHI_RESULT (update_phi),
234 gimple_bb (update_phi));
238 /* Update the PHI nodes of NEW_LOOP.
240 NEW_LOOP is a duplicate of ORIG_LOOP.
241 AFTER indicates whether NEW_LOOP executes before or after ORIG_LOOP:
242 AFTER is true if NEW_LOOP executes after ORIG_LOOP, and false if it
243 executes before it. */
245 static void
246 slpeel_update_phis_for_duplicate_loop (struct loop *orig_loop,
247 struct loop *new_loop, bool after)
249 tree new_ssa_name;
250 gimple phi_new, phi_orig;
251 tree def;
252 edge orig_loop_latch = loop_latch_edge (orig_loop);
253 edge orig_entry_e = loop_preheader_edge (orig_loop);
254 edge new_loop_exit_e = single_exit (new_loop);
255 edge new_loop_entry_e = loop_preheader_edge (new_loop);
256 edge entry_arg_e = (after ? orig_loop_latch : orig_entry_e);
257 gimple_stmt_iterator gsi_new, gsi_orig;
260 step 1. For each loop-header-phi:
261 Add the first phi argument for the phi in NEW_LOOP
262 (the one associated with the entry of NEW_LOOP)
264 step 2. For each loop-header-phi:
265 Add the second phi argument for the phi in NEW_LOOP
266 (the one associated with the latch of NEW_LOOP)
268 step 3. Update the phis in the successor block of NEW_LOOP.
270 case 1: NEW_LOOP was placed before ORIG_LOOP:
271 The successor block of NEW_LOOP is the header of ORIG_LOOP.
272 Updating the phis in the successor block can therefore be done
273 along with the scanning of the loop header phis, because the
274 header blocks of ORIG_LOOP and NEW_LOOP have exactly the same
275 phi nodes, organized in the same order.
277 case 2: NEW_LOOP was placed after ORIG_LOOP:
278 The successor block of NEW_LOOP is the original exit block of
279 ORIG_LOOP - the phis to be updated are the loop-closed-ssa phis.
280 We postpone updating these phis to a later stage (when
281 loop guards are added).
285 /* Scan the phis in the headers of the old and new loops
286 (they are organized in exactly the same order). */
288 for (gsi_new = gsi_start_phis (new_loop->header),
289 gsi_orig = gsi_start_phis (orig_loop->header);
290 !gsi_end_p (gsi_new) && !gsi_end_p (gsi_orig);
291 gsi_next (&gsi_new), gsi_next (&gsi_orig))
293 source_location locus;
294 phi_new = gsi_stmt (gsi_new);
295 phi_orig = gsi_stmt (gsi_orig);
297 /* step 1. */
298 def = PHI_ARG_DEF_FROM_EDGE (phi_orig, entry_arg_e);
299 locus = gimple_phi_arg_location_from_edge (phi_orig, entry_arg_e);
300 add_phi_arg (phi_new, def, new_loop_entry_e, locus);
302 /* step 2. */
303 def = PHI_ARG_DEF_FROM_EDGE (phi_orig, orig_loop_latch);
304 locus = gimple_phi_arg_location_from_edge (phi_orig, orig_loop_latch);
305 if (TREE_CODE (def) != SSA_NAME)
306 continue;
308 new_ssa_name = get_current_def (def);
309 if (!new_ssa_name)
311 /* This only happens if there are no definitions
312 inside the loop. use the phi_result in this case. */
313 new_ssa_name = PHI_RESULT (phi_new);
316 /* An ordinary ssa name defined in the loop. */
317 add_phi_arg (phi_new, new_ssa_name, loop_latch_edge (new_loop), locus);
319 /* Drop any debug references outside the loop, if they would
320 become ill-formed SSA. */
321 adjust_debug_stmts (def, NULL, single_exit (orig_loop)->dest);
323 /* step 3 (case 1). */
324 if (!after)
326 gcc_assert (new_loop_exit_e == orig_entry_e);
327 adjust_phi_and_debug_stmts (phi_orig, new_loop_exit_e, new_ssa_name);
333 /* Update PHI nodes for a guard of the LOOP.
335 Input:
336 - LOOP, GUARD_EDGE: LOOP is a loop for which we added guard code that
337 controls whether LOOP is to be executed. GUARD_EDGE is the edge that
338 originates from the guard-bb, skips LOOP and reaches the (unique) exit
339 bb of LOOP. This loop-exit-bb is an empty bb with one successor.
340 We denote this bb NEW_MERGE_BB because before the guard code was added
341 it had a single predecessor (the LOOP header), and now it became a merge
342 point of two paths - the path that ends with the LOOP exit-edge, and
343 the path that ends with GUARD_EDGE.
344 - NEW_EXIT_BB: New basic block that is added by this function between LOOP
345 and NEW_MERGE_BB. It is used to place loop-closed-ssa-form exit-phis.
347 ===> The CFG before the guard-code was added:
348 LOOP_header_bb:
349 loop_body
350 if (exit_loop) goto update_bb
351 else goto LOOP_header_bb
352 update_bb:
354 ==> The CFG after the guard-code was added:
355 guard_bb:
356 if (LOOP_guard_condition) goto new_merge_bb
357 else goto LOOP_header_bb
358 LOOP_header_bb:
359 loop_body
360 if (exit_loop_condition) goto new_merge_bb
361 else goto LOOP_header_bb
362 new_merge_bb:
363 goto update_bb
364 update_bb:
366 ==> The CFG after this function:
367 guard_bb:
368 if (LOOP_guard_condition) goto new_merge_bb
369 else goto LOOP_header_bb
370 LOOP_header_bb:
371 loop_body
372 if (exit_loop_condition) goto new_exit_bb
373 else goto LOOP_header_bb
374 new_exit_bb:
375 new_merge_bb:
376 goto update_bb
377 update_bb:
379 This function:
380 1. creates and updates the relevant phi nodes to account for the new
381 incoming edge (GUARD_EDGE) into NEW_MERGE_BB. This involves:
382 1.1. Create phi nodes at NEW_MERGE_BB.
383 1.2. Update the phi nodes at the successor of NEW_MERGE_BB (denoted
384 UPDATE_BB). UPDATE_BB was the exit-bb of LOOP before NEW_MERGE_BB
385 2. preserves loop-closed-ssa-form by creating the required phi nodes
386 at the exit of LOOP (i.e, in NEW_EXIT_BB).
388 There are two flavors to this function:
390 slpeel_update_phi_nodes_for_guard1:
391 Here the guard controls whether we enter or skip LOOP, where LOOP is a
392 prolog_loop (loop1 below), and the new phis created in NEW_MERGE_BB are
393 for variables that have phis in the loop header.
395 slpeel_update_phi_nodes_for_guard2:
396 Here the guard controls whether we enter or skip LOOP, where LOOP is an
397 epilog_loop (loop2 below), and the new phis created in NEW_MERGE_BB are
398 for variables that have phis in the loop exit.
400 I.E., the overall structure is:
402 loop1_preheader_bb:
403 guard1 (goto loop1/merge1_bb)
404 loop1
405 loop1_exit_bb:
406 guard2 (goto merge1_bb/merge2_bb)
407 merge1_bb
408 loop2
409 loop2_exit_bb
410 merge2_bb
411 next_bb
413 slpeel_update_phi_nodes_for_guard1 takes care of creating phis in
414 loop1_exit_bb and merge1_bb. These are entry phis (phis for the vars
415 that have phis in loop1->header).
417 slpeel_update_phi_nodes_for_guard2 takes care of creating phis in
418 loop2_exit_bb and merge2_bb. These are exit phis (phis for the vars
419 that have phis in next_bb). It also adds some of these phis to
420 loop1_exit_bb.
422 slpeel_update_phi_nodes_for_guard1 is always called before
423 slpeel_update_phi_nodes_for_guard2. They are both needed in order
424 to create correct data-flow and loop-closed-ssa-form.
426 Generally slpeel_update_phi_nodes_for_guard1 creates phis for variables
427 that change between iterations of a loop (and therefore have a phi-node
428 at the loop entry), whereas slpeel_update_phi_nodes_for_guard2 creates
429 phis for variables that are used out of the loop (and therefore have
430 loop-closed exit phis). Some variables may be both updated between
431 iterations and used after the loop. This is why in loop1_exit_bb we
432 may need both entry_phis (created by slpeel_update_phi_nodes_for_guard1)
433 and exit phis (created by slpeel_update_phi_nodes_for_guard2).
435 - IS_NEW_LOOP: if IS_NEW_LOOP is true, then LOOP is a newly created copy of
436 an original loop. i.e., we have:
438 orig_loop
439 guard_bb (goto LOOP/new_merge)
440 new_loop <-- LOOP
441 new_exit
442 new_merge
443 next_bb
445 If IS_NEW_LOOP is false, then LOOP is an original loop, in which case we
446 have:
448 new_loop
449 guard_bb (goto LOOP/new_merge)
450 orig_loop <-- LOOP
451 new_exit
452 new_merge
453 next_bb
455 The SSA names defined in the original loop have a current
456 reaching definition that records the corresponding new
457 ssa-name used in the new duplicated loop copy.
460 /* Function slpeel_update_phi_nodes_for_guard1
462 Input:
463 - GUARD_EDGE, LOOP, IS_NEW_LOOP, NEW_EXIT_BB - as explained above.
464 - DEFS - a bitmap of ssa names to mark new names for which we recorded
465 information.
467 In the context of the overall structure, we have:
469 loop1_preheader_bb:
470 guard1 (goto loop1/merge1_bb)
471 LOOP-> loop1
472 loop1_exit_bb:
473 guard2 (goto merge1_bb/merge2_bb)
474 merge1_bb
475 loop2
476 loop2_exit_bb
477 merge2_bb
478 next_bb
480 For each name updated between loop iterations (i.e - for each name that has
481 an entry (loop-header) phi in LOOP) we create a new phi in:
482 1. merge1_bb (to account for the edge from guard1)
483 2. loop1_exit_bb (an exit-phi to keep LOOP in loop-closed form)
486 static void
487 slpeel_update_phi_nodes_for_guard1 (edge guard_edge, struct loop *loop,
488 bool is_new_loop, basic_block *new_exit_bb)
490 gimple orig_phi, new_phi;
491 gimple update_phi, update_phi2;
492 tree guard_arg, loop_arg;
493 basic_block new_merge_bb = guard_edge->dest;
494 edge e = EDGE_SUCC (new_merge_bb, 0);
495 basic_block update_bb = e->dest;
496 basic_block orig_bb = loop->header;
497 edge new_exit_e;
498 tree current_new_name;
499 gimple_stmt_iterator gsi_orig, gsi_update;
501 /* Create new bb between loop and new_merge_bb. */
502 *new_exit_bb = split_edge (single_exit (loop));
504 new_exit_e = EDGE_SUCC (*new_exit_bb, 0);
506 for (gsi_orig = gsi_start_phis (orig_bb),
507 gsi_update = gsi_start_phis (update_bb);
508 !gsi_end_p (gsi_orig) && !gsi_end_p (gsi_update);
509 gsi_next (&gsi_orig), gsi_next (&gsi_update))
511 source_location loop_locus, guard_locus;
512 tree new_res;
513 orig_phi = gsi_stmt (gsi_orig);
514 update_phi = gsi_stmt (gsi_update);
516 /** 1. Handle new-merge-point phis **/
518 /* 1.1. Generate new phi node in NEW_MERGE_BB: */
519 new_res = copy_ssa_name (PHI_RESULT (orig_phi), NULL);
520 new_phi = create_phi_node (new_res, new_merge_bb);
522 /* 1.2. NEW_MERGE_BB has two incoming edges: GUARD_EDGE and the exit-edge
523 of LOOP. Set the two phi args in NEW_PHI for these edges: */
524 loop_arg = PHI_ARG_DEF_FROM_EDGE (orig_phi, EDGE_SUCC (loop->latch, 0));
525 loop_locus = gimple_phi_arg_location_from_edge (orig_phi,
526 EDGE_SUCC (loop->latch,
527 0));
528 guard_arg = PHI_ARG_DEF_FROM_EDGE (orig_phi, loop_preheader_edge (loop));
529 guard_locus
530 = gimple_phi_arg_location_from_edge (orig_phi,
531 loop_preheader_edge (loop));
533 add_phi_arg (new_phi, loop_arg, new_exit_e, loop_locus);
534 add_phi_arg (new_phi, guard_arg, guard_edge, guard_locus);
536 /* 1.3. Update phi in successor block. */
537 gcc_assert (PHI_ARG_DEF_FROM_EDGE (update_phi, e) == loop_arg
538 || PHI_ARG_DEF_FROM_EDGE (update_phi, e) == guard_arg);
539 adjust_phi_and_debug_stmts (update_phi, e, PHI_RESULT (new_phi));
540 update_phi2 = new_phi;
543 /** 2. Handle loop-closed-ssa-form phis **/
545 if (virtual_operand_p (PHI_RESULT (orig_phi)))
546 continue;
548 /* 2.1. Generate new phi node in NEW_EXIT_BB: */
549 new_res = copy_ssa_name (PHI_RESULT (orig_phi), NULL);
550 new_phi = create_phi_node (new_res, *new_exit_bb);
552 /* 2.2. NEW_EXIT_BB has one incoming edge: the exit-edge of the loop. */
553 add_phi_arg (new_phi, loop_arg, single_exit (loop), loop_locus);
555 /* 2.3. Update phi in successor of NEW_EXIT_BB: */
556 gcc_assert (PHI_ARG_DEF_FROM_EDGE (update_phi2, new_exit_e) == loop_arg);
557 adjust_phi_and_debug_stmts (update_phi2, new_exit_e,
558 PHI_RESULT (new_phi));
560 /* 2.4. Record the newly created name with set_current_def.
561 We want to find a name such that
562 name = get_current_def (orig_loop_name)
563 and to set its current definition as follows:
564 set_current_def (name, new_phi_name)
566 If LOOP is a new loop then loop_arg is already the name we're
567 looking for. If LOOP is the original loop, then loop_arg is
568 the orig_loop_name and the relevant name is recorded in its
569 current reaching definition. */
570 if (is_new_loop)
571 current_new_name = loop_arg;
572 else
574 current_new_name = get_current_def (loop_arg);
575 /* current_def is not available only if the variable does not
576 change inside the loop, in which case we also don't care
577 about recording a current_def for it because we won't be
578 trying to create loop-exit-phis for it. */
579 if (!current_new_name)
580 continue;
582 gcc_assert (get_current_def (current_new_name) == NULL_TREE);
584 set_current_def (current_new_name, PHI_RESULT (new_phi));
589 /* Function slpeel_update_phi_nodes_for_guard2
591 Input:
592 - GUARD_EDGE, LOOP, IS_NEW_LOOP, NEW_EXIT_BB - as explained above.
594 In the context of the overall structure, we have:
596 loop1_preheader_bb:
597 guard1 (goto loop1/merge1_bb)
598 loop1
599 loop1_exit_bb:
600 guard2 (goto merge1_bb/merge2_bb)
601 merge1_bb
602 LOOP-> loop2
603 loop2_exit_bb
604 merge2_bb
605 next_bb
607 For each name used out side the loop (i.e - for each name that has an exit
608 phi in next_bb) we create a new phi in:
609 1. merge2_bb (to account for the edge from guard_bb)
610 2. loop2_exit_bb (an exit-phi to keep LOOP in loop-closed form)
611 3. guard2 bb (an exit phi to keep the preceding loop in loop-closed form),
612 if needed (if it wasn't handled by slpeel_update_phis_nodes_for_phi1).
615 static void
616 slpeel_update_phi_nodes_for_guard2 (edge guard_edge, struct loop *loop,
617 bool is_new_loop, basic_block *new_exit_bb)
619 gimple orig_phi, new_phi;
620 gimple update_phi, update_phi2;
621 tree guard_arg, loop_arg;
622 basic_block new_merge_bb = guard_edge->dest;
623 edge e = EDGE_SUCC (new_merge_bb, 0);
624 basic_block update_bb = e->dest;
625 edge new_exit_e;
626 tree orig_def, orig_def_new_name;
627 tree new_name, new_name2;
628 tree arg;
629 gimple_stmt_iterator gsi;
631 /* Create new bb between loop and new_merge_bb. */
632 *new_exit_bb = split_edge (single_exit (loop));
634 new_exit_e = EDGE_SUCC (*new_exit_bb, 0);
636 for (gsi = gsi_start_phis (update_bb); !gsi_end_p (gsi); gsi_next (&gsi))
638 tree new_res;
639 update_phi = gsi_stmt (gsi);
640 orig_phi = update_phi;
641 orig_def = PHI_ARG_DEF_FROM_EDGE (orig_phi, e);
642 /* This loop-closed-phi actually doesn't represent a use
643 out of the loop - the phi arg is a constant. */
644 if (TREE_CODE (orig_def) != SSA_NAME)
645 continue;
646 orig_def_new_name = get_current_def (orig_def);
647 arg = NULL_TREE;
649 /** 1. Handle new-merge-point phis **/
651 /* 1.1. Generate new phi node in NEW_MERGE_BB: */
652 new_res = copy_ssa_name (PHI_RESULT (orig_phi), NULL);
653 new_phi = create_phi_node (new_res, new_merge_bb);
655 /* 1.2. NEW_MERGE_BB has two incoming edges: GUARD_EDGE and the exit-edge
656 of LOOP. Set the two PHI args in NEW_PHI for these edges: */
657 new_name = orig_def;
658 new_name2 = NULL_TREE;
659 if (orig_def_new_name)
661 new_name = orig_def_new_name;
662 /* Some variables have both loop-entry-phis and loop-exit-phis.
663 Such variables were given yet newer names by phis placed in
664 guard_bb by slpeel_update_phi_nodes_for_guard1. I.e:
665 new_name2 = get_current_def (get_current_def (orig_name)). */
666 new_name2 = get_current_def (new_name);
669 if (is_new_loop)
671 guard_arg = orig_def;
672 loop_arg = new_name;
674 else
676 guard_arg = new_name;
677 loop_arg = orig_def;
679 if (new_name2)
680 guard_arg = new_name2;
682 add_phi_arg (new_phi, loop_arg, new_exit_e, UNKNOWN_LOCATION);
683 add_phi_arg (new_phi, guard_arg, guard_edge, UNKNOWN_LOCATION);
685 /* 1.3. Update phi in successor block. */
686 gcc_assert (PHI_ARG_DEF_FROM_EDGE (update_phi, e) == orig_def);
687 adjust_phi_and_debug_stmts (update_phi, e, PHI_RESULT (new_phi));
688 update_phi2 = new_phi;
691 /** 2. Handle loop-closed-ssa-form phis **/
693 /* 2.1. Generate new phi node in NEW_EXIT_BB: */
694 new_res = copy_ssa_name (PHI_RESULT (orig_phi), NULL);
695 new_phi = create_phi_node (new_res, *new_exit_bb);
697 /* 2.2. NEW_EXIT_BB has one incoming edge: the exit-edge of the loop. */
698 add_phi_arg (new_phi, loop_arg, single_exit (loop), UNKNOWN_LOCATION);
700 /* 2.3. Update phi in successor of NEW_EXIT_BB: */
701 gcc_assert (PHI_ARG_DEF_FROM_EDGE (update_phi2, new_exit_e) == loop_arg);
702 adjust_phi_and_debug_stmts (update_phi2, new_exit_e,
703 PHI_RESULT (new_phi));
706 /** 3. Handle loop-closed-ssa-form phis for first loop **/
708 /* 3.1. Find the relevant names that need an exit-phi in
709 GUARD_BB, i.e. names for which
710 slpeel_update_phi_nodes_for_guard1 had not already created a
711 phi node. This is the case for names that are used outside
712 the loop (and therefore need an exit phi) but are not updated
713 across loop iterations (and therefore don't have a
714 loop-header-phi).
716 slpeel_update_phi_nodes_for_guard1 is responsible for
717 creating loop-exit phis in GUARD_BB for names that have a
718 loop-header-phi. When such a phi is created we also record
719 the new name in its current definition. If this new name
720 exists, then guard_arg was set to this new name (see 1.2
721 above). Therefore, if guard_arg is not this new name, this
722 is an indication that an exit-phi in GUARD_BB was not yet
723 created, so we take care of it here. */
724 if (guard_arg == new_name2)
725 continue;
726 arg = guard_arg;
728 /* 3.2. Generate new phi node in GUARD_BB: */
729 new_res = copy_ssa_name (PHI_RESULT (orig_phi), NULL);
730 new_phi = create_phi_node (new_res, guard_edge->src);
732 /* 3.3. GUARD_BB has one incoming edge: */
733 gcc_assert (EDGE_COUNT (guard_edge->src->preds) == 1);
734 add_phi_arg (new_phi, arg, EDGE_PRED (guard_edge->src, 0),
735 UNKNOWN_LOCATION);
737 /* 3.4. Update phi in successor of GUARD_BB: */
738 gcc_assert (PHI_ARG_DEF_FROM_EDGE (update_phi2, guard_edge)
739 == guard_arg);
740 adjust_phi_and_debug_stmts (update_phi2, guard_edge,
741 PHI_RESULT (new_phi));
746 /* Make the LOOP iterate NITERS times. This is done by adding a new IV
747 that starts at zero, increases by one and its limit is NITERS.
749 Assumption: the exit-condition of LOOP is the last stmt in the loop. */
751 void
752 slpeel_make_loop_iterate_ntimes (struct loop *loop, tree niters)
754 tree indx_before_incr, indx_after_incr;
755 gimple cond_stmt;
756 gimple orig_cond;
757 edge exit_edge = single_exit (loop);
758 gimple_stmt_iterator loop_cond_gsi;
759 gimple_stmt_iterator incr_gsi;
760 bool insert_after;
761 tree init = build_int_cst (TREE_TYPE (niters), 0);
762 tree step = build_int_cst (TREE_TYPE (niters), 1);
763 LOC loop_loc;
764 enum tree_code code;
766 orig_cond = get_loop_exit_condition (loop);
767 gcc_assert (orig_cond);
768 loop_cond_gsi = gsi_for_stmt (orig_cond);
770 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
771 create_iv (init, step, NULL_TREE, loop,
772 &incr_gsi, insert_after, &indx_before_incr, &indx_after_incr);
774 indx_after_incr = force_gimple_operand_gsi (&loop_cond_gsi, indx_after_incr,
775 true, NULL_TREE, true,
776 GSI_SAME_STMT);
777 niters = force_gimple_operand_gsi (&loop_cond_gsi, niters, true, NULL_TREE,
778 true, GSI_SAME_STMT);
780 code = (exit_edge->flags & EDGE_TRUE_VALUE) ? GE_EXPR : LT_EXPR;
781 cond_stmt = gimple_build_cond (code, indx_after_incr, niters, NULL_TREE,
782 NULL_TREE);
784 gsi_insert_before (&loop_cond_gsi, cond_stmt, GSI_SAME_STMT);
786 /* Remove old loop exit test: */
787 gsi_remove (&loop_cond_gsi, true);
788 free_stmt_vec_info (orig_cond);
790 loop_loc = find_loop_location (loop);
791 if (dump_enabled_p ())
793 if (LOCATION_LOCUS (loop_loc) != UNKNOWN_LOC)
794 dump_printf (MSG_NOTE, "\nloop at %s:%d: ", LOC_FILE (loop_loc),
795 LOC_LINE (loop_loc));
796 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, cond_stmt, 0);
798 loop->nb_iterations = niters;
802 /* Given LOOP this function generates a new copy of it and puts it
803 on E which is either the entry or exit of LOOP. */
805 struct loop *
806 slpeel_tree_duplicate_loop_to_edge_cfg (struct loop *loop, edge e)
808 struct loop *new_loop;
809 basic_block *new_bbs, *bbs;
810 bool at_exit;
811 bool was_imm_dom;
812 basic_block exit_dest;
813 gimple phi;
814 tree phi_arg;
815 edge exit, new_exit;
816 gimple_stmt_iterator gsi;
818 at_exit = (e == single_exit (loop));
819 if (!at_exit && e != loop_preheader_edge (loop))
820 return NULL;
822 bbs = get_loop_body (loop);
824 /* Check whether duplication is possible. */
825 if (!can_copy_bbs_p (bbs, loop->num_nodes))
827 free (bbs);
828 return NULL;
831 /* Generate new loop structure. */
832 new_loop = duplicate_loop (loop, loop_outer (loop));
833 if (!new_loop)
835 free (bbs);
836 return NULL;
839 exit_dest = single_exit (loop)->dest;
840 was_imm_dom = (get_immediate_dominator (CDI_DOMINATORS,
841 exit_dest) == loop->header ?
842 true : false);
844 new_bbs = XNEWVEC (basic_block, loop->num_nodes);
846 exit = single_exit (loop);
847 copy_bbs (bbs, loop->num_nodes, new_bbs,
848 &exit, 1, &new_exit, NULL,
849 e->src);
851 /* Duplicating phi args at exit bbs as coming
852 also from exit of duplicated loop. */
853 for (gsi = gsi_start_phis (exit_dest); !gsi_end_p (gsi); gsi_next (&gsi))
855 phi = gsi_stmt (gsi);
856 phi_arg = PHI_ARG_DEF_FROM_EDGE (phi, single_exit (loop));
857 if (phi_arg)
859 edge new_loop_exit_edge;
860 source_location locus;
862 locus = gimple_phi_arg_location_from_edge (phi, single_exit (loop));
863 if (EDGE_SUCC (new_loop->header, 0)->dest == new_loop->latch)
864 new_loop_exit_edge = EDGE_SUCC (new_loop->header, 1);
865 else
866 new_loop_exit_edge = EDGE_SUCC (new_loop->header, 0);
868 add_phi_arg (phi, phi_arg, new_loop_exit_edge, locus);
872 if (at_exit) /* Add the loop copy at exit. */
874 redirect_edge_and_branch_force (e, new_loop->header);
875 PENDING_STMT (e) = NULL;
876 set_immediate_dominator (CDI_DOMINATORS, new_loop->header, e->src);
877 if (was_imm_dom)
878 set_immediate_dominator (CDI_DOMINATORS, exit_dest, new_loop->header);
880 else /* Add the copy at entry. */
882 edge new_exit_e;
883 edge entry_e = loop_preheader_edge (loop);
884 basic_block preheader = entry_e->src;
886 if (!flow_bb_inside_loop_p (new_loop,
887 EDGE_SUCC (new_loop->header, 0)->dest))
888 new_exit_e = EDGE_SUCC (new_loop->header, 0);
889 else
890 new_exit_e = EDGE_SUCC (new_loop->header, 1);
892 redirect_edge_and_branch_force (new_exit_e, loop->header);
893 PENDING_STMT (new_exit_e) = NULL;
894 set_immediate_dominator (CDI_DOMINATORS, loop->header,
895 new_exit_e->src);
897 /* We have to add phi args to the loop->header here as coming
898 from new_exit_e edge. */
899 for (gsi = gsi_start_phis (loop->header);
900 !gsi_end_p (gsi);
901 gsi_next (&gsi))
903 phi = gsi_stmt (gsi);
904 phi_arg = PHI_ARG_DEF_FROM_EDGE (phi, entry_e);
905 if (phi_arg)
906 add_phi_arg (phi, phi_arg, new_exit_e,
907 gimple_phi_arg_location_from_edge (phi, entry_e));
910 redirect_edge_and_branch_force (entry_e, new_loop->header);
911 PENDING_STMT (entry_e) = NULL;
912 set_immediate_dominator (CDI_DOMINATORS, new_loop->header, preheader);
915 free (new_bbs);
916 free (bbs);
918 return new_loop;
922 /* Given the condition statement COND, put it as the last statement
923 of GUARD_BB; EXIT_BB is the basic block to skip the loop;
924 Assumes that this is the single exit of the guarded loop.
925 Returns the skip edge, inserts new stmts on the COND_EXPR_STMT_LIST. */
927 static edge
928 slpeel_add_loop_guard (basic_block guard_bb, tree cond,
929 gimple_seq cond_expr_stmt_list,
930 basic_block exit_bb, basic_block dom_bb,
931 int probability)
933 gimple_stmt_iterator gsi;
934 edge new_e, enter_e;
935 gimple cond_stmt;
936 gimple_seq gimplify_stmt_list = NULL;
938 enter_e = EDGE_SUCC (guard_bb, 0);
939 enter_e->flags &= ~EDGE_FALLTHRU;
940 enter_e->flags |= EDGE_FALSE_VALUE;
941 gsi = gsi_last_bb (guard_bb);
943 cond = force_gimple_operand_1 (cond, &gimplify_stmt_list, is_gimple_condexpr,
944 NULL_TREE);
945 if (gimplify_stmt_list)
946 gimple_seq_add_seq (&cond_expr_stmt_list, gimplify_stmt_list);
947 cond_stmt = gimple_build_cond_from_tree (cond, NULL_TREE, NULL_TREE);
948 if (cond_expr_stmt_list)
949 gsi_insert_seq_after (&gsi, cond_expr_stmt_list, GSI_NEW_STMT);
951 gsi = gsi_last_bb (guard_bb);
952 gsi_insert_after (&gsi, cond_stmt, GSI_NEW_STMT);
954 /* Add new edge to connect guard block to the merge/loop-exit block. */
955 new_e = make_edge (guard_bb, exit_bb, EDGE_TRUE_VALUE);
957 new_e->count = guard_bb->count;
958 new_e->probability = probability;
959 new_e->count = apply_probability (enter_e->count, probability);
960 enter_e->count -= new_e->count;
961 enter_e->probability = inverse_probability (probability);
962 set_immediate_dominator (CDI_DOMINATORS, exit_bb, dom_bb);
963 return new_e;
967 /* This function verifies that the following restrictions apply to LOOP:
968 (1) it is innermost
969 (2) it consists of exactly 2 basic blocks - header, and an empty latch.
970 (3) it is single entry, single exit
971 (4) its exit condition is the last stmt in the header
972 (5) E is the entry/exit edge of LOOP.
975 bool
976 slpeel_can_duplicate_loop_p (const struct loop *loop, const_edge e)
978 edge exit_e = single_exit (loop);
979 edge entry_e = loop_preheader_edge (loop);
980 gimple orig_cond = get_loop_exit_condition (loop);
981 gimple_stmt_iterator loop_exit_gsi = gsi_last_bb (exit_e->src);
983 if (need_ssa_update_p (cfun))
984 return false;
986 if (loop->inner
987 /* All loops have an outer scope; the only case loop->outer is NULL is for
988 the function itself. */
989 || !loop_outer (loop)
990 || loop->num_nodes != 2
991 || !empty_block_p (loop->latch)
992 || !single_exit (loop)
993 /* Verify that new loop exit condition can be trivially modified. */
994 || (!orig_cond || orig_cond != gsi_stmt (loop_exit_gsi))
995 || (e != exit_e && e != entry_e))
996 return false;
998 return true;
#ifdef ENABLE_CHECKING
/* Sanity-check the CFG produced by loop peeling: FIRST_LOOP's exit
   block must hold the guard (two successors), and SECOND_LOOP's
   preheader must be reachable from both FIRST_LOOP's exit and the
   block preceding FIRST_LOOP.  Checking-only; aborts on violation.  */
static void
slpeel_verify_cfg_after_peeling (struct loop *first_loop,
				 struct loop *second_loop)
{
  basic_block loop1_exit_bb = single_exit (first_loop)->dest;
  basic_block loop2_entry_bb = loop_preheader_edge (second_loop)->src;
  basic_block loop1_entry_bb = loop_preheader_edge (first_loop)->src;

  /* A guard that controls whether the second_loop is to be executed or
     skipped is placed in first_loop->exit.  first_loop->exit therefore
     has two successors - one is the preheader of second_loop, and the
     other is a bb after second_loop.  */
  gcc_assert (EDGE_COUNT (loop1_exit_bb->succs) == 2);

  /* 1. Verify that one of the successors of first_loop->exit is the
	preheader of second_loop.  */

  /* The preheader of new_loop is expected to have two predecessors:
     first_loop->exit and the block that precedes first_loop.  */
  gcc_assert (EDGE_COUNT (loop2_entry_bb->preds) == 2
	      && ((EDGE_PRED (loop2_entry_bb, 0)->src == loop1_exit_bb
		   && EDGE_PRED (loop2_entry_bb, 1)->src == loop1_entry_bb)
		  || (EDGE_PRED (loop2_entry_bb, 1)->src == loop1_exit_bb
		      && EDGE_PRED (loop2_entry_bb, 0)->src == loop1_entry_bb)));

  /* Verify that the other successor of first_loop->exit is after the
     second_loop.  */
  /* TODO */
}
#endif
/* If the run time cost model check determines that vectorization is
   not profitable and hence scalar loop should be generated then set
   FIRST_NITERS to prologue peeled iterations.  This will allow all the
   iterations to be executed in the prologue peeled scalar loop.

   BB_BEFORE_FIRST_LOOP is the preheader block of the (to-be-)peeled loop;
   TH is the cost-model threshold and PROBABILITY the branch probability
   assigned to the "scalar is cheaper" outcome.  On return *FIRST_NITERS
   is replaced by a PHI merging the original value with the full scalar
   iteration count.  */

static void
set_prologue_iterations (basic_block bb_before_first_loop,
			 tree *first_niters,
			 struct loop *loop,
			 unsigned int th,
			 int probability)
{
  edge e;
  basic_block cond_bb, then_bb;
  tree var, prologue_after_cost_adjust_name;
  gimple_stmt_iterator gsi;
  gimple newphi;
  edge e_true, e_false, e_fallthru;
  gimple cond_stmt;
  gimple_seq stmts = NULL;
  tree cost_pre_condition = NULL_TREE;
  tree scalar_loop_iters =
    unshare_expr (LOOP_VINFO_NITERS_UNCHANGED (loop_vec_info_for_loop (loop)));

  /* Split the preheader edge twice: COND_BB will hold the runtime cost
     check, THEN_BB is entered when the scalar loop is preferred.  */
  e = single_pred_edge (bb_before_first_loop);
  cond_bb = split_edge(e);

  e = single_pred_edge (bb_before_first_loop);
  then_bb = split_edge(e);
  set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);

  /* The FALSE edge bypasses THEN_BB and leaves *FIRST_NITERS unchanged.  */
  e_false = make_single_succ_edge (cond_bb, bb_before_first_loop,
				   EDGE_FALSE_VALUE);
  set_immediate_dominator (CDI_DOMINATORS, bb_before_first_loop, cond_bb);

  /* split_edge created COND_BB->THEN_BB as a fallthru edge; retag it as
     the TRUE side of the condition inserted below.  */
  e_true = EDGE_PRED (then_bb, 0);
  e_true->flags &= ~EDGE_FALLTHRU;
  e_true->flags |= EDGE_TRUE_VALUE;

  /* Distribute profile counts and frequencies per PROBABILITY.  */
  e_true->probability = probability;
  e_false->probability = inverse_probability (probability);
  e_true->count = apply_probability (cond_bb->count, probability);
  e_false->count = cond_bb->count - e_true->count;
  then_bb->frequency = EDGE_FREQUENCY (e_true);
  then_bb->count = e_true->count;

  e_fallthru = EDGE_SUCC (then_bb, 0);
  e_fallthru->count = then_bb->count;

  /* Build and insert the runtime check: scalar_loop_iters <= TH.  */
  gsi = gsi_last_bb (cond_bb);
  cost_pre_condition =
    fold_build2 (LE_EXPR, boolean_type_node, scalar_loop_iters,
	         build_int_cst (TREE_TYPE (scalar_loop_iters), th));
  cost_pre_condition =
    force_gimple_operand_gsi_1 (&gsi, cost_pre_condition, is_gimple_condexpr,
				NULL_TREE, false, GSI_CONTINUE_LINKING);
  cond_stmt = gimple_build_cond_from_tree (cost_pre_condition,
					   NULL_TREE, NULL_TREE);
  gsi_insert_after (&gsi, cond_stmt, GSI_NEW_STMT);

  /* Materialize the full scalar iteration count; any statements needed
     to compute it go into THEN_BB (the "use scalar loop" path).  */
  var = create_tmp_var (TREE_TYPE (scalar_loop_iters),
			"prologue_after_cost_adjust");
  prologue_after_cost_adjust_name =
    force_gimple_operand (scalar_loop_iters, &stmts, false, var);

  gsi = gsi_last_bb (then_bb);
  if (stmts)
    gsi_insert_seq_after (&gsi, stmts, GSI_NEW_STMT);

  /* Merge the two possible iteration counts with a PHI and hand the PHI
     result back to the caller as the new *FIRST_NITERS.  */
  newphi = create_phi_node (var, bb_before_first_loop);
  add_phi_arg (newphi, prologue_after_cost_adjust_name, e_fallthru,
	       UNKNOWN_LOCATION);
  add_phi_arg (newphi, *first_niters, e_false, UNKNOWN_LOCATION);

  *first_niters = PHI_RESULT (newphi);
}
/* Function slpeel_tree_peel_loop_to_edge.

   Peel the first (last) iterations of LOOP into a new prolog (epilog) loop
   that is placed on the entry (exit) edge E of LOOP.  After this transformation
   we have two loops one after the other - first-loop iterates FIRST_NITERS
   times, and second-loop iterates the remainder NITERS - FIRST_NITERS times.
   If the cost model indicates that it is profitable to emit a scalar
   loop instead of the vector one, then the prolog (epilog) loop will iterate
   for the entire unchanged scalar iterations of the loop.

   Input:
   - LOOP: the loop to be peeled.
   - E: the exit or entry edge of LOOP.
        If it is the entry edge, we peel the first iterations of LOOP.  In this
        case first-loop is LOOP, and second-loop is the newly created loop.
        If it is the exit edge, we peel the last iterations of LOOP.  In this
        case, first-loop is the newly created loop, and second-loop is LOOP.
   - NITERS: the number of iterations that LOOP iterates.
   - FIRST_NITERS: the number of iterations that the first-loop should iterate.
   - UPDATE_FIRST_LOOP_COUNT: specified whether this function is responsible
        for updating the loop bound of the first-loop to FIRST_NITERS.  If it
        is false, the caller of this function may want to take care of this
        (this can be useful if we don't want new stmts added to first-loop).
   - TH: cost model profitability threshold of iterations for vectorization.
   - CHECK_PROFITABILITY: specify whether cost model check has not occurred
                          during versioning and hence needs to occur during
                          prologue generation or whether cost model check
                          has not occurred during prologue generation and hence
                          needs to occur during epilogue generation.
   - BOUND1 is the upper bound on number of iterations of the first loop (if known)
   - BOUND2 is the upper bound on number of iterations of the second loop (if known)

   Output:
   The function returns a pointer to the new loop-copy, or NULL if it failed
   to perform the transformation.

   The function generates two if-then-else guards: one before the first loop,
   and the other before the second loop:
   The first guard is:
     if (FIRST_NITERS == 0) then skip the first loop,
     and go directly to the second loop.
   The second guard is:
     if (FIRST_NITERS == NITERS) then skip the second loop.

   If the optional COND_EXPR and COND_EXPR_STMT_LIST arguments are given
   then the generated condition is combined with COND_EXPR and the
   statements in COND_EXPR_STMT_LIST are emitted together with it.

   FORNOW only simple loops are supported (see slpeel_can_duplicate_loop_p).
   FORNOW the resulting code will not be in loop-closed-ssa form.  */

static struct loop*
slpeel_tree_peel_loop_to_edge (struct loop *loop,
			       edge e, tree *first_niters,
			       tree niters, bool update_first_loop_count,
			       unsigned int th, bool check_profitability,
			       tree cond_expr, gimple_seq cond_expr_stmt_list,
			       int bound1, int bound2)
{
  struct loop *new_loop = NULL, *first_loop, *second_loop;
  edge skip_e;
  tree pre_condition = NULL_TREE;
  basic_block bb_before_second_loop, bb_after_second_loop;
  basic_block bb_before_first_loop;
  basic_block bb_between_loops;
  basic_block new_exit_bb;
  gimple_stmt_iterator gsi;
  edge exit_e = single_exit (loop);
  LOC loop_loc;
  tree cost_pre_condition = NULL_TREE;
  /* There are many aspects to how likely the first loop is going to be executed.
     Without histogram we can't really do good job.  Simply set it to
     2/3, so the first loop is not reordered to the end of function and
     the hot path through stays short.  */
  int first_guard_probability = 2 * REG_BR_PROB_BASE / 3;
  int second_guard_probability = 2 * REG_BR_PROB_BASE / 3;
  int probability_of_second_loop;

  if (!slpeel_can_duplicate_loop_p (loop, e))
    return NULL;

  /* If the loop has a virtual PHI, but exit bb doesn't, create a virtual PHI
     in the exit bb and rename all the uses after the loop.  This simplifies
     the *guard[12] routines, which assume loop closed SSA form for all PHIs
     (but normally loop closed SSA form doesn't require virtual PHIs to be
     in the same form).  Doing this early simplifies the checking what
     uses should be renamed.  */
  for (gsi = gsi_start_phis (loop->header); !gsi_end_p (gsi); gsi_next (&gsi))
    if (virtual_operand_p (gimple_phi_result (gsi_stmt (gsi))))
      {
	gimple phi = gsi_stmt (gsi);
	/* Look for an existing virtual PHI in the exit block; if one is
	   already there nothing needs to be done.  */
	for (gsi = gsi_start_phis (exit_e->dest);
	     !gsi_end_p (gsi); gsi_next (&gsi))
	  if (virtual_operand_p (gimple_phi_result (gsi_stmt (gsi))))
	    break;
	if (gsi_end_p (gsi))
	  {
	    tree new_vop = copy_ssa_name (PHI_RESULT (phi), NULL);
	    gimple new_phi = create_phi_node (new_vop, exit_e->dest);
	    tree vop = PHI_ARG_DEF_FROM_EDGE (phi, EDGE_SUCC (loop->latch, 0));
	    imm_use_iterator imm_iter;
	    gimple stmt;
	    use_operand_p use_p;

	    add_phi_arg (new_phi, vop, exit_e, UNKNOWN_LOCATION);
	    gimple_phi_set_result (new_phi, new_vop);
	    /* Redirect all uses of VOP outside the loop header to the new
	       loop-closed virtual PHI result.  */
	    FOR_EACH_IMM_USE_STMT (stmt, imm_iter, vop)
	      if (stmt != new_phi && gimple_bb (stmt) != loop->header)
		FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
		  SET_USE (use_p, new_vop);
	  }
	break;
      }

  /* 1. Generate a copy of LOOP and put it on E (E is the entry/exit of LOOP).
        Resulting CFG would be:

        first_loop:
        do {
        } while ...

        second_loop:
        do {
        } while ...

        orig_exit_bb:
   */

  if (!(new_loop = slpeel_tree_duplicate_loop_to_edge_cfg (loop, e)))
    {
      loop_loc = find_loop_location (loop);
      dump_printf_loc (MSG_MISSED_OPTIMIZATION, loop_loc,
                       "tree_duplicate_loop_to_edge_cfg failed.\n");
      return NULL;
    }

  if (MAY_HAVE_DEBUG_STMTS)
    {
      gcc_assert (!adjust_vec.exists ());
      vec_stack_alloc (adjust_info, adjust_vec, 32);
    }

  if (e == exit_e)
    {
      /* NEW_LOOP was placed after LOOP.  */
      first_loop = loop;
      second_loop = new_loop;
    }
  else
    {
      /* NEW_LOOP was placed before LOOP.  */
      first_loop = new_loop;
      second_loop = loop;
    }

  slpeel_update_phis_for_duplicate_loop (loop, new_loop, e == exit_e);
  rename_variables_in_loop (new_loop);

  /* 2.  Add the guard code in one of the following ways:

     2.a Add the guard that controls whether the first loop is executed.
         This occurs when this function is invoked for prologue or epilogue
	 generation and when the cost model check can be done at compile time.

         Resulting CFG would be:

        bb_before_first_loop:
        if (FIRST_NITERS == 0) GOTO bb_before_second_loop
                               GOTO first-loop

        first_loop:
        do {
        } while ...

        bb_before_second_loop:

        second_loop:
        do {
        } while ...

        orig_exit_bb:

     2.b Add the cost model check that allows the prologue
         to iterate for the entire unchanged scalar
         iterations of the loop in the event that the cost
         model indicates that the scalar loop is more
         profitable than the vector one.  This occurs when
	 this function is invoked for prologue generation
	 and the cost model check needs to be done at run
	 time.

         Resulting CFG after prologue peeling would be:

         if (scalar_loop_iterations <= th)
           FIRST_NITERS = scalar_loop_iterations

         bb_before_first_loop:
         if (FIRST_NITERS == 0) GOTO bb_before_second_loop
                                GOTO first-loop

         first_loop:
         do {
         } while ...

         bb_before_second_loop:

         second_loop:
         do {
         } while ...

         orig_exit_bb:

     2.c Add the cost model check that allows the epilogue
         to iterate for the entire unchanged scalar
         iterations of the loop in the event that the cost
         model indicates that the scalar loop is more
         profitable than the vector one.  This occurs when
	 this function is invoked for epilogue generation
	 and the cost model check needs to be done at run
	 time.  This check is combined with any pre-existing
	 check in COND_EXPR to avoid versioning.

         Resulting CFG after prologue peeling would be:

         bb_before_first_loop:
         if ((scalar_loop_iterations <= th)
             ||
             FIRST_NITERS == 0) GOTO bb_before_second_loop
                                GOTO first-loop

         first_loop:
         do {
         } while ...

         bb_before_second_loop:

         second_loop:
         do {
         } while ...

         orig_exit_bb:
   */

  bb_before_first_loop = split_edge (loop_preheader_edge (first_loop));
  bb_before_second_loop = split_edge (single_exit (first_loop));

  probability_of_second_loop = (inverse_probability (first_guard_probability)
			        + combine_probabilities (second_guard_probability,
                                                         first_guard_probability));
  /* Theoretically preheader edge of first loop and exit edge should have
     same frequencies.  Loop exit probablities are however easy to get wrong.
     It is safer to copy value from original loop entry.  */
  bb_before_second_loop->frequency
     = apply_probability (bb_before_first_loop->frequency,
			  probability_of_second_loop);
  bb_before_second_loop->count
     = apply_probability (bb_before_first_loop->count,
			  probability_of_second_loop);
  single_succ_edge (bb_before_second_loop)->count
     = bb_before_second_loop->count;

  /* Epilogue peeling.  */
  if (!update_first_loop_count)
    {
      pre_condition =
	fold_build2 (LE_EXPR, boolean_type_node, *first_niters,
		     build_int_cst (TREE_TYPE (*first_niters), 0));
      if (check_profitability)
	{
	  tree scalar_loop_iters
	    = unshare_expr (LOOP_VINFO_NITERS_UNCHANGED
			     (loop_vec_info_for_loop (loop)));
	  cost_pre_condition =
	    fold_build2 (LE_EXPR, boolean_type_node, scalar_loop_iters,
			 build_int_cst (TREE_TYPE (scalar_loop_iters), th));

	  pre_condition = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
				       cost_pre_condition, pre_condition);
	}
      if (cond_expr)
	{
	  pre_condition =
	    fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
			 pre_condition,
			 fold_build1 (TRUTH_NOT_EXPR, boolean_type_node,
				      cond_expr));
	}
    }

  /* Prologue peeling.  */
  else
    {
      if (check_profitability)
	set_prologue_iterations (bb_before_first_loop, first_niters,
				 loop, th, first_guard_probability);

      pre_condition =
	fold_build2 (LE_EXPR, boolean_type_node, *first_niters,
		     build_int_cst (TREE_TYPE (*first_niters), 0));
    }

  skip_e = slpeel_add_loop_guard (bb_before_first_loop, pre_condition,
				  cond_expr_stmt_list,
				  bb_before_second_loop, bb_before_first_loop,
				  inverse_probability (first_guard_probability));
  scale_loop_profile (first_loop, first_guard_probability,
		      check_profitability && (int)th > bound1 ? th : bound1);
  slpeel_update_phi_nodes_for_guard1 (skip_e, first_loop,
				      first_loop == new_loop,
				      &new_exit_bb);


  /* 3. Add the guard that controls whether the second loop is executed.
        Resulting CFG would be:

        bb_before_first_loop:
        if (FIRST_NITERS == 0) GOTO bb_before_second_loop (skip first loop)
                               GOTO first-loop

        first_loop:
        do {
        } while ...

        bb_between_loops:
        if (FIRST_NITERS == NITERS) GOTO bb_after_second_loop (skip second loop)
                                    GOTO bb_before_second_loop

        bb_before_second_loop:

        second_loop:
        do {
        } while ...

        bb_after_second_loop:

        orig_exit_bb:
   */

  bb_between_loops = new_exit_bb;
  bb_after_second_loop = split_edge (single_exit (second_loop));

  pre_condition =
	fold_build2 (EQ_EXPR, boolean_type_node, *first_niters, niters);
  skip_e = slpeel_add_loop_guard (bb_between_loops, pre_condition, NULL,
                                  bb_after_second_loop, bb_before_first_loop,
				  inverse_probability (second_guard_probability));
  scale_loop_profile (second_loop, probability_of_second_loop, bound2);
  slpeel_update_phi_nodes_for_guard2 (skip_e, second_loop,
                                     second_loop == new_loop, &new_exit_bb);

  /* 4. Make first-loop iterate FIRST_NITERS times, if requested.  */
  if (update_first_loop_count)
    slpeel_make_loop_iterate_ntimes (first_loop, *first_niters);

  delete_update_ssa ();

  adjust_vec_debug_stmts ();

  return new_loop;
}
1477 /* Function vect_get_loop_location.
1479 Extract the location of the loop in the source code.
1480 If the loop is not well formed for vectorization, an estimated
1481 location is calculated.
1482 Return the loop location if succeed and NULL if not. */
1485 find_loop_location (struct loop *loop)
1487 gimple stmt = NULL;
1488 basic_block bb;
1489 gimple_stmt_iterator si;
1491 if (!loop)
1492 return UNKNOWN_LOC;
1494 stmt = get_loop_exit_condition (loop);
1496 if (stmt && gimple_location (stmt) != UNKNOWN_LOC)
1497 return gimple_location (stmt);
1499 /* If we got here the loop is probably not "well formed",
1500 try to estimate the loop location */
1502 if (!loop->header)
1503 return UNKNOWN_LOC;
1505 bb = loop->header;
1507 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
1509 stmt = gsi_stmt (si);
1510 if (gimple_location (stmt) != UNKNOWN_LOC)
1511 return gimple_location (stmt);
1514 return UNKNOWN_LOC;
1518 /* This function builds ni_name = number of iterations loop executes
1519 on the loop preheader. If SEQ is given the stmt is instead emitted
1520 there. */
1522 static tree
1523 vect_build_loop_niters (loop_vec_info loop_vinfo, gimple_seq seq)
1525 tree ni_name, var;
1526 gimple_seq stmts = NULL;
1527 edge pe;
1528 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1529 tree ni = unshare_expr (LOOP_VINFO_NITERS (loop_vinfo));
1531 var = create_tmp_var (TREE_TYPE (ni), "niters");
1532 ni_name = force_gimple_operand (ni, &stmts, false, var);
1534 pe = loop_preheader_edge (loop);
1535 if (stmts)
1537 if (seq)
1538 gimple_seq_add_seq (&seq, stmts);
1539 else
1541 basic_block new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
1542 gcc_assert (!new_bb);
1546 return ni_name;
/* This function generates the following statements:

 ni_name = number of iterations loop executes
 ratio = ni_name / vf
 ratio_mult_vf_name = ratio * vf

 and places them at the loop preheader edge or in COND_EXPR_STMT_LIST
 if that is non-NULL.  The three computed trees are returned through
 *NI_NAME_PTR, *RATIO_NAME_PTR and *RATIO_MULT_VF_NAME_PTR.  */

static void
vect_generate_tmps_on_preheader (loop_vec_info loop_vinfo,
				 tree *ni_name_ptr,
				 tree *ratio_mult_vf_name_ptr,
				 tree *ratio_name_ptr,
				 gimple_seq cond_expr_stmt_list)
{
  edge pe;
  basic_block new_bb;
  gimple_seq stmts;
  tree ni_name, ni_minus_gap_name;
  tree var;
  tree ratio_name;
  tree ratio_mult_vf_name;
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  tree ni = LOOP_VINFO_NITERS (loop_vinfo);
  int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  tree log_vf;

  pe = loop_preheader_edge (loop);

  /* Generate temporary variable that contains
     number of iterations loop executes.  */

  ni_name = vect_build_loop_niters (loop_vinfo, cond_expr_stmt_list);
  /* VF is always a power of two, so division/multiplication by VF is
     emitted as shifts by log2 (vf).  */
  log_vf = build_int_cst (TREE_TYPE (ni), exact_log2 (vf));

  /* If epilogue loop is required because of data accesses with gaps, we
     subtract one iteration from the total number of iterations here for
     correct calculation of RATIO.  */
  if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo))
    {
      ni_minus_gap_name = fold_build2 (MINUS_EXPR, TREE_TYPE (ni_name),
				       ni_name,
			               build_one_cst (TREE_TYPE (ni_name)));
      if (!is_gimple_val (ni_minus_gap_name))
	{
	  var = create_tmp_var (TREE_TYPE (ni), "ni_gap");

          stmts = NULL;
          ni_minus_gap_name = force_gimple_operand (ni_minus_gap_name, &stmts,
						    true, var);
	  /* NOTE(review): COND_EXPR_STMT_LIST is passed by value; appending
	     works by chaining onto the existing sequence, which presumably
	     requires the caller to pass a non-empty sequence — verify at
	     call sites.  */
          if (cond_expr_stmt_list)
	    gimple_seq_add_seq (&cond_expr_stmt_list, stmts);
          else
	    {
	      pe = loop_preheader_edge (loop);
	      new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
	      gcc_assert (!new_bb);
	    }
        }
    }
  else
    ni_minus_gap_name = ni_name;

  /* Create: ratio = ni >> log2(vf).  */

  ratio_name = fold_build2 (RSHIFT_EXPR, TREE_TYPE (ni_minus_gap_name),
			    ni_minus_gap_name, log_vf);
  if (!is_gimple_val (ratio_name))
    {
      var = create_tmp_var (TREE_TYPE (ni), "bnd");

      stmts = NULL;
      ratio_name = force_gimple_operand (ratio_name, &stmts, true, var);
      if (cond_expr_stmt_list)
	gimple_seq_add_seq (&cond_expr_stmt_list, stmts);
      else
	{
	  pe = loop_preheader_edge (loop);
	  new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
	  gcc_assert (!new_bb);
	}
    }

  /* Create: ratio_mult_vf = ratio << log2 (vf).  */

  ratio_mult_vf_name = fold_build2 (LSHIFT_EXPR, TREE_TYPE (ratio_name),
				    ratio_name, log_vf);
  if (!is_gimple_val (ratio_mult_vf_name))
    {
      var = create_tmp_var (TREE_TYPE (ni), "ratio_mult_vf");

      stmts = NULL;
      ratio_mult_vf_name = force_gimple_operand (ratio_mult_vf_name, &stmts,
						 true, var);
      if (cond_expr_stmt_list)
	gimple_seq_add_seq (&cond_expr_stmt_list, stmts);
      else
	{
	  pe = loop_preheader_edge (loop);
	  new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
	  gcc_assert (!new_bb);
	}
    }

  *ni_name_ptr = ni_name;
  *ratio_mult_vf_name_ptr = ratio_mult_vf_name;
  *ratio_name_ptr = ratio_name;

  return;
}
1663 /* Function vect_can_advance_ivs_p
1665 In case the number of iterations that LOOP iterates is unknown at compile
1666 time, an epilog loop will be generated, and the loop induction variables
1667 (IVs) will be "advanced" to the value they are supposed to take just before
1668 the epilog loop. Here we check that the access function of the loop IVs
1669 and the expression that represents the loop bound are simple enough.
1670 These restrictions will be relaxed in the future. */
1672 bool
1673 vect_can_advance_ivs_p (loop_vec_info loop_vinfo)
1675 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1676 basic_block bb = loop->header;
1677 gimple phi;
1678 gimple_stmt_iterator gsi;
1680 /* Analyze phi functions of the loop header. */
1682 if (dump_enabled_p ())
1683 dump_printf_loc (MSG_NOTE, vect_location, "vect_can_advance_ivs_p:");
1684 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1686 tree access_fn = NULL;
1687 tree evolution_part;
1689 phi = gsi_stmt (gsi);
1690 if (dump_enabled_p ())
1692 dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: ");
1693 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
1696 /* Skip virtual phi's. The data dependences that are associated with
1697 virtual defs/uses (i.e., memory accesses) are analyzed elsewhere. */
1699 if (virtual_operand_p (PHI_RESULT (phi)))
1701 if (dump_enabled_p ())
1702 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1703 "virtual phi. skip.");
1704 continue;
1707 /* Skip reduction phis. */
1709 if (STMT_VINFO_DEF_TYPE (vinfo_for_stmt (phi)) == vect_reduction_def)
1711 if (dump_enabled_p ())
1712 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1713 "reduc phi. skip.");
1714 continue;
1717 /* Analyze the evolution function. */
1719 access_fn = instantiate_parameters
1720 (loop, analyze_scalar_evolution (loop, PHI_RESULT (phi)));
1722 if (!access_fn)
1724 if (dump_enabled_p ())
1725 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1726 "No Access function.");
1727 return false;
1730 if (dump_enabled_p ())
1732 dump_printf_loc (MSG_NOTE, vect_location,
1733 "Access function of PHI: ");
1734 dump_generic_expr (MSG_NOTE, TDF_SLIM, access_fn);
1737 evolution_part = evolution_part_in_loop_num (access_fn, loop->num);
1739 if (evolution_part == NULL_TREE)
1741 if (dump_enabled_p ())
1742 dump_printf (MSG_MISSED_OPTIMIZATION, "No evolution.");
1743 return false;
1746 /* FORNOW: We do not transform initial conditions of IVs
1747 which evolution functions are a polynomial of degree >= 2. */
1749 if (tree_is_chrec (evolution_part))
1750 return false;
1753 return true;
/* Function vect_update_ivs_after_vectorizer.

   "Advance" the induction variables of LOOP to the value they should take
   after the execution of LOOP.  This is currently necessary because the
   vectorizer does not handle induction variables that are used after the
   loop.  Such a situation occurs when the last iterations of LOOP are
   peeled, because:
   1. We introduced new uses after LOOP for IVs that were not originally used
      after LOOP: the IVs of LOOP are now used by an epilog loop.
   2. LOOP is going to be vectorized; this means that it will iterate N/VF
      times, whereas the loop IVs should be bumped N times.

   Input:
   - LOOP - a loop that is going to be vectorized.  The last few iterations
     of LOOP were peeled.
   - NITERS - the number of iterations that LOOP executes (before it is
     vectorized).  i.e, the number of times the ivs should be bumped.
   - UPDATE_E - a successor edge of LOOP->exit that is on the (only) path
     coming out from LOOP on which there are uses of the LOOP ivs
     (this is the path from LOOP->exit to epilog_loop->preheader).

   The new definitions of the ivs are placed in LOOP->exit.
   The phi args associated with the edge UPDATE_E in the bb
   UPDATE_E->dest are updated accordingly.

   Assumption 1: Like the rest of the vectorizer, this function assumes
   a single loop exit that has a single predecessor.

   Assumption 2: The phi nodes in the LOOP header and in update_bb are
   organized in the same order.

   Assumption 3: The access function of the ivs is simple enough (see
   vect_can_advance_ivs_p).  This assumption will be relaxed in the future.

   Assumption 4: Exactly one of the successors of LOOP exit-bb is on a path
   coming out of LOOP on which the ivs of LOOP are used (this is the path
   that leads to the epilog loop; other paths skip the epilog loop).  This
   path starts with the edge UPDATE_E, and its destination (denoted update_bb)
   needs to have its phis updated.  */

static void
vect_update_ivs_after_vectorizer (loop_vec_info loop_vinfo, tree niters,
				  edge update_e)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block exit_bb = single_exit (loop)->dest;
  gimple phi, phi1;
  gimple_stmt_iterator gsi, gsi1;
  basic_block update_bb = update_e->dest;

  /* gcc_assert (vect_can_advance_ivs_p (loop_vinfo)); */

  /* Make sure there exists a single-predecessor exit bb:  */
  gcc_assert (single_pred_p (exit_bb));

  /* Walk the header phis and the update_bb phis in lock-step
     (Assumption 2: they are organized in the same order).  */
  for (gsi = gsi_start_phis (loop->header), gsi1 = gsi_start_phis (update_bb);
       !gsi_end_p (gsi) && !gsi_end_p (gsi1);
       gsi_next (&gsi), gsi_next (&gsi1))
    {
      tree init_expr;
      tree step_expr, off;
      tree type;
      tree var, ni, ni_name;
      gimple_stmt_iterator last_gsi;
      stmt_vec_info stmt_info;

      phi = gsi_stmt (gsi);
      phi1 = gsi_stmt (gsi1);
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_update_ivs_after_vectorizer: phi: ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
	}

      /* Skip virtual phi's.  */
      if (virtual_operand_p (PHI_RESULT (phi)))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "virtual phi. skip.");
	  continue;
	}

      /* Skip reduction phis.  */
      stmt_info = vinfo_for_stmt (phi);
      if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "reduc phi. skip.");
	  continue;
	}

      type = TREE_TYPE (gimple_phi_result (phi));
      step_expr = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_info);
      step_expr = unshare_expr (step_expr);

      /* FORNOW: We do not support IVs whose evolution function is a polynomial
         of degree >= 2 or exponential.  */
      gcc_assert (!tree_is_chrec (step_expr));

      init_expr = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));

      /* Advanced value = init + niters * step.  Pointer IVs use
	 POINTER_PLUS arithmetic.  */
      off = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
			 fold_convert (TREE_TYPE (step_expr), niters),
			 step_expr);
      if (POINTER_TYPE_P (type))
	ni = fold_build_pointer_plus (init_expr, off);
      else
	ni = fold_build2 (PLUS_EXPR, type,
			  init_expr, fold_convert (type, off));

      var = create_tmp_var (type, "tmp");

      /* Emit the computation of the advanced value in LOOP's exit bb.  */
      last_gsi = gsi_last_bb (exit_bb);
      ni_name = force_gimple_operand_gsi (&last_gsi, ni, false, var,
					  true, GSI_SAME_STMT);

      /* Fix phi expressions in the successor bb.  */
      adjust_phi_and_debug_stmts (phi1, update_e, ni_name);
    }
}
1882 /* Function vect_do_peeling_for_loop_bound
1884 Peel the last iterations of the loop represented by LOOP_VINFO.
1885 The peeled iterations form a new epilog loop. Given that the loop now
1886 iterates NITERS times, the new epilog loop iterates
1887 NITERS % VECTORIZATION_FACTOR times.
1889 The original loop will later be made to iterate
1890 NITERS / VECTORIZATION_FACTOR times (this value is placed into RATIO).
1892 COND_EXPR and COND_EXPR_STMT_LIST are combined with a new generated
1893 test. */
1895 void
1896 vect_do_peeling_for_loop_bound (loop_vec_info loop_vinfo, tree *ratio,
1897 unsigned int th, bool check_profitability)
1899 tree ni_name, ratio_mult_vf_name;
1900 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1901 struct loop *new_loop;
1902 edge update_e;
1903 basic_block preheader;
1904 int loop_num;
1905 int max_iter;
1906 tree cond_expr = NULL_TREE;
1907 gimple_seq cond_expr_stmt_list = NULL;
1909 if (dump_enabled_p ())
1910 dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
1911 "=== vect_do_peeling_for_loop_bound ===");
1913 initialize_original_copy_tables ();
1915 /* Generate the following variables on the preheader of original loop:
1917 ni_name = number of iteration the original loop executes
1918 ratio = ni_name / vf
1919 ratio_mult_vf_name = ratio * vf */
1920 vect_generate_tmps_on_preheader (loop_vinfo, &ni_name,
1921 &ratio_mult_vf_name, ratio,
1922 cond_expr_stmt_list);
1924 loop_num = loop->num;
1926 new_loop = slpeel_tree_peel_loop_to_edge (loop, single_exit (loop),
1927 &ratio_mult_vf_name, ni_name, false,
1928 th, check_profitability,
1929 cond_expr, cond_expr_stmt_list,
1930 0, LOOP_VINFO_VECT_FACTOR (loop_vinfo));
1931 gcc_assert (new_loop);
1932 gcc_assert (loop_num == loop->num);
1933 #ifdef ENABLE_CHECKING
1934 slpeel_verify_cfg_after_peeling (loop, new_loop);
1935 #endif
1937 /* A guard that controls whether the new_loop is to be executed or skipped
1938 is placed in LOOP->exit. LOOP->exit therefore has two successors - one
1939 is the preheader of NEW_LOOP, where the IVs from LOOP are used. The other
1940 is a bb after NEW_LOOP, where these IVs are not used. Find the edge that
1941 is on the path where the LOOP IVs are used and need to be updated. */
1943 preheader = loop_preheader_edge (new_loop)->src;
1944 if (EDGE_PRED (preheader, 0)->src == single_exit (loop)->dest)
1945 update_e = EDGE_PRED (preheader, 0);
1946 else
1947 update_e = EDGE_PRED (preheader, 1);
1949 /* Update IVs of original loop as if they were advanced
1950 by ratio_mult_vf_name steps. */
1951 vect_update_ivs_after_vectorizer (loop_vinfo, ratio_mult_vf_name, update_e);
1953 /* For vectorization factor N, we need to copy last N-1 values in epilogue
1954 and this means N-2 loopback edge executions.
1956 PEELING_FOR_GAPS works by subtracting last iteration and thus the epilogue
1957 will execute at least LOOP_VINFO_VECT_FACTOR times. */
1958 max_iter = (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
1959 ? LOOP_VINFO_VECT_FACTOR (loop_vinfo) * 2
1960 : LOOP_VINFO_VECT_FACTOR (loop_vinfo)) - 2;
1961 if (check_profitability)
1962 max_iter = MAX (max_iter, (int) th - 1);
1963 record_niter_bound (new_loop, double_int::from_shwi (max_iter), false, true);
1964 dump_printf (MSG_OPTIMIZED_LOCATIONS,
1965 "Setting upper bound of nb iterations for epilogue "
1966 "loop to %d\n", max_iter);
1968 /* After peeling we have to reset scalar evolution analyzer. */
1969 scev_reset ();
1971 free_original_copy_tables ();
1975 /* Function vect_gen_niters_for_prolog_loop
1977 Set the number of iterations for the loop represented by LOOP_VINFO
1978 to the minimum between LOOP_NITERS (the original iteration count of the loop)
1979 and the misalignment of DR - the data reference recorded in
1980 LOOP_VINFO_UNALIGNED_DR (LOOP_VINFO). As a result, after the execution of
1981 this loop, the data reference DR will refer to an aligned location.
1983 The following computation is generated:
1985 If the misalignment of DR is known at compile time:
1986 addr_mis = int mis = DR_MISALIGNMENT (dr);
1987 Else, compute address misalignment in bytes:
1988 addr_mis = addr & (vectype_align - 1)
1990 prolog_niters = min (LOOP_NITERS, ((VF - addr_mis/elem_size)&(VF-1))/step)
1992 (elem_size = element type size; an element is the scalar element whose type
1993 is the inner type of the vectype)
1995 When the step of the data-ref in the loop is not 1 (as in interleaved data
1996 and SLP), the number of iterations of the prolog must be divided by the step
1997 (which is equal to the size of interleaved group).
1999 The above formulas assume that VF == number of elements in the vector. This
2000 may not hold when there are multiple-types in the loop.
2001 In this case, for some data-references in the loop the VF does not represent
2002 the number of elements that fit in the vector. Therefore, instead of VF we
2003 use TYPE_VECTOR_SUBPARTS. */
2005 static tree
2006 vect_gen_niters_for_prolog_loop (loop_vec_info loop_vinfo, tree loop_niters, int *bound)
2008 struct data_reference *dr = LOOP_VINFO_UNALIGNED_DR (loop_vinfo);
2009 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2010 tree var;
2011 gimple_seq stmts;
2012 tree iters, iters_name;
2013 edge pe;
2014 basic_block new_bb;
2015 gimple dr_stmt = DR_STMT (dr);
2016 stmt_vec_info stmt_info = vinfo_for_stmt (dr_stmt);
2017 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2018 int vectype_align = TYPE_ALIGN (vectype) / BITS_PER_UNIT;
2019 tree niters_type = TREE_TYPE (loop_niters);
2020 int nelements = TYPE_VECTOR_SUBPARTS (vectype);
2022 pe = loop_preheader_edge (loop);
2024 if (LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo) > 0)
2026 int npeel = LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo);
2028 if (dump_enabled_p ())
2029 dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
2030 "known peeling = %d.", npeel);
2032 iters = build_int_cst (niters_type, npeel);
2033 *bound = LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo);
2035 else
2037 gimple_seq new_stmts = NULL;
2038 bool negative = tree_int_cst_compare (DR_STEP (dr), size_zero_node) < 0;
2039 tree offset = negative
2040 ? size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1) : NULL_TREE;
2041 tree start_addr = vect_create_addr_base_for_vector_ref (dr_stmt,
2042 &new_stmts, offset, loop);
2043 tree type = unsigned_type_for (TREE_TYPE (start_addr));
2044 tree vectype_align_minus_1 = build_int_cst (type, vectype_align - 1);
2045 HOST_WIDE_INT elem_size =
2046 int_cst_value (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
2047 tree elem_size_log = build_int_cst (type, exact_log2 (elem_size));
2048 tree nelements_minus_1 = build_int_cst (type, nelements - 1);
2049 tree nelements_tree = build_int_cst (type, nelements);
2050 tree byte_misalign;
2051 tree elem_misalign;
2053 new_bb = gsi_insert_seq_on_edge_immediate (pe, new_stmts);
2054 gcc_assert (!new_bb);
2056 /* Create: byte_misalign = addr & (vectype_align - 1) */
2057 byte_misalign =
2058 fold_build2 (BIT_AND_EXPR, type, fold_convert (type, start_addr),
2059 vectype_align_minus_1);
2061 /* Create: elem_misalign = byte_misalign / element_size */
2062 elem_misalign =
2063 fold_build2 (RSHIFT_EXPR, type, byte_misalign, elem_size_log);
2065 /* Create: (niters_type) (nelements - elem_misalign)&(nelements - 1) */
2066 if (negative)
2067 iters = fold_build2 (MINUS_EXPR, type, elem_misalign, nelements_tree);
2068 else
2069 iters = fold_build2 (MINUS_EXPR, type, nelements_tree, elem_misalign);
2070 iters = fold_build2 (BIT_AND_EXPR, type, iters, nelements_minus_1);
2071 iters = fold_convert (niters_type, iters);
2072 *bound = nelements;
2075 /* Create: prolog_loop_niters = min (iters, loop_niters) */
2076 /* If the loop bound is known at compile time we already verified that it is
2077 greater than vf; since the misalignment ('iters') is at most vf, there's
2078 no need to generate the MIN_EXPR in this case. */
2079 if (TREE_CODE (loop_niters) != INTEGER_CST)
2080 iters = fold_build2 (MIN_EXPR, niters_type, iters, loop_niters);
2082 if (dump_enabled_p ())
2084 dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
2085 "niters for prolog loop: ");
2086 dump_generic_expr (MSG_OPTIMIZED_LOCATIONS, TDF_SLIM, iters);
2089 var = create_tmp_var (niters_type, "prolog_loop_niters");
2090 stmts = NULL;
2091 iters_name = force_gimple_operand (iters, &stmts, false, var);
2093 /* Insert stmt on loop preheader edge. */
2094 if (stmts)
2096 basic_block new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
2097 gcc_assert (!new_bb);
2100 return iters_name;
2104 /* Function vect_update_init_of_dr
2106 NITERS iterations were peeled from LOOP. DR represents a data reference
2107 in LOOP. This function updates the information recorded in DR to
2108 account for the fact that the first NITERS iterations had already been
2109 executed. Specifically, it updates the OFFSET field of DR. */
2111 static void
2112 vect_update_init_of_dr (struct data_reference *dr, tree niters)
2114 tree offset = DR_OFFSET (dr);
2116 niters = fold_build2 (MULT_EXPR, sizetype,
2117 fold_convert (sizetype, niters),
2118 fold_convert (sizetype, DR_STEP (dr)));
2119 offset = fold_build2 (PLUS_EXPR, sizetype,
2120 fold_convert (sizetype, offset), niters);
2121 DR_OFFSET (dr) = offset;
2125 /* Function vect_update_inits_of_drs
2127 NITERS iterations were peeled from the loop represented by LOOP_VINFO.
2128 This function updates the information recorded for the data references in
2129 the loop to account for the fact that the first NITERS iterations had
2130 already been executed. Specifically, it updates the initial_condition of
2131 the access_function of all the data_references in the loop. */
2133 static void
2134 vect_update_inits_of_drs (loop_vec_info loop_vinfo, tree niters)
2136 unsigned int i;
2137 vec<data_reference_p> datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
2138 struct data_reference *dr;
2140 if (dump_enabled_p ())
2141 dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
2142 "=== vect_update_inits_of_dr ===");
2144 FOR_EACH_VEC_ELT (datarefs, i, dr)
2145 vect_update_init_of_dr (dr, niters);
2149 /* Function vect_do_peeling_for_alignment
2151 Peel the first 'niters' iterations of the loop represented by LOOP_VINFO.
2152 'niters' is set to the misalignment of one of the data references in the
2153 loop, thereby forcing it to refer to an aligned location at the beginning
2154 of the execution of this loop. The data reference for which we are
2155 peeling is recorded in LOOP_VINFO_UNALIGNED_DR. */
2157 void
2158 vect_do_peeling_for_alignment (loop_vec_info loop_vinfo,
2159 unsigned int th, bool check_profitability)
2161 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2162 tree niters_of_prolog_loop, ni_name;
2163 tree n_iters;
2164 tree wide_prolog_niters;
2165 struct loop *new_loop;
2166 int max_iter;
2167 int bound = 0;
2169 if (dump_enabled_p ())
2170 dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
2171 "=== vect_do_peeling_for_alignment ===");
2173 initialize_original_copy_tables ();
2175 ni_name = vect_build_loop_niters (loop_vinfo, NULL);
2176 niters_of_prolog_loop = vect_gen_niters_for_prolog_loop (loop_vinfo,
2177 ni_name,
2178 &bound);
2180 /* Peel the prolog loop and iterate it niters_of_prolog_loop. */
2181 new_loop =
2182 slpeel_tree_peel_loop_to_edge (loop, loop_preheader_edge (loop),
2183 &niters_of_prolog_loop, ni_name, true,
2184 th, check_profitability, NULL_TREE, NULL,
2185 bound,
2188 gcc_assert (new_loop);
2189 #ifdef ENABLE_CHECKING
2190 slpeel_verify_cfg_after_peeling (new_loop, loop);
2191 #endif
2192 /* For vectorization factor N, we need to copy at most N-1 values
2193 for alignment and this means N-2 loopback edge executions. */
2194 max_iter = LOOP_VINFO_VECT_FACTOR (loop_vinfo) - 2;
2195 if (check_profitability)
2196 max_iter = MAX (max_iter, (int) th - 1);
2197 record_niter_bound (new_loop, double_int::from_shwi (max_iter), false, true);
2198 dump_printf (MSG_OPTIMIZED_LOCATIONS,
2199 "Setting upper bound of nb iterations for prologue "
2200 "loop to %d\n", max_iter);
2202 /* Update number of times loop executes. */
2203 n_iters = LOOP_VINFO_NITERS (loop_vinfo);
2204 LOOP_VINFO_NITERS (loop_vinfo) = fold_build2 (MINUS_EXPR,
2205 TREE_TYPE (n_iters), n_iters, niters_of_prolog_loop);
2207 if (types_compatible_p (sizetype, TREE_TYPE (niters_of_prolog_loop)))
2208 wide_prolog_niters = niters_of_prolog_loop;
2209 else
2211 gimple_seq seq = NULL;
2212 edge pe = loop_preheader_edge (loop);
2213 tree wide_iters = fold_convert (sizetype, niters_of_prolog_loop);
2214 tree var = create_tmp_var (sizetype, "prolog_loop_adjusted_niters");
2215 wide_prolog_niters = force_gimple_operand (wide_iters, &seq, false,
2216 var);
2217 if (seq)
2219 /* Insert stmt on loop preheader edge. */
2220 basic_block new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
2221 gcc_assert (!new_bb);
2225 /* Update the init conditions of the access functions of all data refs. */
2226 vect_update_inits_of_drs (loop_vinfo, wide_prolog_niters);
2228 /* After peeling we have to reset scalar evolution analyzer. */
2229 scev_reset ();
2231 free_original_copy_tables ();
2235 /* Function vect_create_cond_for_align_checks.
2237 Create a conditional expression that represents the alignment checks for
2238 all of data references (array element references) whose alignment must be
2239 checked at runtime.
2241 Input:
2242 COND_EXPR - input conditional expression. New conditions will be chained
2243 with logical AND operation.
2244 LOOP_VINFO - two fields of the loop information are used.
2245 LOOP_VINFO_PTR_MASK is the mask used to check the alignment.
2246 LOOP_VINFO_MAY_MISALIGN_STMTS contains the refs to be checked.
2248 Output:
2249 COND_EXPR_STMT_LIST - statements needed to construct the conditional
2250 expression.
2251 The returned value is the conditional expression to be used in the if
2252 statement that controls which version of the loop gets executed at runtime.
2254 The algorithm makes two assumptions:
2255 1) The number of bytes "n" in a vector is a power of 2.
2256 2) An address "a" is aligned if a%n is zero and that this
2257 test can be done as a&(n-1) == 0. For example, for 16
2258 byte vectors the test is a&0xf == 0. */
2260 static void
2261 vect_create_cond_for_align_checks (loop_vec_info loop_vinfo,
2262 tree *cond_expr,
2263 gimple_seq *cond_expr_stmt_list)
2265 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2266 vec<gimple> may_misalign_stmts
2267 = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo);
2268 gimple ref_stmt;
2269 int mask = LOOP_VINFO_PTR_MASK (loop_vinfo);
2270 tree mask_cst;
2271 unsigned int i;
2272 tree int_ptrsize_type;
2273 char tmp_name[20];
2274 tree or_tmp_name = NULL_TREE;
2275 tree and_tmp_name;
2276 gimple and_stmt;
2277 tree ptrsize_zero;
2278 tree part_cond_expr;
2280 /* Check that mask is one less than a power of 2, i.e., mask is
2281 all zeros followed by all ones. */
2282 gcc_assert ((mask != 0) && ((mask & (mask+1)) == 0));
2284 int_ptrsize_type = signed_type_for (ptr_type_node);
2286 /* Create expression (mask & (dr_1 || ... || dr_n)) where dr_i is the address
2287 of the first vector of the i'th data reference. */
2289 FOR_EACH_VEC_ELT (may_misalign_stmts, i, ref_stmt)
2291 gimple_seq new_stmt_list = NULL;
2292 tree addr_base;
2293 tree addr_tmp_name;
2294 tree new_or_tmp_name;
2295 gimple addr_stmt, or_stmt;
2296 stmt_vec_info stmt_vinfo = vinfo_for_stmt (ref_stmt);
2297 tree vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
2298 bool negative = tree_int_cst_compare
2299 (DR_STEP (STMT_VINFO_DATA_REF (stmt_vinfo)), size_zero_node) < 0;
2300 tree offset = negative
2301 ? size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1) : NULL_TREE;
2303 /* create: addr_tmp = (int)(address_of_first_vector) */
2304 addr_base =
2305 vect_create_addr_base_for_vector_ref (ref_stmt, &new_stmt_list,
2306 offset, loop);
2307 if (new_stmt_list != NULL)
2308 gimple_seq_add_seq (cond_expr_stmt_list, new_stmt_list);
2310 sprintf (tmp_name, "addr2int%d", i);
2311 addr_tmp_name = make_temp_ssa_name (int_ptrsize_type, NULL, tmp_name);
2312 addr_stmt = gimple_build_assign_with_ops (NOP_EXPR, addr_tmp_name,
2313 addr_base, NULL_TREE);
2314 gimple_seq_add_stmt (cond_expr_stmt_list, addr_stmt);
2316 /* The addresses are OR together. */
2318 if (or_tmp_name != NULL_TREE)
2320 /* create: or_tmp = or_tmp | addr_tmp */
2321 sprintf (tmp_name, "orptrs%d", i);
2322 new_or_tmp_name = make_temp_ssa_name (int_ptrsize_type, NULL, tmp_name);
2323 or_stmt = gimple_build_assign_with_ops (BIT_IOR_EXPR,
2324 new_or_tmp_name,
2325 or_tmp_name, addr_tmp_name);
2326 gimple_seq_add_stmt (cond_expr_stmt_list, or_stmt);
2327 or_tmp_name = new_or_tmp_name;
2329 else
2330 or_tmp_name = addr_tmp_name;
2332 } /* end for i */
2334 mask_cst = build_int_cst (int_ptrsize_type, mask);
2336 /* create: and_tmp = or_tmp & mask */
2337 and_tmp_name = make_temp_ssa_name (int_ptrsize_type, NULL, "andmask");
2339 and_stmt = gimple_build_assign_with_ops (BIT_AND_EXPR, and_tmp_name,
2340 or_tmp_name, mask_cst);
2341 gimple_seq_add_stmt (cond_expr_stmt_list, and_stmt);
2343 /* Make and_tmp the left operand of the conditional test against zero.
2344 if and_tmp has a nonzero bit then some address is unaligned. */
2345 ptrsize_zero = build_int_cst (int_ptrsize_type, 0);
2346 part_cond_expr = fold_build2 (EQ_EXPR, boolean_type_node,
2347 and_tmp_name, ptrsize_zero);
2348 if (*cond_expr)
2349 *cond_expr = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
2350 *cond_expr, part_cond_expr);
2351 else
2352 *cond_expr = part_cond_expr;
2356 /* Function vect_vfa_segment_size.
2358 Create an expression that computes the size of segment
2359 that will be accessed for a data reference. The functions takes into
2360 account that realignment loads may access one more vector.
2362 Input:
2363 DR: The data reference.
2364 LENGTH_FACTOR: segment length to consider.
2366 Return an expression whose value is the size of segment which will be
2367 accessed by DR. */
2369 static tree
2370 vect_vfa_segment_size (struct data_reference *dr, tree length_factor)
2372 tree segment_length;
2374 if (integer_zerop (DR_STEP (dr)))
2375 segment_length = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr)));
2376 else
2377 segment_length = size_binop (MULT_EXPR,
2378 fold_convert (sizetype, DR_STEP (dr)),
2379 fold_convert (sizetype, length_factor));
2381 if (vect_supportable_dr_alignment (dr, false)
2382 == dr_explicit_realign_optimized)
2384 tree vector_size = TYPE_SIZE_UNIT
2385 (STMT_VINFO_VECTYPE (vinfo_for_stmt (DR_STMT (dr))));
2387 segment_length = size_binop (PLUS_EXPR, segment_length, vector_size);
2389 return segment_length;
2393 /* Function vect_create_cond_for_alias_checks.
2395 Create a conditional expression that represents the run-time checks for
2396 overlapping of address ranges represented by a list of data references
2397 relations passed as input.
2399 Input:
2400 COND_EXPR - input conditional expression. New conditions will be chained
2401 with logical AND operation.
2402 LOOP_VINFO - field LOOP_VINFO_MAY_ALIAS_STMTS contains the list of ddrs
2403 to be checked.
2405 Output:
2406 COND_EXPR - conditional expression.
2407 COND_EXPR_STMT_LIST - statements needed to construct the conditional
2408 expression.
2411 The returned value is the conditional expression to be used in the if
2412 statement that controls which version of the loop gets executed at runtime.
2415 static void
2416 vect_create_cond_for_alias_checks (loop_vec_info loop_vinfo,
2417 tree * cond_expr,
2418 gimple_seq * cond_expr_stmt_list)
2420 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2421 vec<ddr_p> may_alias_ddrs =
2422 LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo);
2423 int vect_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2424 tree scalar_loop_iters = LOOP_VINFO_NITERS (loop_vinfo);
2426 ddr_p ddr;
2427 unsigned int i;
2428 tree part_cond_expr, length_factor;
2430 /* Create expression
2431 ((store_ptr_0 + store_segment_length_0) <= load_ptr_0)
2432 || (load_ptr_0 + load_segment_length_0) <= store_ptr_0))
2436 ((store_ptr_n + store_segment_length_n) <= load_ptr_n)
2437 || (load_ptr_n + load_segment_length_n) <= store_ptr_n)) */
2439 if (may_alias_ddrs.is_empty ())
2440 return;
2442 FOR_EACH_VEC_ELT (may_alias_ddrs, i, ddr)
2444 struct data_reference *dr_a, *dr_b;
2445 gimple dr_group_first_a, dr_group_first_b;
2446 tree addr_base_a, addr_base_b;
2447 tree segment_length_a, segment_length_b;
2448 gimple stmt_a, stmt_b;
2449 tree seg_a_min, seg_a_max, seg_b_min, seg_b_max;
2451 dr_a = DDR_A (ddr);
2452 stmt_a = DR_STMT (DDR_A (ddr));
2453 dr_group_first_a = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_a));
2454 if (dr_group_first_a)
2456 stmt_a = dr_group_first_a;
2457 dr_a = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt_a));
2460 dr_b = DDR_B (ddr);
2461 stmt_b = DR_STMT (DDR_B (ddr));
2462 dr_group_first_b = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_b));
2463 if (dr_group_first_b)
2465 stmt_b = dr_group_first_b;
2466 dr_b = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt_b));
2469 addr_base_a =
2470 vect_create_addr_base_for_vector_ref (stmt_a, cond_expr_stmt_list,
2471 NULL_TREE, loop);
2472 addr_base_b =
2473 vect_create_addr_base_for_vector_ref (stmt_b, cond_expr_stmt_list,
2474 NULL_TREE, loop);
2476 if (!operand_equal_p (DR_STEP (dr_a), DR_STEP (dr_b), 0))
2477 length_factor = scalar_loop_iters;
2478 else
2479 length_factor = size_int (vect_factor);
2480 segment_length_a = vect_vfa_segment_size (dr_a, length_factor);
2481 segment_length_b = vect_vfa_segment_size (dr_b, length_factor);
2483 if (dump_enabled_p ())
2485 dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
2486 "create runtime check for data references ");
2487 dump_generic_expr (MSG_OPTIMIZED_LOCATIONS, TDF_SLIM, DR_REF (dr_a));
2488 dump_printf (MSG_OPTIMIZED_LOCATIONS, " and ");
2489 dump_generic_expr (MSG_OPTIMIZED_LOCATIONS, TDF_SLIM, DR_REF (dr_b));
2492 seg_a_min = addr_base_a;
2493 seg_a_max = fold_build_pointer_plus (addr_base_a, segment_length_a);
2494 if (tree_int_cst_compare (DR_STEP (dr_a), size_zero_node) < 0)
2495 seg_a_min = seg_a_max, seg_a_max = addr_base_a;
2497 seg_b_min = addr_base_b;
2498 seg_b_max = fold_build_pointer_plus (addr_base_b, segment_length_b);
2499 if (tree_int_cst_compare (DR_STEP (dr_b), size_zero_node) < 0)
2500 seg_b_min = seg_b_max, seg_b_max = addr_base_b;
2502 part_cond_expr =
2503 fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
2504 fold_build2 (LE_EXPR, boolean_type_node, seg_a_max, seg_b_min),
2505 fold_build2 (LE_EXPR, boolean_type_node, seg_b_max, seg_a_min));
2507 if (*cond_expr)
2508 *cond_expr = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
2509 *cond_expr, part_cond_expr);
2510 else
2511 *cond_expr = part_cond_expr;
2514 if (dump_enabled_p ())
2515 dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
2516 "created %u versioning for alias checks.\n",
2517 may_alias_ddrs.length ());
2521 /* Function vect_loop_versioning.
2523 If the loop has data references that may or may not be aligned or/and
2524 has data reference relations whose independence was not proven then
2525 two versions of the loop need to be generated, one which is vectorized
2526 and one which isn't. A test is then generated to control which of the
2527 loops is executed. The test checks for the alignment of all of the
2528 data references that may or may not be aligned. An additional
2529 sequence of runtime tests is generated for each pairs of DDRs whose
2530 independence was not proven. The vectorized version of loop is
2531 executed only if both alias and alignment tests are passed.
2533 The test generated to check which version of loop is executed
2534 is modified to also check for profitability as indicated by the
2535 cost model initially.
2537 The versioning precondition(s) are placed in *COND_EXPR and
2538 *COND_EXPR_STMT_LIST. */
2540 void
2541 vect_loop_versioning (loop_vec_info loop_vinfo,
2542 unsigned int th, bool check_profitability)
2544 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2545 basic_block condition_bb;
2546 gimple_stmt_iterator gsi, cond_exp_gsi;
2547 basic_block merge_bb;
2548 basic_block new_exit_bb;
2549 edge new_exit_e, e;
2550 gimple orig_phi, new_phi;
2551 tree cond_expr = NULL_TREE;
2552 gimple_seq cond_expr_stmt_list = NULL;
2553 tree arg;
2554 unsigned prob = 4 * REG_BR_PROB_BASE / 5;
2555 gimple_seq gimplify_stmt_list = NULL;
2556 tree scalar_loop_iters = LOOP_VINFO_NITERS (loop_vinfo);
2558 if (check_profitability)
2560 cond_expr = fold_build2 (GT_EXPR, boolean_type_node, scalar_loop_iters,
2561 build_int_cst (TREE_TYPE (scalar_loop_iters), th));
2562 cond_expr = force_gimple_operand_1 (cond_expr, &cond_expr_stmt_list,
2563 is_gimple_condexpr, NULL_TREE);
2566 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
2567 vect_create_cond_for_align_checks (loop_vinfo, &cond_expr,
2568 &cond_expr_stmt_list);
2570 if (LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
2571 vect_create_cond_for_alias_checks (loop_vinfo, &cond_expr,
2572 &cond_expr_stmt_list);
2574 cond_expr = force_gimple_operand_1 (cond_expr, &gimplify_stmt_list,
2575 is_gimple_condexpr, NULL_TREE);
2576 gimple_seq_add_seq (&cond_expr_stmt_list, gimplify_stmt_list);
2578 initialize_original_copy_tables ();
2579 loop_version (loop, cond_expr, &condition_bb,
2580 prob, prob, REG_BR_PROB_BASE - prob, true);
2581 free_original_copy_tables();
2583 /* Loop versioning violates an assumption we try to maintain during
2584 vectorization - that the loop exit block has a single predecessor.
2585 After versioning, the exit block of both loop versions is the same
2586 basic block (i.e. it has two predecessors). Just in order to simplify
2587 following transformations in the vectorizer, we fix this situation
2588 here by adding a new (empty) block on the exit-edge of the loop,
2589 with the proper loop-exit phis to maintain loop-closed-form. */
2591 merge_bb = single_exit (loop)->dest;
2592 gcc_assert (EDGE_COUNT (merge_bb->preds) == 2);
2593 new_exit_bb = split_edge (single_exit (loop));
2594 new_exit_e = single_exit (loop);
2595 e = EDGE_SUCC (new_exit_bb, 0);
2597 for (gsi = gsi_start_phis (merge_bb); !gsi_end_p (gsi); gsi_next (&gsi))
2599 tree new_res;
2600 orig_phi = gsi_stmt (gsi);
2601 new_res = copy_ssa_name (PHI_RESULT (orig_phi), NULL);
2602 new_phi = create_phi_node (new_res, new_exit_bb);
2603 arg = PHI_ARG_DEF_FROM_EDGE (orig_phi, e);
2604 add_phi_arg (new_phi, arg, new_exit_e,
2605 gimple_phi_arg_location_from_edge (orig_phi, e));
2606 adjust_phi_and_debug_stmts (orig_phi, e, PHI_RESULT (new_phi));
2609 /* End loop-exit-fixes after versioning. */
2611 update_ssa (TODO_update_ssa);
2612 if (cond_expr_stmt_list)
2614 cond_exp_gsi = gsi_last_bb (condition_bb);
2615 gsi_insert_seq_before (&cond_exp_gsi, cond_expr_stmt_list,
2616 GSI_SAME_STMT);