/* Vectorizer Specific Loop Manipulations
   Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2012
   Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "ggc.h"
#include "tree.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
#include "tree-flow.h"
#include "tree-pass.h"
#include "cfgloop.h"
#include "diagnostic-core.h"
#include "tree-scalar-evolution.h"
#include "tree-vectorizer.h"
#include "langhooks.h"

/*************************************************************************
  Simple Loop Peeling Utilities

  Utilities to support loop peeling for vectorization purposes.
 *************************************************************************/

/* Renames the use *OP_P.  */

static void
rename_use_op (use_operand_p op_p)
{
  tree new_name;

  if (TREE_CODE (USE_FROM_PTR (op_p)) != SSA_NAME)
    return;

  new_name = get_current_def (USE_FROM_PTR (op_p));

  /* Something defined outside of the loop.  */
  if (!new_name)
    return;

  /* An ordinary ssa name defined in the loop.  */

  SET_USE (op_p, new_name);
}

/* Renames the variables in basic block BB.  */

void
rename_variables_in_bb (basic_block bb)
{
  gimple_stmt_iterator gsi;
  gimple stmt;
  use_operand_p use_p;
  ssa_op_iter iter;
  edge e;
  edge_iterator ei;
  struct loop *loop = bb->loop_father;

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      stmt = gsi_stmt (gsi);
      FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_ALL_USES)
        rename_use_op (use_p);
    }

  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      if (!flow_bb_inside_loop_p (loop, e->dest))
        continue;
      for (gsi = gsi_start_phis (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
        rename_use_op (PHI_ARG_DEF_PTR_FROM_EDGE (gsi_stmt (gsi), e));
    }
}

/* Renames the variables in the newly generated LOOP.  */

void
rename_variables_in_loop (struct loop *loop)
{
  unsigned i;
  basic_block *bbs;

  bbs = get_loop_body (loop);

  for (i = 0; i < loop->num_nodes; i++)
    rename_variables_in_bb (bbs[i]);

  free (bbs);
}

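/* Illustrative use (a sketch; the actual sequence appears in
   slpeel_tree_peel_loop_to_edge below):

       new_loop = slpeel_tree_duplicate_loop_to_edge_cfg (loop, e);
       slpeel_update_phis_for_duplicate_loop (loop, new_loop, e == exit_e);
       rename_variables_in_loop (new_loop);

   The duplicated loop body keeps referring to the original SSA names
   until rename_variables_in_bb rewrites each use to its current
   definition.  */
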
typedef struct
{
  tree from, to;
  basic_block bb;
} adjust_info;

DEF_VEC_O(adjust_info);
DEF_VEC_ALLOC_O_STACK(adjust_info);
#define VEC_adjust_info_stack_alloc(alloc) VEC_stack_alloc (adjust_info, alloc)

/* A stack of values to be adjusted in debug stmts.  We have to
   process them LIFO, so that the closest substitution applies.  If we
   processed them FIFO, without the stack, we might substitute uses
   with a PHI DEF that would soon become non-dominant, and when we got
   to the suitable one, it wouldn't have anything to substitute any
   more.  */
static VEC(adjust_info, stack) *adjust_vec;

/* Adjust any debug stmts that referenced AI->from values to use the
   loop-closed AI->to, if the references are dominated by AI->bb and
   not by the definition of AI->from.  */

static void
adjust_debug_stmts_now (adjust_info *ai)
{
  basic_block bbphi = ai->bb;
  tree orig_def = ai->from;
  tree new_def = ai->to;
  imm_use_iterator imm_iter;
  gimple stmt;
  basic_block bbdef = gimple_bb (SSA_NAME_DEF_STMT (orig_def));

  gcc_assert (dom_info_available_p (CDI_DOMINATORS));

  /* Adjust any debug stmts that held onto non-loop-closed
     references.  */
  FOR_EACH_IMM_USE_STMT (stmt, imm_iter, orig_def)
    {
      use_operand_p use_p;
      basic_block bbuse;

      if (!is_gimple_debug (stmt))
        continue;

      gcc_assert (gimple_debug_bind_p (stmt));

      bbuse = gimple_bb (stmt);

      if ((bbuse == bbphi
           || dominated_by_p (CDI_DOMINATORS, bbuse, bbphi))
          && !(bbuse == bbdef
               || dominated_by_p (CDI_DOMINATORS, bbuse, bbdef)))
        {
          if (new_def)
            FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
              SET_USE (use_p, new_def);
          else
            {
              gimple_debug_bind_reset_value (stmt);
              update_stmt (stmt);
            }
        }
    }
}

/* Adjust debug stmts as scheduled before.  */

static void
adjust_vec_debug_stmts (void)
{
  if (!MAY_HAVE_DEBUG_STMTS)
    return;

  gcc_assert (adjust_vec);

  while (!VEC_empty (adjust_info, adjust_vec))
    {
      adjust_debug_stmts_now (&VEC_last (adjust_info, adjust_vec));
      VEC_pop (adjust_info, adjust_vec);
    }

  VEC_free (adjust_info, stack, adjust_vec);
}

/* Adjust any debug stmts that referenced FROM values to use the
   loop-closed TO, if the references are dominated by BB and not by
   the definition of FROM.  If adjust_vec is non-NULL, adjustments
   will be postponed until adjust_vec_debug_stmts is called.  */

static void
adjust_debug_stmts (tree from, tree to, basic_block bb)
{
  adjust_info ai;

  if (MAY_HAVE_DEBUG_STMTS
      && TREE_CODE (from) == SSA_NAME
      && ! virtual_operand_p (from))
    {
      ai.from = from;
      ai.to = to;
      ai.bb = bb;

      if (adjust_vec)
        VEC_safe_push (adjust_info, stack, adjust_vec, &ai);
      else
        adjust_debug_stmts_now (&ai);
    }
}

/* Change E's phi arg in UPDATE_PHI to NEW_DEF, and record information
   to adjust any debug stmts that referenced the old phi arg,
   presumably non-loop-closed references left over from other
   transformations.  */

static void
adjust_phi_and_debug_stmts (gimple update_phi, edge e, tree new_def)
{
  tree orig_def = PHI_ARG_DEF_FROM_EDGE (update_phi, e);

  SET_PHI_ARG_DEF (update_phi, e->dest_idx, new_def);

  if (MAY_HAVE_DEBUG_STMTS)
    adjust_debug_stmts (orig_def, PHI_RESULT (update_phi),
                        gimple_bb (update_phi));
}

/* Update the PHI nodes of NEW_LOOP.

   NEW_LOOP is a duplicate of ORIG_LOOP.
   AFTER indicates whether NEW_LOOP executes before or after ORIG_LOOP:
   AFTER is true if NEW_LOOP executes after ORIG_LOOP, and false if it
   executes before it.  */

static void
slpeel_update_phis_for_duplicate_loop (struct loop *orig_loop,
                                       struct loop *new_loop, bool after)
{
  tree new_ssa_name;
  gimple phi_new, phi_orig;
  tree def;
  edge orig_loop_latch = loop_latch_edge (orig_loop);
  edge orig_entry_e = loop_preheader_edge (orig_loop);
  edge new_loop_exit_e = single_exit (new_loop);
  edge new_loop_entry_e = loop_preheader_edge (new_loop);
  edge entry_arg_e = (after ? orig_loop_latch : orig_entry_e);
  gimple_stmt_iterator gsi_new, gsi_orig;

  /*
     step 1. For each loop-header-phi:
             Add the first phi argument for the phi in NEW_LOOP
             (the one associated with the entry of NEW_LOOP)

     step 2. For each loop-header-phi:
             Add the second phi argument for the phi in NEW_LOOP
             (the one associated with the latch of NEW_LOOP)

     step 3. Update the phis in the successor block of NEW_LOOP.

        case 1: NEW_LOOP was placed before ORIG_LOOP:
                The successor block of NEW_LOOP is the header of ORIG_LOOP.
                Updating the phis in the successor block can therefore be done
                along with the scanning of the loop header phis, because the
                header blocks of ORIG_LOOP and NEW_LOOP have exactly the same
                phi nodes, organized in the same order.

        case 2: NEW_LOOP was placed after ORIG_LOOP:
                The successor block of NEW_LOOP is the original exit block of
                ORIG_LOOP - the phis to be updated are the loop-closed-ssa phis.
                We postpone updating these phis to a later stage (when
                loop guards are added).
   */

  /* Scan the phis in the headers of the old and new loops
     (they are organized in exactly the same order).  */

  for (gsi_new = gsi_start_phis (new_loop->header),
       gsi_orig = gsi_start_phis (orig_loop->header);
       !gsi_end_p (gsi_new) && !gsi_end_p (gsi_orig);
       gsi_next (&gsi_new), gsi_next (&gsi_orig))
    {
      source_location locus;
      phi_new = gsi_stmt (gsi_new);
      phi_orig = gsi_stmt (gsi_orig);

      /* step 1.  */
      def = PHI_ARG_DEF_FROM_EDGE (phi_orig, entry_arg_e);
      locus = gimple_phi_arg_location_from_edge (phi_orig, entry_arg_e);
      add_phi_arg (phi_new, def, new_loop_entry_e, locus);

      /* step 2.  */
      def = PHI_ARG_DEF_FROM_EDGE (phi_orig, orig_loop_latch);
      locus = gimple_phi_arg_location_from_edge (phi_orig, orig_loop_latch);
      if (TREE_CODE (def) != SSA_NAME)
        continue;

      new_ssa_name = get_current_def (def);
      if (!new_ssa_name)
        {
          /* This only happens if there are no definitions inside the
             loop.  Use the phi_result in this case.  */
          new_ssa_name = PHI_RESULT (phi_new);
        }

      /* An ordinary ssa name defined in the loop.  */
      add_phi_arg (phi_new, new_ssa_name, loop_latch_edge (new_loop), locus);

      /* Drop any debug references outside the loop, if they would
         become ill-formed SSA.  */
      adjust_debug_stmts (def, NULL, single_exit (orig_loop)->dest);

      /* step 3 (case 1).  */
      if (!after)
        {
          gcc_assert (new_loop_exit_e == orig_entry_e);
          adjust_phi_and_debug_stmts (phi_orig, new_loop_exit_e, new_ssa_name);
        }
    }
}

/* Update PHI nodes for a guard of the LOOP.

   Input:
   - LOOP, GUARD_EDGE: LOOP is a loop for which we added guard code that
        controls whether LOOP is to be executed.  GUARD_EDGE is the edge that
        originates from the guard-bb, skips LOOP and reaches the (unique) exit
        bb of LOOP.  This loop-exit-bb is an empty bb with one successor.
        We denote this bb NEW_MERGE_BB because before the guard code was added
        it had a single predecessor (the LOOP header), and now it became a merge
        point of two paths - the path that ends with the LOOP exit-edge, and
        the path that ends with GUARD_EDGE.
   - NEW_EXIT_BB: New basic block that is added by this function between LOOP
        and NEW_MERGE_BB.  It is used to place loop-closed-ssa-form exit-phis.

   ===> The CFG before the guard-code was added:
        LOOP_header_bb:
          loop_body
          if (exit_loop) goto update_bb
          else           goto LOOP_header_bb
        update_bb:

   ==> The CFG after the guard-code was added:
        guard_bb:
          if (LOOP_guard_condition) goto new_merge_bb
          else                      goto LOOP_header_bb
        LOOP_header_bb:
          loop_body
          if (exit_loop_condition) goto new_merge_bb
          else                     goto LOOP_header_bb
        new_merge_bb:
          goto update_bb
        update_bb:

   ==> The CFG after this function:
        guard_bb:
          if (LOOP_guard_condition) goto new_merge_bb
          else                      goto LOOP_header_bb
        LOOP_header_bb:
          loop_body
          if (exit_loop_condition) goto new_exit_bb
          else                     goto LOOP_header_bb
        new_exit_bb:
        new_merge_bb:
          goto update_bb
        update_bb:

   This function:
   1. creates and updates the relevant phi nodes to account for the new
      incoming edge (GUARD_EDGE) into NEW_MERGE_BB.  This involves:
      1.1. Create phi nodes at NEW_MERGE_BB.
      1.2. Update the phi nodes at the successor of NEW_MERGE_BB (denoted
           UPDATE_BB).  UPDATE_BB was the exit-bb of LOOP before NEW_MERGE_BB
           was added.
   2. preserves loop-closed-ssa-form by creating the required phi nodes
      at the exit of LOOP (i.e., in NEW_EXIT_BB).

   There are two flavors to this function:

   slpeel_update_phi_nodes_for_guard1:
     Here the guard controls whether we enter or skip LOOP, where LOOP is a
     prolog_loop (loop1 below), and the new phis created in NEW_MERGE_BB are
     for variables that have phis in the loop header.

   slpeel_update_phi_nodes_for_guard2:
     Here the guard controls whether we enter or skip LOOP, where LOOP is an
     epilog_loop (loop2 below), and the new phis created in NEW_MERGE_BB are
     for variables that have phis in the loop exit.

   I.E., the overall structure is:

        loop1_preheader_bb:
                guard1 (goto loop1/merge1_bb)
        loop1
        loop1_exit_bb:
                guard2 (goto merge1_bb/merge2_bb)
        merge1_bb
        loop2
        loop2_exit_bb
        merge2_bb
        next_bb

   slpeel_update_phi_nodes_for_guard1 takes care of creating phis in
   loop1_exit_bb and merge1_bb.  These are entry phis (phis for the vars
   that have phis in loop1->header).

   slpeel_update_phi_nodes_for_guard2 takes care of creating phis in
   loop2_exit_bb and merge2_bb.  These are exit phis (phis for the vars
   that have phis in next_bb).  It also adds some of these phis to
   loop1_exit_bb.

   slpeel_update_phi_nodes_for_guard1 is always called before
   slpeel_update_phi_nodes_for_guard2.  They are both needed in order
   to create correct data-flow and loop-closed-ssa-form.

   Generally slpeel_update_phi_nodes_for_guard1 creates phis for variables
   that change between iterations of a loop (and therefore have a phi-node
   at the loop entry), whereas slpeel_update_phi_nodes_for_guard2 creates
   phis for variables that are used out of the loop (and therefore have
   loop-closed exit phis).  Some variables may be both updated between
   iterations and used after the loop.  This is why in loop1_exit_bb we
   may need both entry_phis (created by slpeel_update_phi_nodes_for_guard1)
   and exit phis (created by slpeel_update_phi_nodes_for_guard2).

   - IS_NEW_LOOP: if IS_NEW_LOOP is true, then LOOP is a newly created copy of
     an original loop.  i.e., we have:

           orig_loop
           guard_bb (goto LOOP/new_merge)
           new_loop <-- LOOP
           new_exit
           new_merge
           next_bb

     If IS_NEW_LOOP is false, then LOOP is an original loop, in which case we
     have:

           new_loop
           guard_bb (goto LOOP/new_merge)
           orig_loop <-- LOOP
           new_exit
           new_merge
           next_bb

     The SSA names defined in the original loop have a current
     reaching definition that records the corresponding new
     ssa-name used in the new duplicated loop copy.
 */

/* Function slpeel_update_phi_nodes_for_guard1

   Input:
   - GUARD_EDGE, LOOP, IS_NEW_LOOP, NEW_EXIT_BB - as explained above.
   - DEFS - a bitmap of ssa names to mark new names for which we recorded
            information.

   In the context of the overall structure, we have:

        loop1_preheader_bb:
                guard1 (goto loop1/merge1_bb)
LOOP->  loop1
        loop1_exit_bb:
                guard2 (goto merge1_bb/merge2_bb)
        merge1_bb
        loop2
        loop2_exit_bb
        merge2_bb
        next_bb

   For each name updated between loop iterations (i.e - for each name that has
   an entry (loop-header) phi in LOOP) we create a new phi in:
   1. merge1_bb (to account for the edge from guard1)
   2. loop1_exit_bb (an exit-phi to keep LOOP in loop-closed form)
*/

static void
slpeel_update_phi_nodes_for_guard1 (edge guard_edge, struct loop *loop,
                                    bool is_new_loop, basic_block *new_exit_bb)
{
  gimple orig_phi, new_phi;
  gimple update_phi, update_phi2;
  tree guard_arg, loop_arg;
  basic_block new_merge_bb = guard_edge->dest;
  edge e = EDGE_SUCC (new_merge_bb, 0);
  basic_block update_bb = e->dest;
  basic_block orig_bb = loop->header;
  edge new_exit_e;
  tree current_new_name;
  gimple_stmt_iterator gsi_orig, gsi_update;

  /* Create new bb between loop and new_merge_bb.  */
  *new_exit_bb = split_edge (single_exit (loop));

  new_exit_e = EDGE_SUCC (*new_exit_bb, 0);

  for (gsi_orig = gsi_start_phis (orig_bb),
       gsi_update = gsi_start_phis (update_bb);
       !gsi_end_p (gsi_orig) && !gsi_end_p (gsi_update);
       gsi_next (&gsi_orig), gsi_next (&gsi_update))
    {
      source_location loop_locus, guard_locus;
      tree new_res;
      orig_phi = gsi_stmt (gsi_orig);
      update_phi = gsi_stmt (gsi_update);

      /** 1. Handle new-merge-point phis  **/

      /* 1.1. Generate new phi node in NEW_MERGE_BB:  */
      new_res = copy_ssa_name (PHI_RESULT (orig_phi), NULL);
      new_phi = create_phi_node (new_res, new_merge_bb);

      /* 1.2. NEW_MERGE_BB has two incoming edges: GUARD_EDGE and the exit-edge
            of LOOP.  Set the two phi args in NEW_PHI for these edges:  */
      loop_arg = PHI_ARG_DEF_FROM_EDGE (orig_phi, EDGE_SUCC (loop->latch, 0));
      loop_locus = gimple_phi_arg_location_from_edge (orig_phi,
                                                      EDGE_SUCC (loop->latch,
                                                                 0));
      guard_arg = PHI_ARG_DEF_FROM_EDGE (orig_phi, loop_preheader_edge (loop));
      guard_locus
        = gimple_phi_arg_location_from_edge (orig_phi,
                                             loop_preheader_edge (loop));

      add_phi_arg (new_phi, loop_arg, new_exit_e, loop_locus);
      add_phi_arg (new_phi, guard_arg, guard_edge, guard_locus);

      /* 1.3. Update phi in successor block.  */
      gcc_assert (PHI_ARG_DEF_FROM_EDGE (update_phi, e) == loop_arg
                  || PHI_ARG_DEF_FROM_EDGE (update_phi, e) == guard_arg);
      adjust_phi_and_debug_stmts (update_phi, e, PHI_RESULT (new_phi));
      update_phi2 = new_phi;

      /** 2. Handle loop-closed-ssa-form phis  **/

      if (virtual_operand_p (PHI_RESULT (orig_phi)))
        continue;

      /* 2.1. Generate new phi node in NEW_EXIT_BB:  */
      new_res = copy_ssa_name (PHI_RESULT (orig_phi), NULL);
      new_phi = create_phi_node (new_res, *new_exit_bb);

      /* 2.2. NEW_EXIT_BB has one incoming edge: the exit-edge of the loop.  */
      add_phi_arg (new_phi, loop_arg, single_exit (loop), loop_locus);

      /* 2.3. Update phi in successor of NEW_EXIT_BB:  */
      gcc_assert (PHI_ARG_DEF_FROM_EDGE (update_phi2, new_exit_e) == loop_arg);
      adjust_phi_and_debug_stmts (update_phi2, new_exit_e,
                                  PHI_RESULT (new_phi));

      /* 2.4. Record the newly created name with set_current_def.
         We want to find a name such that
                name = get_current_def (orig_loop_name)
         and to set its current definition as follows:
                set_current_def (name, new_phi_name)

         If LOOP is a new loop then loop_arg is already the name we're
         looking for.  If LOOP is the original loop, then loop_arg is
         the orig_loop_name and the relevant name is recorded in its
         current reaching definition.  */
      if (is_new_loop)
        current_new_name = loop_arg;
      else
        {
          current_new_name = get_current_def (loop_arg);
          /* current_def is not available only if the variable does not
             change inside the loop, in which case we also don't care
             about recording a current_def for it because we won't be
             trying to create loop-exit-phis for it.  */
          if (!current_new_name)
            continue;
        }
      gcc_assert (get_current_def (current_new_name) == NULL_TREE);

      set_current_def (current_new_name, PHI_RESULT (new_phi));
    }
}

/* Function slpeel_update_phi_nodes_for_guard2

   Input:
   - GUARD_EDGE, LOOP, IS_NEW_LOOP, NEW_EXIT_BB - as explained above.

   In the context of the overall structure, we have:

        loop1_preheader_bb:
                guard1 (goto loop1/merge1_bb)
        loop1
        loop1_exit_bb:
                guard2 (goto merge1_bb/merge2_bb)
        merge1_bb
LOOP->  loop2
        loop2_exit_bb
        merge2_bb
        next_bb

   For each name used outside the loop (i.e - for each name that has an exit
   phi in next_bb) we create a new phi in:
   1. merge2_bb (to account for the edge from guard_bb)
   2. loop2_exit_bb (an exit-phi to keep LOOP in loop-closed form)
   3. guard2 bb (an exit phi to keep the preceding loop in loop-closed form),
      if needed (if it wasn't handled by slpeel_update_phi_nodes_for_guard1).
*/

static void
slpeel_update_phi_nodes_for_guard2 (edge guard_edge, struct loop *loop,
                                    bool is_new_loop, basic_block *new_exit_bb)
{
  gimple orig_phi, new_phi;
  gimple update_phi, update_phi2;
  tree guard_arg, loop_arg;
  basic_block new_merge_bb = guard_edge->dest;
  edge e = EDGE_SUCC (new_merge_bb, 0);
  basic_block update_bb = e->dest;
  edge new_exit_e;
  tree orig_def, orig_def_new_name;
  tree new_name, new_name2;
  tree arg;
  gimple_stmt_iterator gsi;

  /* Create new bb between loop and new_merge_bb.  */
  *new_exit_bb = split_edge (single_exit (loop));

  new_exit_e = EDGE_SUCC (*new_exit_bb, 0);

  for (gsi = gsi_start_phis (update_bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      tree new_res;
      update_phi = gsi_stmt (gsi);
      orig_phi = update_phi;
      orig_def = PHI_ARG_DEF_FROM_EDGE (orig_phi, e);
      /* This loop-closed-phi actually doesn't represent a use
         out of the loop - the phi arg is a constant.  */
      if (TREE_CODE (orig_def) != SSA_NAME)
        continue;
      orig_def_new_name = get_current_def (orig_def);
      arg = NULL_TREE;

      /** 1. Handle new-merge-point phis  **/

      /* 1.1. Generate new phi node in NEW_MERGE_BB:  */
      new_res = copy_ssa_name (PHI_RESULT (orig_phi), NULL);
      new_phi = create_phi_node (new_res, new_merge_bb);

      /* 1.2. NEW_MERGE_BB has two incoming edges: GUARD_EDGE and the exit-edge
            of LOOP.  Set the two PHI args in NEW_PHI for these edges:  */
      new_name = orig_def;
      new_name2 = NULL_TREE;
      if (orig_def_new_name)
        {
          new_name = orig_def_new_name;
          /* Some variables have both loop-entry-phis and loop-exit-phis.
             Such variables were given yet newer names by phis placed in
             guard_bb by slpeel_update_phi_nodes_for_guard1.  I.e:
             new_name2 = get_current_def (get_current_def (orig_name)).  */
          new_name2 = get_current_def (new_name);
        }

      if (is_new_loop)
        {
          guard_arg = orig_def;
          loop_arg = new_name;
        }
      else
        {
          guard_arg = new_name;
          loop_arg = orig_def;
        }
      if (new_name2)
        guard_arg = new_name2;

      add_phi_arg (new_phi, loop_arg, new_exit_e, UNKNOWN_LOCATION);
      add_phi_arg (new_phi, guard_arg, guard_edge, UNKNOWN_LOCATION);

      /* 1.3. Update phi in successor block.  */
      gcc_assert (PHI_ARG_DEF_FROM_EDGE (update_phi, e) == orig_def);
      adjust_phi_and_debug_stmts (update_phi, e, PHI_RESULT (new_phi));
      update_phi2 = new_phi;

      /** 2. Handle loop-closed-ssa-form phis  **/

      /* 2.1. Generate new phi node in NEW_EXIT_BB:  */
      new_res = copy_ssa_name (PHI_RESULT (orig_phi), NULL);
      new_phi = create_phi_node (new_res, *new_exit_bb);

      /* 2.2. NEW_EXIT_BB has one incoming edge: the exit-edge of the loop.  */
      add_phi_arg (new_phi, loop_arg, single_exit (loop), UNKNOWN_LOCATION);

      /* 2.3. Update phi in successor of NEW_EXIT_BB:  */
      gcc_assert (PHI_ARG_DEF_FROM_EDGE (update_phi2, new_exit_e) == loop_arg);
      adjust_phi_and_debug_stmts (update_phi2, new_exit_e,
                                  PHI_RESULT (new_phi));

      /** 3. Handle loop-closed-ssa-form phis for first loop  **/

      /* 3.1. Find the relevant names that need an exit-phi in
         GUARD_BB, i.e. names for which
         slpeel_update_phi_nodes_for_guard1 had not already created a
         phi node.  This is the case for names that are used outside
         the loop (and therefore need an exit phi) but are not updated
         across loop iterations (and therefore don't have a
         loop-header-phi).

         slpeel_update_phi_nodes_for_guard1 is responsible for
         creating loop-exit phis in GUARD_BB for names that have a
         loop-header-phi.  When such a phi is created we also record
         the new name in its current definition.  If this new name
         exists, then guard_arg was set to this new name (see 1.2
         above).  Therefore, if guard_arg is not this new name, this
         is an indication that an exit-phi in GUARD_BB was not yet
         created, so we take care of it here.  */
      if (guard_arg == new_name2)
        continue;
      arg = guard_arg;

      /* 3.2. Generate new phi node in GUARD_BB:  */
      new_res = copy_ssa_name (PHI_RESULT (orig_phi), NULL);
      new_phi = create_phi_node (new_res, guard_edge->src);

      /* 3.3. GUARD_BB has one incoming edge:  */
      gcc_assert (EDGE_COUNT (guard_edge->src->preds) == 1);
      add_phi_arg (new_phi, arg, EDGE_PRED (guard_edge->src, 0),
                   UNKNOWN_LOCATION);

      /* 3.4. Update phi in successor of GUARD_BB:  */
      gcc_assert (PHI_ARG_DEF_FROM_EDGE (update_phi2, guard_edge)
                  == guard_arg);
      adjust_phi_and_debug_stmts (update_phi2, guard_edge,
                                  PHI_RESULT (new_phi));
    }
}

/* Make the LOOP iterate NITERS times.  This is done by adding a new IV
   that starts at zero, increases by one and its limit is NITERS.

   Assumption: the exit-condition of LOOP is the last stmt in the loop.  */

void
slpeel_make_loop_iterate_ntimes (struct loop *loop, tree niters)
{
  tree indx_before_incr, indx_after_incr;
  gimple cond_stmt;
  gimple orig_cond;
  edge exit_edge = single_exit (loop);
  gimple_stmt_iterator loop_cond_gsi;
  gimple_stmt_iterator incr_gsi;
  bool insert_after;
  tree init = build_int_cst (TREE_TYPE (niters), 0);
  tree step = build_int_cst (TREE_TYPE (niters), 1);
  LOC loop_loc;
  enum tree_code code;

  orig_cond = get_loop_exit_condition (loop);
  gcc_assert (orig_cond);
  loop_cond_gsi = gsi_for_stmt (orig_cond);

  standard_iv_increment_position (loop, &incr_gsi, &insert_after);
  create_iv (init, step, NULL_TREE, loop,
             &incr_gsi, insert_after, &indx_before_incr, &indx_after_incr);

  indx_after_incr = force_gimple_operand_gsi (&loop_cond_gsi, indx_after_incr,
                                              true, NULL_TREE, true,
                                              GSI_SAME_STMT);
  niters = force_gimple_operand_gsi (&loop_cond_gsi, niters, true, NULL_TREE,
                                     true, GSI_SAME_STMT);

  code = (exit_edge->flags & EDGE_TRUE_VALUE) ? GE_EXPR : LT_EXPR;
  cond_stmt = gimple_build_cond (code, indx_after_incr, niters, NULL_TREE,
                                 NULL_TREE);

  gsi_insert_before (&loop_cond_gsi, cond_stmt, GSI_SAME_STMT);

  /* Remove old loop exit test:  */
  gsi_remove (&loop_cond_gsi, true);

  loop_loc = find_loop_location (loop);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      if (loop_loc != UNKNOWN_LOC)
        fprintf (dump_file, "\nloop at %s:%d: ",
                 LOC_FILE (loop_loc), LOC_LINE (loop_loc));
      print_gimple_stmt (dump_file, cond_stmt, 0, TDF_SLIM);
    }

  loop->nb_iterations = niters;
}

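/* For illustration only: after slpeel_make_loop_iterate_ntimes the loop
   ends with a freshly created IV and an exit test of roughly this shape
   (SSA names are made up):

       indx_after_incr_5 = indx_before_incr_4 + 1;
       if (indx_after_incr_5 >= niters_3) goto exit_bb; else goto latch_bb;

   where the comparison is GE_EXPR when the exit edge is the true edge of
   the original COND_EXPR and LT_EXPR otherwise, and the original exit
   condition has been removed.  */
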
/* Given LOOP this function generates a new copy of it and puts it
   on E which is either the entry or exit of LOOP.  */

struct loop *
slpeel_tree_duplicate_loop_to_edge_cfg (struct loop *loop, edge e)
{
  struct loop *new_loop;
  basic_block *new_bbs, *bbs;
  bool at_exit;
  bool was_imm_dom;
  basic_block exit_dest;
  gimple phi;
  tree phi_arg;
  edge exit, new_exit;
  gimple_stmt_iterator gsi;

  at_exit = (e == single_exit (loop));
  if (!at_exit && e != loop_preheader_edge (loop))
    return NULL;

  bbs = get_loop_body (loop);

  /* Check whether duplication is possible.  */
  if (!can_copy_bbs_p (bbs, loop->num_nodes))
    {
      free (bbs);
      return NULL;
    }

  /* Generate new loop structure.  */
  new_loop = duplicate_loop (loop, loop_outer (loop));
  if (!new_loop)
    {
      free (bbs);
      return NULL;
    }

  exit_dest = single_exit (loop)->dest;
  was_imm_dom = (get_immediate_dominator (CDI_DOMINATORS,
                                          exit_dest) == loop->header ?
                 true : false);

  new_bbs = XNEWVEC (basic_block, loop->num_nodes);

  exit = single_exit (loop);
  copy_bbs (bbs, loop->num_nodes, new_bbs,
            &exit, 1, &new_exit, NULL,
            e->src);

  /* Duplicating phi args at exit bbs as coming
     also from exit of duplicated loop.  */
  for (gsi = gsi_start_phis (exit_dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      phi = gsi_stmt (gsi);
      phi_arg = PHI_ARG_DEF_FROM_EDGE (phi, single_exit (loop));
      if (phi_arg)
        {
          edge new_loop_exit_edge;
          source_location locus;

          locus = gimple_phi_arg_location_from_edge (phi, single_exit (loop));
          if (EDGE_SUCC (new_loop->header, 0)->dest == new_loop->latch)
            new_loop_exit_edge = EDGE_SUCC (new_loop->header, 1);
          else
            new_loop_exit_edge = EDGE_SUCC (new_loop->header, 0);

          add_phi_arg (phi, phi_arg, new_loop_exit_edge, locus);
        }
    }

  if (at_exit) /* Add the loop copy at exit.  */
    {
      redirect_edge_and_branch_force (e, new_loop->header);
      PENDING_STMT (e) = NULL;
      set_immediate_dominator (CDI_DOMINATORS, new_loop->header, e->src);
      if (was_imm_dom)
        set_immediate_dominator (CDI_DOMINATORS, exit_dest, new_loop->header);
    }
  else /* Add the copy at entry.  */
    {
      edge new_exit_e;
      edge entry_e = loop_preheader_edge (loop);
      basic_block preheader = entry_e->src;

      if (!flow_bb_inside_loop_p (new_loop,
                                  EDGE_SUCC (new_loop->header, 0)->dest))
        new_exit_e = EDGE_SUCC (new_loop->header, 0);
      else
        new_exit_e = EDGE_SUCC (new_loop->header, 1);

      redirect_edge_and_branch_force (new_exit_e, loop->header);
      PENDING_STMT (new_exit_e) = NULL;
      set_immediate_dominator (CDI_DOMINATORS, loop->header,
                               new_exit_e->src);

      /* We have to add phi args to the loop->header here as coming
         from new_exit_e edge.  */
      for (gsi = gsi_start_phis (loop->header);
           !gsi_end_p (gsi);
           gsi_next (&gsi))
        {
          phi = gsi_stmt (gsi);
          phi_arg = PHI_ARG_DEF_FROM_EDGE (phi, entry_e);
          if (phi_arg)
            add_phi_arg (phi, phi_arg, new_exit_e,
                         gimple_phi_arg_location_from_edge (phi, entry_e));
        }

      redirect_edge_and_branch_force (entry_e, new_loop->header);
      PENDING_STMT (entry_e) = NULL;
      set_immediate_dominator (CDI_DOMINATORS, new_loop->header, preheader);
    }

  free (new_bbs);
  free (bbs);

  return new_loop;
}

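/* Illustrative use (a sketch; see slpeel_tree_peel_loop_to_edge below):
   passing loop_preheader_edge (loop) as E places the copy before LOOP
   (a prologue copy), while passing single_exit (loop) places it after
   LOOP (an epilogue copy).  A NULL result means the duplication was
   refused, e.g. because can_copy_bbs_p failed.  */
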
/* Given the condition statement COND, put it as the last statement
   of GUARD_BB; EXIT_BB is the basic block to skip the loop;
   Assumes that this is the single exit of the guarded loop.
   Returns the skip edge, inserts new stmts on the COND_EXPR_STMT_LIST.  */

static edge
slpeel_add_loop_guard (basic_block guard_bb, tree cond,
                       gimple_seq cond_expr_stmt_list,
                       basic_block exit_bb, basic_block dom_bb)
{
  gimple_stmt_iterator gsi;
  edge new_e, enter_e;
  gimple cond_stmt;
  gimple_seq gimplify_stmt_list = NULL;

  enter_e = EDGE_SUCC (guard_bb, 0);
  enter_e->flags &= ~EDGE_FALLTHRU;
  enter_e->flags |= EDGE_FALSE_VALUE;
  gsi = gsi_last_bb (guard_bb);

  cond = force_gimple_operand_1 (cond, &gimplify_stmt_list, is_gimple_condexpr,
                                 NULL_TREE);
  if (gimplify_stmt_list)
    gimple_seq_add_seq (&cond_expr_stmt_list, gimplify_stmt_list);
  cond_stmt = gimple_build_cond_from_tree (cond, NULL_TREE, NULL_TREE);
  if (cond_expr_stmt_list)
    gsi_insert_seq_after (&gsi, cond_expr_stmt_list, GSI_NEW_STMT);

  gsi = gsi_last_bb (guard_bb);
  gsi_insert_after (&gsi, cond_stmt, GSI_NEW_STMT);

  /* Add new edge to connect guard block to the merge/loop-exit block.  */
  new_e = make_edge (guard_bb, exit_bb, EDGE_TRUE_VALUE);
  set_immediate_dominator (CDI_DOMINATORS, exit_bb, dom_bb);
  return new_e;
}

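/* For illustration, after slpeel_add_loop_guard GUARD_BB ends in

       if (cond)
         goto exit_bb;         (new_e, EDGE_TRUE_VALUE)
       else
         goto enter_e->dest;   (the guarded code)

   with any statements needed to gimplify COND inserted before the
   condition.  */
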
/* This function verifies that the following restrictions apply to LOOP:
   (1) it is innermost
   (2) it consists of exactly 2 basic blocks - header, and an empty latch.
   (3) it is single entry, single exit
   (4) its exit condition is the last stmt in the header
   (5) E is the entry/exit edge of LOOP.
 */

bool
slpeel_can_duplicate_loop_p (const struct loop *loop, const_edge e)
{
  edge exit_e = single_exit (loop);
  edge entry_e = loop_preheader_edge (loop);
  gimple orig_cond = get_loop_exit_condition (loop);
  gimple_stmt_iterator loop_exit_gsi = gsi_last_bb (exit_e->src);

  if (need_ssa_update_p (cfun))
    return false;

  if (loop->inner
      /* All loops have an outer scope; the only case loop->outer is NULL is for
         the function itself.  */
      || !loop_outer (loop)
      || loop->num_nodes != 2
      || !empty_block_p (loop->latch)
      || !single_exit (loop)
      /* Verify that new loop exit condition can be trivially modified.  */
      || (!orig_cond || orig_cond != gsi_stmt (loop_exit_gsi))
      || (e != exit_e && e != entry_e))
    return false;

  return true;
}

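/* As a rough example, a canonicalized innermost loop such as

       for (i = 0; i < n; i++)
         a[i] = b[i] + c[i];

   satisfies these restrictions: its body is a single basic block whose
   last statement is the exit test, and its latch is empty.  Nested
   loops, multi-exit loops, or loops whose exit condition is not the
   last statement of the header are rejected.  */
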
#ifdef ENABLE_CHECKING
static void
slpeel_verify_cfg_after_peeling (struct loop *first_loop,
                                 struct loop *second_loop)
{
  basic_block loop1_exit_bb = single_exit (first_loop)->dest;
  basic_block loop2_entry_bb = loop_preheader_edge (second_loop)->src;
  basic_block loop1_entry_bb = loop_preheader_edge (first_loop)->src;

  /* A guard that controls whether the second_loop is to be executed or skipped
     is placed in first_loop->exit.  first_loop->exit therefore has two
     successors - one is the preheader of second_loop, and the other is a bb
     after second_loop.  */
  gcc_assert (EDGE_COUNT (loop1_exit_bb->succs) == 2);

  /* 1. Verify that one of the successors of first_loop->exit is the preheader
        of second_loop.  */

  /* The preheader of new_loop is expected to have two predecessors:
     first_loop->exit and the block that precedes first_loop.  */

  gcc_assert (EDGE_COUNT (loop2_entry_bb->preds) == 2
              && ((EDGE_PRED (loop2_entry_bb, 0)->src == loop1_exit_bb
                   && EDGE_PRED (loop2_entry_bb, 1)->src == loop1_entry_bb)
                  || (EDGE_PRED (loop2_entry_bb, 1)->src == loop1_exit_bb
                      && EDGE_PRED (loop2_entry_bb, 0)->src == loop1_entry_bb)));

  /* Verify that the other successor of first_loop->exit is after the
     second_loop.  */
  /* TODO */
}
#endif

/* If the run time cost model check determines that vectorization is
   not profitable and hence a scalar loop should be generated, then set
   FIRST_NITERS to the number of prologue-peeled iterations.  This will
   allow all the iterations to be executed in the prologue-peeled scalar
   loop.  */

static void
set_prologue_iterations (basic_block bb_before_first_loop,
                         tree *first_niters,
                         struct loop *loop,
                         unsigned int th)
{
  edge e;
  basic_block cond_bb, then_bb;
  tree var, prologue_after_cost_adjust_name;
  gimple_stmt_iterator gsi;
  gimple newphi;
  edge e_true, e_false, e_fallthru;
  gimple cond_stmt;
  gimple_seq stmts = NULL;
  tree cost_pre_condition = NULL_TREE;
  tree scalar_loop_iters =
    unshare_expr (LOOP_VINFO_NITERS_UNCHANGED (loop_vec_info_for_loop (loop)));

  e = single_pred_edge (bb_before_first_loop);
  cond_bb = split_edge(e);

  e = single_pred_edge (bb_before_first_loop);
  then_bb = split_edge(e);
  set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);

  e_false = make_single_succ_edge (cond_bb, bb_before_first_loop,
                                   EDGE_FALSE_VALUE);
  set_immediate_dominator (CDI_DOMINATORS, bb_before_first_loop, cond_bb);

  e_true = EDGE_PRED (then_bb, 0);
  e_true->flags &= ~EDGE_FALLTHRU;
  e_true->flags |= EDGE_TRUE_VALUE;

  e_fallthru = EDGE_SUCC (then_bb, 0);

  gsi = gsi_last_bb (cond_bb);
  cost_pre_condition =
    fold_build2 (LE_EXPR, boolean_type_node, scalar_loop_iters,
                 build_int_cst (TREE_TYPE (scalar_loop_iters), th));
  cost_pre_condition =
    force_gimple_operand_gsi_1 (&gsi, cost_pre_condition, is_gimple_condexpr,
                                NULL_TREE, false, GSI_CONTINUE_LINKING);
  cond_stmt = gimple_build_cond_from_tree (cost_pre_condition,
                                           NULL_TREE, NULL_TREE);
  gsi_insert_after (&gsi, cond_stmt, GSI_NEW_STMT);

  var = create_tmp_var (TREE_TYPE (scalar_loop_iters),
                        "prologue_after_cost_adjust");
  prologue_after_cost_adjust_name =
    force_gimple_operand (scalar_loop_iters, &stmts, false, var);

  gsi = gsi_last_bb (then_bb);
  if (stmts)
    gsi_insert_seq_after (&gsi, stmts, GSI_NEW_STMT);

  newphi = create_phi_node (var, bb_before_first_loop);
  add_phi_arg (newphi, prologue_after_cost_adjust_name, e_fallthru,
               UNKNOWN_LOCATION);
  add_phi_arg (newphi, *first_niters, e_false, UNKNOWN_LOCATION);

  *first_niters = PHI_RESULT (newphi);
}

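/* Illustratively, set_prologue_iterations rewrites the edge into
   BB_BEFORE_FIRST_LOOP as

       cond_bb:
         if (scalar_loop_iters <= th) goto then_bb;
         else goto bb_before_first_loop;
       then_bb:
         prologue_after_cost_adjust = scalar_loop_iters;
       bb_before_first_loop:
         first_niters = PHI <prologue_after_cost_adjust (then_bb),
                             original first_niters (cond_bb)>

   so that an unprofitable vectorization executes all iterations in the
   scalar prologue.  */
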
/* Function slpeel_tree_peel_loop_to_edge.

   Peel the first (last) iterations of LOOP into a new prolog (epilog) loop
   that is placed on the entry (exit) edge E of LOOP.  After this transformation
   we have two loops one after the other - first-loop iterates FIRST_NITERS
   times, and second-loop iterates the remainder NITERS - FIRST_NITERS times.
   If the cost model indicates that it is profitable to emit a scalar
   loop instead of the vector one, then the prolog (epilog) loop will iterate
   for the entire unchanged scalar iterations of the loop.

   Input:
   - LOOP: the loop to be peeled.
   - E: the exit or entry edge of LOOP.
        If it is the entry edge, we peel the first iterations of LOOP.  In this
        case first-loop is LOOP, and second-loop is the newly created loop.
        If it is the exit edge, we peel the last iterations of LOOP.  In this
        case, first-loop is the newly created loop, and second-loop is LOOP.
   - NITERS: the number of iterations that LOOP iterates.
   - FIRST_NITERS: the number of iterations that the first-loop should iterate.
   - UPDATE_FIRST_LOOP_COUNT: specifies whether this function is responsible
        for updating the loop bound of the first-loop to FIRST_NITERS.  If it
        is false, the caller of this function may want to take care of this
        (this can be useful if we don't want new stmts added to first-loop).
   - TH: cost model profitability threshold of iterations for vectorization.
   - CHECK_PROFITABILITY: specifies whether the cost model check has not
        occurred during versioning and hence needs to occur during
        prologue generation, or whether the cost model check has not
        occurred during prologue generation and hence needs to occur
        during epilogue generation.

   Output:
   The function returns a pointer to the new loop-copy, or NULL if it failed
   to perform the transformation.

   The function generates two if-then-else guards: one before the first loop,
   and the other before the second loop:
   The first guard is:
     if (FIRST_NITERS == 0) then skip the first loop,
     and go directly to the second loop.
   The second guard is:
     if (FIRST_NITERS == NITERS) then skip the second loop.

   If the optional COND_EXPR and COND_EXPR_STMT_LIST arguments are given
   then the generated condition is combined with COND_EXPR and the
   statements in COND_EXPR_STMT_LIST are emitted together with it.

   FORNOW only simple loops are supported (see slpeel_can_duplicate_loop_p).
   FORNOW the resulting code will not be in loop-closed-ssa form.
*/

static struct loop*
slpeel_tree_peel_loop_to_edge (struct loop *loop,
                               edge e, tree *first_niters,
                               tree niters, bool update_first_loop_count,
                               unsigned int th, bool check_profitability,
                               tree cond_expr, gimple_seq cond_expr_stmt_list)
{
  struct loop *new_loop = NULL, *first_loop, *second_loop;
  edge skip_e;
  tree pre_condition = NULL_TREE;
  basic_block bb_before_second_loop, bb_after_second_loop;
  basic_block bb_before_first_loop;
  basic_block bb_between_loops;
  basic_block new_exit_bb;
  gimple_stmt_iterator gsi;
  edge exit_e = single_exit (loop);
  LOC loop_loc;
  tree cost_pre_condition = NULL_TREE;

  if (!slpeel_can_duplicate_loop_p (loop, e))
    return NULL;

  /* If the loop has a virtual PHI, but exit bb doesn't, create a virtual PHI
     in the exit bb and rename all the uses after the loop.  This simplifies
     the *guard[12] routines, which assume loop closed SSA form for all PHIs
     (but normally loop closed SSA form doesn't require virtual PHIs to be
     in the same form).  Doing this early simplifies the checking what
     uses should be renamed.  */
  for (gsi = gsi_start_phis (loop->header); !gsi_end_p (gsi); gsi_next (&gsi))
    if (virtual_operand_p (gimple_phi_result (gsi_stmt (gsi))))
      {
        gimple phi = gsi_stmt (gsi);
        for (gsi = gsi_start_phis (exit_e->dest);
             !gsi_end_p (gsi); gsi_next (&gsi))
          if (virtual_operand_p (gimple_phi_result (gsi_stmt (gsi))))
            break;
        if (gsi_end_p (gsi))
          {
            tree new_vop = copy_ssa_name (PHI_RESULT (phi), NULL);
            gimple new_phi = create_phi_node (new_vop, exit_e->dest);
            tree vop = PHI_ARG_DEF_FROM_EDGE (phi, EDGE_SUCC (loop->latch, 0));
            imm_use_iterator imm_iter;
            gimple stmt;
            use_operand_p use_p;

            add_phi_arg (new_phi, vop, exit_e, UNKNOWN_LOCATION);
            gimple_phi_set_result (new_phi, new_vop);
            FOR_EACH_IMM_USE_STMT (stmt, imm_iter, vop)
              if (stmt != new_phi && gimple_bb (stmt) != loop->header)
                FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
                  SET_USE (use_p, new_vop);
          }
        break;
      }

  /* 1. Generate a copy of LOOP and put it on E (E is the entry/exit of LOOP).
        Resulting CFG would be:

        first_loop:
        do {
        } while ...

        second_loop:
        do {
        } while ...

        orig_exit_bb:
   */

  if (!(new_loop = slpeel_tree_duplicate_loop_to_edge_cfg (loop, e)))
    {
      loop_loc = find_loop_location (loop);
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          if (loop_loc != UNKNOWN_LOC)
            fprintf (dump_file, "\n%s:%d: note: ",
                     LOC_FILE (loop_loc), LOC_LINE (loop_loc));
          fprintf (dump_file, "tree_duplicate_loop_to_edge_cfg failed.\n");
        }
      return NULL;
    }

  if (MAY_HAVE_DEBUG_STMTS)
    {
      gcc_assert (!adjust_vec);
      adjust_vec = VEC_alloc (adjust_info, stack, 32);
    }

  if (e == exit_e)
    {
      /* NEW_LOOP was placed after LOOP.  */
      first_loop = loop;
      second_loop = new_loop;
    }
  else
    {
      /* NEW_LOOP was placed before LOOP.  */
      first_loop = new_loop;
      second_loop = loop;
    }

  slpeel_update_phis_for_duplicate_loop (loop, new_loop, e == exit_e);
  rename_variables_in_loop (new_loop);

  /* 2. Add the guard code in one of the following ways:

     2.a Add the guard that controls whether the first loop is executed.
         This occurs when this function is invoked for prologue or epilogue
         generation and when the cost model check can be done at compile time.

         Resulting CFG would be:

         bb_before_first_loop:
         if (FIRST_NITERS == 0) GOTO bb_before_second_loop
                                GOTO first-loop

         first_loop:
         do {
         } while ...

         bb_before_second_loop:

         second_loop:
         do {
         } while ...

         orig_exit_bb:

     2.b Add the cost model check that allows the prologue
         to iterate for the entire unchanged scalar
         iterations of the loop in the event that the cost
         model indicates that the scalar loop is more
         profitable than the vector one.  This occurs when
         this function is invoked for prologue generation
         and the cost model check needs to be done at run
         time.

         Resulting CFG after prologue peeling would be:

         if (scalar_loop_iterations <= th)
           FIRST_NITERS = scalar_loop_iterations

         bb_before_first_loop:
         if (FIRST_NITERS == 0) GOTO bb_before_second_loop
                                GOTO first-loop

         first_loop:
         do {
         } while ...

         bb_before_second_loop:

         second_loop:
         do {
         } while ...

         orig_exit_bb:

     2.c Add the cost model check that allows the epilogue
         to iterate for the entire unchanged scalar
         iterations of the loop in the event that the cost
         model indicates that the scalar loop is more
         profitable than the vector one.  This occurs when
         this function is invoked for epilogue generation
         and the cost model check needs to be done at run
         time.  This check is combined with any pre-existing
         check in COND_EXPR to avoid versioning.

         Resulting CFG after epilogue peeling would be:

         bb_before_first_loop:
         if ((scalar_loop_iterations <= th)
             ||
             FIRST_NITERS == 0) GOTO bb_before_second_loop
                                GOTO first-loop

         first_loop:
         do {
         } while ...

         bb_before_second_loop:

         second_loop:
         do {
         } while ...

         orig_exit_bb:
   */

  bb_before_first_loop = split_edge (loop_preheader_edge (first_loop));
  bb_before_second_loop = split_edge (single_exit (first_loop));

  /* Epilogue peeling.  */
  if (!update_first_loop_count)
    {
      pre_condition =
        fold_build2 (LE_EXPR, boolean_type_node, *first_niters,
                     build_int_cst (TREE_TYPE (*first_niters), 0));
      if (check_profitability)
        {
          tree scalar_loop_iters
            = unshare_expr (LOOP_VINFO_NITERS_UNCHANGED
                             (loop_vec_info_for_loop (loop)));
          cost_pre_condition =
            fold_build2 (LE_EXPR, boolean_type_node, scalar_loop_iters,
                         build_int_cst (TREE_TYPE (scalar_loop_iters), th));

          pre_condition = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
                                       cost_pre_condition, pre_condition);
        }
      if (cond_expr)
        {
          pre_condition =
            fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
                         pre_condition,
                         fold_build1 (TRUTH_NOT_EXPR, boolean_type_node,
                                      cond_expr));
        }
    }

  /* Prologue peeling.  */
  else
    {
      if (check_profitability)
        set_prologue_iterations (bb_before_first_loop, first_niters,
                                 loop, th);

      pre_condition =
        fold_build2 (LE_EXPR, boolean_type_node, *first_niters,
                     build_int_cst (TREE_TYPE (*first_niters), 0));
    }

  skip_e = slpeel_add_loop_guard (bb_before_first_loop, pre_condition,
                                  cond_expr_stmt_list,
                                  bb_before_second_loop, bb_before_first_loop);
  slpeel_update_phi_nodes_for_guard1 (skip_e, first_loop,
                                      first_loop == new_loop,
                                      &new_exit_bb);


  /* 3. Add the guard that controls whether the second loop is executed.
        Resulting CFG would be:

        bb_before_first_loop:
        if (FIRST_NITERS == 0) GOTO bb_before_second_loop (skip first loop)
                               GOTO first-loop

        first_loop:
        do {
        } while ...

        bb_between_loops:
        if (FIRST_NITERS == NITERS) GOTO bb_after_second_loop (skip second loop)
                                    GOTO bb_before_second_loop

        bb_before_second_loop:

        second_loop:
        do {
        } while ...

        bb_after_second_loop:

        orig_exit_bb:
   */

  bb_between_loops = new_exit_bb;
  bb_after_second_loop = split_edge (single_exit (second_loop));

  pre_condition =
    fold_build2 (EQ_EXPR, boolean_type_node, *first_niters, niters);
  skip_e = slpeel_add_loop_guard (bb_between_loops, pre_condition, NULL,
                                  bb_after_second_loop, bb_before_first_loop);
  slpeel_update_phi_nodes_for_guard2 (skip_e, second_loop,
                                      second_loop == new_loop, &new_exit_bb);

  /* 4. Make first-loop iterate FIRST_NITERS times, if requested.  */
  if (update_first_loop_count)
    slpeel_make_loop_iterate_ntimes (first_loop, *first_niters);

  delete_update_ssa ();

  adjust_vec_debug_stmts ();

  return new_loop;
}

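/* Illustrative invocation for epilogue peeling (this is essentially how
   vect_do_peeling_for_loop_bound below calls it):

       new_loop = slpeel_tree_peel_loop_to_edge (loop, single_exit (loop),
                                                 &ratio_mult_vf_name, ni_name,
                                                 false, th,
                                                 check_profitability,
                                                 cond_expr,
                                                 cond_expr_stmt_list);

   i.e. the first FIRST_NITERS (= ratio * vf) iterations stay in LOOP and
   the remaining NITERS - FIRST_NITERS iterations run in the new copy.  */
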
/* Function find_loop_location.

   Extract the location of the loop in the source code.
   If the loop is not well formed for vectorization, an estimated
   location is calculated.
   Return the loop location if it succeeds and UNKNOWN_LOC if not.  */

LOC
find_loop_location (struct loop *loop)
{
  gimple stmt = NULL;
  basic_block bb;
  gimple_stmt_iterator si;

  if (!loop)
    return UNKNOWN_LOC;

  stmt = get_loop_exit_condition (loop);

  if (stmt && gimple_location (stmt) != UNKNOWN_LOC)
    return gimple_location (stmt);

  /* If we got here the loop is probably not "well formed",
     try to estimate the loop location.  */

  if (!loop->header)
    return UNKNOWN_LOC;

  bb = loop->header;

  for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
    {
      stmt = gsi_stmt (si);
      if (gimple_location (stmt) != UNKNOWN_LOC)
        return gimple_location (stmt);
    }

  return UNKNOWN_LOC;
}

/* This function builds ni_name = number of iterations loop executes
   on the loop preheader.  If SEQ is given the stmt is instead emitted
   there.  */

static tree
vect_build_loop_niters (loop_vec_info loop_vinfo, gimple_seq seq)
{
  tree ni_name, var;
  gimple_seq stmts = NULL;
  edge pe;
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  tree ni = unshare_expr (LOOP_VINFO_NITERS (loop_vinfo));

  var = create_tmp_var (TREE_TYPE (ni), "niters");
  ni_name = force_gimple_operand (ni, &stmts, false, var);

  pe = loop_preheader_edge (loop);
  if (stmts)
    {
      if (seq)
        gimple_seq_add_seq (&seq, stmts);
      else
        {
          basic_block new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
          gcc_assert (!new_bb);
        }
    }

  return ni_name;
}

/* This function generates the following statements:

   ni_name = number of iterations loop executes
   ratio = ni_name / vf
   ratio_mult_vf_name = ratio * vf

   and places them at the loop preheader edge or in COND_EXPR_STMT_LIST
   if that is non-NULL.  */

static void
vect_generate_tmps_on_preheader (loop_vec_info loop_vinfo,
                                 tree *ni_name_ptr,
                                 tree *ratio_mult_vf_name_ptr,
                                 tree *ratio_name_ptr,
                                 gimple_seq cond_expr_stmt_list)
{
  edge pe;
  basic_block new_bb;
  gimple_seq stmts;
  tree ni_name, ni_minus_gap_name;
  tree var;
  tree ratio_name;
  tree ratio_mult_vf_name;
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  tree ni = LOOP_VINFO_NITERS (loop_vinfo);
  int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  tree log_vf;

  pe = loop_preheader_edge (loop);

  /* Generate temporary variable that contains
     number of iterations loop executes.  */

  ni_name = vect_build_loop_niters (loop_vinfo, cond_expr_stmt_list);
  log_vf = build_int_cst (TREE_TYPE (ni), exact_log2 (vf));

  /* If epilogue loop is required because of data accesses with gaps, we
     subtract one iteration from the total number of iterations here for
     correct calculation of RATIO.  */
  if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo))
    {
      ni_minus_gap_name = fold_build2 (MINUS_EXPR, TREE_TYPE (ni_name),
                                       ni_name,
                                       build_one_cst (TREE_TYPE (ni_name)));
      if (!is_gimple_val (ni_minus_gap_name))
        {
          var = create_tmp_var (TREE_TYPE (ni), "ni_gap");

          stmts = NULL;
          ni_minus_gap_name = force_gimple_operand (ni_minus_gap_name, &stmts,
                                                    true, var);
          if (cond_expr_stmt_list)
            gimple_seq_add_seq (&cond_expr_stmt_list, stmts);
          else
            {
              pe = loop_preheader_edge (loop);
              new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
              gcc_assert (!new_bb);
            }
        }
    }
  else
    ni_minus_gap_name = ni_name;

  /* Create: ratio = ni >> log2(vf)  */

  ratio_name = fold_build2 (RSHIFT_EXPR, TREE_TYPE (ni_minus_gap_name),
                            ni_minus_gap_name, log_vf);
  if (!is_gimple_val (ratio_name))
    {
      var = create_tmp_var (TREE_TYPE (ni), "bnd");

      stmts = NULL;
      ratio_name = force_gimple_operand (ratio_name, &stmts, true, var);
      if (cond_expr_stmt_list)
        gimple_seq_add_seq (&cond_expr_stmt_list, stmts);
      else
        {
          pe = loop_preheader_edge (loop);
          new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
          gcc_assert (!new_bb);
        }
    }

  /* Create: ratio_mult_vf = ratio << log2 (vf).  */

  ratio_mult_vf_name = fold_build2 (LSHIFT_EXPR, TREE_TYPE (ratio_name),
                                    ratio_name, log_vf);
  if (!is_gimple_val (ratio_mult_vf_name))
    {
      var = create_tmp_var (TREE_TYPE (ni), "ratio_mult_vf");

      stmts = NULL;
      ratio_mult_vf_name = force_gimple_operand (ratio_mult_vf_name, &stmts,
                                                 true, var);
      if (cond_expr_stmt_list)
        gimple_seq_add_seq (&cond_expr_stmt_list, stmts);
      else
        {
          pe = loop_preheader_edge (loop);
          new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
          gcc_assert (!new_bb);
        }
    }

  *ni_name_ptr = ni_name;
  *ratio_mult_vf_name_ptr = ratio_mult_vf_name;
  *ratio_name_ptr = ratio_name;

  return;
}

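/* A small worked example (values chosen arbitrarily): with vf == 4 and
   ni_name == 103,

       log_vf        = 2
       ratio         = 103 >> 2 = 25
       ratio_mult_vf = 25 << 2  = 100

   so the vector loop covers 100 scalar iterations and the epilogue
   handles the remaining 3.  When LOOP_VINFO_PEELING_FOR_GAPS is set and
   NITERS is an exact multiple of vf (say 104), the shift is applied to
   104 - 1 = 103 instead, giving ratio = 25 and leaving a 4-iteration
   epilogue.  */
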
/* Function vect_can_advance_ivs_p

   In case the number of iterations that LOOP iterates is unknown at compile
   time, an epilog loop will be generated, and the loop induction variables
   (IVs) will be "advanced" to the value they are supposed to take just before
   the epilog loop.  Here we check that the access function of the loop IVs
   and the expression that represents the loop bound are simple enough.
   These restrictions will be relaxed in the future.  */

bool
vect_can_advance_ivs_p (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block bb = loop->header;
  gimple phi;
  gimple_stmt_iterator gsi;

  /* Analyze phi functions of the loop header.  */

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "vect_can_advance_ivs_p:");

  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      tree access_fn = NULL;
      tree evolution_part;

      phi = gsi_stmt (gsi);
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "Analyze phi: ");
          print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
        }

      /* Skip virtual phi's.  The data dependences that are associated with
         virtual defs/uses (i.e., memory accesses) are analyzed elsewhere.  */

      if (virtual_operand_p (PHI_RESULT (phi)))
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "virtual phi. skip.");
          continue;
        }

      /* Skip reduction phis.  */

      if (STMT_VINFO_DEF_TYPE (vinfo_for_stmt (phi)) == vect_reduction_def)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "reduc phi. skip.");
          continue;
        }

      /* Analyze the evolution function.  */

      access_fn = instantiate_parameters
        (loop, analyze_scalar_evolution (loop, PHI_RESULT (phi)));

      if (!access_fn)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "No Access function.");
          return false;
        }

      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "Access function of PHI: ");
          print_generic_expr (vect_dump, access_fn, TDF_SLIM);
        }

      evolution_part = evolution_part_in_loop_num (access_fn, loop->num);

      if (evolution_part == NULL_TREE)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "No evolution.");
          return false;
        }

      /* FORNOW: We do not transform initial conditions of IVs
         whose evolution functions are a polynomial of degree >= 2.  */

      if (tree_is_chrec (evolution_part))
        return false;
    }

  return true;
}

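/* For illustration, a simple IV such as

       i_1 = PHI <0 (preheader), i_2 (latch)>
       i_2 = i_1 + 1;

   has access function {0, +, 1}_loop; its evolution part (1) is not
   itself a CHREC, so it is accepted.  An IV whose evolution part is
   again a CHREC (a polynomial of degree >= 2) makes this function
   return false.  */
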
/* Function vect_update_ivs_after_vectorizer.

   "Advance" the induction variables of LOOP to the value they should take
   after the execution of LOOP.  This is currently necessary because the
   vectorizer does not handle induction variables that are used after the
   loop.  Such a situation occurs when the last iterations of LOOP are
   peeled, because:
   1. We introduced new uses after LOOP for IVs that were not originally used
      after LOOP: the IVs of LOOP are now used by an epilog loop.
   2. LOOP is going to be vectorized; this means that it will iterate N/VF
      times, whereas the loop IVs should be bumped N times.

   Input:
   - LOOP - a loop that is going to be vectorized.  The last few iterations
     of LOOP were peeled.
   - NITERS - the number of iterations that LOOP executes (before it is
     vectorized).  i.e., the number of times the ivs should be bumped.
   - UPDATE_E - a successor edge of LOOP->exit that is on the (only) path
     coming out from LOOP on which there are uses of the LOOP ivs
     (this is the path from LOOP->exit to epilog_loop->preheader).

     The new definitions of the ivs are placed in LOOP->exit.
     The phi args associated with the edge UPDATE_E in the bb
     UPDATE_E->dest are updated accordingly.

   Assumption 1: Like the rest of the vectorizer, this function assumes
   a single loop exit that has a single predecessor.

   Assumption 2: The phi nodes in the LOOP header and in update_bb are
   organized in the same order.

   Assumption 3: The access function of the ivs is simple enough (see
   vect_can_advance_ivs_p).  This assumption will be relaxed in the future.

   Assumption 4: Exactly one of the successors of LOOP exit-bb is on a path
   coming out of LOOP on which the ivs of LOOP are used (this is the path
   that leads to the epilog loop; other paths skip the epilog loop).  This
   path starts with the edge UPDATE_E, and its destination (denoted update_bb)
   needs to have its phis updated.
 */

static void
vect_update_ivs_after_vectorizer (loop_vec_info loop_vinfo, tree niters,
                                  edge update_e)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block exit_bb = single_exit (loop)->dest;
  gimple phi, phi1;
  gimple_stmt_iterator gsi, gsi1;
  basic_block update_bb = update_e->dest;

  /* gcc_assert (vect_can_advance_ivs_p (loop_vinfo)); */

  /* Make sure there exists a single-predecessor exit bb:  */
  gcc_assert (single_pred_p (exit_bb));

  for (gsi = gsi_start_phis (loop->header), gsi1 = gsi_start_phis (update_bb);
       !gsi_end_p (gsi) && !gsi_end_p (gsi1);
       gsi_next (&gsi), gsi_next (&gsi1))
    {
      tree init_expr;
      tree step_expr, off;
      tree type;
      tree var, ni, ni_name;
      gimple_stmt_iterator last_gsi;
      stmt_vec_info stmt_info;

      phi = gsi_stmt (gsi);
      phi1 = gsi_stmt (gsi1);
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "vect_update_ivs_after_vectorizer: phi: ");
          print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
        }

      /* Skip virtual phi's.  */
      if (virtual_operand_p (PHI_RESULT (phi)))
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "virtual phi. skip.");
          continue;
        }

      /* Skip reduction phis.  */
      stmt_info = vinfo_for_stmt (phi);
      if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "reduc phi. skip.");
          continue;
        }

      type = TREE_TYPE (gimple_phi_result (phi));
      step_expr = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_info);
      step_expr = unshare_expr (step_expr);

      /* FORNOW: We do not support IVs whose evolution function is a polynomial
         of degree >= 2 or exponential.  */
      gcc_assert (!tree_is_chrec (step_expr));

      init_expr = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));

      off = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
                         fold_convert (TREE_TYPE (step_expr), niters),
                         step_expr);
      if (POINTER_TYPE_P (type))
        ni = fold_build_pointer_plus (init_expr, off);
      else
        ni = fold_build2 (PLUS_EXPR, type,
                          init_expr, fold_convert (type, off));

      var = create_tmp_var (type, "tmp");

      last_gsi = gsi_last_bb (exit_bb);
      ni_name = force_gimple_operand_gsi (&last_gsi, ni, false, var,
                                          true, GSI_SAME_STMT);

      /* Fix phi expressions in the successor bb.  */
      adjust_phi_and_debug_stmts (phi1, update_e, ni_name);
    }
}

1838 /* Function vect_do_peeling_for_loop_bound
1840 Peel the last iterations of the loop represented by LOOP_VINFO.
1841 The peeled iterations form a new epilog loop. Given that the loop now
1842 iterates NITERS times, the new epilog loop iterates
1843 NITERS % VECTORIZATION_FACTOR times.
1845 The original loop will later be made to iterate
1846 NITERS / VECTORIZATION_FACTOR times (this value is placed into RATIO).
1848 COND_EXPR and COND_EXPR_STMT_LIST are combined with a newly generated
1849 test. */
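As a concrete example of the split (illustrative numbers only): with
NITERS == 103 and a vectorization factor of 4, the vectorized loop runs
103 / 4 == 25 times (RATIO), covering 100 scalar iterations, and the
epilog loop runs 103 % 4 == 3 times.  A minimal sketch of the
iteration-count arithmetic, using hypothetical names:

    void
    split_iterations (unsigned niters, unsigned vf,
                      unsigned *ratio, unsigned *epilog_niters)
    {
      *ratio = niters / vf;          /* iterations of the vectorized loop */
      *epilog_niters = niters % vf;  /* iterations of the epilog loop */
    }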
1851 void
1852 vect_do_peeling_for_loop_bound (loop_vec_info loop_vinfo, tree *ratio,
1853 unsigned int th, bool check_profitability)
1855 tree ni_name, ratio_mult_vf_name;
1856 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1857 struct loop *new_loop;
1858 edge update_e;
1859 basic_block preheader;
1860 int loop_num;
1861 int max_iter;
1862 tree cond_expr = NULL_TREE;
1863 gimple_seq cond_expr_stmt_list = NULL;
1865 if (vect_print_dump_info (REPORT_DETAILS))
1866 fprintf (vect_dump, "=== vect_do_peeling_for_loop_bound ===");
1868 initialize_original_copy_tables ();
1870 /* Generate the following variables on the preheader of the original loop:
1872 ni_name = number of iterations the original loop executes
1873 ratio = ni_name / vf
1874 ratio_mult_vf_name = ratio * vf */
1875 vect_generate_tmps_on_preheader (loop_vinfo, &ni_name,
1876 &ratio_mult_vf_name, ratio,
1877 cond_expr_stmt_list);
1879 loop_num = loop->num;
1881 new_loop = slpeel_tree_peel_loop_to_edge (loop, single_exit (loop),
1882 &ratio_mult_vf_name, ni_name, false,
1883 th, check_profitability,
1884 cond_expr, cond_expr_stmt_list);
1885 gcc_assert (new_loop);
1886 gcc_assert (loop_num == loop->num);
1887 #ifdef ENABLE_CHECKING
1888 slpeel_verify_cfg_after_peeling (loop, new_loop);
1889 #endif
1891 /* A guard that controls whether the new_loop is to be executed or skipped
1892 is placed in LOOP->exit. LOOP->exit therefore has two successors - one
1893 is the preheader of NEW_LOOP, where the IVs from LOOP are used. The other
1894 is a bb after NEW_LOOP, where these IVs are not used. Find the edge that
1895 is on the path where the LOOP IVs are used and need to be updated. */
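   /* Roughly, the CFG at this point looks like (illustrative sketch only):

          LOOP --> exit_bb --+--> NEW_LOOP preheader --> NEW_LOOP (epilog)
                             |
                             +--> bb after NEW_LOOP (epilog skipped)

      update_e is whichever of the two successor edges leads towards the
      NEW_LOOP preheader.  */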
1897 preheader = loop_preheader_edge (new_loop)->src;
1898 if (EDGE_PRED (preheader, 0)->src == single_exit (loop)->dest)
1899 update_e = EDGE_PRED (preheader, 0);
1900 else
1901 update_e = EDGE_PRED (preheader, 1);
1903 /* Update IVs of original loop as if they were advanced
1904 by ratio_mult_vf_name steps. */
1905 vect_update_ivs_after_vectorizer (loop_vinfo, ratio_mult_vf_name, update_e);
1907 max_iter = LOOP_VINFO_VECT_FACTOR (loop_vinfo) - 1;
1908 if (check_profitability)
1909 max_iter = MAX (max_iter, (int) th);
1910 record_niter_bound (new_loop, shwi_to_double_int (max_iter), false, true);
1911 if (dump_file && (dump_flags & TDF_DETAILS))
1912 fprintf (dump_file, "Setting upper bound of nb iterations for epilogue "
1913 "loop to %d\n", max_iter);
1915 /* After peeling we have to reset scalar evolution analyzer. */
1916 scev_reset ();
1918 free_original_copy_tables ();
1922 /* Function vect_gen_niters_for_prolog_loop
1924 Set the number of iterations for the loop represented by LOOP_VINFO
1925 to the minimum between LOOP_NITERS (the original iteration count of the loop)
1926 and the misalignment of DR - the data reference recorded in
1927 LOOP_VINFO_UNALIGNED_DR (LOOP_VINFO). As a result, after the execution of
1928 this loop, the data reference DR will refer to an aligned location.
1930 The following computation is generated:
1932 If the misalignment of DR is known at compile time:
1933 addr_mis = DR_MISALIGNMENT (dr);
1934 Else, compute address misalignment in bytes:
1935 addr_mis = addr & (vectype_align - 1)
1937 prolog_niters = min (LOOP_NITERS, ((VF - addr_mis/elem_size)&(VF-1))/step)
1939 (elem_size = element type size; an element is the scalar element whose type
1940 is the inner type of the vectype)
1942 When the step of the data-ref in the loop is not 1 (as in interleaved data
1943 and SLP), the number of iterations of the prolog must be divided by the step
1944 (which is equal to the size of the interleaved group).
1946 The above formulas assume that VF == number of elements in the vector. This
1947 may not hold when there are multiple types in the loop.
1948 In this case, for some data-references in the loop the VF does not represent
1949 the number of elements that fit in the vector. Therefore, instead of VF we
1950 use TYPE_VECTOR_SUBPARTS. */
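As a worked example (illustrative values only, with the MIN against
LOOP_NITERS omitted): for 16-byte vectors of 4-byte elements (VF == 4),
unit step, and a start address misaligned by 8 bytes, addr_mis/elem_size
== 2 and prolog_niters == (4 - 2) & 3 == 2, i.e. peeling two iterations
brings the access to a 16-byte boundary.  A minimal sketch of the
unknown-misalignment computation, using hypothetical names:

    unsigned
    prolog_niters_sketch (unsigned long addr, unsigned vectype_align,
                          unsigned elem_size, unsigned nelements,
                          unsigned step)
    {
      unsigned byte_misalign = addr & (vectype_align - 1);
      unsigned elem_misalign = byte_misalign / elem_size;
      return ((nelements - elem_misalign) & (nelements - 1)) / step;
    }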
1952 static tree
1953 vect_gen_niters_for_prolog_loop (loop_vec_info loop_vinfo, tree loop_niters)
1955 struct data_reference *dr = LOOP_VINFO_UNALIGNED_DR (loop_vinfo);
1956 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1957 tree var;
1958 gimple_seq stmts;
1959 tree iters, iters_name;
1960 edge pe;
1961 basic_block new_bb;
1962 gimple dr_stmt = DR_STMT (dr);
1963 stmt_vec_info stmt_info = vinfo_for_stmt (dr_stmt);
1964 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1965 int vectype_align = TYPE_ALIGN (vectype) / BITS_PER_UNIT;
1966 tree niters_type = TREE_TYPE (loop_niters);
1967 int nelements = TYPE_VECTOR_SUBPARTS (vectype);
1969 pe = loop_preheader_edge (loop);
1971 if (LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo) > 0)
1973 int npeel = LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo);
1975 if (vect_print_dump_info (REPORT_DETAILS))
1976 fprintf (vect_dump, "known peeling = %d.", npeel);
1978 iters = build_int_cst (niters_type, npeel);
1980 else
1982 gimple_seq new_stmts = NULL;
1983 bool negative = tree_int_cst_compare (DR_STEP (dr), size_zero_node) < 0;
1984 tree offset = negative
1985 ? size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1) : NULL_TREE;
1986 tree start_addr = vect_create_addr_base_for_vector_ref (dr_stmt,
1987 &new_stmts, offset, loop);
1988 tree type = unsigned_type_for (TREE_TYPE (start_addr));
1989 tree vectype_align_minus_1 = build_int_cst (type, vectype_align - 1);
1990 HOST_WIDE_INT elem_size =
1991 int_cst_value (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
1992 tree elem_size_log = build_int_cst (type, exact_log2 (elem_size));
1993 tree nelements_minus_1 = build_int_cst (type, nelements - 1);
1994 tree nelements_tree = build_int_cst (type, nelements);
1995 tree byte_misalign;
1996 tree elem_misalign;
1998 new_bb = gsi_insert_seq_on_edge_immediate (pe, new_stmts);
1999 gcc_assert (!new_bb);
2001 /* Create: byte_misalign = addr & (vectype_align - 1) */
2002 byte_misalign =
2003 fold_build2 (BIT_AND_EXPR, type, fold_convert (type, start_addr),
2004 vectype_align_minus_1);
2006 /* Create: elem_misalign = byte_misalign / element_size */
2007 elem_misalign =
2008 fold_build2 (RSHIFT_EXPR, type, byte_misalign, elem_size_log);
2010 /* Create: (niters_type) (nelements - elem_misalign)&(nelements - 1) */
2011 if (negative)
2012 iters = fold_build2 (MINUS_EXPR, type, elem_misalign, nelements_tree);
2013 else
2014 iters = fold_build2 (MINUS_EXPR, type, nelements_tree, elem_misalign);
2015 iters = fold_build2 (BIT_AND_EXPR, type, iters, nelements_minus_1);
2016 iters = fold_convert (niters_type, iters);
2019 /* Create: prolog_loop_niters = min (iters, loop_niters) */
2020 /* If the loop bound is known at compile time we already verified that it is
2021 greater than vf; since the misalignment ('iters') is at most vf, there's
2022 no need to generate the MIN_EXPR in this case. */
2023 if (TREE_CODE (loop_niters) != INTEGER_CST)
2024 iters = fold_build2 (MIN_EXPR, niters_type, iters, loop_niters);
2026 if (vect_print_dump_info (REPORT_DETAILS))
2028 fprintf (vect_dump, "niters for prolog loop: ");
2029 print_generic_expr (vect_dump, iters, TDF_SLIM);
2032 var = create_tmp_var (niters_type, "prolog_loop_niters");
2033 stmts = NULL;
2034 iters_name = force_gimple_operand (iters, &stmts, false, var);
2036 /* Insert stmt on loop preheader edge. */
2037 if (stmts)
2039 basic_block new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
2040 gcc_assert (!new_bb);
2043 return iters_name;
2047 /* Function vect_update_init_of_dr
2049 NITERS iterations were peeled from LOOP. DR represents a data reference
2050 in LOOP. This function updates the information recorded in DR to
2051 account for the fact that the first NITERS iterations had already been
2052 executed. Specifically, it updates the OFFSET field of DR. */
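In other words, the recorded offset is advanced by NITERS * DR_STEP bytes.
A minimal sketch of that arithmetic, with hypothetical names and assuming a
non-negative step (all quantities in bytes):

    unsigned long
    updated_dr_offset (unsigned long offset, unsigned long step,
                       unsigned long niters)
    {
      return offset + niters * step;
    }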
2054 static void
2055 vect_update_init_of_dr (struct data_reference *dr, tree niters)
2057 tree offset = DR_OFFSET (dr);
2059 niters = fold_build2 (MULT_EXPR, sizetype,
2060 fold_convert (sizetype, niters),
2061 fold_convert (sizetype, DR_STEP (dr)));
2062 offset = fold_build2 (PLUS_EXPR, sizetype,
2063 fold_convert (sizetype, offset), niters);
2064 DR_OFFSET (dr) = offset;
2068 /* Function vect_update_inits_of_drs
2070 NITERS iterations were peeled from the loop represented by LOOP_VINFO.
2071 This function updates the information recorded for the data references in
2072 the loop to account for the fact that the first NITERS iterations had
2073 already been executed. Specifically, it updates the initial_condition of
2074 the access_function of all the data_references in the loop. */
2076 static void
2077 vect_update_inits_of_drs (loop_vec_info loop_vinfo, tree niters)
2079 unsigned int i;
2080 VEC (data_reference_p, heap) *datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
2081 struct data_reference *dr;
2083 if (vect_print_dump_info (REPORT_DETAILS))
2084 fprintf (vect_dump, "=== vect_update_inits_of_dr ===");
2086 FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
2087 vect_update_init_of_dr (dr, niters);
2091 /* Function vect_do_peeling_for_alignment
2093 Peel the first 'niters' iterations of the loop represented by LOOP_VINFO.
2094 'niters' is set to the misalignment of one of the data references in the
2095 loop, thereby forcing it to refer to an aligned location at the beginning
2096 of the execution of this loop. The data reference for which we are
2097 peeling is recorded in LOOP_VINFO_UNALIGNED_DR. */
2099 void
2100 vect_do_peeling_for_alignment (loop_vec_info loop_vinfo,
2101 unsigned int th, bool check_profitability)
2103 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2104 tree niters_of_prolog_loop, ni_name;
2105 tree n_iters;
2106 tree wide_prolog_niters;
2107 struct loop *new_loop;
2108 int max_iter;
2110 if (vect_print_dump_info (REPORT_DETAILS))
2111 fprintf (vect_dump, "=== vect_do_peeling_for_alignment ===");
2113 initialize_original_copy_tables ();
2115 ni_name = vect_build_loop_niters (loop_vinfo, NULL);
2116 niters_of_prolog_loop = vect_gen_niters_for_prolog_loop (loop_vinfo,
2117 ni_name);
2119 /* Peel the prolog loop and iterate it niters_of_prolog_loop times. */
2120 new_loop =
2121 slpeel_tree_peel_loop_to_edge (loop, loop_preheader_edge (loop),
2122 &niters_of_prolog_loop, ni_name, true,
2123 th, check_profitability, NULL_TREE, NULL);
2125 gcc_assert (new_loop);
2126 #ifdef ENABLE_CHECKING
2127 slpeel_verify_cfg_after_peeling (new_loop, loop);
2128 #endif
2129 max_iter = LOOP_VINFO_VECT_FACTOR (loop_vinfo) - 1;
2130 if (check_profitability)
2131 max_iter = MAX (max_iter, (int) th);
2132 record_niter_bound (new_loop, shwi_to_double_int (max_iter), false, true);
2133 if (dump_file && (dump_flags & TDF_DETAILS))
2134 fprintf (dump_file, "Setting upper bound of nb iterations for prologue "
2135 "loop to %d\n", max_iter);
2137 /* Update number of times loop executes. */
2138 n_iters = LOOP_VINFO_NITERS (loop_vinfo);
2139 LOOP_VINFO_NITERS (loop_vinfo) = fold_build2 (MINUS_EXPR,
2140 TREE_TYPE (n_iters), n_iters, niters_of_prolog_loop);
2142 if (types_compatible_p (sizetype, TREE_TYPE (niters_of_prolog_loop)))
2143 wide_prolog_niters = niters_of_prolog_loop;
2144 else
2146 gimple_seq seq = NULL;
2147 edge pe = loop_preheader_edge (loop);
2148 tree wide_iters = fold_convert (sizetype, niters_of_prolog_loop);
2149 tree var = create_tmp_var (sizetype, "prolog_loop_adjusted_niters");
2150 wide_prolog_niters = force_gimple_operand (wide_iters, &seq, false,
2151 var);
2152 if (seq)
2154 /* Insert stmt on loop preheader edge. */
2155 basic_block new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
2156 gcc_assert (!new_bb);
2160 /* Update the init conditions of the access functions of all data refs. */
2161 vect_update_inits_of_drs (loop_vinfo, wide_prolog_niters);
2163 /* After peeling we have to reset scalar evolution analyzer. */
2164 scev_reset ();
2166 free_original_copy_tables ();
2170 /* Function vect_create_cond_for_align_checks.
2172 Create a conditional expression that represents the alignment checks for
2173 all of the data references (array element references) whose alignment must be
2174 checked at runtime.
2176 Input:
2177 COND_EXPR - input conditional expression. New conditions will be chained
2178 with logical AND operation.
2179 LOOP_VINFO - two fields of the loop information are used.
2180 LOOP_VINFO_PTR_MASK is the mask used to check the alignment.
2181 LOOP_VINFO_MAY_MISALIGN_STMTS contains the refs to be checked.
2183 Output:
2184 COND_EXPR_STMT_LIST - statements needed to construct the conditional
2185 expression.
2186 The returned value is the conditional expression to be used in the if
2187 statement that controls which version of the loop gets executed at runtime.
2189 The algorithm makes two assumptions:
2190 1) The number of bytes "n" in a vector is a power of 2.
2191 2) An address "a" is aligned if a%n is zero, and this
2192 test can be done as a&(n-1) == 0. For example, for 16
2193 byte vectors the test is a&0xf == 0. */
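The emitted condition is therefore equivalent to the following check, shown
here as a plain C sketch with hypothetical names (MASK is the vector size in
bytes minus one, e.g. 0xf for 16-byte vectors):

    int
    all_refs_aligned_p (unsigned long addrs[], unsigned n, unsigned long mask)
    {
      unsigned long or_tmp = 0;
      unsigned i;
      for (i = 0; i < n; i++)
        or_tmp |= addrs[i];
      return (or_tmp & mask) == 0;
    }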
2195 static void
2196 vect_create_cond_for_align_checks (loop_vec_info loop_vinfo,
2197 tree *cond_expr,
2198 gimple_seq *cond_expr_stmt_list)
2200 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2201 VEC(gimple,heap) *may_misalign_stmts
2202 = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo);
2203 gimple ref_stmt;
2204 int mask = LOOP_VINFO_PTR_MASK (loop_vinfo);
2205 tree mask_cst;
2206 unsigned int i;
2207 tree int_ptrsize_type;
2208 char tmp_name[20];
2209 tree or_tmp_name = NULL_TREE;
2210 tree and_tmp_name;
2211 gimple and_stmt;
2212 tree ptrsize_zero;
2213 tree part_cond_expr;
2215 /* Check that mask is one less than a power of 2, i.e., mask is
2216 all zeros followed by all ones. */
2217 gcc_assert ((mask != 0) && ((mask & (mask+1)) == 0));
2219 int_ptrsize_type = signed_type_for (ptr_type_node);
2221 /* Create expression (mask & (dr_1 | ... | dr_n)) where dr_i is the address
2222 of the first vector of the i'th data reference. */
2224 FOR_EACH_VEC_ELT (gimple, may_misalign_stmts, i, ref_stmt)
2226 gimple_seq new_stmt_list = NULL;
2227 tree addr_base;
2228 tree addr_tmp_name;
2229 tree new_or_tmp_name;
2230 gimple addr_stmt, or_stmt;
2231 stmt_vec_info stmt_vinfo = vinfo_for_stmt (ref_stmt);
2232 tree vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
2233 bool negative = tree_int_cst_compare
2234 (DR_STEP (STMT_VINFO_DATA_REF (stmt_vinfo)), size_zero_node) < 0;
2235 tree offset = negative
2236 ? size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1) : NULL_TREE;
2238 /* create: addr_tmp = (int)(address_of_first_vector) */
2239 addr_base =
2240 vect_create_addr_base_for_vector_ref (ref_stmt, &new_stmt_list,
2241 offset, loop);
2242 if (new_stmt_list != NULL)
2243 gimple_seq_add_seq (cond_expr_stmt_list, new_stmt_list);
2245 sprintf (tmp_name, "addr2int%d", i);
2246 addr_tmp_name = make_temp_ssa_name (int_ptrsize_type, NULL, tmp_name);
2247 addr_stmt = gimple_build_assign_with_ops (NOP_EXPR, addr_tmp_name,
2248 addr_base, NULL_TREE);
2249 gimple_seq_add_stmt (cond_expr_stmt_list, addr_stmt);
2251 /* The addresses are ORed together. */
2253 if (or_tmp_name != NULL_TREE)
2255 /* create: or_tmp = or_tmp | addr_tmp */
2256 sprintf (tmp_name, "orptrs%d", i);
2257 new_or_tmp_name = make_temp_ssa_name (int_ptrsize_type, NULL, tmp_name);
2258 or_stmt = gimple_build_assign_with_ops (BIT_IOR_EXPR,
2259 new_or_tmp_name,
2260 or_tmp_name, addr_tmp_name);
2261 gimple_seq_add_stmt (cond_expr_stmt_list, or_stmt);
2262 or_tmp_name = new_or_tmp_name;
2264 else
2265 or_tmp_name = addr_tmp_name;
2267 } /* end for i */
2269 mask_cst = build_int_cst (int_ptrsize_type, mask);
2271 /* create: and_tmp = or_tmp & mask */
2272 and_tmp_name = make_temp_ssa_name (int_ptrsize_type, NULL, "andmask");
2274 and_stmt = gimple_build_assign_with_ops (BIT_AND_EXPR, and_tmp_name,
2275 or_tmp_name, mask_cst);
2276 gimple_seq_add_stmt (cond_expr_stmt_list, and_stmt);
2278 /* Make and_tmp the left operand of the conditional test against zero.
2279 If and_tmp has a nonzero bit then some address is unaligned. */
2280 ptrsize_zero = build_int_cst (int_ptrsize_type, 0);
2281 part_cond_expr = fold_build2 (EQ_EXPR, boolean_type_node,
2282 and_tmp_name, ptrsize_zero);
2283 if (*cond_expr)
2284 *cond_expr = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
2285 *cond_expr, part_cond_expr);
2286 else
2287 *cond_expr = part_cond_expr;
2291 /* Function vect_vfa_segment_size.
2293 Create an expression that computes the size of the segment
2294 that will be accessed for a data reference. The function takes into
2295 account that realignment loads may access one more vector.
2297 Input:
2298 DR: The data reference.
2299 LENGTH_FACTOR: segment length to consider.
2301 Return an expression whose value is the size of the segment which will be
2302 accessed by DR. */
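A minimal sketch of the computation, with hypothetical names and assuming a
non-negative step (all sizes in bytes):

    unsigned long
    vfa_segment_size_sketch (unsigned long step, unsigned long ref_size,
                             unsigned long length_factor,
                             int realign_optimized, unsigned long vector_size)
    {
      unsigned long len = step == 0 ? ref_size : step * length_factor;
      if (realign_optimized)
        len += vector_size;    /* realignment may touch one more vector */
      return len;
    }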
2304 static tree
2305 vect_vfa_segment_size (struct data_reference *dr, tree length_factor)
2307 tree segment_length;
2309 if (integer_zerop (DR_STEP (dr)))
2310 segment_length = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr)));
2311 else
2312 segment_length = size_binop (MULT_EXPR,
2313 fold_convert (sizetype, DR_STEP (dr)),
2314 fold_convert (sizetype, length_factor));
2316 if (vect_supportable_dr_alignment (dr, false)
2317 == dr_explicit_realign_optimized)
2319 tree vector_size = TYPE_SIZE_UNIT
2320 (STMT_VINFO_VECTYPE (vinfo_for_stmt (DR_STMT (dr))));
2322 segment_length = size_binop (PLUS_EXPR, segment_length, vector_size);
2324 return segment_length;
2328 /* Function vect_create_cond_for_alias_checks.
2330 Create a conditional expression that represents the run-time checks for
2331 overlapping of address ranges represented by a list of data reference
2332 relations passed as input.
2334 Input:
2335 COND_EXPR - input conditional expression. New conditions will be chained
2336 with logical AND operation.
2337 LOOP_VINFO - field LOOP_VINFO_MAY_ALIAS_STMTS contains the list of ddrs
2338 to be checked.
2340 Output:
2341 COND_EXPR - conditional expression.
2342 COND_EXPR_STMT_LIST - statements needed to construct the conditional
2343 expression.
2346 The returned value is the conditional expression to be used in the if
2347 statement that controls which version of the loop gets executed at runtime.
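Per pair of data references, the emitted condition boils down to a
disjointness test on the two address segments.  A minimal sketch with
hypothetical names:

    int
    segments_dont_overlap_p (char *a_min, char *a_max,
                             char *b_min, char *b_max)
    {
      return a_max <= b_min || b_max <= a_min;
    }

The vectorized version of the loop is entered only when this holds for
every pair recorded in LOOP_VINFO_MAY_ALIAS_DDRS.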
2350 static void
2351 vect_create_cond_for_alias_checks (loop_vec_info loop_vinfo,
2352 tree * cond_expr,
2353 gimple_seq * cond_expr_stmt_list)
2355 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2356 VEC (ddr_p, heap) * may_alias_ddrs =
2357 LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo);
2358 int vect_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2359 tree scalar_loop_iters = LOOP_VINFO_NITERS (loop_vinfo);
2361 ddr_p ddr;
2362 unsigned int i;
2363 tree part_cond_expr, length_factor;
2365 /* Create expression
2366 (((store_ptr_0 + store_segment_length_0) <= load_ptr_0)
2367 || ((load_ptr_0 + load_segment_length_0) <= store_ptr_0))
&& ... &&
2371 (((store_ptr_n + store_segment_length_n) <= load_ptr_n)
2372 || ((load_ptr_n + load_segment_length_n) <= store_ptr_n)) */
2374 if (VEC_empty (ddr_p, may_alias_ddrs))
2375 return;
2377 FOR_EACH_VEC_ELT (ddr_p, may_alias_ddrs, i, ddr)
2379 struct data_reference *dr_a, *dr_b;
2380 gimple dr_group_first_a, dr_group_first_b;
2381 tree addr_base_a, addr_base_b;
2382 tree segment_length_a, segment_length_b;
2383 gimple stmt_a, stmt_b;
2384 tree seg_a_min, seg_a_max, seg_b_min, seg_b_max;
2386 dr_a = DDR_A (ddr);
2387 stmt_a = DR_STMT (DDR_A (ddr));
2388 dr_group_first_a = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_a));
2389 if (dr_group_first_a)
2391 stmt_a = dr_group_first_a;
2392 dr_a = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt_a));
2395 dr_b = DDR_B (ddr);
2396 stmt_b = DR_STMT (DDR_B (ddr));
2397 dr_group_first_b = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_b));
2398 if (dr_group_first_b)
2400 stmt_b = dr_group_first_b;
2401 dr_b = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt_b));
2404 addr_base_a =
2405 vect_create_addr_base_for_vector_ref (stmt_a, cond_expr_stmt_list,
2406 NULL_TREE, loop);
2407 addr_base_b =
2408 vect_create_addr_base_for_vector_ref (stmt_b, cond_expr_stmt_list,
2409 NULL_TREE, loop);
2411 if (!operand_equal_p (DR_STEP (dr_a), DR_STEP (dr_b), 0))
2412 length_factor = scalar_loop_iters;
2413 else
2414 length_factor = size_int (vect_factor);
2415 segment_length_a = vect_vfa_segment_size (dr_a, length_factor);
2416 segment_length_b = vect_vfa_segment_size (dr_b, length_factor);
2418 if (vect_print_dump_info (REPORT_DR_DETAILS))
2420 fprintf (vect_dump,
2421 "create runtime check for data references ");
2422 print_generic_expr (vect_dump, DR_REF (dr_a), TDF_SLIM);
2423 fprintf (vect_dump, " and ");
2424 print_generic_expr (vect_dump, DR_REF (dr_b), TDF_SLIM);
2427 seg_a_min = addr_base_a;
2428 seg_a_max = fold_build_pointer_plus (addr_base_a, segment_length_a);
2429 if (tree_int_cst_compare (DR_STEP (dr_a), size_zero_node) < 0)
2430 seg_a_min = seg_a_max, seg_a_max = addr_base_a;
2432 seg_b_min = addr_base_b;
2433 seg_b_max = fold_build_pointer_plus (addr_base_b, segment_length_b);
2434 if (tree_int_cst_compare (DR_STEP (dr_b), size_zero_node) < 0)
2435 seg_b_min = seg_b_max, seg_b_max = addr_base_b;
2437 part_cond_expr =
2438 fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
2439 fold_build2 (LE_EXPR, boolean_type_node, seg_a_max, seg_b_min),
2440 fold_build2 (LE_EXPR, boolean_type_node, seg_b_max, seg_a_min));
2442 if (*cond_expr)
2443 *cond_expr = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
2444 *cond_expr, part_cond_expr);
2445 else
2446 *cond_expr = part_cond_expr;
2449 if (vect_print_dump_info (REPORT_VECTORIZED_LOCATIONS))
2450 fprintf (vect_dump, "created %u versioning for alias checks.\n",
2451 VEC_length (ddr_p, may_alias_ddrs));
2455 /* Function vect_loop_versioning.
2457 If the loop has data references that may or may not be aligned and/or
2458 has data reference relations whose independence was not proven then
2459 two versions of the loop need to be generated, one which is vectorized
2460 and one which isn't. A test is then generated to control which of the
2461 loops is executed. The test checks for the alignment of all of the
2462 data references that may or may not be aligned. An additional
2463 sequence of runtime tests is generated for each pair of DDRs whose
2464 independence was not proven. The vectorized version of the loop is
2465 executed only if both alias and alignment tests are passed.
2467 The test generated to check which version of the loop is executed
2468 is modified to also check for profitability, as initially indicated
2469 by the cost model.
2471 The versioning precondition(s) are placed in *COND_EXPR and
2472 *COND_EXPR_STMT_LIST. */
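Schematically, the transformation produces the structure below (a sketch
only; vector_version and scalar_version stand for the two loop copies, and
checks_pass for the combined condition built into *COND_EXPR):

    extern void vector_version (void), scalar_version (void);

    void
    run_versioned_loop (int checks_pass)
    {
      if (checks_pass)        /* alignment && alias && profitability */
        vector_version ();    /* the copy the vectorizer will transform */
      else
        scalar_version ();    /* the unmodified original loop */
    }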
2474 void
2475 vect_loop_versioning (loop_vec_info loop_vinfo,
2476 unsigned int th, bool check_profitability)
2478 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2479 basic_block condition_bb;
2480 gimple_stmt_iterator gsi, cond_exp_gsi;
2481 basic_block merge_bb;
2482 basic_block new_exit_bb;
2483 edge new_exit_e, e;
2484 gimple orig_phi, new_phi;
2485 tree cond_expr = NULL_TREE;
2486 gimple_seq cond_expr_stmt_list = NULL;
2487 tree arg;
2488 unsigned prob = 4 * REG_BR_PROB_BASE / 5;
2489 gimple_seq gimplify_stmt_list = NULL;
2490 tree scalar_loop_iters = LOOP_VINFO_NITERS (loop_vinfo);
2492 if (check_profitability)
2494 cond_expr = fold_build2 (GT_EXPR, boolean_type_node, scalar_loop_iters,
2495 build_int_cst (TREE_TYPE (scalar_loop_iters), th));
2496 cond_expr = force_gimple_operand_1 (cond_expr, &cond_expr_stmt_list,
2497 is_gimple_condexpr, NULL_TREE);
2500 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
2501 vect_create_cond_for_align_checks (loop_vinfo, &cond_expr,
2502 &cond_expr_stmt_list);
2504 if (LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
2505 vect_create_cond_for_alias_checks (loop_vinfo, &cond_expr,
2506 &cond_expr_stmt_list);
2508 cond_expr = force_gimple_operand_1 (cond_expr, &gimplify_stmt_list,
2509 is_gimple_condexpr, NULL_TREE);
2510 gimple_seq_add_seq (&cond_expr_stmt_list, gimplify_stmt_list);
2512 initialize_original_copy_tables ();
2513 loop_version (loop, cond_expr, &condition_bb,
2514 prob, prob, REG_BR_PROB_BASE - prob, true);
2515 free_original_copy_tables();
2517 /* Loop versioning violates an assumption we try to maintain during
2518 vectorization - that the loop exit block has a single predecessor.
2519 After versioning, the exit block of both loop versions is the same
2520 basic block (i.e. it has two predecessors). To simplify the
2521 following transformations in the vectorizer, we fix this situation
2522 here by adding a new (empty) block on the exit-edge of the loop,
2523 with the proper loop-exit phis to maintain loop-closed-form. */
2525 merge_bb = single_exit (loop)->dest;
2526 gcc_assert (EDGE_COUNT (merge_bb->preds) == 2);
2527 new_exit_bb = split_edge (single_exit (loop));
2528 new_exit_e = single_exit (loop);
2529 e = EDGE_SUCC (new_exit_bb, 0);
2531 for (gsi = gsi_start_phis (merge_bb); !gsi_end_p (gsi); gsi_next (&gsi))
2533 tree new_res;
2534 orig_phi = gsi_stmt (gsi);
2535 new_res = copy_ssa_name (PHI_RESULT (orig_phi), NULL);
2536 new_phi = create_phi_node (new_res, new_exit_bb);
2537 arg = PHI_ARG_DEF_FROM_EDGE (orig_phi, e);
2538 add_phi_arg (new_phi, arg, new_exit_e,
2539 gimple_phi_arg_location_from_edge (orig_phi, e));
2540 adjust_phi_and_debug_stmts (orig_phi, e, PHI_RESULT (new_phi));
2543 /* End loop-exit-fixes after versioning. */
2545 update_ssa (TODO_update_ssa);
2546 if (cond_expr_stmt_list)
2548 cond_exp_gsi = gsi_last_bb (condition_bb);
2549 gsi_insert_seq_before (&cond_exp_gsi, cond_expr_stmt_list,
2550 GSI_SAME_STMT);