/* Copyright (C) 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */
/* Loop Vectorization Pass.

   This pass tries to vectorize loops.  This first implementation focuses on
   simple inner-most loops, with no conditional control flow, and a set of
   simple operations whose vector form can be expressed using existing
   tree codes (PLUS, MULT etc).

   For example, the vectorizer transforms the following simple loop:

        short a[N]; short b[N]; short c[N]; int i;

        for (i=0; i<N; i++){
          a[i] = b[i] + c[i];
        }

   as if it was manually vectorized by rewriting the source code into:

        typedef int __attribute__((mode(V8HI))) v8hi;
        short a[N]; short b[N]; short c[N]; int i;
        v8hi *pa = (v8hi*)a, *pb = (v8hi*)b, *pc = (v8hi*)c;

        for (i=0; i<N/8; i++){
          pa[i] = pb[i] + pc[i];
        }

   The main entry to this pass is vectorize_loops(), in which
   the vectorizer applies a set of analyses on a given set of loops,
   followed by the actual vectorization transformation for the loops that
   had successfully passed the analysis phase.

   Throughout this pass we make a distinction between two types of
   data: scalars (which are represented by SSA_NAMEs), and memory references
   ("data-refs").  These two types of data require different handling both
   during analysis and transformation.  The types of data-refs that the
   vectorizer currently supports are ARRAY_REFs whose base is an array DECL
   (not a pointer), and INDIRECT_REFs through pointers; both array and pointer
   accesses are required to have a simple (consecutive) access pattern.

   The driver for the analysis phase is vect_analyze_loop_nest().
   It applies a set of analyses, some of which rely on the scalar evolution
   analyzer (scev) developed by Sebastian Pop.

   During the analysis phase the vectorizer records some information
   per stmt in a "stmt_vec_info" struct which is attached to each stmt in the
   loop, as well as general information about the loop as a whole, which is
   recorded in a "loop_vec_info" struct attached to each loop.

   The loop transformation phase scans all the stmts in the loop, and
   creates a vector stmt (or a sequence of stmts) for each scalar stmt S in
   the loop that needs to be vectorized.  It inserts the vector code sequence
   just before the scalar stmt S, and records a pointer to the vector code
   in STMT_VINFO_VEC_STMT (stmt_info) (stmt_info is the stmt_vec_info struct
   attached to S).  This pointer will be used for the vectorization of
   following stmts which use the def of stmt S.  Stmt S is removed if it
   writes to memory; otherwise, we rely on dead code elimination for
   removing it.

   For example, say stmt S1 was vectorized into stmt VS1:

   VS1: vb = px[i];
   S1:  b = x[i];       STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
   S2:  a = b;

   To vectorize stmt S2, the vectorizer first finds the stmt that defines
   the operand 'b' (S1), and gets the relevant vector def 'vb' from the
   vector stmt VS1 pointed to by STMT_VINFO_VEC_STMT (stmt_info (S1)).  The
   resulting sequence would be:

   VS1: vb = px[i];
   S1:  b = x[i];       STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
   VS2: va = vb;
   S2:  a = b;          STMT_VINFO_VEC_STMT (stmt_info (S2)) = VS2

   Operands that are not SSA_NAMEs are data-refs that appear in
   load/store operations (like 'x[i]' in S1), and are handled differently.

   Currently the only target specific information that is used is the
   size of the vector (in bytes) - "UNITS_PER_SIMD_WORD".  Targets that can
   support different sizes of vectors, for now will need to specify one value
   for "UNITS_PER_SIMD_WORD".  More flexibility will be added in the future.
   Since we only vectorize operations whose vector form can be
   expressed using existing tree codes, to verify that an operation is
   supported, the vectorizer checks the relevant optab at the relevant
   machine_mode (e.g, add_optab->handlers[(int) V8HImode].insn_code).  If
   the value found is CODE_FOR_nothing, then there's no target support, and
   we can't vectorize the stmt.
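   A rough sketch of what such a check amounts to (illustrative only; the
   variable 'vectype' and the choice of PLUS_EXPR are assumptions made for
   the example, not code taken from this file):

        optab op = optab_for_tree_code (PLUS_EXPR, vectype);
        if (!op
            || op->handlers[(int) TYPE_MODE (vectype)].insn_code
               == CODE_FOR_nothing)
          return false;   /* No target support - don't vectorize the stmt.  */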
   For additional information on this project see:
   http://gcc.gnu.org/projects/tree-ssa/vectorization.html
*/
125 #include "coretypes.h"
131 #include "basic-block.h"
132 #include "diagnostic.h"
133 #include "tree-flow.h"
134 #include "tree-dump.h"
137 #include "cfglayout.h"
143 #include "tree-chrec.h"
144 #include "tree-data-ref.h"
145 #include "tree-scalar-evolution.h"
147 #include "tree-vectorizer.h"
148 #include "tree-pass.h"
/*************************************************************************
  Simple Loop Peeling Utilities
 *************************************************************************/
static void slpeel_update_phis_for_duplicate_loop
  (struct loop *, struct loop *, bool after);
static void slpeel_update_phi_nodes_for_guard1
  (edge, struct loop *, bool, basic_block *, bitmap *);
static void slpeel_update_phi_nodes_for_guard2
  (edge, struct loop *, bool, basic_block *);
static edge slpeel_add_loop_guard (basic_block, tree, basic_block, basic_block);
static void rename_use_op (use_operand_p);
static void rename_variables_in_bb (basic_block);
static void rename_variables_in_loop (struct loop *);
/*************************************************************************
  General Vectorization Utilities
 *************************************************************************/
static void vect_set_dump_settings (void);

/* vect_dump will be set to stderr or dump_file if exist.  */
FILE *vect_dump;

/* vect_verbosity_level set to an invalid value
   to mark that it's uninitialized.  */
enum verbosity_levels vect_verbosity_level = MAX_VERBOSITY_LEVEL;

static LOC vect_loop_location;

/* Bitmap of virtual variables to be renamed.  */
bitmap vect_memsyms_to_rename;
/*************************************************************************
  Simple Loop Peeling Utilities

  Utilities to support loop peeling for vectorization purposes.
 *************************************************************************/
/* Renames the use *OP_P.  */

static void
rename_use_op (use_operand_p op_p)
{
  tree new_name;

  if (TREE_CODE (USE_FROM_PTR (op_p)) != SSA_NAME)
    return;

  new_name = get_current_def (USE_FROM_PTR (op_p));

  /* Something defined outside of the loop.  */
  if (!new_name)
    return;

  /* An ordinary ssa name defined in the loop.  */
  SET_USE (op_p, new_name);
}
/* Renames the variables in basic block BB.  */

static void
rename_variables_in_bb (basic_block bb)
{
  tree phi;
  block_stmt_iterator bsi;
  tree stmt;
  use_operand_p use_p;
  ssa_op_iter iter;
  edge e;
  edge_iterator ei;
  struct loop *loop = bb->loop_father;

  for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi))
    {
      stmt = bsi_stmt (bsi);
      FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_ALL_USES)
        rename_use_op (use_p);
    }

  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      if (!flow_bb_inside_loop_p (loop, e->dest))
        continue;
      for (phi = phi_nodes (e->dest); phi; phi = PHI_CHAIN (phi))
        rename_use_op (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e));
    }
}
/* Renames variables in new generated LOOP.  */

static void
rename_variables_in_loop (struct loop *loop)
{
  unsigned i;
  basic_block *bbs;

  bbs = get_loop_body (loop);

  for (i = 0; i < loop->num_nodes; i++)
    rename_variables_in_bb (bbs[i]);

  free (bbs);
}
/* Update the PHI nodes of NEW_LOOP.

   NEW_LOOP is a duplicate of ORIG_LOOP.
   AFTER indicates whether NEW_LOOP executes before or after ORIG_LOOP:
   AFTER is true if NEW_LOOP executes after ORIG_LOOP, and false if it
   executes before it.  */
static void
slpeel_update_phis_for_duplicate_loop (struct loop *orig_loop,
                                       struct loop *new_loop, bool after)
{
  tree new_ssa_name;
  tree phi_new, phi_orig;
  tree def;
  edge orig_loop_latch = loop_latch_edge (orig_loop);
  edge orig_entry_e = loop_preheader_edge (orig_loop);
  edge new_loop_exit_e = single_exit (new_loop);
  edge new_loop_entry_e = loop_preheader_edge (new_loop);
  edge entry_arg_e = (after ? orig_loop_latch : orig_entry_e);

  /*
     step 1. For each loop-header-phi:
             Add the first phi argument for the phi in NEW_LOOP
             (the one associated with the entry of NEW_LOOP)

     step 2. For each loop-header-phi:
             Add the second phi argument for the phi in NEW_LOOP
             (the one associated with the latch of NEW_LOOP)

     step 3. Update the phis in the successor block of NEW_LOOP.

        case 1: NEW_LOOP was placed before ORIG_LOOP:
                The successor block of NEW_LOOP is the header of ORIG_LOOP.
                Updating the phis in the successor block can therefore be done
                along with the scanning of the loop header phis, because the
                header blocks of ORIG_LOOP and NEW_LOOP have exactly the same
                phi nodes, organized in the same order.

        case 2: NEW_LOOP was placed after ORIG_LOOP:
                The successor block of NEW_LOOP is the original exit block of
                ORIG_LOOP - the phis to be updated are the loop-closed-ssa phis.
                We postpone updating these phis to a later stage (when
                loop guards are added).
   */

  /* Scan the phis in the headers of the old and new loops
     (they are organized in exactly the same order).  */

  for (phi_new = phi_nodes (new_loop->header),
       phi_orig = phi_nodes (orig_loop->header);
       phi_new && phi_orig;
       phi_new = PHI_CHAIN (phi_new), phi_orig = PHI_CHAIN (phi_orig))
    {
      /* step 1.  */
      def = PHI_ARG_DEF_FROM_EDGE (phi_orig, entry_arg_e);
      add_phi_arg (phi_new, def, new_loop_entry_e);

      /* step 2.  */
      def = PHI_ARG_DEF_FROM_EDGE (phi_orig, orig_loop_latch);
      if (TREE_CODE (def) != SSA_NAME)
        continue;

      new_ssa_name = get_current_def (def);
      if (!new_ssa_name)
        {
          /* This only happens if there are no definitions
             inside the loop.  Use the phi_result in this case.  */
          new_ssa_name = PHI_RESULT (phi_new);
        }

      /* An ordinary ssa name defined in the loop.  */
      add_phi_arg (phi_new, new_ssa_name, loop_latch_edge (new_loop));

      /* step 3 (case 1).  */
      if (!after)
        {
          gcc_assert (new_loop_exit_e == orig_entry_e);
          SET_PHI_ARG_DEF (phi_orig,
                           new_loop_exit_e->dest_idx,
                           new_ssa_name);
        }
    }
}
/* Update PHI nodes for a guard of the LOOP.

   - LOOP, GUARD_EDGE: LOOP is a loop for which we added guard code that
     controls whether LOOP is to be executed.  GUARD_EDGE is the edge that
     originates from the guard-bb, skips LOOP and reaches the (unique) exit
     bb of LOOP.  This loop-exit-bb is an empty bb with one successor.
     We denote this bb NEW_MERGE_BB because before the guard code was added
     it had a single predecessor (the LOOP header), and now it became a merge
     point of two paths - the path that ends with the LOOP exit-edge, and
     the path that ends with GUARD_EDGE.
   - NEW_EXIT_BB: New basic block that is added by this function between LOOP
     and NEW_MERGE_BB.  It is used to place loop-closed-ssa-form exit-phis.

   ===> The CFG before the guard-code was added:

        if (exit_loop) goto update_bb
        else           goto LOOP_header_bb

   ==> The CFG after the guard-code was added:

        if (LOOP_guard_condition) goto new_merge_bb
        else                      goto LOOP_header_bb

        if (exit_loop_condition) goto new_merge_bb
        else                     goto LOOP_header_bb

   ==> The CFG after this function:

        if (LOOP_guard_condition) goto new_merge_bb
        else                      goto LOOP_header_bb

        if (exit_loop_condition) goto new_exit_bb
        else                     goto LOOP_header_bb

   This function:
   1. creates and updates the relevant phi nodes to account for the new
      incoming edge (GUARD_EDGE) into NEW_MERGE_BB.  This involves:
      1.1. Create phi nodes at NEW_MERGE_BB.
      1.2. Update the phi nodes at the successor of NEW_MERGE_BB (denoted
           UPDATE_BB).  UPDATE_BB was the exit-bb of LOOP before NEW_MERGE_BB
           was added.
   2. preserves loop-closed-ssa-form by creating the required phi nodes
      at the exit of LOOP (i.e, in NEW_EXIT_BB).

   There are two flavors to this function:

   slpeel_update_phi_nodes_for_guard1:
     Here the guard controls whether we enter or skip LOOP, where LOOP is a
     prolog_loop (loop1 below), and the new phis created in NEW_MERGE_BB are
     for variables that have phis in the loop header.

   slpeel_update_phi_nodes_for_guard2:
     Here the guard controls whether we enter or skip LOOP, where LOOP is an
     epilog_loop (loop2 below), and the new phis created in NEW_MERGE_BB are
     for variables that have phis in the loop exit.

   I.E., the overall structure is:

        guard1 (goto loop1/merge1_bb)
        guard2 (goto merge1_bb/merge2_bb)

   slpeel_update_phi_nodes_for_guard1 takes care of creating phis in
   loop1_exit_bb and merge1_bb.  These are entry phis (phis for the vars
   that have phis in loop1->header).

   slpeel_update_phi_nodes_for_guard2 takes care of creating phis in
   loop2_exit_bb and merge2_bb.  These are exit phis (phis for the vars
   that have phis in next_bb).  It also adds some of these phis to
   loop1_exit_bb.

   slpeel_update_phi_nodes_for_guard1 is always called before
   slpeel_update_phi_nodes_for_guard2.  They are both needed in order
   to create correct data-flow and loop-closed-ssa-form.

   Generally slpeel_update_phi_nodes_for_guard1 creates phis for variables
   that change between iterations of a loop (and therefore have a phi-node
   at the loop entry), whereas slpeel_update_phi_nodes_for_guard2 creates
   phis for variables that are used out of the loop (and therefore have
   loop-closed exit phis).  Some variables may be both updated between
   iterations and used after the loop.  This is why in loop1_exit_bb we
   may need both entry_phis (created by slpeel_update_phi_nodes_for_guard1)
   and exit phis (created by slpeel_update_phi_nodes_for_guard2).

   - IS_NEW_LOOP: if IS_NEW_LOOP is true, then LOOP is a newly created copy of
     an original loop, i.e., we have:

        guard_bb (goto LOOP/new_merge)

     If IS_NEW_LOOP is false, then LOOP is an original loop, in which case
     we have:

        guard_bb (goto LOOP/new_merge)

     The SSA names defined in the original loop have a current
     reaching definition that records the corresponding new
     ssa-name used in the new duplicated loop copy.  */
/* Function slpeel_update_phi_nodes_for_guard1

   - GUARD_EDGE, LOOP, IS_NEW_LOOP, NEW_EXIT_BB - as explained above.
   - DEFS - a bitmap of ssa names to mark new names for which we recorded
     information.

   In the context of the overall structure, we have:

        guard1 (goto loop1/merge1_bb)
        guard2 (goto merge1_bb/merge2_bb)

   For each name updated between loop iterations (i.e - for each name that has
   an entry (loop-header) phi in LOOP) we create a new phi in:
   1. merge1_bb (to account for the edge from guard1)
   2. loop1_exit_bb (an exit-phi to keep LOOP in loop-closed form)  */
static void
slpeel_update_phi_nodes_for_guard1 (edge guard_edge, struct loop *loop,
                                    bool is_new_loop, basic_block *new_exit_bb,
                                    bitmap *defs)
{
  tree orig_phi, new_phi;
  tree update_phi, update_phi2;
  tree guard_arg, loop_arg;
  basic_block new_merge_bb = guard_edge->dest;
  edge e = EDGE_SUCC (new_merge_bb, 0);
  basic_block update_bb = e->dest;
  basic_block orig_bb = loop->header;
  edge new_exit_e;
  tree current_new_name;
  tree name;

  /* Create new bb between loop and new_merge_bb.  */
  *new_exit_bb = split_edge (single_exit (loop));

  new_exit_e = EDGE_SUCC (*new_exit_bb, 0);

  for (orig_phi = phi_nodes (orig_bb), update_phi = phi_nodes (update_bb);
       orig_phi && update_phi;
       orig_phi = PHI_CHAIN (orig_phi), update_phi = PHI_CHAIN (update_phi))
    {
      /* Virtual phi; Mark it for renaming.  We actually want to call
         mark_sym_for_renaming, but since all ssa renaming datastructures
         are going to be freed before we get to call ssa_update, we just
         record this name for now in a bitmap, and will mark it for
         renaming later.  */
      name = PHI_RESULT (orig_phi);
      if (!is_gimple_reg (SSA_NAME_VAR (name)))
        bitmap_set_bit (vect_memsyms_to_rename, DECL_UID (SSA_NAME_VAR (name)));

      /** 1. Handle new-merge-point phis  **/

      /* 1.1. Generate new phi node in NEW_MERGE_BB:  */
      new_phi = create_phi_node (SSA_NAME_VAR (PHI_RESULT (orig_phi)),
                                 new_merge_bb);

      /* 1.2. NEW_MERGE_BB has two incoming edges: GUARD_EDGE and the exit-edge
         of LOOP.  Set the two phi args in NEW_PHI for these edges:  */
      loop_arg = PHI_ARG_DEF_FROM_EDGE (orig_phi, EDGE_SUCC (loop->latch, 0));
      guard_arg = PHI_ARG_DEF_FROM_EDGE (orig_phi, loop_preheader_edge (loop));

      add_phi_arg (new_phi, loop_arg, new_exit_e);
      add_phi_arg (new_phi, guard_arg, guard_edge);

      /* 1.3. Update phi in successor block.  */
      gcc_assert (PHI_ARG_DEF_FROM_EDGE (update_phi, e) == loop_arg
                  || PHI_ARG_DEF_FROM_EDGE (update_phi, e) == guard_arg);
      SET_PHI_ARG_DEF (update_phi, e->dest_idx, PHI_RESULT (new_phi));
      update_phi2 = new_phi;

      /** 2. Handle loop-closed-ssa-form phis  **/

      if (!is_gimple_reg (PHI_RESULT (orig_phi)))
        continue;

      /* 2.1. Generate new phi node in NEW_EXIT_BB:  */
      new_phi = create_phi_node (SSA_NAME_VAR (PHI_RESULT (orig_phi)),
                                 *new_exit_bb);

      /* 2.2. NEW_EXIT_BB has one incoming edge: the exit-edge of the loop.  */
      add_phi_arg (new_phi, loop_arg, single_exit (loop));

      /* 2.3. Update phi in successor of NEW_EXIT_BB:  */
      gcc_assert (PHI_ARG_DEF_FROM_EDGE (update_phi2, new_exit_e) == loop_arg);
      SET_PHI_ARG_DEF (update_phi2, new_exit_e->dest_idx, PHI_RESULT (new_phi));

      /* 2.4. Record the newly created name with set_current_def.
         We want to find a name such that
                name = get_current_def (orig_loop_name)
         and to set its current definition as follows:
                set_current_def (name, new_phi_name)

         If LOOP is a new loop then loop_arg is already the name we're
         looking for.  If LOOP is the original loop, then loop_arg is
         the orig_loop_name and the relevant name is recorded in its
         current reaching definition.  */
      if (is_new_loop)
        current_new_name = loop_arg;
      else
        {
          current_new_name = get_current_def (loop_arg);
          /* current_def is not available only if the variable does not
             change inside the loop, in which case we also don't care
             about recording a current_def for it because we won't be
             trying to create loop-exit-phis for it.  */
          if (!current_new_name)
            continue;
        }
      gcc_assert (get_current_def (current_new_name) == NULL_TREE);

      set_current_def (current_new_name, PHI_RESULT (new_phi));
      bitmap_set_bit (*defs, SSA_NAME_VERSION (current_new_name));
    }

  set_phi_nodes (new_merge_bb, phi_reverse (phi_nodes (new_merge_bb)));
}
/* Function slpeel_update_phi_nodes_for_guard2

   - GUARD_EDGE, LOOP, IS_NEW_LOOP, NEW_EXIT_BB - as explained above.

   In the context of the overall structure, we have:

        guard1 (goto loop1/merge1_bb)
        guard2 (goto merge1_bb/merge2_bb)

   For each name used outside the loop (i.e - for each name that has an exit
   phi in next_bb) we create a new phi in:
   1. merge2_bb (to account for the edge from guard_bb)
   2. loop2_exit_bb (an exit-phi to keep LOOP in loop-closed form)
   3. guard2 bb (an exit phi to keep the preceding loop in loop-closed form),
      if needed (if it wasn't handled by slpeel_update_phi_nodes_for_guard1).  */
static void
slpeel_update_phi_nodes_for_guard2 (edge guard_edge, struct loop *loop,
                                    bool is_new_loop, basic_block *new_exit_bb)
{
  tree orig_phi, new_phi;
  tree update_phi, update_phi2;
  tree guard_arg, loop_arg;
  basic_block new_merge_bb = guard_edge->dest;
  edge e = EDGE_SUCC (new_merge_bb, 0);
  basic_block update_bb = e->dest;
  edge new_exit_e;
  tree orig_def, orig_def_new_name;
  tree new_name, new_name2;
  tree arg;

  /* Create new bb between loop and new_merge_bb.  */
  *new_exit_bb = split_edge (single_exit (loop));

  new_exit_e = EDGE_SUCC (*new_exit_bb, 0);

  for (update_phi = phi_nodes (update_bb); update_phi;
       update_phi = PHI_CHAIN (update_phi))
    {
      orig_phi = update_phi;
      orig_def = PHI_ARG_DEF_FROM_EDGE (orig_phi, e);
      /* This loop-closed-phi actually doesn't represent a use
         out of the loop - the phi arg is a constant.  */
      if (TREE_CODE (orig_def) != SSA_NAME)
        continue;
      orig_def_new_name = get_current_def (orig_def);
      arg = NULL_TREE;

      /** 1. Handle new-merge-point phis  **/

      /* 1.1. Generate new phi node in NEW_MERGE_BB:  */
      new_phi = create_phi_node (SSA_NAME_VAR (PHI_RESULT (orig_phi)),
                                 new_merge_bb);

      /* 1.2. NEW_MERGE_BB has two incoming edges: GUARD_EDGE and the exit-edge
         of LOOP.  Set the two PHI args in NEW_PHI for these edges:  */
      new_name = orig_def;
      new_name2 = NULL_TREE;
      if (orig_def_new_name)
        {
          new_name = orig_def_new_name;
          /* Some variables have both loop-entry-phis and loop-exit-phis.
             Such variables were given yet newer names by phis placed in
             guard_bb by slpeel_update_phi_nodes_for_guard1.  I.e:
             new_name2 = get_current_def (get_current_def (orig_name)).  */
          new_name2 = get_current_def (new_name);
        }

      if (is_new_loop)
        {
          guard_arg = orig_def;
          loop_arg = new_name;
        }
      else
        {
          guard_arg = new_name;
          loop_arg = orig_def;
        }
      if (new_name2)
        guard_arg = new_name2;

      add_phi_arg (new_phi, loop_arg, new_exit_e);
      add_phi_arg (new_phi, guard_arg, guard_edge);

      /* 1.3. Update phi in successor block.  */
      gcc_assert (PHI_ARG_DEF_FROM_EDGE (update_phi, e) == orig_def);
      SET_PHI_ARG_DEF (update_phi, e->dest_idx, PHI_RESULT (new_phi));
      update_phi2 = new_phi;

      /** 2. Handle loop-closed-ssa-form phis  **/

      /* 2.1. Generate new phi node in NEW_EXIT_BB:  */
      new_phi = create_phi_node (SSA_NAME_VAR (PHI_RESULT (orig_phi)),
                                 *new_exit_bb);

      /* 2.2. NEW_EXIT_BB has one incoming edge: the exit-edge of the loop.  */
      add_phi_arg (new_phi, loop_arg, single_exit (loop));

      /* 2.3. Update phi in successor of NEW_EXIT_BB:  */
      gcc_assert (PHI_ARG_DEF_FROM_EDGE (update_phi2, new_exit_e) == loop_arg);
      SET_PHI_ARG_DEF (update_phi2, new_exit_e->dest_idx, PHI_RESULT (new_phi));

      /** 3. Handle loop-closed-ssa-form phis for first loop  **/

      /* 3.1. Find the relevant names that need an exit-phi in
         GUARD_BB, i.e. names for which
         slpeel_update_phi_nodes_for_guard1 had not already created a
         phi node.  This is the case for names that are used outside
         the loop (and therefore need an exit phi) but are not updated
         across loop iterations (and therefore don't have a
         loop-header-phi).

         slpeel_update_phi_nodes_for_guard1 is responsible for
         creating loop-exit phis in GUARD_BB for names that have a
         loop-header-phi.  When such a phi is created we also record
         the new name in its current definition.  If this new name
         exists, then guard_arg was set to this new name (see 1.2
         above).  Therefore, if guard_arg is not this new name, this
         is an indication that an exit-phi in GUARD_BB was not yet
         created, so we take care of it here.  */
      if (guard_arg == new_name2)
        continue;
      arg = guard_arg;

      /* 3.2. Generate new phi node in GUARD_BB:  */
      new_phi = create_phi_node (SSA_NAME_VAR (PHI_RESULT (orig_phi)),
                                 guard_edge->src);

      /* 3.3. GUARD_BB has one incoming edge:  */
      gcc_assert (EDGE_COUNT (guard_edge->src->preds) == 1);
      add_phi_arg (new_phi, arg, EDGE_PRED (guard_edge->src, 0));

      /* 3.4. Update phi in successor of GUARD_BB:  */
      gcc_assert (PHI_ARG_DEF_FROM_EDGE (update_phi2, guard_edge)
                  == guard_arg);
      SET_PHI_ARG_DEF (update_phi2, guard_edge->dest_idx, PHI_RESULT (new_phi));
    }

  set_phi_nodes (new_merge_bb, phi_reverse (phi_nodes (new_merge_bb)));
}
/* Make the LOOP iterate NITERS times.  This is done by adding a new IV
   that starts at zero, increases by one and its limit is NITERS.

   Assumption: the exit-condition of LOOP is the last stmt in the loop.  */
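/* Illustrative sketch only (not part of the implementation): after this
   transformation, a loop whose exit used to test some arbitrary condition
   effectively has the shape

        indx = 0;
      loop:
        ...original loop body...
        indx = indx + 1;
        if (indx < NITERS) goto loop; else goto exit;

   where the newly created IV and the rewritten COND_EXPR replace the
   original exit test.  */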
void
slpeel_make_loop_iterate_ntimes (struct loop *loop, tree niters)
{
  tree indx_before_incr, indx_after_incr, cond_stmt, cond;
  tree orig_cond;
  edge exit_edge = single_exit (loop);
  block_stmt_iterator loop_cond_bsi;
  block_stmt_iterator incr_bsi;
  bool insert_after;
  tree begin_label = tree_block_label (loop->latch);
  tree exit_label = tree_block_label (single_exit (loop)->dest);
  tree init = build_int_cst (TREE_TYPE (niters), 0);
  tree step = build_int_cst (TREE_TYPE (niters), 1);
  tree then_label;
  tree else_label;
  LOC loop_loc;

  orig_cond = get_loop_exit_condition (loop);
  gcc_assert (orig_cond);
  loop_cond_bsi = bsi_for_stmt (orig_cond);

  standard_iv_increment_position (loop, &incr_bsi, &insert_after);
  create_iv (init, step, NULL_TREE, loop,
             &incr_bsi, insert_after, &indx_before_incr, &indx_after_incr);

  if (exit_edge->flags & EDGE_TRUE_VALUE) /* 'then' edge exits the loop.  */
    {
      cond = build2 (GE_EXPR, boolean_type_node, indx_after_incr, niters);
      then_label = build1 (GOTO_EXPR, void_type_node, exit_label);
      else_label = build1 (GOTO_EXPR, void_type_node, begin_label);
    }
  else /* 'then' edge loops back.  */
    {
      cond = build2 (LT_EXPR, boolean_type_node, indx_after_incr, niters);
      then_label = build1 (GOTO_EXPR, void_type_node, begin_label);
      else_label = build1 (GOTO_EXPR, void_type_node, exit_label);
    }

  cond_stmt = build3 (COND_EXPR, TREE_TYPE (orig_cond), cond,
                      then_label, else_label);
  bsi_insert_before (&loop_cond_bsi, cond_stmt, BSI_SAME_STMT);

  /* Remove old loop exit test:  */
  bsi_remove (&loop_cond_bsi, true);

  loop_loc = find_loop_location (loop);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      if (loop_loc != UNKNOWN_LOC)
        fprintf (dump_file, "\nloop at %s:%d: ",
                 LOC_FILE (loop_loc), LOC_LINE (loop_loc));
      print_generic_expr (dump_file, cond_stmt, TDF_SLIM);
    }

  loop->nb_iterations = niters;
}
/* Given LOOP this function generates a new copy of it and puts it
   on E which is either the entry or exit of LOOP.  */
static struct loop *
slpeel_tree_duplicate_loop_to_edge_cfg (struct loop *loop, edge e)
{
  struct loop *new_loop;
  basic_block *new_bbs, *bbs;
  bool at_exit;
  bool was_imm_dom;
  basic_block exit_dest;
  tree phi, phi_arg;
  edge exit, new_exit;

  at_exit = (e == single_exit (loop));
  if (!at_exit && e != loop_preheader_edge (loop))
    return NULL;

  bbs = get_loop_body (loop);

  /* Check whether duplication is possible.  */
  if (!can_copy_bbs_p (bbs, loop->num_nodes))
    {
      free (bbs);
      return NULL;
    }

  /* Generate new loop structure.  */
  new_loop = duplicate_loop (loop, loop->outer);
  if (!new_loop)
    {
      free (bbs);
      return NULL;
    }

  exit_dest = single_exit (loop)->dest;
  was_imm_dom = (get_immediate_dominator (CDI_DOMINATORS,
                                          exit_dest) == loop->header ?
                 true : false);

  new_bbs = XNEWVEC (basic_block, loop->num_nodes);

  exit = single_exit (loop);
  copy_bbs (bbs, loop->num_nodes, new_bbs,
            &exit, 1, &new_exit, NULL,
            e->src);

  /* Duplicating phi args at exit bbs as coming
     also from exit of duplicated loop.  */
  for (phi = phi_nodes (exit_dest); phi; phi = PHI_CHAIN (phi))
    {
      phi_arg = PHI_ARG_DEF_FROM_EDGE (phi, single_exit (loop));
      if (phi_arg)
        {
          edge new_loop_exit_edge;

          if (EDGE_SUCC (new_loop->header, 0)->dest == new_loop->latch)
            new_loop_exit_edge = EDGE_SUCC (new_loop->header, 1);
          else
            new_loop_exit_edge = EDGE_SUCC (new_loop->header, 0);

          add_phi_arg (phi, phi_arg, new_loop_exit_edge);
        }
    }

  if (at_exit) /* Add the loop copy at exit.  */
    {
      redirect_edge_and_branch_force (e, new_loop->header);
      set_immediate_dominator (CDI_DOMINATORS, new_loop->header, e->src);
      if (was_imm_dom)
        set_immediate_dominator (CDI_DOMINATORS, exit_dest, new_loop->header);
    }
  else /* Add the copy at entry.  */
    {
      edge new_exit_e;
      edge entry_e = loop_preheader_edge (loop);
      basic_block preheader = entry_e->src;

      if (!flow_bb_inside_loop_p (new_loop,
                                  EDGE_SUCC (new_loop->header, 0)->dest))
        new_exit_e = EDGE_SUCC (new_loop->header, 0);
      else
        new_exit_e = EDGE_SUCC (new_loop->header, 1);

      redirect_edge_and_branch_force (new_exit_e, loop->header);
      set_immediate_dominator (CDI_DOMINATORS, loop->header,
                               new_exit_e->src);

      /* We have to add phi args to the loop->header here as coming
         from new_exit_e edge.  */
      for (phi = phi_nodes (loop->header); phi; phi = PHI_CHAIN (phi))
        {
          phi_arg = PHI_ARG_DEF_FROM_EDGE (phi, entry_e);
          if (phi_arg)
            add_phi_arg (phi, phi_arg, new_exit_e);
        }

      redirect_edge_and_branch_force (entry_e, new_loop->header);
      set_immediate_dominator (CDI_DOMINATORS, new_loop->header, preheader);
    }

  free (new_bbs);
  free (bbs);

  return new_loop;
}
/* Given the condition statement COND, put it as the last statement
   of GUARD_BB; EXIT_BB is the basic block to skip the loop;
   Assumes that this is the single exit of the guarded loop.
   Returns the skip edge.  */
static edge
slpeel_add_loop_guard (basic_block guard_bb, tree cond, basic_block exit_bb,
                       basic_block dom_bb)
{
  block_stmt_iterator bsi;
  edge new_e, enter_e;
  tree cond_stmt, then_label, else_label;

  enter_e = EDGE_SUCC (guard_bb, 0);
  enter_e->flags &= ~EDGE_FALLTHRU;
  enter_e->flags |= EDGE_FALSE_VALUE;
  bsi = bsi_last (guard_bb);

  then_label = build1 (GOTO_EXPR, void_type_node,
                       tree_block_label (exit_bb));
  else_label = build1 (GOTO_EXPR, void_type_node,
                       tree_block_label (enter_e->dest));
  cond_stmt = build3 (COND_EXPR, void_type_node, cond,
                      then_label, else_label);
  bsi_insert_after (&bsi, cond_stmt, BSI_NEW_STMT);
  /* Add new edge to connect guard block to the merge/loop-exit block.  */
  new_e = make_edge (guard_bb, exit_bb, EDGE_TRUE_VALUE);
  set_immediate_dominator (CDI_DOMINATORS, exit_bb, dom_bb);
  return new_e;
}
/* This function verifies that the following restrictions apply to LOOP:
   (2) it consists of exactly 2 basic blocks - header, and an empty latch.
   (3) it is single entry, single exit
   (4) its exit condition is the last stmt in the header
   (5) E is the entry/exit edge of LOOP.  */
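/* As an illustration (hypothetical source loop, for exposition only):
   a loop such as

        while (i < n) { a[i] = b[i] + c[i]; i++; }

   that has been lowered so that its body and exit COND_EXPR live in the
   header block, with an empty latch, satisfies restrictions (2)-(4) above.  */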
bool
slpeel_can_duplicate_loop_p (struct loop *loop, edge e)
{
  edge exit_e = single_exit (loop);
  edge entry_e = loop_preheader_edge (loop);
  tree orig_cond = get_loop_exit_condition (loop);
  block_stmt_iterator loop_exit_bsi = bsi_last (exit_e->src);

  if (need_ssa_update_p ())
    return false;

  if (loop->inner
      /* All loops have an outer scope; the only case loop->outer is NULL is for
         the function itself.  */
      || !loop->outer
      || loop->num_nodes != 2
      || !empty_block_p (loop->latch)
      || !single_exit (loop)
      /* Verify that new loop exit condition can be trivially modified.  */
      || (!orig_cond || orig_cond != bsi_stmt (loop_exit_bsi))
      || (e != exit_e && e != entry_e))
    return false;

  return true;
}
#ifdef ENABLE_CHECKING
void
slpeel_verify_cfg_after_peeling (struct loop *first_loop,
                                 struct loop *second_loop)
{
  basic_block loop1_exit_bb = single_exit (first_loop)->dest;
  basic_block loop2_entry_bb = loop_preheader_edge (second_loop)->src;
  basic_block loop1_entry_bb = loop_preheader_edge (first_loop)->src;

  /* A guard that controls whether the second_loop is to be executed or
     skipped is placed in first_loop->exit.  first_loop->exit therefore has
     two successors - one is the preheader of second_loop, and the other is
     a bb after second_loop.  */
  gcc_assert (EDGE_COUNT (loop1_exit_bb->succs) == 2);

  /* 1. Verify that one of the successors of first_loop->exit is the
        preheader of second_loop.  */

  /* The preheader of new_loop is expected to have two predecessors:
     first_loop->exit and the block that precedes first_loop.  */
  gcc_assert (EDGE_COUNT (loop2_entry_bb->preds) == 2
              && ((EDGE_PRED (loop2_entry_bb, 0)->src == loop1_exit_bb
                   && EDGE_PRED (loop2_entry_bb, 1)->src == loop1_entry_bb)
                  || (EDGE_PRED (loop2_entry_bb, 1)->src == loop1_exit_bb
                      && EDGE_PRED (loop2_entry_bb, 0)->src == loop1_entry_bb)));

  /* Verify that the other successor of first_loop->exit is after the
     second_loop.  */
}
#endif
/* Function slpeel_tree_peel_loop_to_edge.

   Peel the first (last) iterations of LOOP into a new prolog (epilog) loop
   that is placed on the entry (exit) edge E of LOOP.  After this
   transformation we have two loops one after the other - first-loop iterates
   FIRST_NITERS times, and second-loop iterates the remainder
   NITERS - FIRST_NITERS times.

   - LOOP: the loop to be peeled.
   - E: the exit or entry edge of LOOP.
        If it is the entry edge, we peel the first iterations of LOOP.  In this
        case first-loop is LOOP, and second-loop is the newly created loop.
        If it is the exit edge, we peel the last iterations of LOOP.  In this
        case, first-loop is the newly created loop, and second-loop is LOOP.
   - NITERS: the number of iterations that LOOP iterates.
   - FIRST_NITERS: the number of iterations that the first-loop should iterate.
   - UPDATE_FIRST_LOOP_COUNT: specifies whether this function is responsible
        for updating the loop bound of the first-loop to FIRST_NITERS.  If it
        is false, the caller of this function may want to take care of this
        (this can be useful if we don't want new stmts added to first-loop).

   The function returns a pointer to the new loop-copy, or NULL if it failed
   to perform the transformation.

   The function generates two if-then-else guards: one before the first loop,
   and the other before the second loop:
   The first guard is:
     if (FIRST_NITERS == 0) then skip the first loop,
     and go directly to the second loop.
   The second guard is:
     if (FIRST_NITERS == NITERS) then skip the second loop.

   FORNOW only simple loops are supported (see slpeel_can_duplicate_loop_p).
   FORNOW the resulting code will not be in loop-closed-ssa form.  */
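/* For illustration (the numbers are hypothetical): when peeling a prolog
   for alignment with NITERS = 100 and FIRST_NITERS = 3, the first loop
   executes iterations 0..2, the second loop executes the remaining 97
   iterations, the first guard is taken only if FIRST_NITERS == 0, and the
   second guard is taken only if FIRST_NITERS == NITERS.  */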
1065 slpeel_tree_peel_loop_to_edge (struct loop
*loop
,
1066 edge e
, tree first_niters
,
1067 tree niters
, bool update_first_loop_count
,
1070 struct loop
*new_loop
= NULL
, *first_loop
, *second_loop
;
1074 basic_block bb_before_second_loop
, bb_after_second_loop
;
1075 basic_block bb_before_first_loop
;
1076 basic_block bb_between_loops
;
1077 basic_block new_exit_bb
;
1078 edge exit_e
= single_exit (loop
);
1081 if (!slpeel_can_duplicate_loop_p (loop
, e
))
1084 /* We have to initialize cfg_hooks. Then, when calling
1085 cfg_hooks->split_edge, the function tree_split_edge
1086 is actually called and, when calling cfg_hooks->duplicate_block,
1087 the function tree_duplicate_bb is called. */
1088 tree_register_cfg_hooks ();
1091 /* 1. Generate a copy of LOOP and put it on E (E is the entry/exit of LOOP).
1092 Resulting CFG would be:
1105 if (!(new_loop
= slpeel_tree_duplicate_loop_to_edge_cfg (loop
, e
)))
1107 loop_loc
= find_loop_location (loop
);
1108 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1110 if (loop_loc
!= UNKNOWN_LOC
)
1111 fprintf (dump_file
, "\n%s:%d: note: ",
1112 LOC_FILE (loop_loc
), LOC_LINE (loop_loc
));
1113 fprintf (dump_file
, "tree_duplicate_loop_to_edge_cfg failed.\n");
1120 /* NEW_LOOP was placed after LOOP. */
1122 second_loop
= new_loop
;
1126 /* NEW_LOOP was placed before LOOP. */
1127 first_loop
= new_loop
;
1131 definitions
= ssa_names_to_replace ();
1132 slpeel_update_phis_for_duplicate_loop (loop
, new_loop
, e
== exit_e
);
1133 rename_variables_in_loop (new_loop
);
1136 /* 2. Add the guard that controls whether the first loop is executed.
1137 Resulting CFG would be:
1139 bb_before_first_loop:
1140 if (FIRST_NITERS == 0) GOTO bb_before_second_loop
1147 bb_before_second_loop:
1156 bb_before_first_loop
= split_edge (loop_preheader_edge (first_loop
));
1157 bb_before_second_loop
= split_edge (single_exit (first_loop
));
1160 fold_build2 (LE_EXPR
, boolean_type_node
, first_niters
,
1161 build_int_cst (TREE_TYPE (first_niters
), th
));
1163 skip_e
= slpeel_add_loop_guard (bb_before_first_loop
, pre_condition
,
1164 bb_before_second_loop
, bb_before_first_loop
);
1165 slpeel_update_phi_nodes_for_guard1 (skip_e
, first_loop
,
1166 first_loop
== new_loop
,
1167 &new_exit_bb
, &definitions
);
1170 /* 3. Add the guard that controls whether the second loop is executed.
1171 Resulting CFG would be:
1173 bb_before_first_loop:
1174 if (FIRST_NITERS == 0) GOTO bb_before_second_loop (skip first loop)
1182 if (FIRST_NITERS == NITERS) GOTO bb_after_second_loop (skip second loop)
1183 GOTO bb_before_second_loop
1185 bb_before_second_loop:
1191 bb_after_second_loop:
1196 bb_between_loops
= new_exit_bb
;
1197 bb_after_second_loop
= split_edge (single_exit (second_loop
));
1200 fold_build2 (EQ_EXPR
, boolean_type_node
, first_niters
, niters
);
1201 skip_e
= slpeel_add_loop_guard (bb_between_loops
, pre_condition
,
1202 bb_after_second_loop
, bb_before_first_loop
);
1203 slpeel_update_phi_nodes_for_guard2 (skip_e
, second_loop
,
1204 second_loop
== new_loop
, &new_exit_bb
);
1206 /* 4. Make first-loop iterate FIRST_NITERS times, if requested.
1208 if (update_first_loop_count
)
1209 slpeel_make_loop_iterate_ntimes (first_loop
, first_niters
);
1211 BITMAP_FREE (definitions
);
1212 delete_update_ssa ();
/* Function find_loop_location.

   Extract the location of the loop in the source code.
   If the loop is not well formed for vectorization, an estimated
   location is calculated.
   Return the loop location if it succeeds and UNKNOWN_LOC if not.  */
LOC
find_loop_location (struct loop *loop)
{
  tree node = NULL_TREE;
  basic_block bb;
  block_stmt_iterator si;

  if (!loop)
    return UNKNOWN_LOC;

  node = get_loop_exit_condition (loop);

  if (node && CAN_HAVE_LOCATION_P (node) && EXPR_HAS_LOCATION (node)
      && EXPR_FILENAME (node) && EXPR_LINENO (node))
    return EXPR_LOC (node);

  /* If we got here the loop is probably not "well formed",
     try to estimate the loop location.  */

  if (!loop->header)
    return UNKNOWN_LOC;

  bb = loop->header;

  for (si = bsi_start (bb); !bsi_end_p (si); bsi_next (&si))
    {
      node = bsi_stmt (si);
      if (node && CAN_HAVE_LOCATION_P (node) && EXPR_HAS_LOCATION (node))
        return EXPR_LOC (node);
    }

  return UNKNOWN_LOC;
}
/*************************************************************************
  Vectorization Debug Information.
 *************************************************************************/

/* Function vect_set_verbosity_level.

   Called from toplev.c upon detection of the
   -ftree-vectorizer-verbose=N option.  */
void
vect_set_verbosity_level (const char *val)
{
  unsigned int vl;

  vl = atoi (val);
  if (vl < MAX_VERBOSITY_LEVEL)
    vect_verbosity_level = vl;
  else
    vect_verbosity_level = MAX_VERBOSITY_LEVEL - 1;
}
/* Function vect_set_dump_settings.

   Fix the verbosity level of the vectorizer if the
   requested level was not set explicitly using the flag
   -ftree-vectorizer-verbose=N.
   Decide where to print the debugging information (dump_file/stderr).
   If the user defined the verbosity level, but there is no dump file,
   print to stderr, otherwise print to the dump file.  */
static void
vect_set_dump_settings (void)
{
  vect_dump = dump_file;

  /* Check if the verbosity level was defined by the user:  */
  if (vect_verbosity_level != MAX_VERBOSITY_LEVEL)
    {
      /* If there is no dump file, print to stderr.  */
      if (!dump_file)
        vect_dump = stderr;
      return;
    }

  /* User didn't specify verbosity level:  */
  if (dump_file && (dump_flags & TDF_DETAILS))
    vect_verbosity_level = REPORT_DETAILS;
  else if (dump_file && (dump_flags & TDF_STATS))
    vect_verbosity_level = REPORT_UNVECTORIZED_LOOPS;
  else
    vect_verbosity_level = REPORT_NONE;

  gcc_assert (dump_file || vect_verbosity_level == REPORT_NONE);
}
/* Function vect_print_dump_info.

   For vectorization debug dumps.  */

bool
vect_print_dump_info (enum verbosity_levels vl)
{
  if (vl > vect_verbosity_level)
    return false;

  if (!current_function_decl || !vect_dump)
    return false;

  if (vect_loop_location == UNKNOWN_LOC)
    fprintf (vect_dump, "\n%s:%d: note: ",
             DECL_SOURCE_FILE (current_function_decl),
             DECL_SOURCE_LINE (current_function_decl));
  else
    fprintf (vect_dump, "\n%s:%d: note: ",
             LOC_FILE (vect_loop_location), LOC_LINE (vect_loop_location));

  return true;
}
/*************************************************************************
  Vectorization Utilities.
 *************************************************************************/

/* Function new_stmt_vec_info.

   Create and initialize a new stmt_vec_info struct for STMT.  */
stmt_vec_info
new_stmt_vec_info (tree stmt, loop_vec_info loop_vinfo)
{
  stmt_vec_info res;

  res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));

  STMT_VINFO_TYPE (res) = undef_vec_info_type;
  STMT_VINFO_STMT (res) = stmt;
  STMT_VINFO_LOOP_VINFO (res) = loop_vinfo;
  STMT_VINFO_RELEVANT (res) = 0;
  STMT_VINFO_LIVE_P (res) = false;
  STMT_VINFO_VECTYPE (res) = NULL;
  STMT_VINFO_VEC_STMT (res) = NULL;
  STMT_VINFO_IN_PATTERN_P (res) = false;
  STMT_VINFO_RELATED_STMT (res) = NULL;
  STMT_VINFO_DATA_REF (res) = NULL;
  if (TREE_CODE (stmt) == PHI_NODE)
    STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
  else
    STMT_VINFO_DEF_TYPE (res) = vect_loop_def;
  STMT_VINFO_SAME_ALIGN_REFS (res) = VEC_alloc (dr_p, heap, 5);
  DR_GROUP_FIRST_DR (res) = NULL_TREE;
  DR_GROUP_NEXT_DR (res) = NULL_TREE;
  DR_GROUP_SIZE (res) = 0;
  DR_GROUP_STORE_COUNT (res) = 0;
  DR_GROUP_GAP (res) = 0;
  DR_GROUP_SAME_DR_STMT (res) = NULL_TREE;
  DR_GROUP_READ_WRITE_DEPENDENCE (res) = false;

  return res;
}
/* Function new_loop_vec_info.

   Create and initialize a new loop_vec_info struct for LOOP, as well as
   stmt_vec_info structs for all the stmts in LOOP.  */
loop_vec_info
new_loop_vec_info (struct loop *loop)
{
  loop_vec_info res;
  basic_block *bbs;
  block_stmt_iterator si;
  unsigned int i;
  tree phi;

  res = (loop_vec_info) xcalloc (1, sizeof (struct _loop_vec_info));

  bbs = get_loop_body (loop);

  /* Create stmt_info for all stmts in the loop.  */
  for (i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = bbs[i];

      for (phi = phi_nodes (bb); phi; phi = PHI_CHAIN (phi))
        {
          stmt_ann_t ann = get_stmt_ann (phi);
          set_stmt_info (ann, new_stmt_vec_info (phi, res));
        }

      for (si = bsi_start (bb); !bsi_end_p (si); bsi_next (&si))
        {
          tree stmt = bsi_stmt (si);
          stmt_ann_t ann;

          ann = stmt_ann (stmt);
          set_stmt_info (ann, new_stmt_vec_info (stmt, res));
        }
    }

  LOOP_VINFO_LOOP (res) = loop;
  LOOP_VINFO_BBS (res) = bbs;
  LOOP_VINFO_EXIT_COND (res) = NULL;
  LOOP_VINFO_NITERS (res) = NULL;
  LOOP_VINFO_VECTORIZABLE_P (res) = 0;
  LOOP_PEELING_FOR_ALIGNMENT (res) = 0;
  LOOP_VINFO_VECT_FACTOR (res) = 0;
  LOOP_VINFO_DATAREFS (res) = VEC_alloc (data_reference_p, heap, 10);
  LOOP_VINFO_DDRS (res) = VEC_alloc (ddr_p, heap, 10 * 10);
  LOOP_VINFO_UNALIGNED_DR (res) = NULL;
  LOOP_VINFO_MAY_MISALIGN_STMTS (res)
    = VEC_alloc (tree, heap, PARAM_VALUE (PARAM_VECT_MAX_VERSION_CHECKS));

  return res;
}
/* Function destroy_loop_vec_info.

   Free LOOP_VINFO struct, as well as all the stmt_vec_info structs of all the
   stmts in the loop.  */
1444 destroy_loop_vec_info (loop_vec_info loop_vinfo
)
1449 block_stmt_iterator si
;
1455 loop
= LOOP_VINFO_LOOP (loop_vinfo
);
1457 bbs
= LOOP_VINFO_BBS (loop_vinfo
);
1458 nbbs
= loop
->num_nodes
;
1460 for (j
= 0; j
< nbbs
; j
++)
1462 basic_block bb
= bbs
[j
];
1464 stmt_vec_info stmt_info
;
1466 for (phi
= phi_nodes (bb
); phi
; phi
= PHI_CHAIN (phi
))
1468 stmt_ann_t ann
= stmt_ann (phi
);
1470 stmt_info
= vinfo_for_stmt (phi
);
1472 set_stmt_info (ann
, NULL
);
1475 for (si
= bsi_start (bb
); !bsi_end_p (si
); )
1477 tree stmt
= bsi_stmt (si
);
1478 stmt_ann_t ann
= stmt_ann (stmt
);
1479 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
1483 /* Check if this is a "pattern stmt" (introduced by the
1484 vectorizer during the pattern recognition pass). */
1485 bool remove_stmt_p
= false;
1486 tree orig_stmt
= STMT_VINFO_RELATED_STMT (stmt_info
);
1489 stmt_vec_info orig_stmt_info
= vinfo_for_stmt (orig_stmt
);
1491 && STMT_VINFO_IN_PATTERN_P (orig_stmt_info
))
1492 remove_stmt_p
= true;
1495 /* Free stmt_vec_info. */
1496 VEC_free (dr_p
, heap
, STMT_VINFO_SAME_ALIGN_REFS (stmt_info
));
1498 set_stmt_info (ann
, NULL
);
1500 /* Remove dead "pattern stmts". */
1502 bsi_remove (&si
, true);
1508 free (LOOP_VINFO_BBS (loop_vinfo
));
1509 free_data_refs (LOOP_VINFO_DATAREFS (loop_vinfo
));
1510 free_dependence_relations (LOOP_VINFO_DDRS (loop_vinfo
));
1511 VEC_free (tree
, heap
, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo
));
/* Function vect_can_force_dr_alignment_p.

   Returns whether the alignment of a DECL can be forced to be aligned
   on ALIGNMENT bit boundary.  */
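/* For example (illustrative only): a file-scope "static short a[256];" can
   usually be forced to a vector-friendly boundary (subject to
   MAX_OFILE_ALIGNMENT), whereas a DECL_EXTERNAL array defined in another
   translation unit cannot, and the function below refuses it.  */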
bool
vect_can_force_dr_alignment_p (tree decl, unsigned int alignment)
{
  if (TREE_CODE (decl) != VAR_DECL)
    return false;

  if (DECL_EXTERNAL (decl))
    return false;

  if (TREE_ASM_WRITTEN (decl))
    return false;

  if (TREE_STATIC (decl))
    return (alignment <= MAX_OFILE_ALIGNMENT);

  /* This is not 100% correct.  The absolute correct stack alignment
     is STACK_BOUNDARY.  We're supposed to hope, but not assume, that
     PREFERRED_STACK_BOUNDARY is honored by all translation units.
     However, until someone implements forced stack alignment, SSE
     isn't really usable without this.  */
  return (alignment <= PREFERRED_STACK_BOUNDARY);
}
/* Function get_vectype_for_scalar_type.

   Returns the vector type corresponding to SCALAR_TYPE as supported
   by the target.  */

tree
get_vectype_for_scalar_type (tree scalar_type)
{
  enum machine_mode inner_mode = TYPE_MODE (scalar_type);
  int nbytes = GET_MODE_SIZE (inner_mode);
  int nunits;
  tree vectype;

  if (nbytes == 0 || nbytes >= UNITS_PER_SIMD_WORD)
    return NULL_TREE;

  /* FORNOW: Only a single vector size per target (UNITS_PER_SIMD_WORD)
     is expected.  */
  nunits = UNITS_PER_SIMD_WORD / nbytes;

  vectype = build_vector_type (scalar_type, nunits);
  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "get vectype with %d units of type ", nunits);
      print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
    }

  if (!vectype)
    return NULL_TREE;

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "vectype: ");
      print_generic_expr (vect_dump, vectype, TDF_SLIM);
    }

  if (!VECTOR_MODE_P (TYPE_MODE (vectype))
      && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "mode not supported by target.");
      return NULL_TREE;
    }

  return vectype;
}
/* Function vect_supportable_dr_alignment

   Return whether the data reference DR is supported with respect to its
   alignment.  */

enum dr_alignment_support
vect_supportable_dr_alignment (struct data_reference *dr)
{
  tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (DR_STMT (dr)));
  enum machine_mode mode = (int) TYPE_MODE (vectype);

  if (aligned_access_p (dr))
    return dr_aligned;

  /* Possibly unaligned access.  */
  if (DR_IS_READ (dr))
    {
      if (vec_realign_load_optab->handlers[mode].insn_code != CODE_FOR_nothing
          && (!targetm.vectorize.builtin_mask_for_load
              || targetm.vectorize.builtin_mask_for_load ()))
        return dr_unaligned_software_pipeline;

      if (movmisalign_optab->handlers[mode].insn_code != CODE_FOR_nothing)
        /* Can't software pipeline the loads, but can at least do them.  */
        return dr_unaligned_supported;
    }

  /* Unsupported.  */
  return dr_unaligned_unsupported;
}
/* Function vect_is_simple_use.

   LOOP - the loop that is being vectorized.
   OPERAND - operand of a stmt in LOOP.
   DEF - the defining stmt in case OPERAND is an SSA_NAME.

   Returns whether a stmt with OPERAND can be vectorized.
   Supportable operands are constants, loop invariants, and operands that are
   defined by the current iteration of the loop.  Unsupportable operands are
   those that are defined by a previous iteration of the loop (as is the case
   in reduction/induction computations).  */
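/* Illustrative example (not taken from the original sources): in

        for (i = 0; i < n; i++)
          sum = sum + a[i] * c;

   the constant 'c', the loop-invariant bound 'n', and the load 'a[i]'
   (defined in the current iteration) are all simple uses, whereas 'sum' on
   the right-hand side is defined by the previous iteration (a reduction)
   and is therefore not a simple use in the sense described above.  */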
1641 vect_is_simple_use (tree operand
, loop_vec_info loop_vinfo
, tree
*def_stmt
,
1642 tree
*def
, enum vect_def_type
*dt
)
1645 stmt_vec_info stmt_vinfo
;
1646 struct loop
*loop
= LOOP_VINFO_LOOP (loop_vinfo
);
1648 *def_stmt
= NULL_TREE
;
1651 if (vect_print_dump_info (REPORT_DETAILS
))
1653 fprintf (vect_dump
, "vect_is_simple_use: operand ");
1654 print_generic_expr (vect_dump
, operand
, TDF_SLIM
);
1657 if (TREE_CODE (operand
) == INTEGER_CST
|| TREE_CODE (operand
) == REAL_CST
)
1659 *dt
= vect_constant_def
;
1663 if (TREE_CODE (operand
) != SSA_NAME
)
1665 if (vect_print_dump_info (REPORT_DETAILS
))
1666 fprintf (vect_dump
, "not ssa-name.");
1670 *def_stmt
= SSA_NAME_DEF_STMT (operand
);
1671 if (*def_stmt
== NULL_TREE
)
1673 if (vect_print_dump_info (REPORT_DETAILS
))
1674 fprintf (vect_dump
, "no def_stmt.");
1678 if (vect_print_dump_info (REPORT_DETAILS
))
1680 fprintf (vect_dump
, "def_stmt: ");
1681 print_generic_expr (vect_dump
, *def_stmt
, TDF_SLIM
);
1684 /* empty stmt is expected only in case of a function argument.
1685 (Otherwise - we expect a phi_node or a GIMPLE_MODIFY_STMT). */
1686 if (IS_EMPTY_STMT (*def_stmt
))
1688 tree arg
= TREE_OPERAND (*def_stmt
, 0);
1689 if (TREE_CODE (arg
) == INTEGER_CST
|| TREE_CODE (arg
) == REAL_CST
)
1692 *dt
= vect_invariant_def
;
1696 if (vect_print_dump_info (REPORT_DETAILS
))
1697 fprintf (vect_dump
, "Unexpected empty stmt.");
1701 bb
= bb_for_stmt (*def_stmt
);
1702 if (!flow_bb_inside_loop_p (loop
, bb
))
1703 *dt
= vect_invariant_def
;
1706 stmt_vinfo
= vinfo_for_stmt (*def_stmt
);
1707 *dt
= STMT_VINFO_DEF_TYPE (stmt_vinfo
);
1710 if (*dt
== vect_unknown_def_type
)
1712 if (vect_print_dump_info (REPORT_DETAILS
))
1713 fprintf (vect_dump
, "Unsupported pattern.");
1717 /* stmts inside the loop that have been identified as performing
1718 a reduction operation cannot have uses in the loop. */
1719 if (*dt
== vect_reduction_def
&& TREE_CODE (*def_stmt
) != PHI_NODE
)
1721 if (vect_print_dump_info (REPORT_DETAILS
))
1722 fprintf (vect_dump
, "reduction used in loop.");
1726 if (vect_print_dump_info (REPORT_DETAILS
))
1727 fprintf (vect_dump
, "type of def: %d.",*dt
);
1729 switch (TREE_CODE (*def_stmt
))
1732 *def
= PHI_RESULT (*def_stmt
);
1733 gcc_assert (*dt
== vect_induction_def
|| *dt
== vect_reduction_def
1734 || *dt
== vect_invariant_def
);
1737 case GIMPLE_MODIFY_STMT
:
1738 *def
= GIMPLE_STMT_OPERAND (*def_stmt
, 0);
1739 gcc_assert (*dt
== vect_loop_def
|| *dt
== vect_invariant_def
);
1743 if (vect_print_dump_info (REPORT_DETAILS
))
1744 fprintf (vect_dump
, "unsupported defining stmt: ");
/* Function supportable_widening_operation

   Check whether an operation represented by the code CODE is a
   widening operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE).

   The two kinds of widening operations we currently support are
   NOP and WIDEN_MULT.  This function checks if these operations
   are supported by the target platform either directly (via vector
   tree-codes), or via target builtins.

   - CODE1 and CODE2 are codes of vector operations to be used when
     vectorizing the operation, if available.
   - DECL1 and DECL2 are decls of target builtin functions to be used
     when vectorizing the operation, if available.  In this case,
     CODE1 and CODE2 are CALL_EXPR.  */
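/* Illustrative example (the types are hypothetical): widening a
   multiplication of two V8HI operands into V4SI products is typically
   expressed either with the tree-code pair
   VEC_WIDEN_MULT_HI_EXPR/VEC_WIDEN_MULT_LO_EXPR, or, on targets such as
   Altivec that provide mult_even/mult_odd builtins, as two CALL_EXPRs
   whose decls are returned through DECL1 and DECL2.  */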
1771 supportable_widening_operation (enum tree_code code
, tree stmt
, tree vectype
,
1772 tree
*decl1
, tree
*decl2
,
1773 enum tree_code
*code1
, enum tree_code
*code2
)
1775 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
1777 enum machine_mode vec_mode
;
1778 enum insn_code icode1
, icode2
;
1779 optab optab1
, optab2
;
1780 tree expr
= GIMPLE_STMT_OPERAND (stmt
, 1);
1781 tree type
= TREE_TYPE (expr
);
1782 tree wide_vectype
= get_vectype_for_scalar_type (type
);
1783 enum tree_code c1
, c2
;
  /* The result of a vectorized widening operation usually requires two vectors
     (because the widened results do not fit in one vector).  The generated
     vector results would normally be expected to be generated in the same
     order as in the original scalar computation, i.e. if 8 results are
     generated in each vector iteration, they are to be organized as follows:
        vect1: [res1,res2,res3,res4], vect2: [res5,res6,res7,res8].

     However, in the special case that the result of the widening operation is
     used in a reduction computation only, the order doesn't matter (because
     when vectorizing a reduction we change the order of the computation).
     Some targets can take advantage of this and generate more efficient code.
     For example, targets like Altivec, that support widen_mult using a sequence
     of {mult_even,mult_odd} generate the following vectors:
        vect1: [res1,res3,res5,res7], vect2: [res2,res4,res6,res8].  */
1800 if (STMT_VINFO_RELEVANT (stmt_info
) == vect_used_by_reduction
)
1806 && code
== WIDEN_MULT_EXPR
1807 && targetm
.vectorize
.builtin_mul_widen_even
1808 && targetm
.vectorize
.builtin_mul_widen_even (vectype
)
1809 && targetm
.vectorize
.builtin_mul_widen_odd
1810 && targetm
.vectorize
.builtin_mul_widen_odd (vectype
))
1812 if (vect_print_dump_info (REPORT_DETAILS
))
1813 fprintf (vect_dump
, "Unordered widening operation detected.");
1815 *code1
= *code2
= CALL_EXPR
;
1816 *decl1
= targetm
.vectorize
.builtin_mul_widen_even (vectype
);
1817 *decl2
= targetm
.vectorize
.builtin_mul_widen_odd (vectype
);
1823 case WIDEN_MULT_EXPR
:
1824 if (BYTES_BIG_ENDIAN
)
1826 c1
= VEC_WIDEN_MULT_HI_EXPR
;
1827 c2
= VEC_WIDEN_MULT_LO_EXPR
;
1831 c2
= VEC_WIDEN_MULT_HI_EXPR
;
1832 c1
= VEC_WIDEN_MULT_LO_EXPR
;
1837 if (BYTES_BIG_ENDIAN
)
1839 c1
= VEC_UNPACK_HI_EXPR
;
1840 c2
= VEC_UNPACK_LO_EXPR
;
1844 c2
= VEC_UNPACK_HI_EXPR
;
1845 c1
= VEC_UNPACK_LO_EXPR
;
1855 optab1
= optab_for_tree_code (c1
, vectype
);
1856 optab2
= optab_for_tree_code (c2
, vectype
);
1858 if (!optab1
|| !optab2
)
1861 vec_mode
= TYPE_MODE (vectype
);
1862 if ((icode1
= optab1
->handlers
[(int) vec_mode
].insn_code
) == CODE_FOR_nothing
1863 || insn_data
[icode1
].operand
[0].mode
!= TYPE_MODE (wide_vectype
)
1864 || (icode2
= optab2
->handlers
[(int) vec_mode
].insn_code
)
1866 || insn_data
[icode2
].operand
[0].mode
!= TYPE_MODE (wide_vectype
))
/* Function reduction_code_for_scalar_code

   CODE - tree_code of a reduction operation.

   REDUC_CODE - the corresponding tree-code to be used to reduce the
      vector of partial results into a single scalar result (which
      will also reside in a vector).

   Return TRUE if a corresponding REDUC_CODE was found, FALSE otherwise.  */
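/* For example: for a MAX_EXPR reduction the routine below returns
   REDUC_MAX_EXPR, which reduces the vector of partial maxima into a single
   scalar maximum (still residing in a vector, as noted above); MIN_EXPR and
   PLUS_EXPR map to REDUC_MIN_EXPR and REDUC_PLUS_EXPR in the same way.  */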
static bool
reduction_code_for_scalar_code (enum tree_code code,
                                enum tree_code *reduc_code)
{
  switch (code)
    {
    case MAX_EXPR:
      *reduc_code = REDUC_MAX_EXPR;
      return true;

    case MIN_EXPR:
      *reduc_code = REDUC_MIN_EXPR;
      return true;

    case PLUS_EXPR:
      *reduc_code = REDUC_PLUS_EXPR;
      return true;

    default:
      return false;
    }
}
/* Function vect_is_simple_reduction

   Detect a cross-iteration def-use cycle that represents a simple
   reduction computation.  We look for the following pattern:

   loop_header:
     a1 = phi < a0, a2 >
     a3 = ...
     a2 = operation (a3, a1)

   such that:
   1. operation is commutative and associative and it is safe to
      change the order of the computation.
   2. no uses for a2 in the loop (a2 is used out of the loop)
   3. no uses of a1 in the loop besides the reduction operation.

   Condition 1 is tested here.
   Conditions 2,3 are tested in vect_mark_stmts_to_be_vectorized.  */
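/* A typical instance of the pattern above (illustrative source form):

        s = 0;
        for (i = 0; i < n; i++)
          s = s + a[i];

   Here 'a1' is the loop-header PHI result for 's', 'a3' is the loaded
   value 'a[i]', 'a2' is the sum computed inside the loop, and the final
   value of 's' is used only after the loop.  */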
tree
vect_is_simple_reduction (struct loop *loop, tree phi)
{
  edge latch_e = loop_latch_edge (loop);
  tree loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
  tree def_stmt, def1, def2;
  enum tree_code code;
  int op_type;
  tree operation, op1, op2;
  tree type;
  int nloop_uses;
  tree name;
  imm_use_iterator imm_iter;
  use_operand_p use_p;

  name = PHI_RESULT (phi);
  nloop_uses = 0;
  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
    {
      tree use_stmt = USE_STMT (use_p);
      if (flow_bb_inside_loop_p (loop, bb_for_stmt (use_stmt))
          && vinfo_for_stmt (use_stmt)
          && !is_pattern_stmt_p (vinfo_for_stmt (use_stmt)))
        nloop_uses++;
      if (nloop_uses > 1)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "reduction used in loop.");
          return NULL_TREE;
        }
    }

  if (TREE_CODE (loop_arg) != SSA_NAME)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "reduction: not ssa_name: ");
          print_generic_expr (vect_dump, loop_arg, TDF_SLIM);
        }
      return NULL_TREE;
    }

  def_stmt = SSA_NAME_DEF_STMT (loop_arg);
  if (!def_stmt)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "reduction: no def_stmt.");
      return NULL_TREE;
    }

  if (TREE_CODE (def_stmt) != GIMPLE_MODIFY_STMT)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        print_generic_expr (vect_dump, def_stmt, TDF_SLIM);
      return NULL_TREE;
    }

  name = GIMPLE_STMT_OPERAND (def_stmt, 0);
  nloop_uses = 0;
  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
    {
      tree use_stmt = USE_STMT (use_p);
      if (flow_bb_inside_loop_p (loop, bb_for_stmt (use_stmt))
          && vinfo_for_stmt (use_stmt)
          && !is_pattern_stmt_p (vinfo_for_stmt (use_stmt)))
        nloop_uses++;
      if (nloop_uses > 1)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "reduction used in loop.");
          return NULL_TREE;
        }
    }

  operation = GIMPLE_STMT_OPERAND (def_stmt, 1);
  code = TREE_CODE (operation);
  if (!commutative_tree_code (code) || !associative_tree_code (code))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "reduction: not commutative/associative: ");
          print_generic_expr (vect_dump, operation, TDF_SLIM);
        }
      return NULL_TREE;
    }

  op_type = TREE_OPERAND_LENGTH (operation);
  if (op_type != binary_op)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "reduction: not binary operation: ");
          print_generic_expr (vect_dump, operation, TDF_SLIM);
        }
      return NULL_TREE;
    }

  op1 = TREE_OPERAND (operation, 0);
  op2 = TREE_OPERAND (operation, 1);
  if (TREE_CODE (op1) != SSA_NAME || TREE_CODE (op2) != SSA_NAME)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "reduction: uses not ssa_names: ");
          print_generic_expr (vect_dump, operation, TDF_SLIM);
        }
      return NULL_TREE;
    }

  /* Check that it's ok to change the order of the computation.  */
  type = TREE_TYPE (operation);
  if (TYPE_MAIN_VARIANT (type) != TYPE_MAIN_VARIANT (TREE_TYPE (op1))
      || TYPE_MAIN_VARIANT (type) != TYPE_MAIN_VARIANT (TREE_TYPE (op2)))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "reduction: multiple types: operation type: ");
          print_generic_expr (vect_dump, type, TDF_SLIM);
          fprintf (vect_dump, ", operands types: ");
          print_generic_expr (vect_dump, TREE_TYPE (op1), TDF_SLIM);
          fprintf (vect_dump, ",");
          print_generic_expr (vect_dump, TREE_TYPE (op2), TDF_SLIM);
        }
      return NULL_TREE;
    }

  /* CHECKME: check for !flag_finite_math_only too?  */
  if (SCALAR_FLOAT_TYPE_P (type) && !flag_unsafe_math_optimizations)
    {
      /* Changing the order of operations changes the semantics.  */
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "reduction: unsafe fp math optimization: ");
          print_generic_expr (vect_dump, operation, TDF_SLIM);
        }
      return NULL_TREE;
    }
  else if (INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type))
    {
      /* Changing the order of operations changes the semantics.  */
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "reduction: unsafe int math optimization: ");
          print_generic_expr (vect_dump, operation, TDF_SLIM);
        }
      return NULL_TREE;
    }

  /* The reduction is safe.  We're dealing with one of the following:
     1) integer arithmetic and no trapv
     2) floating point arithmetic, and special flags permit this
        optimization.  */
  def1 = SSA_NAME_DEF_STMT (op1);
  def2 = SSA_NAME_DEF_STMT (op2);
  if (!def1 || !def2 || IS_EMPTY_STMT (def1) || IS_EMPTY_STMT (def2))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "reduction: no defs for operands: ");
          print_generic_expr (vect_dump, operation, TDF_SLIM);
        }
      return NULL_TREE;
    }

  /* Check that one def is the reduction def, defined by PHI,
     the other def is either defined in the loop by a GIMPLE_MODIFY_STMT,
     or it's an induction (defined by some phi node).  */
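  /* (A hedged illustration, not from the original source: for
     a2 = a3 + a1 with a1 = PHI_RESULT (phi), op2 is a1, so def2 == phi
     and def1 is the loop statement that defines a3.)  */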
  if (def2 == phi
      && flow_bb_inside_loop_p (loop, bb_for_stmt (def1))
      && (TREE_CODE (def1) == GIMPLE_MODIFY_STMT
          || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1)) == vect_induction_def))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "detected reduction:");
          print_generic_expr (vect_dump, operation, TDF_SLIM);
        }
      return def_stmt;
    }
  else if (def1 == phi
           && flow_bb_inside_loop_p (loop, bb_for_stmt (def2))
           && (TREE_CODE (def2) == GIMPLE_MODIFY_STMT
               || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2)) == vect_induction_def))
    {
      /* Swap operands (just for simplicity - so that the rest of the code
         can assume that the reduction variable is always the last (second)
         argument).  */
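      /* (A hedged illustration: an operation 'sum_1 + x_3', where 'sum_1'
         is the phi result, is rewritten here as 'x_3 + sum_1'.)  */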
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "detected reduction: need to swap operands:");
          print_generic_expr (vect_dump, operation, TDF_SLIM);
        }
      swap_tree_operands (def_stmt, &TREE_OPERAND (operation, 0),
                          &TREE_OPERAND (operation, 1));
      return def_stmt;
    }
  else
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "reduction: unknown pattern.");
          print_generic_expr (vect_dump, operation, TDF_SLIM);
        }
      return NULL_TREE;
    }
}
/* Function vect_is_simple_iv_evolution.

   FORNOW: A simple evolution of an induction variable in the loop is
   considered a polynomial evolution with constant step.  */
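
/* A hedged illustration (not from the original source): for an induction
   such as

     for (i = 0; i < n; i++)
       p = p + 4;

   the access function of 'p' is the chrec {p_0, +, 4}_1, whose evolution
   part in the loop is the INTEGER_CST 4, so the evolution is "simple".
   A step that itself evolves in the loop (a chrec of degree >= 2) is
   rejected below.  */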
bool
vect_is_simple_iv_evolution (unsigned loop_nb, tree access_fn, tree *init,
                             tree *step)
{
  tree init_expr;
  tree step_expr;
  tree evolution_part = evolution_part_in_loop_num (access_fn, loop_nb);

  /* When there is no evolution in this loop, the evolution function
     is not "simple".  */
  if (evolution_part == NULL_TREE)
    return false;

  /* When the evolution is a polynomial of degree >= 2
     the evolution function is not "simple".  */
  if (tree_is_chrec (evolution_part))
    return false;

  step_expr = evolution_part;
  init_expr = unshare_expr (initial_condition_in_loop_num (access_fn,
                                                           loop_nb));

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "step: ");
      print_generic_expr (vect_dump, step_expr, TDF_SLIM);
      fprintf (vect_dump, ", init: ");
      print_generic_expr (vect_dump, init_expr, TDF_SLIM);
    }

  *init = init_expr;
  *step = step_expr;

  if (TREE_CODE (step_expr) != INTEGER_CST)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "step unknown.");
      return false;
    }

  return true;
}
/* Function vectorize_loops.

   Entry point to the loop vectorization phase.  */
unsigned
vectorize_loops (void)
{
  unsigned int i;
  unsigned int num_vectorized_loops = 0;
  unsigned int vect_loops_num;
  loop_iterator li;
  struct loop *loop;

  /* Fix the verbosity level if not defined explicitly by the user.  */
  vect_set_dump_settings ();

  /* Allocate the bitmap that records which virtual variables need to be
     renamed.  */
  vect_memsyms_to_rename = BITMAP_ALLOC (NULL);

  /*  ----------- Analyze loops. -----------  */

  /* If some loop was duplicated, it gets a bigger number
     than all previously defined loops.  This fact allows us to run
     only over the initial loops, skipping newly generated ones.  */
  vect_loops_num = number_of_loops ();
  FOR_EACH_LOOP (li, loop, 0)
    {
      loop_vec_info loop_vinfo;

      vect_loop_location = find_loop_location (loop);
      loop_vinfo = vect_analyze_loop (loop);
      loop->aux = loop_vinfo;

      if (!loop_vinfo || !LOOP_VINFO_VECTORIZABLE_P (loop_vinfo))
        continue;

      vect_transform_loop (loop_vinfo);
      num_vectorized_loops++;
    }
  vect_loop_location = UNKNOWN_LOC;

  if (vect_print_dump_info (REPORT_VECTORIZED_LOOPS))
    fprintf (vect_dump, "vectorized %u loops in function.\n",
             num_vectorized_loops);

  /*  ----------- Finalize. -----------  */

  BITMAP_FREE (vect_memsyms_to_rename);

  for (i = 1; i < vect_loops_num; i++)
    {
      loop_vec_info loop_vinfo;

      loop = get_loop (i);
      if (!loop)
        continue;
      loop_vinfo = loop->aux;
      destroy_loop_vec_info (loop_vinfo);
      loop->aux = NULL;
    }

  return num_vectorized_loops > 0 ? TODO_cleanup_cfg : 0;
}
/* Increase alignment of global arrays to improve vectorization potential.
   TODO:
   - Consider also structs that have an array field.
   - Use ipa analysis to prune arrays that can't be vectorized?
     This should involve global alignment analysis and in the future also
     array padding.  */
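
/* A hedged illustration (not from the original source): with V8HI vectors,
   a global array such as

     short a[256];

   would normally get only the alignment of 'short'; when
   vect_can_force_dr_alignment_p allows it, this pass raises DECL_ALIGN of
   'a' to the vector type's alignment, so that accesses to 'a' can be
   vectorized as aligned vector loads and stores.  */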
static unsigned int
increase_alignment (void)
{
  struct varpool_node *vnode;

  /* Increase the alignment of all global arrays for vectorization.  */
  for (vnode = varpool_nodes_queue;
       vnode;
       vnode = vnode->next_needed)
    {
      tree vectype, decl = vnode->decl;
      unsigned int alignment;

      if (TREE_CODE (TREE_TYPE (decl)) != ARRAY_TYPE)
        continue;
      vectype = get_vectype_for_scalar_type (TREE_TYPE (TREE_TYPE (decl)));
      if (!vectype)
        continue;
      alignment = TYPE_ALIGN (vectype);
      if (DECL_ALIGN (decl) >= alignment)
        continue;

      if (vect_can_force_dr_alignment_p (decl, alignment))
        {
          DECL_ALIGN (decl) = TYPE_ALIGN (vectype);
          DECL_USER_ALIGN (decl) = 1;
          if (dump_file)
            {
              fprintf (dump_file, "Increasing alignment of decl: ");
              print_generic_expr (dump_file, decl, TDF_SLIM);
            }
        }
    }
  return 0;
}
static bool
gate_increase_alignment (void)
{
  return flag_section_anchors && flag_tree_vectorize;
}
struct tree_opt_pass pass_ipa_increase_alignment =
{
  "increase_alignment",                 /* name */
  gate_increase_alignment,              /* gate */
  increase_alignment,                   /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  0,                                    /* tv_id */
  0,                                    /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  0,                                    /* todo_flags_finish */
  0                                     /* letter */
};