/* Vectorizer
   Copyright (C) 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* Loop Vectorization Pass.

   This pass tries to vectorize loops.  This first implementation focuses on
   simple inner-most loops, with no conditional control flow, and a set of
   simple operations whose vector form can be expressed using existing
   tree codes (PLUS, MULT etc).
   For example, the vectorizer transforms the following simple loop:

        short a[N]; short b[N]; short c[N]; int i;

        for (i=0; i<N; i++){
          a[i] = b[i] + c[i];
        }

   as if it had been manually vectorized by rewriting the source code into:

        typedef int __attribute__((mode(V8HI))) v8hi;
        short a[N]; short b[N]; short c[N]; int i;
        v8hi *pa = (v8hi*)a, *pb = (v8hi*)b, *pc = (v8hi*)c;
        v8hi va, vb, vc;

        for (i=0; i<N/8; i++){
          vb = pb[i];
          vc = pc[i];
          va = vb + vc;
          pa[i] = va;
        }
   The main entry to this pass is vectorize_loops(), in which
   the vectorizer applies a set of analyses on a given set of loops,
   followed by the actual vectorization transformation for the loops that
   had successfully passed the analysis phase.
   Throughout this pass we make a distinction between two types of
   data: scalars (which are represented by SSA_NAMEs), and memory references
   ("data-refs").  These two types of data require different handling both
   during analysis and transformation.  The types of data-refs that the
   vectorizer currently supports are ARRAY_REFs whose base is an array DECL
   (not a pointer), and INDIRECT_REFs through pointers; both array and pointer
   accesses are required to have a simple (consecutive) access pattern.
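   For illustration (a hypothetical example, not taken from a testcase):
   an access like 'a[i]' that advances by one element per iteration has
   the required consecutive pattern, whereas a strided access like
   'a[2*i]' or an indirection like 'a[b[i]]' does not, and makes the
   loop unvectorizable at this point.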
   Analysis phase:
   ===============
   The driver for the analysis phase is vect_analyze_loop_nest().
   It applies a set of analyses, some of which rely on the scalar evolution
   analyzer (scev) developed by Sebastian Pop.

   During the analysis phase the vectorizer records some information
   per stmt in a "stmt_vec_info" struct which is attached to each stmt in the
   loop, as well as general information about the loop as a whole, which is
   recorded in a "loop_vec_info" struct attached to each loop.
   Transformation phase:
   =====================
   The loop transformation phase scans all the stmts in the loop, and
   creates a vector stmt (or a sequence of stmts) for each scalar stmt S in
   the loop that needs to be vectorized.  It inserts the vector code sequence
   just before the scalar stmt S, and records a pointer to the vector code
   in STMT_VINFO_VEC_STMT (stmt_info) (stmt_info is the stmt_vec_info struct
   attached to S).  This pointer will be used for the vectorization of
   following stmts which use the def of stmt S.  Stmt S is removed if it
   writes to memory; otherwise, we rely on dead code elimination for
   removing it.
   For example, say stmt S1 was vectorized into stmt VS1:

   VS1: vb = px[i];
   S1:  b = x[i];    STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
   S2:  a = b;

   To vectorize stmt S2, the vectorizer first finds the stmt that defines
   the operand 'b' (S1), and gets the relevant vector def 'vb' from the
   vector stmt VS1 pointed to by STMT_VINFO_VEC_STMT (stmt_info (S1)).  The
   resulting sequence would be:

   VS1: vb = px[i];
   S1:  b = x[i];    STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
   VS2: va = vb;
   S2:  a = b;       STMT_VINFO_VEC_STMT (stmt_info (S2)) = VS2
   Operands that are not SSA_NAMEs are data-refs that appear in
   load/store operations (like 'x[i]' in S1), and are handled differently.
   Target modeling:
   =================
   Currently the only target specific information that is used is the
   size of the vector (in bytes) - "UNITS_PER_SIMD_WORD".  Targets that can
   support different sizes of vectors will, for now, need to specify one
   value for "UNITS_PER_SIMD_WORD".  More flexibility will be added in the
   future.
   Since we only vectorize operations whose vector form can be
   expressed using existing tree codes, to verify that an operation is
   supported the vectorizer checks the relevant optab at the relevant
   machine_mode (e.g., optab_handler (add_optab, V8HImode)->insn_code).  If
   the value found is CODE_FOR_nothing, then there's no target support, and
   we can't vectorize the stmt.
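   For illustration, a minimal sketch of such a check (assuming a tree
   code CODE and a vector type VECTYPE have already been determined;
   the local names here are hypothetical):

        optab op = optab_for_tree_code (code, vectype);
        enum machine_mode vec_mode = TYPE_MODE (vectype);
        if (!op
            || optab_handler (op, vec_mode)->insn_code == CODE_FOR_nothing)
          return false;   // no target support - can't vectorize the stmt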
   For additional information on this project see:
   http://gcc.gnu.org/projects/tree-ssa/vectorization.html
*/
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "ggc.h"
#include "tree.h"
#include "target.h"
#include "rtl.h"
#include "basic-block.h"
#include "diagnostic.h"
#include "tree-flow.h"
#include "tree-dump.h"
#include "timevar.h"
#include "cfgloop.h"
#include "cfglayout.h"
#include "expr.h"
#include "recog.h"
#include "optabs.h"
#include "params.h"
#include "toplev.h"
#include "tree-chrec.h"
#include "tree-data-ref.h"
#include "tree-scalar-evolution.h"
#include "input.h"
#include "tree-vectorizer.h"
#include "tree-pass.h"
/*************************************************************************
  Simple Loop Peeling Utilities
 *************************************************************************/
static void slpeel_update_phis_for_duplicate_loop
  (struct loop *, struct loop *, bool after);
static void slpeel_update_phi_nodes_for_guard1
  (edge, struct loop *, bool, basic_block *, bitmap *);
static void slpeel_update_phi_nodes_for_guard2
  (edge, struct loop *, bool, basic_block *);
static edge slpeel_add_loop_guard (basic_block, tree, basic_block,
                                   basic_block);

static void rename_use_op (use_operand_p);
static void rename_variables_in_bb (basic_block);
static void rename_variables_in_loop (struct loop *);

/*************************************************************************
  General Vectorization Utilities
 *************************************************************************/
static void vect_set_dump_settings (void);
/* vect_dump will be set to stderr or dump_file if it exists.  */
FILE *vect_dump;

/* vect_verbosity_level is set to an invalid value
   to mark that it's uninitialized.  */
enum verbosity_levels vect_verbosity_level = MAX_VERBOSITY_LEVEL;

/* Loop location.  */
static LOC vect_loop_location;

/* Bitmap of virtual variables to be renamed.  */
bitmap vect_memsyms_to_rename;
/*************************************************************************
  Simple Loop Peeling Utilities

  Utilities to support loop peeling for vectorization purposes.
 *************************************************************************/
/* Renames the use *OP_P.  */

static void
rename_use_op (use_operand_p op_p)
{
  tree new_name;

  if (TREE_CODE (USE_FROM_PTR (op_p)) != SSA_NAME)
    return;

  new_name = get_current_def (USE_FROM_PTR (op_p));

  /* Something defined outside of the loop.  */
  if (!new_name)
    return;

  /* An ordinary ssa name defined in the loop.  */
  SET_USE (op_p, new_name);
}
/* Renames the variables in basic block BB.  */

static void
rename_variables_in_bb (basic_block bb)
{
  tree phi;
  block_stmt_iterator bsi;
  tree stmt;
  use_operand_p use_p;
  ssa_op_iter iter;
  edge e;
  edge_iterator ei;
  struct loop *loop = bb->loop_father;

  for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi))
    {
      stmt = bsi_stmt (bsi);
      FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_ALL_USES)
        rename_use_op (use_p);
    }

  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      if (!flow_bb_inside_loop_p (loop, e->dest))
        continue;
      for (phi = phi_nodes (e->dest); phi; phi = PHI_CHAIN (phi))
        rename_use_op (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e));
    }
}
/* Renames the variables in the newly generated LOOP.  */

static void
rename_variables_in_loop (struct loop *loop)
{
  unsigned i;
  basic_block *bbs;

  bbs = get_loop_body (loop);

  for (i = 0; i < loop->num_nodes; i++)
    rename_variables_in_bb (bbs[i]);

  free (bbs);
}
/* Update the PHI nodes of NEW_LOOP.

   NEW_LOOP is a duplicate of ORIG_LOOP.
   AFTER indicates whether NEW_LOOP executes before or after ORIG_LOOP:
   AFTER is true if NEW_LOOP executes after ORIG_LOOP, and false if it
   executes before it.  */

static void
slpeel_update_phis_for_duplicate_loop (struct loop *orig_loop,
                                       struct loop *new_loop, bool after)
{
  tree new_ssa_name;
  tree phi_new, phi_orig;
  tree def;
  edge orig_loop_latch = loop_latch_edge (orig_loop);
  edge orig_entry_e = loop_preheader_edge (orig_loop);
  edge new_loop_exit_e = single_exit (new_loop);
  edge new_loop_entry_e = loop_preheader_edge (new_loop);
  edge entry_arg_e = (after ? orig_loop_latch : orig_entry_e);

  /*
     step 1. For each loop-header-phi:
             Add the first phi argument for the phi in NEW_LOOP
             (the one associated with the entry of NEW_LOOP)

     step 2. For each loop-header-phi:
             Add the second phi argument for the phi in NEW_LOOP
             (the one associated with the latch of NEW_LOOP)

     step 3. Update the phis in the successor block of NEW_LOOP.

        case 1: NEW_LOOP was placed before ORIG_LOOP:
                The successor block of NEW_LOOP is the header of ORIG_LOOP.
                Updating the phis in the successor block can therefore be done
                along with the scanning of the loop header phis, because the
                header blocks of ORIG_LOOP and NEW_LOOP have exactly the same
                phi nodes, organized in the same order.

        case 2: NEW_LOOP was placed after ORIG_LOOP:
                The successor block of NEW_LOOP is the original exit block of
                ORIG_LOOP - the phis to be updated are the loop-closed-ssa
                phis.  We postpone updating these phis to a later stage (when
                loop guards are added).
   */

  /* Scan the phis in the headers of the old and new loops
     (they are organized in exactly the same order).  */

  for (phi_new = phi_nodes (new_loop->header),
       phi_orig = phi_nodes (orig_loop->header);
       phi_new && phi_orig;
       phi_new = PHI_CHAIN (phi_new), phi_orig = PHI_CHAIN (phi_orig))
    {
      /* step 1.  */
      def = PHI_ARG_DEF_FROM_EDGE (phi_orig, entry_arg_e);
      add_phi_arg (phi_new, def, new_loop_entry_e);

      /* step 2.  */
      def = PHI_ARG_DEF_FROM_EDGE (phi_orig, orig_loop_latch);
      if (TREE_CODE (def) != SSA_NAME)
        continue;

      new_ssa_name = get_current_def (def);
      if (!new_ssa_name)
        {
          /* This only happens if there are no definitions
             inside the loop.  Use the phi_result in this case.  */
          new_ssa_name = PHI_RESULT (phi_new);
        }

      /* An ordinary ssa name defined in the loop.  */
      add_phi_arg (phi_new, new_ssa_name, loop_latch_edge (new_loop));

      /* step 3 (case 1).  */
      if (!after)
        {
          gcc_assert (new_loop_exit_e == orig_entry_e);
          SET_PHI_ARG_DEF (phi_orig,
                           new_loop_exit_e->dest_idx,
                           new_ssa_name);
        }
    }
}
/* Update PHI nodes for a guard of the LOOP.

   Input:
   - LOOP, GUARD_EDGE: LOOP is a loop for which we added guard code that
        controls whether LOOP is to be executed.  GUARD_EDGE is the edge that
        originates from the guard-bb, skips LOOP and reaches the (unique) exit
        bb of LOOP.  This loop-exit-bb is an empty bb with one successor.
        We denote this bb NEW_MERGE_BB because before the guard code was added
        it had a single predecessor (the LOOP header), and now it became a
        merge point of two paths - the path that ends with the LOOP exit-edge,
        and the path that ends with GUARD_EDGE.
   - NEW_EXIT_BB: New basic block that is added by this function between LOOP
        and NEW_MERGE_BB.  It is used to place loop-closed-ssa-form exit-phis.

   ===> The CFG before the guard-code was added:
        LOOP_header_bb:
          loop_body
          if (exit_loop) goto update_bb
          else           goto LOOP_header_bb
        update_bb:

   ==> The CFG after the guard-code was added:
        guard_bb:
          if (LOOP_guard_condition) goto new_merge_bb
          else                      goto LOOP_header_bb
        LOOP_header_bb:
          loop_body
          if (exit_loop_condition) goto new_merge_bb
          else                     goto LOOP_header_bb
        new_merge_bb:
          goto update_bb
        update_bb:

   ==> The CFG after this function:
        guard_bb:
          if (LOOP_guard_condition) goto new_merge_bb
          else                      goto LOOP_header_bb
        LOOP_header_bb:
          loop_body
          if (exit_loop_condition) goto new_exit_bb
          else                     goto LOOP_header_bb
        new_exit_bb:
        new_merge_bb:
          goto update_bb
        update_bb:

   This function:
   1. creates and updates the relevant phi nodes to account for the new
      incoming edge (GUARD_EDGE) into NEW_MERGE_BB.  This involves:
      1.1. Create phi nodes at NEW_MERGE_BB.
      1.2. Update the phi nodes at the successor of NEW_MERGE_BB (denoted
           UPDATE_BB).  UPDATE_BB was the exit-bb of LOOP before NEW_MERGE_BB
           was added.
   2. preserves loop-closed-ssa-form by creating the required phi nodes
      at the exit of LOOP (i.e, in NEW_EXIT_BB).

   There are two flavors to this function:

   slpeel_update_phi_nodes_for_guard1:
     Here the guard controls whether we enter or skip LOOP, where LOOP is a
     prolog_loop (loop1 below), and the new phis created in NEW_MERGE_BB are
     for variables that have phis in the loop header.

   slpeel_update_phi_nodes_for_guard2:
     Here the guard controls whether we enter or skip LOOP, where LOOP is an
     epilog_loop (loop2 below), and the new phis created in NEW_MERGE_BB are
     for variables that have phis in the loop exit.

   I.E., the overall structure is:

        loop1_preheader_bb:
                guard1 (goto loop1/merge1_bb)
        loop1
        loop1_exit_bb:
                guard2 (goto merge1_bb/merge2_bb)
        merge1_bb
        loop2
        loop2_exit_bb
        merge2_bb
        next_bb

   slpeel_update_phi_nodes_for_guard1 takes care of creating phis in
   loop1_exit_bb and merge1_bb.  These are entry phis (phis for the vars
   that have phis in loop1->header).

   slpeel_update_phi_nodes_for_guard2 takes care of creating phis in
   loop2_exit_bb and merge2_bb.  These are exit phis (phis for the vars
   that have phis in next_bb).  It also adds some of these phis to
   loop1_exit_bb.

   slpeel_update_phi_nodes_for_guard1 is always called before
   slpeel_update_phi_nodes_for_guard2.  They are both needed in order
   to create correct data-flow and loop-closed-ssa-form.

   Generally slpeel_update_phi_nodes_for_guard1 creates phis for variables
   that change between iterations of a loop (and therefore have a phi-node
   at the loop entry), whereas slpeel_update_phi_nodes_for_guard2 creates
   phis for variables that are used out of the loop (and therefore have
   loop-closed exit phis).  Some variables may be both updated between
   iterations and used after the loop.  This is why in loop1_exit_bb we
   may need both entry_phis (created by slpeel_update_phi_nodes_for_guard1)
   and exit phis (created by slpeel_update_phi_nodes_for_guard2).

   - IS_NEW_LOOP: if IS_NEW_LOOP is true, then LOOP is a newly created copy of
     an original loop.  i.e., we have:

           orig_loop
           guard_bb (goto LOOP/new_merge)
           new_loop <-- LOOP
           new_exit
           new_merge
           next_bb

     If IS_NEW_LOOP is false, then LOOP is an original loop, in which case we
     have:

           new_loop
           guard_bb (goto LOOP/new_merge)
           orig_loop <-- LOOP
           new_exit
           new_merge
           next_bb

     The SSA names defined in the original loop have a current
     reaching definition that records the corresponding new
     ssa-name used in the new duplicated loop copy.
*/
/* Function slpeel_update_phi_nodes_for_guard1

   Input:
   - GUARD_EDGE, LOOP, IS_NEW_LOOP, NEW_EXIT_BB - as explained above.
   - DEFS - a bitmap of ssa names to mark new names for which we recorded
            information.

   In the context of the overall structure, we have:

        loop1_preheader_bb:
                guard1 (goto loop1/merge1_bb)
        loop1
        loop1_exit_bb:
                guard2 (goto merge1_bb/merge2_bb)
        merge1_bb
        loop2
        loop2_exit_bb
        merge2_bb
        next_bb

   For each name updated between loop iterations (i.e - for each name that has
   an entry (loop-header) phi in LOOP) we create a new phi in:
   1. merge1_bb (to account for the edge from guard1)
   2. loop1_exit_bb (an exit-phi to keep LOOP in loop-closed form)
*/
static void
slpeel_update_phi_nodes_for_guard1 (edge guard_edge, struct loop *loop,
                                    bool is_new_loop, basic_block *new_exit_bb,
                                    bitmap *defs)
{
  tree orig_phi, new_phi;
  tree update_phi, update_phi2;
  tree guard_arg, loop_arg;
  basic_block new_merge_bb = guard_edge->dest;
  edge e = EDGE_SUCC (new_merge_bb, 0);
  basic_block update_bb = e->dest;
  basic_block orig_bb = loop->header;
  edge new_exit_e;
  tree current_new_name;
  tree name;

  /* Create new bb between loop and new_merge_bb.  */
  *new_exit_bb = split_edge (single_exit (loop));

  new_exit_e = EDGE_SUCC (*new_exit_bb, 0);

  for (orig_phi = phi_nodes (orig_bb), update_phi = phi_nodes (update_bb);
       orig_phi && update_phi;
       orig_phi = PHI_CHAIN (orig_phi), update_phi = PHI_CHAIN (update_phi))
    {
      /* Virtual phi; Mark it for renaming.  We actually want to call
         mark_sym_for_renaming, but since all ssa renaming datastructures
         are going to be freed before we get to call ssa_update, we just
         record this name for now in a bitmap, and will mark it for
         renaming later.  */
      name = PHI_RESULT (orig_phi);
      if (!is_gimple_reg (SSA_NAME_VAR (name)))
        bitmap_set_bit (vect_memsyms_to_rename,
                        DECL_UID (SSA_NAME_VAR (name)));

      /** 1. Handle new-merge-point phis  **/

      /* 1.1. Generate new phi node in NEW_MERGE_BB:  */
      new_phi = create_phi_node (SSA_NAME_VAR (PHI_RESULT (orig_phi)),
                                 new_merge_bb);

      /* 1.2. NEW_MERGE_BB has two incoming edges: GUARD_EDGE and the
            exit-edge of LOOP.  Set the two phi args in NEW_PHI for these
            edges:  */
      loop_arg = PHI_ARG_DEF_FROM_EDGE (orig_phi, EDGE_SUCC (loop->latch, 0));
      guard_arg = PHI_ARG_DEF_FROM_EDGE (orig_phi, loop_preheader_edge (loop));

      add_phi_arg (new_phi, loop_arg, new_exit_e);
      add_phi_arg (new_phi, guard_arg, guard_edge);

      /* 1.3. Update phi in successor block.  */
      gcc_assert (PHI_ARG_DEF_FROM_EDGE (update_phi, e) == loop_arg
                  || PHI_ARG_DEF_FROM_EDGE (update_phi, e) == guard_arg);
      SET_PHI_ARG_DEF (update_phi, e->dest_idx, PHI_RESULT (new_phi));
      update_phi2 = new_phi;

      /** 2. Handle loop-closed-ssa-form phis  **/

      if (!is_gimple_reg (PHI_RESULT (orig_phi)))
        continue;

      /* 2.1. Generate new phi node in NEW_EXIT_BB:  */
      new_phi = create_phi_node (SSA_NAME_VAR (PHI_RESULT (orig_phi)),
                                 *new_exit_bb);

      /* 2.2. NEW_EXIT_BB has one incoming edge: the exit-edge of the loop.  */
      add_phi_arg (new_phi, loop_arg, single_exit (loop));

      /* 2.3. Update phi in successor of NEW_EXIT_BB:  */
      gcc_assert (PHI_ARG_DEF_FROM_EDGE (update_phi2, new_exit_e) == loop_arg);
      SET_PHI_ARG_DEF (update_phi2, new_exit_e->dest_idx,
                       PHI_RESULT (new_phi));

      /* 2.4. Record the newly created name with set_current_def.
         We want to find a name such that
                name = get_current_def (orig_loop_name)
         and to set its current definition as follows:
                set_current_def (name, new_phi_name)

         If LOOP is a new loop then loop_arg is already the name we're
         looking for.  If LOOP is the original loop, then loop_arg is
         the orig_loop_name and the relevant name is recorded in its
         current reaching definition.  */
      if (is_new_loop)
        current_new_name = loop_arg;
      else
        {
          current_new_name = get_current_def (loop_arg);
          /* current_def is not available only if the variable does not
             change inside the loop, in which case we also don't care
             about recording a current_def for it because we won't be
             trying to create loop-exit-phis for it.  */
          if (!current_new_name)
            continue;
        }
      gcc_assert (get_current_def (current_new_name) == NULL_TREE);

      set_current_def (current_new_name, PHI_RESULT (new_phi));
      bitmap_set_bit (*defs, SSA_NAME_VERSION (current_new_name));
    }

  set_phi_nodes (new_merge_bb, phi_reverse (phi_nodes (new_merge_bb)));
}
/* Function slpeel_update_phi_nodes_for_guard2

   Input:
   - GUARD_EDGE, LOOP, IS_NEW_LOOP, NEW_EXIT_BB - as explained above.

   In the context of the overall structure, we have:

        loop1_preheader_bb:
                guard1 (goto loop1/merge1_bb)
        loop1
        loop1_exit_bb:
                guard2 (goto merge1_bb/merge2_bb)
        merge1_bb
        loop2
        loop2_exit_bb
        merge2_bb
        next_bb

   For each name used outside the loop (i.e - for each name that has an exit
   phi in next_bb) we create a new phi in:
   1. merge2_bb (to account for the edge from guard_bb)
   2. loop2_exit_bb (an exit-phi to keep LOOP in loop-closed form)
   3. guard2 bb (an exit phi to keep the preceding loop in loop-closed form),
      if needed (if it wasn't handled by slpeel_update_phi_nodes_for_guard1).
*/
static void
slpeel_update_phi_nodes_for_guard2 (edge guard_edge, struct loop *loop,
                                    bool is_new_loop, basic_block *new_exit_bb)
{
  tree orig_phi, new_phi;
  tree update_phi, update_phi2;
  tree guard_arg, loop_arg;
  basic_block new_merge_bb = guard_edge->dest;
  edge e = EDGE_SUCC (new_merge_bb, 0);
  basic_block update_bb = e->dest;
  edge new_exit_e;
  tree orig_def, orig_def_new_name;
  tree new_name, new_name2;
  tree arg;

  /* Create new bb between loop and new_merge_bb.  */
  *new_exit_bb = split_edge (single_exit (loop));

  new_exit_e = EDGE_SUCC (*new_exit_bb, 0);

  for (update_phi = phi_nodes (update_bb); update_phi;
       update_phi = PHI_CHAIN (update_phi))
    {
      orig_phi = update_phi;
      orig_def = PHI_ARG_DEF_FROM_EDGE (orig_phi, e);
      /* This loop-closed-phi actually doesn't represent a use
         out of the loop - the phi arg is a constant.  */
      if (TREE_CODE (orig_def) != SSA_NAME)
        continue;
      orig_def_new_name = get_current_def (orig_def);

      /** 1. Handle new-merge-point phis  **/

      /* 1.1. Generate new phi node in NEW_MERGE_BB:  */
      new_phi = create_phi_node (SSA_NAME_VAR (PHI_RESULT (orig_phi)),
                                 new_merge_bb);

      /* 1.2. NEW_MERGE_BB has two incoming edges: GUARD_EDGE and the
            exit-edge of LOOP.  Set the two PHI args in NEW_PHI for these
            edges:  */
      new_name = orig_def;
      new_name2 = NULL_TREE;
      if (orig_def_new_name)
        {
          new_name = orig_def_new_name;
          /* Some variables have both loop-entry-phis and loop-exit-phis.
             Such variables were given yet newer names by phis placed in
             guard_bb by slpeel_update_phi_nodes_for_guard1.  I.e:
             new_name2 = get_current_def (get_current_def (orig_name)).  */
          new_name2 = get_current_def (new_name);
        }

      if (is_new_loop)
        {
          guard_arg = orig_def;
          loop_arg = new_name;
        }
      else
        {
          guard_arg = new_name;
          loop_arg = orig_def;
        }
      if (new_name2)
        guard_arg = new_name2;

      add_phi_arg (new_phi, loop_arg, new_exit_e);
      add_phi_arg (new_phi, guard_arg, guard_edge);

      /* 1.3. Update phi in successor block.  */
      gcc_assert (PHI_ARG_DEF_FROM_EDGE (update_phi, e) == orig_def);
      SET_PHI_ARG_DEF (update_phi, e->dest_idx, PHI_RESULT (new_phi));
      update_phi2 = new_phi;

      /** 2. Handle loop-closed-ssa-form phis  **/

      /* 2.1. Generate new phi node in NEW_EXIT_BB:  */
      new_phi = create_phi_node (SSA_NAME_VAR (PHI_RESULT (orig_phi)),
                                 *new_exit_bb);

      /* 2.2. NEW_EXIT_BB has one incoming edge: the exit-edge of the loop.  */
      add_phi_arg (new_phi, loop_arg, single_exit (loop));

      /* 2.3. Update phi in successor of NEW_EXIT_BB:  */
      gcc_assert (PHI_ARG_DEF_FROM_EDGE (update_phi2, new_exit_e) == loop_arg);
      SET_PHI_ARG_DEF (update_phi2, new_exit_e->dest_idx,
                       PHI_RESULT (new_phi));

      /** 3. Handle loop-closed-ssa-form phis for first loop  **/

      /* 3.1. Find the relevant names that need an exit-phi in
         GUARD_BB, i.e. names for which
         slpeel_update_phi_nodes_for_guard1 had not already created a
         phi node.  This is the case for names that are used outside
         the loop (and therefore need an exit phi) but are not updated
         across loop iterations (and therefore don't have a
         loop-header-phi).

         slpeel_update_phi_nodes_for_guard1 is responsible for
         creating loop-exit phis in GUARD_BB for names that have a
         loop-header-phi.  When such a phi is created we also record
         the new name in its current definition.  If this new name
         exists, then guard_arg was set to this new name (see 1.2
         above).  Therefore, if guard_arg is not this new name, this
         is an indication that an exit-phi in GUARD_BB was not yet
         created, so we take care of it here.  */
      if (guard_arg == new_name2)
        continue;
      arg = guard_arg;

      /* 3.2. Generate new phi node in GUARD_BB:  */
      new_phi = create_phi_node (SSA_NAME_VAR (PHI_RESULT (orig_phi)),
                                 guard_edge->src);

      /* 3.3. GUARD_BB has one incoming edge:  */
      gcc_assert (EDGE_COUNT (guard_edge->src->preds) == 1);
      add_phi_arg (new_phi, arg, EDGE_PRED (guard_edge->src, 0));

      /* 3.4. Update phi in successor of GUARD_BB:  */
      gcc_assert (PHI_ARG_DEF_FROM_EDGE (update_phi2, guard_edge)
                  == guard_arg);
      SET_PHI_ARG_DEF (update_phi2, guard_edge->dest_idx,
                       PHI_RESULT (new_phi));
    }

  set_phi_nodes (new_merge_bb, phi_reverse (phi_nodes (new_merge_bb)));
}
/* Make the LOOP iterate NITERS times.  This is done by adding a new IV
   that starts at zero, increases by one and its limit is NITERS.

   Assumption: the exit-condition of LOOP is the last stmt in the loop.  */
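/* For illustration (a hypothetical example): if the loop originally
   exited on "if (p_1 >= q_2)", this function creates a fresh counter

        i_3 = PHI <0 (preheader), i_4 (latch)>
        i_4 = i_3 + 1;

   and replaces the exit test with "if (i_4 >= NITERS)" (or "i_4 < NITERS"
   when the 'then' edge is the back edge), so the loop executes exactly
   NITERS iterations regardless of its original exit condition.  */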
void
slpeel_make_loop_iterate_ntimes (struct loop *loop, tree niters)
{
  tree indx_before_incr, indx_after_incr, cond_stmt, cond;
  tree orig_cond;
  edge exit_edge = single_exit (loop);
  block_stmt_iterator loop_cond_bsi;
  block_stmt_iterator incr_bsi;
  bool insert_after;
  tree init = build_int_cst (TREE_TYPE (niters), 0);
  tree step = build_int_cst (TREE_TYPE (niters), 1);
  LOC loop_loc;

  orig_cond = get_loop_exit_condition (loop);
  gcc_assert (orig_cond);
  loop_cond_bsi = bsi_for_stmt (orig_cond);

  standard_iv_increment_position (loop, &incr_bsi, &insert_after);
  create_iv (init, step, NULL_TREE, loop,
             &incr_bsi, insert_after, &indx_before_incr, &indx_after_incr);

  if (exit_edge->flags & EDGE_TRUE_VALUE) /* 'then' edge exits the loop.  */
    cond = build2 (GE_EXPR, boolean_type_node, indx_after_incr, niters);
  else /* 'then' edge loops back.  */
    cond = build2 (LT_EXPR, boolean_type_node, indx_after_incr, niters);

  cond_stmt = build3 (COND_EXPR, TREE_TYPE (orig_cond), cond,
                      NULL_TREE, NULL_TREE);
  bsi_insert_before (&loop_cond_bsi, cond_stmt, BSI_SAME_STMT);

  /* Remove old loop exit test:  */
  bsi_remove (&loop_cond_bsi, true);

  loop_loc = find_loop_location (loop);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      if (loop_loc != UNKNOWN_LOC)
        fprintf (dump_file, "\nloop at %s:%d: ",
                 LOC_FILE (loop_loc), LOC_LINE (loop_loc));
      print_generic_expr (dump_file, cond_stmt, TDF_SLIM);
    }

  loop->nb_iterations = niters;
}
/* Given LOOP this function generates a new copy of it and puts it
   on E which is either the entry or exit of LOOP.  */

struct loop *
slpeel_tree_duplicate_loop_to_edge_cfg (struct loop *loop, edge e)
{
  struct loop *new_loop;
  basic_block *new_bbs, *bbs;
  bool at_exit;
  bool was_imm_dom;
  basic_block exit_dest;
  tree phi, phi_arg;
  edge exit, new_exit;

  at_exit = (e == single_exit (loop));
  if (!at_exit && e != loop_preheader_edge (loop))
    return NULL;

  bbs = get_loop_body (loop);

  /* Check whether duplication is possible.  */
  if (!can_copy_bbs_p (bbs, loop->num_nodes))
    {
      free (bbs);
      return NULL;
    }

  /* Generate new loop structure.  */
  new_loop = duplicate_loop (loop, loop_outer (loop));
  if (!new_loop)
    {
      free (bbs);
      return NULL;
    }

  exit_dest = single_exit (loop)->dest;
  was_imm_dom = (get_immediate_dominator (CDI_DOMINATORS,
                                          exit_dest) == loop->header ?
                 true : false);

  new_bbs = XNEWVEC (basic_block, loop->num_nodes);

  exit = single_exit (loop);
  copy_bbs (bbs, loop->num_nodes, new_bbs,
            &exit, 1, &new_exit, NULL,
            e->src);

  /* Duplicating phi args at exit bbs as coming
     also from exit of duplicated loop.  */
  for (phi = phi_nodes (exit_dest); phi; phi = PHI_CHAIN (phi))
    {
      phi_arg = PHI_ARG_DEF_FROM_EDGE (phi, single_exit (loop));
      if (phi_arg)
        {
          edge new_loop_exit_edge;

          if (EDGE_SUCC (new_loop->header, 0)->dest == new_loop->latch)
            new_loop_exit_edge = EDGE_SUCC (new_loop->header, 1);
          else
            new_loop_exit_edge = EDGE_SUCC (new_loop->header, 0);

          add_phi_arg (phi, phi_arg, new_loop_exit_edge);
        }
    }

  if (at_exit) /* Add the loop copy at exit.  */
    {
      redirect_edge_and_branch_force (e, new_loop->header);
      set_immediate_dominator (CDI_DOMINATORS, new_loop->header, e->src);
      if (was_imm_dom)
        set_immediate_dominator (CDI_DOMINATORS, exit_dest, new_loop->header);
    }
  else /* Add the copy at entry.  */
    {
      edge new_exit_e;
      edge entry_e = loop_preheader_edge (loop);
      basic_block preheader = entry_e->src;

      if (!flow_bb_inside_loop_p (new_loop,
                                  EDGE_SUCC (new_loop->header, 0)->dest))
        new_exit_e = EDGE_SUCC (new_loop->header, 0);
      else
        new_exit_e = EDGE_SUCC (new_loop->header, 1);

      redirect_edge_and_branch_force (new_exit_e, loop->header);
      set_immediate_dominator (CDI_DOMINATORS, loop->header,
                               new_exit_e->src);

      /* We have to add phi args to the loop->header here as coming
         from new_exit_e edge.  */
      for (phi = phi_nodes (loop->header); phi; phi = PHI_CHAIN (phi))
        {
          phi_arg = PHI_ARG_DEF_FROM_EDGE (phi, entry_e);
          if (phi_arg)
            add_phi_arg (phi, phi_arg, new_exit_e);
        }

      redirect_edge_and_branch_force (entry_e, new_loop->header);
      set_immediate_dominator (CDI_DOMINATORS, new_loop->header, preheader);
    }

  free (new_bbs);
  free (bbs);

  return new_loop;
}
/* Given the condition statement COND, put it as the last statement
   of GUARD_BB; EXIT_BB is the basic block to skip the loop;
   Assumes that this is the single exit of the guarded loop.
   Returns the skip edge.  */

static edge
slpeel_add_loop_guard (basic_block guard_bb, tree cond, basic_block exit_bb,
                       basic_block dom_bb)
{
  block_stmt_iterator bsi;
  edge new_e, enter_e;
  tree cond_stmt;
  tree gimplify_stmt_list;

  enter_e = EDGE_SUCC (guard_bb, 0);
  enter_e->flags &= ~EDGE_FALLTHRU;
  enter_e->flags |= EDGE_FALSE_VALUE;
  bsi = bsi_last (guard_bb);

  cond = force_gimple_operand (cond, &gimplify_stmt_list, true, NULL_TREE);
  cond_stmt = build3 (COND_EXPR, void_type_node, cond,
                      NULL_TREE, NULL_TREE);
  if (gimplify_stmt_list)
    bsi_insert_after (&bsi, gimplify_stmt_list, BSI_NEW_STMT);

  bsi = bsi_last (guard_bb);
  bsi_insert_after (&bsi, cond_stmt, BSI_NEW_STMT);

  /* Add new edge to connect guard block to the merge/loop-exit block.  */
  new_e = make_edge (guard_bb, exit_bb, EDGE_TRUE_VALUE);
  set_immediate_dominator (CDI_DOMINATORS, exit_bb, dom_bb);
  return new_e;
}
/* This function verifies that the following restrictions apply to LOOP:
   (1) it is innermost
   (2) it consists of exactly 2 basic blocks - header, and an empty latch.
   (3) it is single entry, single exit
   (4) its exit condition is the last stmt in the header
   (5) E is the entry/exit edge of LOOP.
 */
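/* For illustration, a loop that satisfies these restrictions (the typical
   form produced by the earlier loop optimizers) looks like this in GIMPLE -
   the header ends in the exit test and the latch is empty:

        header_bb:
          i_1 = PHI <0 (preheader), i_2 (latch_bb)>
          ...loop body...
          i_2 = i_1 + 1;
          if (i_2 >= n_3) goto exit_bb; else goto latch_bb;
        latch_bb:
          goto header_bb;

   This is a hypothetical sketch; the block and SSA names are illustrative
   and not taken from a real dump.  */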
bool
slpeel_can_duplicate_loop_p (const struct loop *loop, const_edge e)
{
  edge exit_e = single_exit (loop);
  edge entry_e = loop_preheader_edge (loop);
  tree orig_cond = get_loop_exit_condition (loop);
  block_stmt_iterator loop_exit_bsi = bsi_last (exit_e->src);

  if (need_ssa_update_p ())
    return false;

  if (loop->inner
      /* All loops have an outer scope; the only case loop->outer is NULL is
         for the function itself.  */
      || !loop_outer (loop)
      || loop->num_nodes != 2
      || !empty_block_p (loop->latch)
      || !single_exit (loop)
      /* Verify that new loop exit condition can be trivially modified.  */
      || (!orig_cond || orig_cond != bsi_stmt (loop_exit_bsi))
      || (e != exit_e && e != entry_e))
    return false;

  return true;
}
#ifdef ENABLE_CHECKING
static void
slpeel_verify_cfg_after_peeling (struct loop *first_loop,
                                 struct loop *second_loop)
{
  basic_block loop1_exit_bb = single_exit (first_loop)->dest;
  basic_block loop2_entry_bb = loop_preheader_edge (second_loop)->src;
  basic_block loop1_entry_bb = loop_preheader_edge (first_loop)->src;

  /* A guard that controls whether the second_loop is to be executed or
     skipped is placed in first_loop->exit.  first_loop->exit therefore has
     two successors - one is the preheader of second_loop, and the other is
     a bb after second_loop.  */
  gcc_assert (EDGE_COUNT (loop1_exit_bb->succs) == 2);

  /* 1. Verify that one of the successors of first_loop->exit is the
        preheader of second_loop.  */

  /* The preheader of new_loop is expected to have two predecessors:
     first_loop->exit and the block that precedes first_loop.  */

  gcc_assert (EDGE_COUNT (loop2_entry_bb->preds) == 2
              && ((EDGE_PRED (loop2_entry_bb, 0)->src == loop1_exit_bb
                   && EDGE_PRED (loop2_entry_bb, 1)->src == loop1_entry_bb)
                  || (EDGE_PRED (loop2_entry_bb, 1)->src == loop1_exit_bb
                      && EDGE_PRED (loop2_entry_bb, 0)->src
                         == loop1_entry_bb)));

  /* Verify that the other successor of first_loop->exit is after the
     second_loop.  */
  /* TODO */
}
#endif
/* If the run time cost model check determines that vectorization is
   not profitable and hence a scalar loop should be generated, then set
   FIRST_NITERS to the prologue-peeled iteration count.  This will allow
   all the iterations to be executed in the prologue-peeled scalar loop.  */

static void
set_prologue_iterations (basic_block bb_before_first_loop,
                         tree *first_niters,
                         struct loop *loop,
                         unsigned int th)
{
  edge e;
  basic_block cond_bb, then_bb;
  tree var, prologue_after_cost_adjust_name, stmt;
  block_stmt_iterator bsi;
  tree newphi;
  edge e_true, e_false, e_fallthru;
  tree cond_stmt;
  tree gimplify_stmt_list;
  tree cost_pre_condition = NULL_TREE;
  tree scalar_loop_iters =
    unshare_expr (LOOP_VINFO_NITERS_UNCHANGED (loop_vec_info_for_loop (loop)));

  e = single_pred_edge (bb_before_first_loop);
  cond_bb = split_edge (e);

  e = single_pred_edge (bb_before_first_loop);
  then_bb = split_edge (e);
  set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);

  e_false = make_single_succ_edge (cond_bb, bb_before_first_loop,
                                   EDGE_FALSE_VALUE);
  set_immediate_dominator (CDI_DOMINATORS, bb_before_first_loop, cond_bb);

  e_true = EDGE_PRED (then_bb, 0);
  e_true->flags &= ~EDGE_FALLTHRU;
  e_true->flags |= EDGE_TRUE_VALUE;

  e_fallthru = EDGE_SUCC (then_bb, 0);

  cost_pre_condition =
    build2 (LE_EXPR, boolean_type_node, scalar_loop_iters,
            build_int_cst (TREE_TYPE (scalar_loop_iters), th));
  cost_pre_condition =
    force_gimple_operand (cost_pre_condition, &gimplify_stmt_list,
                          true, NULL_TREE);
  cond_stmt = build3 (COND_EXPR, void_type_node, cost_pre_condition,
                      NULL_TREE, NULL_TREE);

  bsi = bsi_last (cond_bb);
  if (gimplify_stmt_list)
    bsi_insert_after (&bsi, gimplify_stmt_list, BSI_NEW_STMT);

  bsi = bsi_last (cond_bb);
  bsi_insert_after (&bsi, cond_stmt, BSI_NEW_STMT);

  var = create_tmp_var (TREE_TYPE (scalar_loop_iters),
                        "prologue_after_cost_adjust");
  add_referenced_var (var);
  prologue_after_cost_adjust_name =
    force_gimple_operand (scalar_loop_iters, &stmt, false, var);

  bsi = bsi_last (then_bb);
  if (stmt)
    bsi_insert_after (&bsi, stmt, BSI_NEW_STMT);

  newphi = create_phi_node (var, bb_before_first_loop);
  add_phi_arg (newphi, prologue_after_cost_adjust_name, e_fallthru);
  add_phi_arg (newphi, *first_niters, e_false);

  *first_niters = PHI_RESULT (newphi);
}
/* Function slpeel_tree_peel_loop_to_edge.

   Peel the first (last) iterations of LOOP into a new prolog (epilog) loop
   that is placed on the entry (exit) edge E of LOOP.  After this
   transformation we have two loops one after the other - first-loop iterates
   FIRST_NITERS times, and second-loop iterates the remainder
   NITERS - FIRST_NITERS times.
   If the cost model indicates that it is profitable to emit a scalar
   loop instead of the vector one, then the prolog (epilog) loop will iterate
   for the entire unchanged scalar iterations of the loop.

   Input:
   - LOOP: the loop to be peeled.
   - E: the exit or entry edge of LOOP.
        If it is the entry edge, we peel the first iterations of LOOP.  In
        this case first-loop is LOOP, and second-loop is the newly created
        loop.
        If it is the exit edge, we peel the last iterations of LOOP.  In this
        case, first-loop is the newly created loop, and second-loop is LOOP.
   - NITERS: the number of iterations that LOOP iterates.
   - FIRST_NITERS: the number of iterations that the first-loop should
        iterate.
   - UPDATE_FIRST_LOOP_COUNT: specifies whether this function is responsible
        for updating the loop bound of the first-loop to FIRST_NITERS.  If it
        is false, the caller of this function may want to take care of this
        (this can be useful if we don't want new stmts added to first-loop).
   - TH: cost model profitability threshold of iterations for vectorization.
   - CHECK_PROFITABILITY: specifies whether the cost model check has not
        occurred during versioning and hence needs to occur during
        prologue generation, or whether the cost model check has not
        occurred during prologue generation and hence needs to occur
        during epilogue generation.

   Output:
   The function returns a pointer to the new loop-copy, or NULL if it failed
   to perform the transformation.

   The function generates two if-then-else guards: one before the first loop,
   and the other before the second loop:
   The first guard is:
     if (FIRST_NITERS == 0) then skip the first loop,
     and go directly to the second loop.
   The second guard is:
     if (FIRST_NITERS == NITERS) then skip the second loop.

   FORNOW only simple loops are supported (see slpeel_can_duplicate_loop_p).
   FORNOW the resulting code will not be in loop-closed-ssa form.
*/
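/* For illustration, a plausible call for epilog peeling (a hypothetical
   sketch - the real call sites live in the peeling drivers elsewhere in
   the vectorizer; RATIO_MULT_VF_NAME and NI_NAME stand for names the
   caller has computed for the rounded-down and total iteration counts):

        new_loop = slpeel_tree_peel_loop_to_edge (loop, single_exit (loop),
                                                  ratio_mult_vf_name, ni_name,
                                                  false, th,
                                                  check_profitability);
*/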
struct loop *
slpeel_tree_peel_loop_to_edge (struct loop *loop,
                               edge e, tree first_niters,
                               tree niters, bool update_first_loop_count,
                               unsigned int th, bool check_profitability)
{
  struct loop *new_loop = NULL, *first_loop, *second_loop;
  edge skip_e;
  tree pre_condition = NULL_TREE;
  bitmap definitions;
  basic_block bb_before_second_loop, bb_after_second_loop;
  basic_block bb_before_first_loop;
  basic_block bb_between_loops;
  basic_block new_exit_bb;
  edge exit_e = single_exit (loop);
  LOC loop_loc;
  tree cost_pre_condition = NULL_TREE;

  if (!slpeel_can_duplicate_loop_p (loop, e))
    return NULL;

  /* We have to initialize cfg_hooks.  Then, when calling
     cfg_hooks->split_edge, the function tree_split_edge
     is actually called and, when calling cfg_hooks->duplicate_block,
     the function tree_duplicate_bb is called.  */
  tree_register_cfg_hooks ();

  /* 1. Generate a copy of LOOP and put it on E (E is the entry/exit of LOOP).
        Resulting CFG would be:

        first_loop:
        do {
        } while ...

        second_loop:
        do {
        } while ...

        orig_exit_bb:
   */

  if (!(new_loop = slpeel_tree_duplicate_loop_to_edge_cfg (loop, e)))
    {
      loop_loc = find_loop_location (loop);
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          if (loop_loc != UNKNOWN_LOC)
            fprintf (dump_file, "\n%s:%d: note: ",
                     LOC_FILE (loop_loc), LOC_LINE (loop_loc));
          fprintf (dump_file, "tree_duplicate_loop_to_edge_cfg failed.\n");
        }
      return NULL;
    }

  if (e == exit_e)
    {
      /* NEW_LOOP was placed after LOOP.  */
      first_loop = loop;
      second_loop = new_loop;
    }
  else
    {
      /* NEW_LOOP was placed before LOOP.  */
      first_loop = new_loop;
      second_loop = loop;
    }

  definitions = ssa_names_to_replace ();
  slpeel_update_phis_for_duplicate_loop (loop, new_loop, e == exit_e);
  rename_variables_in_loop (new_loop);
  /* 2. Add the guard code in one of the following ways:

     2.a Add the guard that controls whether the first loop is executed.
         This occurs when this function is invoked for prologue or epilogue
         generation and when the cost model check can be done at compile time.

         Resulting CFG would be:

        bb_before_first_loop:
        if (FIRST_NITERS == 0) GOTO bb_before_second_loop
                               GOTO first-loop

        first_loop:
        do {
        } while ...

        bb_before_second_loop:

        second_loop:
        do {
        } while ...

        orig_exit_bb:

     2.b Add the cost model check that allows the prologue
         to iterate for the entire unchanged scalar
         iterations of the loop in the event that the cost
         model indicates that the scalar loop is more
         profitable than the vector one.  This occurs when
         this function is invoked for prologue generation
         and the cost model check needs to be done at run
         time.

         Resulting CFG after prologue peeling would be:

        if (scalar_loop_iterations <= th)
          FIRST_NITERS = scalar_loop_iterations

        bb_before_first_loop:
        if (FIRST_NITERS == 0) GOTO bb_before_second_loop
                               GOTO first-loop

        first_loop:
        do {
        } while ...

        bb_before_second_loop:

        second_loop:
        do {
        } while ...

        orig_exit_bb:

     2.c Add the cost model check that allows the epilogue
         to iterate for the entire unchanged scalar
         iterations of the loop in the event that the cost
         model indicates that the scalar loop is more
         profitable than the vector one.  This occurs when
         this function is invoked for epilogue generation
         and the cost model check needs to be done at run
         time.

         Resulting CFG after epilogue peeling would be:

        bb_before_first_loop:
        if ((scalar_loop_iterations <= th)
            ||
            FIRST_NITERS == 0) GOTO bb_before_second_loop
                               GOTO first-loop

        first_loop:
        do {
        } while ...

        bb_before_second_loop:

        second_loop:
        do {
        } while ...

        orig_exit_bb:
   */
  bb_before_first_loop = split_edge (loop_preheader_edge (first_loop));
  bb_before_second_loop = split_edge (single_exit (first_loop));

  /* Epilogue peeling.  */
  if (!update_first_loop_count)
    {
      pre_condition =
        fold_build2 (LE_EXPR, boolean_type_node, first_niters,
                     build_int_cst (TREE_TYPE (first_niters), 0));
      if (check_profitability)
        {
          tree scalar_loop_iters
            = unshare_expr (LOOP_VINFO_NITERS_UNCHANGED
                              (loop_vec_info_for_loop (loop)));
          cost_pre_condition =
            build2 (LE_EXPR, boolean_type_node, scalar_loop_iters,
                    build_int_cst (TREE_TYPE (scalar_loop_iters), th));

          pre_condition = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
                                       cost_pre_condition, pre_condition);
        }
    }

  /* Prologue peeling.  */
  else
    {
      if (check_profitability)
        set_prologue_iterations (bb_before_first_loop, &first_niters,
                                 loop, th);

      pre_condition =
        fold_build2 (LE_EXPR, boolean_type_node, first_niters,
                     build_int_cst (TREE_TYPE (first_niters), 0));
    }

  skip_e = slpeel_add_loop_guard (bb_before_first_loop, pre_condition,
                                  bb_before_second_loop,
                                  bb_before_first_loop);
  slpeel_update_phi_nodes_for_guard1 (skip_e, first_loop,
                                      first_loop == new_loop,
                                      &new_exit_bb, &definitions);
  /* 3. Add the guard that controls whether the second loop is executed.
        Resulting CFG would be:

        bb_before_first_loop:
        if (FIRST_NITERS == 0) GOTO bb_before_second_loop (skip first loop)
                               GOTO first-loop

        first_loop:
        do {
        } while ...

        bb_between_loops:
        if (FIRST_NITERS == NITERS) GOTO bb_after_second_loop
                                                        (skip second loop)
        GOTO bb_before_second_loop

        bb_before_second_loop:

        second_loop:
        do {
        } while ...

        bb_after_second_loop:

        orig_exit_bb:
   */
  bb_between_loops = new_exit_bb;
  bb_after_second_loop = split_edge (single_exit (second_loop));

  pre_condition =
    fold_build2 (EQ_EXPR, boolean_type_node, first_niters, niters);
  skip_e = slpeel_add_loop_guard (bb_between_loops, pre_condition,
                                  bb_after_second_loop,
                                  bb_before_first_loop);
  slpeel_update_phi_nodes_for_guard2 (skip_e, second_loop,
                                      second_loop == new_loop, &new_exit_bb);

  /* 4. Make first-loop iterate FIRST_NITERS times, if requested.  */
  if (update_first_loop_count)
    slpeel_make_loop_iterate_ntimes (first_loop, first_niters);

  BITMAP_FREE (definitions);
  delete_update_ssa ();

  return new_loop;
}
/* Function find_loop_location.

   Extract the location of the loop in the source code.
   If the loop is not well formed for vectorization, an estimated
   location is calculated.
   Return the loop location if it succeeds and UNKNOWN_LOC if not.  */

LOC
find_loop_location (struct loop *loop)
{
  tree node = NULL_TREE;
  basic_block bb;
  block_stmt_iterator si;

  if (!loop)
    return UNKNOWN_LOC;

  node = get_loop_exit_condition (loop);

  if (node && CAN_HAVE_LOCATION_P (node) && EXPR_HAS_LOCATION (node)
      && EXPR_FILENAME (node) && EXPR_LINENO (node))
    return EXPR_LOC (node);

  /* If we got here the loop is probably not "well formed",
     try to estimate the loop location.  */

  if (!loop->header)
    return UNKNOWN_LOC;

  bb = loop->header;

  for (si = bsi_start (bb); !bsi_end_p (si); bsi_next (&si))
    {
      node = bsi_stmt (si);
      if (node && CAN_HAVE_LOCATION_P (node) && EXPR_HAS_LOCATION (node))
        return EXPR_LOC (node);
    }

  return UNKNOWN_LOC;
}
/*************************************************************************
  Vectorization Debug Information.
 *************************************************************************/

/* Function vect_set_verbosity_level.

   Called from toplev.c upon detection of the
   -ftree-vectorizer-verbose=N option.  */
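/* For illustration: given "-ftree-vectorizer-verbose=3" on the command
   line, VAL arrives here as the string "3"; values below
   MAX_VERBOSITY_LEVEL are used as-is, and out-of-range values are
   clamped to the highest valid level (MAX_VERBOSITY_LEVEL - 1).  */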
void
vect_set_verbosity_level (const char *val)
{
   unsigned int vl;

   vl = atoi (val);
   if (vl < MAX_VERBOSITY_LEVEL)
     vect_verbosity_level = vl;
   else
     vect_verbosity_level = MAX_VERBOSITY_LEVEL - 1;
}
/* Function vect_set_dump_settings.

   Fix the verbosity level of the vectorizer if the
   requested level was not set explicitly using the flag
   -ftree-vectorizer-verbose=N.
   Decide where to print the debugging information (dump_file/stderr).
   If the user defined the verbosity level, but there is no dump file,
   print to stderr, otherwise print to the dump file.  */

static void
vect_set_dump_settings (void)
{
  vect_dump = dump_file;

  /* Check if the verbosity level was defined by the user:  */
  if (vect_verbosity_level != MAX_VERBOSITY_LEVEL)
    {
      /* If there is no dump file, print to stderr.  */
      if (!dump_file)
        vect_dump = stderr;
      return;
    }

  /* User didn't specify verbosity level:  */
  if (dump_file && (dump_flags & TDF_DETAILS))
    vect_verbosity_level = REPORT_DETAILS;
  else if (dump_file && (dump_flags & TDF_STATS))
    vect_verbosity_level = REPORT_UNVECTORIZED_LOOPS;
  else
    vect_verbosity_level = REPORT_NONE;

  gcc_assert (dump_file || vect_verbosity_level == REPORT_NONE);
}
/* Function vect_print_dump_info.

   For vectorization debug dumps.  */

bool
vect_print_dump_info (enum verbosity_levels vl)
{
  if (vl > vect_verbosity_level)
    return false;

  if (!current_function_decl || !vect_dump)
    return false;

  if (vect_loop_location == UNKNOWN_LOC)
    fprintf (vect_dump, "\n%s:%d: note: ",
             DECL_SOURCE_FILE (current_function_decl),
             DECL_SOURCE_LINE (current_function_decl));
  else
    fprintf (vect_dump, "\n%s:%d: note: ",
             LOC_FILE (vect_loop_location), LOC_LINE (vect_loop_location));

  return true;
}
/*************************************************************************
  Vectorization Utilities.
 *************************************************************************/

/* Function new_stmt_vec_info.

   Create and initialize a new stmt_vec_info struct for STMT.  */

stmt_vec_info
new_stmt_vec_info (tree stmt, loop_vec_info loop_vinfo)
{
  stmt_vec_info res;

  res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));

  STMT_VINFO_TYPE (res) = undef_vec_info_type;
  STMT_VINFO_STMT (res) = stmt;
  STMT_VINFO_LOOP_VINFO (res) = loop_vinfo;
  STMT_VINFO_RELEVANT (res) = 0;
  STMT_VINFO_LIVE_P (res) = false;
  STMT_VINFO_VECTYPE (res) = NULL;
  STMT_VINFO_VEC_STMT (res) = NULL;
  STMT_VINFO_IN_PATTERN_P (res) = false;
  STMT_VINFO_RELATED_STMT (res) = NULL;
  STMT_VINFO_DATA_REF (res) = NULL;

  STMT_VINFO_DR_BASE_ADDRESS (res) = NULL;
  STMT_VINFO_DR_OFFSET (res) = NULL;
  STMT_VINFO_DR_INIT (res) = NULL;
  STMT_VINFO_DR_STEP (res) = NULL;
  STMT_VINFO_DR_ALIGNED_TO (res) = NULL;

  if (TREE_CODE (stmt) == PHI_NODE
      && is_loop_header_bb_p (bb_for_stmt (stmt)))
    STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
  else
    STMT_VINFO_DEF_TYPE (res) = vect_loop_def;

  STMT_VINFO_SAME_ALIGN_REFS (res) = VEC_alloc (dr_p, heap, 5);
  STMT_VINFO_INSIDE_OF_LOOP_COST (res) = 0;
  STMT_VINFO_OUTSIDE_OF_LOOP_COST (res) = 0;
  STMT_SLP_TYPE (res) = 0;
  DR_GROUP_FIRST_DR (res) = NULL_TREE;
  DR_GROUP_NEXT_DR (res) = NULL_TREE;
  DR_GROUP_SIZE (res) = 0;
  DR_GROUP_STORE_COUNT (res) = 0;
  DR_GROUP_GAP (res) = 0;
  DR_GROUP_SAME_DR_STMT (res) = NULL_TREE;
  DR_GROUP_READ_WRITE_DEPENDENCE (res) = false;

  return res;
}
/* Function bb_in_loop_p

   Used as predicate for dfs order traversal of the loop bbs.  */

static bool
bb_in_loop_p (const_basic_block bb, const void *data)
{
  const struct loop *const loop = (const struct loop *) data;
  if (flow_bb_inside_loop_p (loop, bb))
    return true;
  return false;
}
/* Function new_loop_vec_info.

   Create and initialize a new loop_vec_info struct for LOOP, as well as
   stmt_vec_info structs for all the stmts in LOOP.  */

loop_vec_info
new_loop_vec_info (struct loop *loop)
{
  loop_vec_info res;
  basic_block *bbs;
  block_stmt_iterator si;
  unsigned int i, nbbs;

  res = (loop_vec_info) xcalloc (1, sizeof (struct _loop_vec_info));
  LOOP_VINFO_LOOP (res) = loop;

  bbs = get_loop_body (loop);

  /* Create/Update stmt_info for all stmts in the loop.  */
  for (i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = bbs[i];
      tree phi;

      /* BBs in a nested inner-loop will have been already processed (because
         we will have called vect_analyze_loop_form for any nested inner-loop).
         Therefore, for stmts in an inner-loop we just want to update the
         STMT_VINFO_LOOP_VINFO field of their stmt_info to point to the new
         loop_info of the outer-loop we are currently considering to vectorize
         (instead of the loop_info of the inner-loop).
         For stmts in other BBs we need to create a stmt_info from scratch.  */
      if (bb->loop_father != loop)
        {
          /* Inner-loop bb.  */
          gcc_assert (loop->inner && bb->loop_father == loop->inner);
          for (phi = phi_nodes (bb); phi; phi = PHI_CHAIN (phi))
            {
              stmt_vec_info stmt_info = vinfo_for_stmt (phi);
              loop_vec_info inner_loop_vinfo =
                STMT_VINFO_LOOP_VINFO (stmt_info);
              gcc_assert (loop->inner == LOOP_VINFO_LOOP (inner_loop_vinfo));
              STMT_VINFO_LOOP_VINFO (stmt_info) = res;
            }
          for (si = bsi_start (bb); !bsi_end_p (si); bsi_next (&si))
            {
              tree stmt = bsi_stmt (si);
              stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
              loop_vec_info inner_loop_vinfo =
                STMT_VINFO_LOOP_VINFO (stmt_info);
              gcc_assert (loop->inner == LOOP_VINFO_LOOP (inner_loop_vinfo));
              STMT_VINFO_LOOP_VINFO (stmt_info) = res;
            }
        }
      else
        {
          /* bb in current nest.  */
          for (phi = phi_nodes (bb); phi; phi = PHI_CHAIN (phi))
            {
              stmt_ann_t ann = get_stmt_ann (phi);
              set_stmt_info (ann, new_stmt_vec_info (phi, res));
            }

          for (si = bsi_start (bb); !bsi_end_p (si); bsi_next (&si))
            {
              tree stmt = bsi_stmt (si);
              stmt_ann_t ann = stmt_ann (stmt);
              set_stmt_info (ann, new_stmt_vec_info (stmt, res));
            }
        }
    }

  /* CHECKME: We want to visit all BBs before their successors (except for
     latch blocks, for which this assertion wouldn't hold).  In the simple
     case of the loop forms we allow, a dfs order of the BBs would be the
     same as reversed postorder traversal, so we are safe.  */

  free (bbs);
  bbs = XCNEWVEC (basic_block, loop->num_nodes);
  nbbs = dfs_enumerate_from (loop->header, 0, bb_in_loop_p,
                             bbs, loop->num_nodes, loop);
  gcc_assert (nbbs == loop->num_nodes);

  LOOP_VINFO_BBS (res) = bbs;
  LOOP_VINFO_NITERS (res) = NULL;
  LOOP_VINFO_NITERS_UNCHANGED (res) = NULL;
  LOOP_VINFO_COST_MODEL_MIN_ITERS (res) = 0;
  LOOP_VINFO_VECTORIZABLE_P (res) = 0;
  LOOP_PEELING_FOR_ALIGNMENT (res) = 0;
  LOOP_VINFO_VECT_FACTOR (res) = 0;
  LOOP_VINFO_DATAREFS (res) = VEC_alloc (data_reference_p, heap, 10);
  LOOP_VINFO_DDRS (res) = VEC_alloc (ddr_p, heap, 10 * 10);
  LOOP_VINFO_UNALIGNED_DR (res) = NULL;
  LOOP_VINFO_MAY_MISALIGN_STMTS (res) =
    VEC_alloc (tree, heap,
               PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIGNMENT_CHECKS));
  LOOP_VINFO_MAY_ALIAS_DDRS (res) =
    VEC_alloc (ddr_p, heap,
               PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS));
  LOOP_VINFO_STRIDED_STORES (res) = VEC_alloc (tree, heap, 10);
  LOOP_VINFO_SLP_INSTANCES (res) = VEC_alloc (slp_instance, heap, 10);
  LOOP_VINFO_SLP_UNROLLING_FACTOR (res) = 1;

  return res;
}
/* Function destroy_loop_vec_info.

   Free LOOP_VINFO struct, as well as all the stmt_vec_info structs of all
   the stmts in the loop.  */

void
destroy_loop_vec_info (loop_vec_info loop_vinfo, bool clean_stmts)
{
  struct loop *loop;
  basic_block *bbs;
  int nbbs;
  block_stmt_iterator si;
  int j;
  VEC (slp_instance, heap) *slp_instances;
  slp_instance instance;

  if (!loop_vinfo)
    return;

  loop = LOOP_VINFO_LOOP (loop_vinfo);

  bbs = LOOP_VINFO_BBS (loop_vinfo);
  nbbs = loop->num_nodes;

  if (!clean_stmts)
    {
      free (LOOP_VINFO_BBS (loop_vinfo));
      free_data_refs (LOOP_VINFO_DATAREFS (loop_vinfo));
      free_dependence_relations (LOOP_VINFO_DDRS (loop_vinfo));
      VEC_free (tree, heap, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo));

      free (loop_vinfo);
      loop->aux = NULL;
      return;
    }

  for (j = 0; j < nbbs; j++)
    {
      basic_block bb = bbs[j];
      tree phi;
      stmt_vec_info stmt_info;

      for (phi = phi_nodes (bb); phi; phi = PHI_CHAIN (phi))
        {
          stmt_ann_t ann = stmt_ann (phi);

          stmt_info = vinfo_for_stmt (phi);
          free (stmt_info);
          set_stmt_info (ann, NULL);
        }

      for (si = bsi_start (bb); !bsi_end_p (si); )
        {
          tree stmt = bsi_stmt (si);
          stmt_ann_t ann = stmt_ann (stmt);
          stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

          if (stmt_info)
            {
              /* Check if this is a "pattern stmt" (introduced by the
                 vectorizer during the pattern recognition pass).  */
              bool remove_stmt_p = false;
              tree orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
              if (orig_stmt)
                {
                  stmt_vec_info orig_stmt_info = vinfo_for_stmt (orig_stmt);
                  if (orig_stmt_info
                      && STMT_VINFO_IN_PATTERN_P (orig_stmt_info))
                    remove_stmt_p = true;
                }

              /* Free stmt_vec_info.  */
              VEC_free (dr_p, heap, STMT_VINFO_SAME_ALIGN_REFS (stmt_info));
              free (stmt_info);
              set_stmt_info (ann, NULL);

              /* Remove dead "pattern stmts".  */
              if (remove_stmt_p)
                bsi_remove (&si, true);
              else
                bsi_next (&si);
            }
          else
            bsi_next (&si);
        }
    }

  free (LOOP_VINFO_BBS (loop_vinfo));
  free_data_refs (LOOP_VINFO_DATAREFS (loop_vinfo));
  free_dependence_relations (LOOP_VINFO_DDRS (loop_vinfo));
  VEC_free (tree, heap, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo));
  VEC_free (ddr_p, heap, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo));
  slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
  for (j = 0; VEC_iterate (slp_instance, slp_instances, j, instance); j++)
    vect_free_slp_tree (SLP_INSTANCE_TREE (instance));
  VEC_free (slp_instance, heap, LOOP_VINFO_SLP_INSTANCES (loop_vinfo));

  free (loop_vinfo);
  loop->aux = NULL;
}
/* Function vect_can_force_dr_alignment_p.

   Returns whether the alignment of a DECL can be forced to be aligned
   on ALIGNMENT bit boundary.  */
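/* For illustration: for a file-scope "static short a[N];" that has not
   yet been written out, this returns true for any ALIGNMENT up to
   MAX_OFILE_ALIGNMENT, so the vectorizer may raise its DECL_ALIGN to the
   vector alignment; for an "extern" declaration it returns false, since
   the definition (and its alignment) lives in another translation
   unit.  */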
bool
vect_can_force_dr_alignment_p (const_tree decl, unsigned int alignment)
{
  if (TREE_CODE (decl) != VAR_DECL)
    return false;

  if (DECL_EXTERNAL (decl))
    return false;

  if (TREE_ASM_WRITTEN (decl))
    return false;

  if (TREE_STATIC (decl))
    return (alignment <= MAX_OFILE_ALIGNMENT);
  else
    /* This used to be PREFERRED_STACK_BOUNDARY, however, that is not 100%
       correct until someone implements forced stack alignment.  */
    return (alignment <= STACK_BOUNDARY);
}
/* Function get_vectype_for_scalar_type.

   Returns the vector type corresponding to SCALAR_TYPE as supported
   by the target.  */
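/* For illustration: on a hypothetical target with UNITS_PER_SIMD_WORD
   of 16 bytes, a 2-byte "short" yields a vector of 16/2 == 8 units
   (V8HImode), while an 8-byte "double" yields 2 units; a scalar type
   of 16 bytes or more yields NULL_TREE.  */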
tree
get_vectype_for_scalar_type (tree scalar_type)
{
  enum machine_mode inner_mode = TYPE_MODE (scalar_type);
  int nbytes = GET_MODE_SIZE (inner_mode);
  int nunits;
  tree vectype;

  if (nbytes == 0 || nbytes >= UNITS_PER_SIMD_WORD)
    return NULL_TREE;

  /* FORNOW: Only a single vector size per target (UNITS_PER_SIMD_WORD)
     is expected.  */
  nunits = UNITS_PER_SIMD_WORD / nbytes;

  vectype = build_vector_type (scalar_type, nunits);
  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "get vectype with %d units of type ", nunits);
      print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
    }

  if (!vectype)
    return NULL_TREE;

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "vectype: ");
      print_generic_expr (vect_dump, vectype, TDF_SLIM);
    }

  if (!VECTOR_MODE_P (TYPE_MODE (vectype))
      && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "mode not supported by target.");
      return NULL_TREE;
    }

  return vectype;
}
/* Function vect_supportable_dr_alignment

   Return whether the data reference DR is supported with respect to its
   alignment.  */

enum dr_alignment_support
vect_supportable_dr_alignment (struct data_reference *dr)
{
  tree stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  enum machine_mode mode = (int) TYPE_MODE (vectype);
  struct loop *vect_loop = LOOP_VINFO_LOOP (STMT_VINFO_LOOP_VINFO (stmt_info));
  bool nested_in_vect_loop = nested_in_vect_loop_p (vect_loop, stmt);
  bool invariant_in_outerloop = false;

  if (aligned_access_p (dr))
    return dr_aligned;

  if (nested_in_vect_loop)
    {
      tree outerloop_step = STMT_VINFO_DR_STEP (stmt_info);
      invariant_in_outerloop =
        (tree_int_cst_compare (outerloop_step, size_zero_node) == 0);
    }
  /* Possibly unaligned access.  */

  /* We can choose between using the implicit realignment scheme (generating
     a misaligned_move stmt) and the explicit realignment scheme (generating
     aligned loads with a REALIGN_LOAD).  There are two variants to the
     explicit realignment scheme: optimized, and unoptimized.
     We can optimize the realignment only if the step between consecutive
     vector loads is equal to the vector size.  Since the vector memory
     accesses advance in steps of VS (Vector Size) in the vectorized loop, it
     is guaranteed that the misalignment amount remains the same throughout
     the execution of the vectorized loop.  Therefore, we can create the
     "realignment token" (the permutation mask that is passed to REALIGN_LOAD)
     at the loop preheader.

     However, in the case of outer-loop vectorization, when vectorizing a
     memory access in the inner-loop nested within the LOOP that is now being
     vectorized, while it is guaranteed that the misalignment of the
     vectorized memory access will remain the same in different outer-loop
     iterations, it is *not* guaranteed that it will remain the same
     throughout the execution of the inner-loop.  This is because the
     inner-loop advances with the original scalar step (and not in steps of
     VS).  If the inner-loop step happens to be a multiple of VS, then the
     misalignment remains fixed and we can use the optimized realignment
     scheme.  For example:

      for (i=0; i<N; i++)
        for (j=0; j<M; j++)
          s += a[i+j];

     When vectorizing the i-loop in the above example, the step between
     consecutive vector loads is 1, and so the misalignment does not remain
     fixed across the execution of the inner-loop, and the realignment cannot
     be optimized (as illustrated in the following pseudo vectorized loop):

      for (i=0; i<N; i+=4)
        for (j=0; j<M; j++){
          vs += vp[i+j]; // misalignment of &vp[i+j] is {0,1,2,3,0,1,2,3,...}
                         // when j is {0,1,2,3,4,5,6,7,...} respectively.
                         // (assuming that we start from an aligned address).
        }

     We therefore have to use the unoptimized realignment scheme:

      for (i=0; i<N; i+=4)
        for (j=k; j<M; j+=4)
          vs += vp[i+j]; // misalignment of &vp[i+j] is always k (assuming
                         // that the misalignment of the initial address is
                         // 0).

     The loop can then be vectorized as follows:

      for (k=0; k<4; k++){
        rt = get_realignment_token (&vp[k]);
        for (i=0; i<N; i+=4){
          v1 = vp[i+k];
          for (j=k; j<M; j+=4){
            v2 = vp[i+j+VS-1];
            va = REALIGN_LOAD <v1,v2,rt>;
            vs += va;
            v1 = v2;
          }
        }
    } */
1939 if (DR_IS_READ (dr
))
1941 if (optab_handler (vec_realign_load_optab
, mode
)->insn_code
!=
1943 && (!targetm
.vectorize
.builtin_mask_for_load
1944 || targetm
.vectorize
.builtin_mask_for_load ()))
1946 if (nested_in_vect_loop
1947 && TREE_INT_CST_LOW (DR_STEP (dr
)) != UNITS_PER_SIMD_WORD
)
1948 return dr_explicit_realign
;
1950 return dr_explicit_realign_optimized
;
1953 if (optab_handler (movmisalign_optab
, mode
)->insn_code
!=
1955 /* Can't software pipeline the loads, but can at least do them. */
1956 return dr_unaligned_supported
;
1960 return dr_unaligned_unsupported
;
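
/* As an illustrative sketch of how the result is consumed (the real
   consumers are the load/store handling routines of the transformation
   phase), a caller would typically switch on the returned value:

     switch (vect_supportable_dr_alignment (dr))
       {
       case dr_aligned:                     // regular vector load
       case dr_unaligned_supported:         // misaligned_move stmt
       case dr_explicit_realign:            // REALIGN_LOAD, token per access
       case dr_explicit_realign_optimized:  // REALIGN_LOAD, token hoisted
                                            // to the loop preheader
         break;                             // vectorization can proceed
       case dr_unaligned_unsupported:       // this access can't be vectorized
         break;
       }  */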

/* Function vect_is_simple_use.

   Input:
   LOOP - the loop that is being vectorized.
   OPERAND - operand of a stmt in LOOP.
   DEF - the defining stmt in case OPERAND is an SSA_NAME.

   Returns whether a stmt with OPERAND can be vectorized.
   Supportable operands are constants, loop invariants, and operands that are
   defined by the current iteration of the loop.  Unsupportable operands are
   those that are defined by a previous iteration of the loop (as is the case
   in reduction/induction computations).  */

bool
vect_is_simple_use (tree operand, loop_vec_info loop_vinfo, tree *def_stmt,
                    tree *def, enum vect_def_type *dt)
{
  basic_block bb;
  stmt_vec_info stmt_vinfo;
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);

  *def_stmt = NULL_TREE;
  *def = NULL_TREE;

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "vect_is_simple_use: operand ");
      print_generic_expr (vect_dump, operand, TDF_SLIM);
    }

  if (TREE_CODE (operand) == INTEGER_CST || TREE_CODE (operand) == REAL_CST)
    {
      *dt = vect_constant_def;
      return true;
    }

  if (is_gimple_min_invariant (operand))
    {
      *def = operand;
      *dt = vect_invariant_def;
      return true;
    }

  if (TREE_CODE (operand) != SSA_NAME)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "not ssa-name.");
      return false;
    }

  *def_stmt = SSA_NAME_DEF_STMT (operand);
  if (*def_stmt == NULL_TREE)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "no def_stmt.");
      return false;
    }

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "def_stmt: ");
      print_generic_expr (vect_dump, *def_stmt, TDF_SLIM);
    }

  /* An empty stmt is expected only in case of a function argument.
     (Otherwise - we expect a phi_node or a GIMPLE_MODIFY_STMT).  */
  if (IS_EMPTY_STMT (*def_stmt))
    {
      tree arg = TREE_OPERAND (*def_stmt, 0);
      if (is_gimple_min_invariant (arg))
        {
          *def = operand;
          *dt = vect_invariant_def;
          return true;
        }

      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "Unexpected empty stmt.");
      return false;
    }

  bb = bb_for_stmt (*def_stmt);
  if (!flow_bb_inside_loop_p (loop, bb))
    *dt = vect_invariant_def;
  else
    {
      stmt_vinfo = vinfo_for_stmt (*def_stmt);
      *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
    }

  if (*dt == vect_unknown_def_type)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "Unsupported pattern.");
      return false;
    }

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "type of def: %d.", *dt);

  switch (TREE_CODE (*def_stmt))
    {
    case PHI_NODE:
      *def = PHI_RESULT (*def_stmt);
      break;

    case GIMPLE_MODIFY_STMT:
      *def = GIMPLE_STMT_OPERAND (*def_stmt, 0);
      break;

    default:
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "unsupported defining stmt: ");
      return false;
    }

  return true;
}

/* Function supportable_widening_operation

   Check whether an operation represented by the code CODE is a
   widening operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE).

   Widening operations we currently support are NOP (CONVERT), FLOAT
   and WIDEN_MULT.  This function checks if these operations are supported
   by the target platform either directly (via vector tree-codes), or via
   target builtins.

   Output:
   - CODE1 and CODE2 are codes of vector operations to be used when
   vectorizing the operation, if available.
   - DECL1 and DECL2 are decls of target builtin functions to be used
   when vectorizing the operation, if available.  In this case,
   CODE1 and CODE2 are CALL_EXPR.  */

bool
supportable_widening_operation (enum tree_code code, tree stmt, tree vectype,
                                tree *decl1, tree *decl2,
                                enum tree_code *code1, enum tree_code *code2)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
  bool ordered_p;
  enum machine_mode vec_mode;
  enum insn_code icode1, icode2;
  optab optab1, optab2;
  tree expr = GIMPLE_STMT_OPERAND (stmt, 1);
  tree type = TREE_TYPE (expr);
  tree wide_vectype = get_vectype_for_scalar_type (type);
  enum tree_code c1, c2;

  /* The result of a vectorized widening operation usually requires two vectors
     (because the widened results do not fit in one vector).  The generated
     vector results would normally be expected to be generated in the same
     order as in the original scalar computation, i.e. if 8 results are
     generated in each vector iteration, they are to be organized as follows:
        vect1: [res1,res2,res3,res4], vect2: [res5,res6,res7,res8].

     However, in the special case that the result of the widening operation is
     used in a reduction computation only, the order doesn't matter (because
     when vectorizing a reduction we change the order of the computation).
     Some targets can take advantage of this and generate more efficient code.
     For example, targets like Altivec, that support widen_mult using a
     sequence of {mult_even,mult_odd}, generate the following vectors:
        vect1: [res1,res3,res5,res7], vect2: [res2,res4,res6,res8].

     When vectorizing outer-loops, we execute the inner-loop sequentially
     (each vectorized inner-loop iteration contributes to VF outer-loop
     iterations in parallel).  We therefore don't allow changing the order
     of the computation in the inner-loop during outer-loop vectorization.  */

  if (STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
      && !nested_in_vect_loop_p (vect_loop, stmt))
    ordered_p = false;
  else
    ordered_p = true;

  if (!ordered_p
      && code == WIDEN_MULT_EXPR
      && targetm.vectorize.builtin_mul_widen_even
      && targetm.vectorize.builtin_mul_widen_even (vectype)
      && targetm.vectorize.builtin_mul_widen_odd
      && targetm.vectorize.builtin_mul_widen_odd (vectype))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "Unordered widening operation detected.");

      *code1 = *code2 = CALL_EXPR;
      *decl1 = targetm.vectorize.builtin_mul_widen_even (vectype);
      *decl2 = targetm.vectorize.builtin_mul_widen_odd (vectype);
      return true;
    }

  switch (code)
    {
    case WIDEN_MULT_EXPR:
      if (BYTES_BIG_ENDIAN)
        {
          c1 = VEC_WIDEN_MULT_HI_EXPR;
          c2 = VEC_WIDEN_MULT_LO_EXPR;
        }
      else
        {
          c2 = VEC_WIDEN_MULT_HI_EXPR;
          c1 = VEC_WIDEN_MULT_LO_EXPR;
        }
      break;

    case NOP_EXPR:
    case CONVERT_EXPR:
      if (BYTES_BIG_ENDIAN)
        {
          c1 = VEC_UNPACK_HI_EXPR;
          c2 = VEC_UNPACK_LO_EXPR;
        }
      else
        {
          c2 = VEC_UNPACK_HI_EXPR;
          c1 = VEC_UNPACK_LO_EXPR;
        }
      break;

    case FLOAT_EXPR:
      if (BYTES_BIG_ENDIAN)
        {
          c1 = VEC_UNPACK_FLOAT_HI_EXPR;
          c2 = VEC_UNPACK_FLOAT_LO_EXPR;
        }
      else
        {
          c2 = VEC_UNPACK_FLOAT_HI_EXPR;
          c1 = VEC_UNPACK_FLOAT_LO_EXPR;
        }
      break;

    case FIX_TRUNC_EXPR:
      /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
         VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
         computing the operation.  */
      return false;

    default:
      gcc_unreachable ();
    }

  if (code == FIX_TRUNC_EXPR)
    {
      /* The signedness is determined from the output operand.  */
      optab1 = optab_for_tree_code (c1, type);
      optab2 = optab_for_tree_code (c2, type);
    }
  else
    {
      optab1 = optab_for_tree_code (c1, vectype);
      optab2 = optab_for_tree_code (c2, vectype);
    }

  if (!optab1 || !optab2)
    return false;

  vec_mode = TYPE_MODE (vectype);
  if ((icode1 = optab_handler (optab1, vec_mode)->insn_code) == CODE_FOR_nothing
      || insn_data[icode1].operand[0].mode != TYPE_MODE (wide_vectype)
      || (icode2 = optab_handler (optab2, vec_mode)->insn_code)
         == CODE_FOR_nothing
      || insn_data[icode2].operand[0].mode != TYPE_MODE (wide_vectype))
    return false;

  *code1 = c1;
  *code2 = c2;
  return true;
}
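
/* For example, for a widening multiplication such as

     short b[N], c[N]; int a[N];
     ...
     a[i] = b[i] * c[i];

   (an illustrative fragment), on a little-endian target that implements
   the generic tree codes this returns *code1 = VEC_WIDEN_MULT_LO_EXPR and
   *code2 = VEC_WIDEN_MULT_HI_EXPR, each producing one vector of ints from
   one half of the vectors of shorts.  When the result is used only by a
   reduction (and we are not vectorizing an outer-loop), a target that
   provides builtin_mul_widen_even/odd takes the CALL_EXPR path above
   instead, producing the even/odd interleaved order.  */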

/* Function supportable_narrowing_operation

   Check whether an operation represented by the code CODE is a
   narrowing operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE).

   Narrowing operations we currently support are NOP (CONVERT) and
   FIX_TRUNC.  This function checks if these operations are supported by
   the target platform directly via vector tree-codes.

   Output:
   - CODE1 is the code of a vector operation to be used when
   vectorizing the operation, if available.  */

bool
supportable_narrowing_operation (enum tree_code code,
                                 const_tree stmt, const_tree vectype,
                                 enum tree_code *code1)
{
  enum machine_mode vec_mode;
  enum insn_code icode1;
  optab optab1;
  tree expr = GIMPLE_STMT_OPERAND (stmt, 1);
  tree type = TREE_TYPE (expr);
  tree narrow_vectype = get_vectype_for_scalar_type (type);
  enum tree_code c1;

  switch (code)
    {
    case NOP_EXPR:
    case CONVERT_EXPR:
      c1 = VEC_PACK_TRUNC_EXPR;
      break;

    case FIX_TRUNC_EXPR:
      c1 = VEC_PACK_FIX_TRUNC_EXPR;
      break;

    case FLOAT_EXPR:
      /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
         tree code and optabs used for computing the operation.  */
      return false;

    default:
      gcc_unreachable ();
    }

  if (code == FIX_TRUNC_EXPR)
    /* The signedness is determined from the output operand.  */
    optab1 = optab_for_tree_code (c1, type);
  else
    optab1 = optab_for_tree_code (c1, vectype);

  if (!optab1)
    return false;

  vec_mode = TYPE_MODE (vectype);
  if ((icode1 = optab_handler (optab1, vec_mode)->insn_code) == CODE_FOR_nothing
      || insn_data[icode1].operand[0].mode != TYPE_MODE (narrow_vectype))
    return false;

  *code1 = c1;
  return true;
}
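
/* For example, a conversion such as

     int b[N]; short a[N];
     ...
     a[i] = (short) b[i];

   (illustrative) narrows ints to shorts and maps to *code1 =
   VEC_PACK_TRUNC_EXPR, which packs two vectors of ints into one vector of
   shorts -- the mirror image of the widening case, where one vector
   operand yields two vector results.  */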

/* Function reduction_code_for_scalar_code

   Input:
   CODE - tree_code of a reduction operation.

   Output:
   REDUC_CODE - the corresponding tree-code to be used to reduce the
      vector of partial results into a single scalar result (which
      will also reside in a vector).

   Return TRUE if a corresponding REDUC_CODE was found, FALSE otherwise.  */

static bool
reduction_code_for_scalar_code (enum tree_code code,
                                enum tree_code *reduc_code)
{
  switch (code)
    {
    case MAX_EXPR:
      *reduc_code = REDUC_MAX_EXPR;
      return true;

    case MIN_EXPR:
      *reduc_code = REDUC_MIN_EXPR;
      return true;

    case PLUS_EXPR:
      *reduc_code = REDUC_PLUS_EXPR;
      return true;

    default:
      return false;
    }
}
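
/* For example, for the summation

     for (i=0; i<N; i++)
       s += a[i];

   the reduction stmt uses PLUS_EXPR, so the epilog code that folds the
   vector of partial sums into a single scalar result uses
   REDUC_PLUS_EXPR (assuming the target supports that tree code for the
   vector type in question).  */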

/* Function vect_is_simple_reduction

   Detect a cross-iteration def-use cycle that represents a simple
   reduction computation.  We look for the following pattern:

   loop_header:
     a1 = phi < a0, a2 >
     a3 = ...
     a2 = operation (a3, a1)

   such that:
   1. operation is commutative and associative and it is safe to
      change the order of the computation.
   2. no uses for a2 in the loop (a2 is used out of the loop)
   3. no uses of a1 in the loop besides the reduction operation.

   Condition 1 is tested here.
   Conditions 2,3 are tested in vect_mark_stmts_to_be_vectorized.  */

tree
vect_is_simple_reduction (loop_vec_info loop_info, tree phi)
{
  struct loop *loop = (bb_for_stmt (phi))->loop_father;
  struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
  edge latch_e = loop_latch_edge (loop);
  tree loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
  tree def_stmt, def1, def2;
  enum tree_code code;
  int op_type;
  tree operation, op1, op2;
  tree type;
  int nloop_uses;
  tree name;
  imm_use_iterator imm_iter;
  use_operand_p use_p;

  gcc_assert (loop == vect_loop || flow_loop_nested_p (vect_loop, loop));

  name = PHI_RESULT (phi);
  nloop_uses = 0;
  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
    {
      tree use_stmt = USE_STMT (use_p);
      if (flow_bb_inside_loop_p (loop, bb_for_stmt (use_stmt))
          && vinfo_for_stmt (use_stmt)
          && !is_pattern_stmt_p (vinfo_for_stmt (use_stmt)))
        nloop_uses++;
      if (nloop_uses > 1)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "reduction used in loop.");
          return NULL_TREE;
        }
    }

  if (TREE_CODE (loop_arg) != SSA_NAME)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "reduction: not ssa_name: ");
          print_generic_expr (vect_dump, loop_arg, TDF_SLIM);
        }
      return NULL_TREE;
    }

  def_stmt = SSA_NAME_DEF_STMT (loop_arg);
  if (!def_stmt)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "reduction: no def_stmt.");
      return NULL_TREE;
    }

  if (TREE_CODE (def_stmt) != GIMPLE_MODIFY_STMT)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        print_generic_expr (vect_dump, def_stmt, TDF_SLIM);
      return NULL_TREE;
    }

  name = GIMPLE_STMT_OPERAND (def_stmt, 0);
  nloop_uses = 0;
  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
    {
      tree use_stmt = USE_STMT (use_p);
      if (flow_bb_inside_loop_p (loop, bb_for_stmt (use_stmt))
          && vinfo_for_stmt (use_stmt)
          && !is_pattern_stmt_p (vinfo_for_stmt (use_stmt)))
        nloop_uses++;
      if (nloop_uses > 1)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "reduction used in loop.");
          return NULL_TREE;
        }
    }

  operation = GIMPLE_STMT_OPERAND (def_stmt, 1);
  code = TREE_CODE (operation);
  if (!commutative_tree_code (code) || !associative_tree_code (code))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "reduction: not commutative/associative: ");
          print_generic_expr (vect_dump, operation, TDF_SLIM);
        }
      return NULL_TREE;
    }

  op_type = TREE_OPERAND_LENGTH (operation);
  if (op_type != binary_op)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "reduction: not binary operation: ");
          print_generic_expr (vect_dump, operation, TDF_SLIM);
        }
      return NULL_TREE;
    }

  op1 = TREE_OPERAND (operation, 0);
  op2 = TREE_OPERAND (operation, 1);
  if (TREE_CODE (op1) != SSA_NAME || TREE_CODE (op2) != SSA_NAME)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "reduction: uses not ssa_names: ");
          print_generic_expr (vect_dump, operation, TDF_SLIM);
        }
      return NULL_TREE;
    }

  /* Check that it's ok to change the order of the computation.  */
  type = TREE_TYPE (operation);
  if (TYPE_MAIN_VARIANT (type) != TYPE_MAIN_VARIANT (TREE_TYPE (op1))
      || TYPE_MAIN_VARIANT (type) != TYPE_MAIN_VARIANT (TREE_TYPE (op2)))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "reduction: multiple types: operation type: ");
          print_generic_expr (vect_dump, type, TDF_SLIM);
          fprintf (vect_dump, ", operands types: ");
          print_generic_expr (vect_dump, TREE_TYPE (op1), TDF_SLIM);
          fprintf (vect_dump, ",");
          print_generic_expr (vect_dump, TREE_TYPE (op2), TDF_SLIM);
        }
      return NULL_TREE;
    }

  /* Generally, when vectorizing a reduction we change the order of the
     computation.  This may change the behavior of the program in some
     cases, so we need to check that this is ok.  One exception is when
     vectorizing an outer-loop: the inner-loop is executed sequentially,
     and therefore vectorizing reductions in the inner-loop during
     outer-loop vectorization is safe.  */

  /* CHECKME: check for !flag_finite_math_only too?  */
  if (SCALAR_FLOAT_TYPE_P (type) && !flag_associative_math
      && !nested_in_vect_loop_p (vect_loop, def_stmt))
    {
      /* Changing the order of operations changes the semantics.  */
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "reduction: unsafe fp math optimization: ");
          print_generic_expr (vect_dump, operation, TDF_SLIM);
        }
      return NULL_TREE;
    }
  else if (INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type)
           && !nested_in_vect_loop_p (vect_loop, def_stmt))
    {
      /* Changing the order of operations changes the semantics.  */
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "reduction: unsafe int math optimization: ");
          print_generic_expr (vect_dump, operation, TDF_SLIM);
        }
      return NULL_TREE;
    }
  else if (SAT_FIXED_POINT_TYPE_P (type))
    {
      /* Changing the order of operations changes the semantics.  */
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump,
                   "reduction: unsafe fixed-point math optimization: ");
          print_generic_expr (vect_dump, operation, TDF_SLIM);
        }
      return NULL_TREE;
    }

  /* The reduction is safe.  We're dealing with one of the following:
     1) integer arithmetic and no trapv
     2) floating point arithmetic, and special flags permit this
        optimization.  */
  def1 = SSA_NAME_DEF_STMT (op1);
  def2 = SSA_NAME_DEF_STMT (op2);
  if (!def1 || !def2 || IS_EMPTY_STMT (def1) || IS_EMPTY_STMT (def2))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "reduction: no defs for operands: ");
          print_generic_expr (vect_dump, operation, TDF_SLIM);
        }
      return NULL_TREE;
    }

  /* Check that one def is the reduction def, defined by PHI,
     the other def is either defined in the loop ("vect_loop_def"),
     or it's an induction (defined by a loop-header phi-node).  */

  if (def2 == phi
      && flow_bb_inside_loop_p (loop, bb_for_stmt (def1))
      && (TREE_CODE (def1) == GIMPLE_MODIFY_STMT
          || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1)) == vect_induction_def
          || (TREE_CODE (def1) == PHI_NODE
              && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1)) == vect_loop_def
              && !is_loop_header_bb_p (bb_for_stmt (def1)))))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "detected reduction:");
          print_generic_expr (vect_dump, operation, TDF_SLIM);
        }
      return def_stmt;
    }
  else if (def1 == phi
           && flow_bb_inside_loop_p (loop, bb_for_stmt (def2))
           && (TREE_CODE (def2) == GIMPLE_MODIFY_STMT
               || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2))
                  == vect_induction_def
               || (TREE_CODE (def2) == PHI_NODE
                   && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2))
                      == vect_loop_def
                   && !is_loop_header_bb_p (bb_for_stmt (def2)))))
    {
      /* Swap operands (just for simplicity - so that the rest of the code
         can assume that the reduction variable is always the last (second)
         argument).  */
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "detected reduction: need to swap operands:");
          print_generic_expr (vect_dump, operation, TDF_SLIM);
        }
      swap_tree_operands (def_stmt, &TREE_OPERAND (operation, 0),
                          &TREE_OPERAND (operation, 1));
      return def_stmt;
    }
  else
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "reduction: unknown pattern.");
          print_generic_expr (vect_dump, operation, TDF_SLIM);
        }
      return NULL_TREE;
    }
}
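
/* For example, for the reduction

     for (i=0; i<N; i++)
       s += a[i];

   the relevant SSA form is (illustrative names):

   loop_header:
     s_1 = PHI <s_0, s_2>
     ...
     t_3 = a[i];
     s_2 = t_3 + s_1;

   Given the phi that defines s_1, the function returns the stmt
   's_2 = t_3 + s_1'.  Had the source operand order been
   's_2 = s_1 + t_3', the operands would first have been swapped so that
   the reduction variable comes last.  */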

/* Function vect_is_simple_iv_evolution.

   FORNOW: A simple evolution of an induction variable in the loop is
   considered a polynomial evolution with constant step.  */

bool
vect_is_simple_iv_evolution (unsigned loop_nb, tree access_fn, tree * init,
                             tree * step)
{
  tree init_expr;
  tree step_expr;
  tree evolution_part = evolution_part_in_loop_num (access_fn, loop_nb);

  /* When there is no evolution in this loop, the evolution function
     is not "simple".  */
  if (evolution_part == NULL_TREE)
    return false;

  /* When the evolution is a polynomial of degree >= 2
     the evolution function is not "simple".  */
  if (tree_is_chrec (evolution_part))
    return false;

  step_expr = evolution_part;
  init_expr = unshare_expr (initial_condition_in_loop_num (access_fn,
                                                           loop_nb));

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "step: ");
      print_generic_expr (vect_dump, step_expr, TDF_SLIM);
      fprintf (vect_dump, ",  init: ");
      print_generic_expr (vect_dump, init_expr, TDF_SLIM);
    }

  *init = init_expr;
  *step = step_expr;

  if (TREE_CODE (step_expr) != INTEGER_CST)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "step unknown.");
      return false;
    }

  return true;
}

/* Function vectorize_loops.

   Entry point to the loop vectorization phase.  */

unsigned
vectorize_loops (void)
{
  unsigned int i;
  unsigned int num_vectorized_loops = 0;
  unsigned int vect_loops_num;
  loop_iterator li;
  struct loop *loop;

  vect_loops_num = number_of_loops ();

  /* Bail out if there are no loops.  */
  if (vect_loops_num <= 1)
    return 0;

  /* Fix the verbosity level if not defined explicitly by the user.  */
  vect_set_dump_settings ();

  /* Allocate the bitmap that records which virtual variables need to be
     renamed.  */
  vect_memsyms_to_rename = BITMAP_ALLOC (NULL);

  /*  ----------- Analyze loops. -----------  */

  /* If some loop was duplicated, it gets a bigger number
     than all previously defined loops.  This fact allows us to run
     only over initial loops, skipping newly generated ones.  */
  FOR_EACH_LOOP (li, loop, 0)
    {
      loop_vec_info loop_vinfo;

      vect_loop_location = find_loop_location (loop);
      loop_vinfo = vect_analyze_loop (loop);
      loop->aux = loop_vinfo;

      if (!loop_vinfo || !LOOP_VINFO_VECTORIZABLE_P (loop_vinfo))
        continue;

      vect_transform_loop (loop_vinfo);
      num_vectorized_loops++;
    }
  vect_loop_location = UNKNOWN_LOC;

  if (vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS)
      || (vect_print_dump_info (REPORT_VECTORIZED_LOOPS)
          && num_vectorized_loops > 0))
    fprintf (vect_dump, "vectorized %u loops in function.\n",
             num_vectorized_loops);

  /*  ----------- Finalize. -----------  */

  BITMAP_FREE (vect_memsyms_to_rename);

  for (i = 1; i < vect_loops_num; i++)
    {
      loop_vec_info loop_vinfo;

      loop = get_loop (i);
      if (!loop)
        continue;
      loop_vinfo = loop->aux;
      destroy_loop_vec_info (loop_vinfo, true);
      loop->aux = NULL;
    }

  return num_vectorized_loops > 0 ? TODO_cleanup_cfg : 0;
}

/* Increase alignment of global arrays to improve vectorization potential.
   TODO:
   - Consider also structs that have an array field.
   - Use ipa analysis to prune arrays that can't be vectorized?
     This should involve global alignment analysis and in the future also
     array padding.  */

static unsigned int
increase_alignment (void)
{
  struct varpool_node *vnode;

  /* Increase the alignment of all global arrays for vectorization.  */
  for (vnode = varpool_nodes_queue;
       vnode;
       vnode = vnode->next_needed)
    {
      tree vectype, decl = vnode->decl;
      unsigned int alignment;

      if (TREE_CODE (TREE_TYPE (decl)) != ARRAY_TYPE)
        continue;
      vectype = get_vectype_for_scalar_type (TREE_TYPE (TREE_TYPE (decl)));
      if (!vectype)
        continue;
      alignment = TYPE_ALIGN (vectype);
      if (DECL_ALIGN (decl) >= alignment)
        continue;

      if (vect_can_force_dr_alignment_p (decl, alignment))
        {
          DECL_ALIGN (decl) = TYPE_ALIGN (vectype);
          DECL_USER_ALIGN (decl) = 1;
          if (dump_file)
            {
              fprintf (dump_file, "Increasing alignment of decl: ");
              print_generic_expr (dump_file, decl, TDF_SLIM);
            }
        }
    }
  return 0;
}
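
/* For example, assuming a target with 16-byte vectors and a global

     short a[256];

   whose alignment is below the vector alignment, the code above raises
   DECL_ALIGN (a) to TYPE_ALIGN of the corresponding vector type (V8HI),
   so that loops over 'a' can use aligned vector accesses without peeling
   or versioning.  */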

static bool
gate_increase_alignment (void)
{
  return flag_section_anchors && flag_tree_vectorize;
}

struct tree_opt_pass pass_ipa_increase_alignment =
{
  "increase_alignment",                 /* name */
  gate_increase_alignment,              /* gate */
  increase_alignment,                   /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  0,                                    /* tv_id */
  0,                                    /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  0,                                    /* todo_flags_finish */
  0                                     /* letter */
};