gcc/tree-vectorizer.c
1 /* Loop Vectorization
2 Copyright (C) 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
3 Contributed by Dorit Naishlos <dorit@il.ibm.com>
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
20 02110-1301, USA. */
22 /* Loop Vectorization Pass.
24 This pass tries to vectorize loops. This first implementation focuses on
25 simple inner-most loops, with no conditional control flow, and a set of
26        simple operations whose vector form can be expressed using existing
27        tree codes (PLUS, MULT, etc.).
29 For example, the vectorizer transforms the following simple loop:
31 short a[N]; short b[N]; short c[N]; int i;
33 for (i=0; i<N; i++){
34 a[i] = b[i] + c[i];
37    as if it were manually vectorized by rewriting the source code into:
39 typedef int __attribute__((mode(V8HI))) v8hi;
40 short a[N]; short b[N]; short c[N]; int i;
41 v8hi *pa = (v8hi*)a, *pb = (v8hi*)b, *pc = (v8hi*)c;
42 v8hi va, vb, vc;
44 for (i=0; i<N/8; i++){
45 vb = pb[i];
46 vc = pc[i];
47 va = vb + vc;
48 pa[i] = va;
51 The main entry to this pass is vectorize_loops(), in which
52 the vectorizer applies a set of analyses on a given set of loops,
53 followed by the actual vectorization transformation for the loops that
54 had successfully passed the analysis phase.
56 Throughout this pass we make a distinction between two types of
57 data: scalars (which are represented by SSA_NAMES), and memory references
58 ("data-refs"). These two types of data require different handling both
59 during analysis and transformation. The types of data-refs that the
60        vectorizer currently supports are ARRAY_REFS whose base is an array DECL
61 (not a pointer), and INDIRECT_REFS through pointers; both array and pointer
62 accesses are required to have a simple (consecutive) access pattern.
64 Analysis phase:
65 ===============
66 The driver for the analysis phase is vect_analyze_loop_nest().
67 It applies a set of analyses, some of which rely on the scalar evolution
68 analyzer (scev) developed by Sebastian Pop.
70 During the analysis phase the vectorizer records some information
71 per stmt in a "stmt_vec_info" struct which is attached to each stmt in the
72 loop, as well as general information about the loop as a whole, which is
73 recorded in a "loop_vec_info" struct attached to each loop.
75 Transformation phase:
76 =====================
77 The loop transformation phase scans all the stmts in the loop, and
78 creates a vector stmt (or a sequence of stmts) for each scalar stmt S in
79        the loop that needs to be vectorized. It inserts the vector code sequence
80 just before the scalar stmt S, and records a pointer to the vector code
81 in STMT_VINFO_VEC_STMT (stmt_info) (stmt_info is the stmt_vec_info struct
82 attached to S). This pointer will be used for the vectorization of following
83 stmts which use the def of stmt S. Stmt S is removed if it writes to memory;
84 otherwise, we rely on dead code elimination for removing it.
86 For example, say stmt S1 was vectorized into stmt VS1:
88 VS1: vb = px[i];
89 S1: b = x[i]; STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
90 S2: a = b;
92 To vectorize stmt S2, the vectorizer first finds the stmt that defines
93 the operand 'b' (S1), and gets the relevant vector def 'vb' from the
94 vector stmt VS1 pointed to by STMT_VINFO_VEC_STMT (stmt_info (S1)). The
95 resulting sequence would be:
97 VS1: vb = px[i];
98 S1: b = x[i]; STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
99 VS2: va = vb;
100 S2: a = b; STMT_VINFO_VEC_STMT (stmt_info (S2)) = VS2
102        Operands that are not SSA_NAMEs are data-refs that appear in
103 load/store operations (like 'x[i]' in S1), and are handled differently.
105 Target modeling:
106 =================
107 Currently the only target specific information that is used is the
108 size of the vector (in bytes) - "UNITS_PER_SIMD_WORD". Targets that can
109        support different sizes of vectors will, for now, need to specify one value
110 for "UNITS_PER_SIMD_WORD". More flexibility will be added in the future.
112        Since we only vectorize operations whose vector form can be
113 expressed using existing tree codes, to verify that an operation is
114 supported, the vectorizer checks the relevant optab at the relevant
115        machine_mode (e.g., add_optab->handlers[(int) V8HImode].insn_code). If
116 the value found is CODE_FOR_nothing, then there's no target support, and
117 we can't vectorize the stmt.
119 For additional information on this project see:
120 http://gcc.gnu.org/projects/tree-ssa/vectorization.html
123 #include "config.h"
124 #include "system.h"
125 #include "coretypes.h"
126 #include "tm.h"
127 #include "ggc.h"
128 #include "tree.h"
129 #include "target.h"
130 #include "rtl.h"
131 #include "basic-block.h"
132 #include "diagnostic.h"
133 #include "tree-flow.h"
134 #include "tree-dump.h"
135 #include "timevar.h"
136 #include "cfgloop.h"
137 #include "cfglayout.h"
138 #include "expr.h"
139 #include "recog.h"
140 #include "optabs.h"
141 #include "params.h"
142 #include "toplev.h"
143 #include "tree-chrec.h"
144 #include "tree-data-ref.h"
145 #include "tree-scalar-evolution.h"
146 #include "input.h"
147 #include "tree-vectorizer.h"
148 #include "tree-pass.h"
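/* [Editorial sketch - hypothetical helper, not part of the original file.]
   The header comment above describes target modeling: an operation is
   considered vectorizable only if the relevant optab has a handler for the
   relevant vector mode.  A minimal form of that check, using the V8HImode
   addition example from the comment, would be:  */

static inline bool
example_v8hi_add_supported_p (void)
{
  /* CODE_FOR_nothing means the target provides no insn for this
     operation/mode pair, so a stmt using it cannot be vectorized.  */
  return add_optab->handlers[(int) V8HImode].insn_code != CODE_FOR_nothing;
}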
150 /*************************************************************************
151 Simple Loop Peeling Utilities
152 *************************************************************************/
153 static void slpeel_update_phis_for_duplicate_loop
154 (struct loop *, struct loop *, bool after);
155 static void slpeel_update_phi_nodes_for_guard1
156 (edge, struct loop *, bool, basic_block *, bitmap *);
157 static void slpeel_update_phi_nodes_for_guard2
158 (edge, struct loop *, bool, basic_block *);
159 static edge slpeel_add_loop_guard (basic_block, tree, basic_block, basic_block);
161 static void rename_use_op (use_operand_p);
162 static void rename_variables_in_bb (basic_block);
163 static void rename_variables_in_loop (struct loop *);
165 /*************************************************************************
166 General Vectorization Utilities
167 *************************************************************************/
168 static void vect_set_dump_settings (void);
170 /* vect_dump will be set to stderr, or to dump_file if it exists. */
171 FILE *vect_dump;
173 /* vect_verbosity_level set to an invalid value
174 to mark that it's uninitialized. */
175 enum verbosity_levels vect_verbosity_level = MAX_VERBOSITY_LEVEL;
177 /* Loop location. */
178 static LOC vect_loop_location;
180 /* Bitmap of virtual variables to be renamed. */
181 bitmap vect_memsyms_to_rename;
183 /*************************************************************************
184 Simple Loop Peeling Utilities
186 Utilities to support loop peeling for vectorization purposes.
187 *************************************************************************/
190 /* Renames the use *OP_P. */
192 static void
193 rename_use_op (use_operand_p op_p)
195 tree new_name;
197 if (TREE_CODE (USE_FROM_PTR (op_p)) != SSA_NAME)
198 return;
200 new_name = get_current_def (USE_FROM_PTR (op_p));
202 /* Something defined outside of the loop. */
203 if (!new_name)
204 return;
206 /* An ordinary ssa name defined in the loop. */
208 SET_USE (op_p, new_name);
212 /* Renames the variables in basic block BB. */
214 static void
215 rename_variables_in_bb (basic_block bb)
217 tree phi;
218 block_stmt_iterator bsi;
219 tree stmt;
220 use_operand_p use_p;
221 ssa_op_iter iter;
222 edge e;
223 edge_iterator ei;
224 struct loop *loop = bb->loop_father;
226 for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi))
228 stmt = bsi_stmt (bsi);
229 FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_ALL_USES)
230 rename_use_op (use_p);
233 FOR_EACH_EDGE (e, ei, bb->succs)
235 if (!flow_bb_inside_loop_p (loop, e->dest))
236 continue;
237 for (phi = phi_nodes (e->dest); phi; phi = PHI_CHAIN (phi))
238 rename_use_op (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e));
243 /* Renames the variables in the newly generated LOOP. */
245 static void
246 rename_variables_in_loop (struct loop *loop)
248 unsigned i;
249 basic_block *bbs;
251 bbs = get_loop_body (loop);
253 for (i = 0; i < loop->num_nodes; i++)
254 rename_variables_in_bb (bbs[i]);
256 free (bbs);
260 /* Update the PHI nodes of NEW_LOOP.
262 NEW_LOOP is a duplicate of ORIG_LOOP.
263 AFTER indicates whether NEW_LOOP executes before or after ORIG_LOOP:
264 AFTER is true if NEW_LOOP executes after ORIG_LOOP, and false if it
265 executes before it. */
267 static void
268 slpeel_update_phis_for_duplicate_loop (struct loop *orig_loop,
269 struct loop *new_loop, bool after)
271 tree new_ssa_name;
272 tree phi_new, phi_orig;
273 tree def;
274 edge orig_loop_latch = loop_latch_edge (orig_loop);
275 edge orig_entry_e = loop_preheader_edge (orig_loop);
276 edge new_loop_exit_e = single_exit (new_loop);
277 edge new_loop_entry_e = loop_preheader_edge (new_loop);
278 edge entry_arg_e = (after ? orig_loop_latch : orig_entry_e);
281 step 1. For each loop-header-phi:
282 Add the first phi argument for the phi in NEW_LOOP
283 (the one associated with the entry of NEW_LOOP)
285 step 2. For each loop-header-phi:
286 Add the second phi argument for the phi in NEW_LOOP
287 (the one associated with the latch of NEW_LOOP)
289 step 3. Update the phis in the successor block of NEW_LOOP.
291 case 1: NEW_LOOP was placed before ORIG_LOOP:
292 The successor block of NEW_LOOP is the header of ORIG_LOOP.
293 Updating the phis in the successor block can therefore be done
294 along with the scanning of the loop header phis, because the
295 header blocks of ORIG_LOOP and NEW_LOOP have exactly the same
296 phi nodes, organized in the same order.
298 case 2: NEW_LOOP was placed after ORIG_LOOP:
299 The successor block of NEW_LOOP is the original exit block of
300 ORIG_LOOP - the phis to be updated are the loop-closed-ssa phis.
301 We postpone updating these phis to a later stage (when
302 loop guards are added).
306 /* Scan the phis in the headers of the old and new loops
307 (they are organized in exactly the same order). */
309 for (phi_new = phi_nodes (new_loop->header),
310 phi_orig = phi_nodes (orig_loop->header);
311 phi_new && phi_orig;
312 phi_new = PHI_CHAIN (phi_new), phi_orig = PHI_CHAIN (phi_orig))
314 /* step 1. */
315 def = PHI_ARG_DEF_FROM_EDGE (phi_orig, entry_arg_e);
316 add_phi_arg (phi_new, def, new_loop_entry_e);
318 /* step 2. */
319 def = PHI_ARG_DEF_FROM_EDGE (phi_orig, orig_loop_latch);
320 if (TREE_CODE (def) != SSA_NAME)
321 continue;
323 new_ssa_name = get_current_def (def);
324 if (!new_ssa_name)
326 /* This only happens if there are no definitions
327           inside the loop. Use the phi_result in this case. */
328 new_ssa_name = PHI_RESULT (phi_new);
331 /* An ordinary ssa name defined in the loop. */
332 add_phi_arg (phi_new, new_ssa_name, loop_latch_edge (new_loop));
334 /* step 3 (case 1). */
335 if (!after)
337 gcc_assert (new_loop_exit_e == orig_entry_e);
338 SET_PHI_ARG_DEF (phi_orig,
339 new_loop_exit_e->dest_idx,
340 new_ssa_name);
346 /* Update PHI nodes for a guard of the LOOP.
348 Input:
349 - LOOP, GUARD_EDGE: LOOP is a loop for which we added guard code that
350 controls whether LOOP is to be executed. GUARD_EDGE is the edge that
351 originates from the guard-bb, skips LOOP and reaches the (unique) exit
352 bb of LOOP. This loop-exit-bb is an empty bb with one successor.
353 We denote this bb NEW_MERGE_BB because before the guard code was added
354 it had a single predecessor (the LOOP header), and now it became a merge
355 point of two paths - the path that ends with the LOOP exit-edge, and
356 the path that ends with GUARD_EDGE.
357 - NEW_EXIT_BB: New basic block that is added by this function between LOOP
358 and NEW_MERGE_BB. It is used to place loop-closed-ssa-form exit-phis.
360 ===> The CFG before the guard-code was added:
361 LOOP_header_bb:
362 loop_body
363 if (exit_loop) goto update_bb
364 else goto LOOP_header_bb
365 update_bb:
367 ==> The CFG after the guard-code was added:
368 guard_bb:
369 if (LOOP_guard_condition) goto new_merge_bb
370 else goto LOOP_header_bb
371 LOOP_header_bb:
372 loop_body
373 if (exit_loop_condition) goto new_merge_bb
374 else goto LOOP_header_bb
375 new_merge_bb:
376 goto update_bb
377 update_bb:
379 ==> The CFG after this function:
380 guard_bb:
381 if (LOOP_guard_condition) goto new_merge_bb
382 else goto LOOP_header_bb
383 LOOP_header_bb:
384 loop_body
385 if (exit_loop_condition) goto new_exit_bb
386 else goto LOOP_header_bb
387 new_exit_bb:
388 new_merge_bb:
389 goto update_bb
390 update_bb:
392 This function:
393 1. creates and updates the relevant phi nodes to account for the new
394 incoming edge (GUARD_EDGE) into NEW_MERGE_BB. This involves:
395 1.1. Create phi nodes at NEW_MERGE_BB.
396 1.2. Update the phi nodes at the successor of NEW_MERGE_BB (denoted
397         UPDATE_BB). UPDATE_BB was the exit-bb of LOOP before NEW_MERGE_BB was added.
398 2. preserves loop-closed-ssa-form by creating the required phi nodes
399 at the exit of LOOP (i.e, in NEW_EXIT_BB).
401 There are two flavors to this function:
403 slpeel_update_phi_nodes_for_guard1:
404 Here the guard controls whether we enter or skip LOOP, where LOOP is a
405 prolog_loop (loop1 below), and the new phis created in NEW_MERGE_BB are
406 for variables that have phis in the loop header.
408 slpeel_update_phi_nodes_for_guard2:
409 Here the guard controls whether we enter or skip LOOP, where LOOP is an
410 epilog_loop (loop2 below), and the new phis created in NEW_MERGE_BB are
411 for variables that have phis in the loop exit.
413 I.E., the overall structure is:
415 loop1_preheader_bb:
416         guard1  (goto loop1/merge1_bb)
417 loop1
418 loop1_exit_bb:
419 guard2 (goto merge1_bb/merge2_bb)
420 merge1_bb
421 loop2
422 loop2_exit_bb
423 merge2_bb
424 next_bb
426 slpeel_update_phi_nodes_for_guard1 takes care of creating phis in
427 loop1_exit_bb and merge1_bb. These are entry phis (phis for the vars
428 that have phis in loop1->header).
430 slpeel_update_phi_nodes_for_guard2 takes care of creating phis in
431 loop2_exit_bb and merge2_bb. These are exit phis (phis for the vars
432 that have phis in next_bb). It also adds some of these phis to
433 loop1_exit_bb.
435 slpeel_update_phi_nodes_for_guard1 is always called before
436 slpeel_update_phi_nodes_for_guard2. They are both needed in order
437 to create correct data-flow and loop-closed-ssa-form.
439 Generally slpeel_update_phi_nodes_for_guard1 creates phis for variables
440 that change between iterations of a loop (and therefore have a phi-node
441 at the loop entry), whereas slpeel_update_phi_nodes_for_guard2 creates
442 phis for variables that are used out of the loop (and therefore have
443 loop-closed exit phis). Some variables may be both updated between
444 iterations and used after the loop. This is why in loop1_exit_bb we
445 may need both entry_phis (created by slpeel_update_phi_nodes_for_guard1)
446 and exit phis (created by slpeel_update_phi_nodes_for_guard2).
448 - IS_NEW_LOOP: if IS_NEW_LOOP is true, then LOOP is a newly created copy of
449 an original loop. i.e., we have:
451 orig_loop
452 guard_bb (goto LOOP/new_merge)
453 new_loop <-- LOOP
454 new_exit
455 new_merge
456 next_bb
458 If IS_NEW_LOOP is false, then LOOP is an original loop, in which case we
459 have:
461 new_loop
462 guard_bb (goto LOOP/new_merge)
463 orig_loop <-- LOOP
464 new_exit
465 new_merge
466 next_bb
468 The SSA names defined in the original loop have a current
469    reaching definition that records the corresponding new
470 ssa-name used in the new duplicated loop copy.
473 /* Function slpeel_update_phi_nodes_for_guard1
475 Input:
476 - GUARD_EDGE, LOOP, IS_NEW_LOOP, NEW_EXIT_BB - as explained above.
477 - DEFS - a bitmap of ssa names to mark new names for which we recorded
478 information.
480 In the context of the overall structure, we have:
482 loop1_preheader_bb:
483         guard1  (goto loop1/merge1_bb)
484 LOOP-> loop1
485 loop1_exit_bb:
486 guard2 (goto merge1_bb/merge2_bb)
487 merge1_bb
488 loop2
489 loop2_exit_bb
490 merge2_bb
491 next_bb
493 For each name updated between loop iterations (i.e - for each name that has
494 an entry (loop-header) phi in LOOP) we create a new phi in:
495 1. merge1_bb (to account for the edge from guard1)
496 2. loop1_exit_bb (an exit-phi to keep LOOP in loop-closed form)
499 static void
500 slpeel_update_phi_nodes_for_guard1 (edge guard_edge, struct loop *loop,
501 bool is_new_loop, basic_block *new_exit_bb,
502 bitmap *defs)
504 tree orig_phi, new_phi;
505 tree update_phi, update_phi2;
506 tree guard_arg, loop_arg;
507 basic_block new_merge_bb = guard_edge->dest;
508 edge e = EDGE_SUCC (new_merge_bb, 0);
509 basic_block update_bb = e->dest;
510 basic_block orig_bb = loop->header;
511 edge new_exit_e;
512 tree current_new_name;
513 tree name;
515 /* Create new bb between loop and new_merge_bb. */
516 *new_exit_bb = split_edge (single_exit (loop));
518 new_exit_e = EDGE_SUCC (*new_exit_bb, 0);
520 for (orig_phi = phi_nodes (orig_bb), update_phi = phi_nodes (update_bb);
521 orig_phi && update_phi;
522 orig_phi = PHI_CHAIN (orig_phi), update_phi = PHI_CHAIN (update_phi))
524       /* Virtual phi; mark it for renaming. We actually want to call
525          mark_sym_for_renaming, but since all SSA renaming data structures
526          are going to be freed before we get to call update_ssa, we just
527          record this name in a bitmap for now, and will mark it for
528          renaming later. */
529 name = PHI_RESULT (orig_phi);
530 if (!is_gimple_reg (SSA_NAME_VAR (name)))
531 bitmap_set_bit (vect_memsyms_to_rename, DECL_UID (SSA_NAME_VAR (name)));
533 /** 1. Handle new-merge-point phis **/
535 /* 1.1. Generate new phi node in NEW_MERGE_BB: */
536 new_phi = create_phi_node (SSA_NAME_VAR (PHI_RESULT (orig_phi)),
537 new_merge_bb);
539 /* 1.2. NEW_MERGE_BB has two incoming edges: GUARD_EDGE and the exit-edge
540 of LOOP. Set the two phi args in NEW_PHI for these edges: */
541 loop_arg = PHI_ARG_DEF_FROM_EDGE (orig_phi, EDGE_SUCC (loop->latch, 0));
542 guard_arg = PHI_ARG_DEF_FROM_EDGE (orig_phi, loop_preheader_edge (loop));
544 add_phi_arg (new_phi, loop_arg, new_exit_e);
545 add_phi_arg (new_phi, guard_arg, guard_edge);
547 /* 1.3. Update phi in successor block. */
548 gcc_assert (PHI_ARG_DEF_FROM_EDGE (update_phi, e) == loop_arg
549 || PHI_ARG_DEF_FROM_EDGE (update_phi, e) == guard_arg);
550 SET_PHI_ARG_DEF (update_phi, e->dest_idx, PHI_RESULT (new_phi));
551 update_phi2 = new_phi;
554 /** 2. Handle loop-closed-ssa-form phis **/
556 if (!is_gimple_reg (PHI_RESULT (orig_phi)))
557 continue;
559 /* 2.1. Generate new phi node in NEW_EXIT_BB: */
560 new_phi = create_phi_node (SSA_NAME_VAR (PHI_RESULT (orig_phi)),
561 *new_exit_bb);
563 /* 2.2. NEW_EXIT_BB has one incoming edge: the exit-edge of the loop. */
564 add_phi_arg (new_phi, loop_arg, single_exit (loop));
566 /* 2.3. Update phi in successor of NEW_EXIT_BB: */
567 gcc_assert (PHI_ARG_DEF_FROM_EDGE (update_phi2, new_exit_e) == loop_arg);
568 SET_PHI_ARG_DEF (update_phi2, new_exit_e->dest_idx, PHI_RESULT (new_phi));
570 /* 2.4. Record the newly created name with set_current_def.
571 We want to find a name such that
572 name = get_current_def (orig_loop_name)
573 and to set its current definition as follows:
574 set_current_def (name, new_phi_name)
576 If LOOP is a new loop then loop_arg is already the name we're
577 looking for. If LOOP is the original loop, then loop_arg is
578 the orig_loop_name and the relevant name is recorded in its
579 current reaching definition. */
580 if (is_new_loop)
581 current_new_name = loop_arg;
582 else
584 current_new_name = get_current_def (loop_arg);
585 /* current_def is not available only if the variable does not
586 change inside the loop, in which case we also don't care
587 about recording a current_def for it because we won't be
588 trying to create loop-exit-phis for it. */
589 if (!current_new_name)
590 continue;
592 gcc_assert (get_current_def (current_new_name) == NULL_TREE);
594 set_current_def (current_new_name, PHI_RESULT (new_phi));
595 bitmap_set_bit (*defs, SSA_NAME_VERSION (current_new_name));
598 set_phi_nodes (new_merge_bb, phi_reverse (phi_nodes (new_merge_bb)));
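/* [Editorial sketch - hypothetical helper, not part of the original file.]
   Both guard-updating routines build their merge-point phis with the same
   two-step pattern used above: create a phi for the variable in the merge
   block, then add one argument per incoming edge.  In isolation:  */

static tree
example_build_merge_phi (tree var, basic_block merge_bb,
                         edge loop_exit_e, tree loop_arg,
                         edge guard_e, tree guard_arg)
{
  tree phi = create_phi_node (var, merge_bb);
  add_phi_arg (phi, loop_arg, loop_exit_e);
  add_phi_arg (phi, guard_arg, guard_e);
  return phi;
}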
602 /* Function slpeel_update_phi_nodes_for_guard2
604 Input:
605 - GUARD_EDGE, LOOP, IS_NEW_LOOP, NEW_EXIT_BB - as explained above.
607 In the context of the overall structure, we have:
609 loop1_preheader_bb:
610         guard1  (goto loop1/merge1_bb)
611 loop1
612 loop1_exit_bb:
613 guard2 (goto merge1_bb/merge2_bb)
614 merge1_bb
615 LOOP-> loop2
616 loop2_exit_bb
617 merge2_bb
618 next_bb
620    For each name used outside the loop (i.e - for each name that has an exit
621 phi in next_bb) we create a new phi in:
622 1. merge2_bb (to account for the edge from guard_bb)
623 2. loop2_exit_bb (an exit-phi to keep LOOP in loop-closed form)
624 3. guard2 bb (an exit phi to keep the preceding loop in loop-closed form),
625       if needed (if it wasn't handled by slpeel_update_phi_nodes_for_guard1).
628 static void
629 slpeel_update_phi_nodes_for_guard2 (edge guard_edge, struct loop *loop,
630 bool is_new_loop, basic_block *new_exit_bb)
632 tree orig_phi, new_phi;
633 tree update_phi, update_phi2;
634 tree guard_arg, loop_arg;
635 basic_block new_merge_bb = guard_edge->dest;
636 edge e = EDGE_SUCC (new_merge_bb, 0);
637 basic_block update_bb = e->dest;
638 edge new_exit_e;
639 tree orig_def, orig_def_new_name;
640 tree new_name, new_name2;
641 tree arg;
643 /* Create new bb between loop and new_merge_bb. */
644 *new_exit_bb = split_edge (single_exit (loop));
646 new_exit_e = EDGE_SUCC (*new_exit_bb, 0);
648 for (update_phi = phi_nodes (update_bb); update_phi;
649 update_phi = PHI_CHAIN (update_phi))
651 orig_phi = update_phi;
652 orig_def = PHI_ARG_DEF_FROM_EDGE (orig_phi, e);
653 /* This loop-closed-phi actually doesn't represent a use
654 out of the loop - the phi arg is a constant. */
655 if (TREE_CODE (orig_def) != SSA_NAME)
656 continue;
657 orig_def_new_name = get_current_def (orig_def);
658 arg = NULL_TREE;
660 /** 1. Handle new-merge-point phis **/
662 /* 1.1. Generate new phi node in NEW_MERGE_BB: */
663 new_phi = create_phi_node (SSA_NAME_VAR (PHI_RESULT (orig_phi)),
664 new_merge_bb);
666 /* 1.2. NEW_MERGE_BB has two incoming edges: GUARD_EDGE and the exit-edge
667 of LOOP. Set the two PHI args in NEW_PHI for these edges: */
668 new_name = orig_def;
669 new_name2 = NULL_TREE;
670 if (orig_def_new_name)
672 new_name = orig_def_new_name;
673 /* Some variables have both loop-entry-phis and loop-exit-phis.
674 Such variables were given yet newer names by phis placed in
675 guard_bb by slpeel_update_phi_nodes_for_guard1. I.e:
676 new_name2 = get_current_def (get_current_def (orig_name)). */
677 new_name2 = get_current_def (new_name);
680 if (is_new_loop)
682 guard_arg = orig_def;
683 loop_arg = new_name;
685 else
687 guard_arg = new_name;
688 loop_arg = orig_def;
690 if (new_name2)
691 guard_arg = new_name2;
693 add_phi_arg (new_phi, loop_arg, new_exit_e);
694 add_phi_arg (new_phi, guard_arg, guard_edge);
696 /* 1.3. Update phi in successor block. */
697 gcc_assert (PHI_ARG_DEF_FROM_EDGE (update_phi, e) == orig_def);
698 SET_PHI_ARG_DEF (update_phi, e->dest_idx, PHI_RESULT (new_phi));
699 update_phi2 = new_phi;
702 /** 2. Handle loop-closed-ssa-form phis **/
704 /* 2.1. Generate new phi node in NEW_EXIT_BB: */
705 new_phi = create_phi_node (SSA_NAME_VAR (PHI_RESULT (orig_phi)),
706 *new_exit_bb);
708 /* 2.2. NEW_EXIT_BB has one incoming edge: the exit-edge of the loop. */
709 add_phi_arg (new_phi, loop_arg, single_exit (loop));
711 /* 2.3. Update phi in successor of NEW_EXIT_BB: */
712 gcc_assert (PHI_ARG_DEF_FROM_EDGE (update_phi2, new_exit_e) == loop_arg);
713 SET_PHI_ARG_DEF (update_phi2, new_exit_e->dest_idx, PHI_RESULT (new_phi));
716 /** 3. Handle loop-closed-ssa-form phis for first loop **/
718 /* 3.1. Find the relevant names that need an exit-phi in
719 GUARD_BB, i.e. names for which
720 slpeel_update_phi_nodes_for_guard1 had not already created a
721 phi node. This is the case for names that are used outside
722 the loop (and therefore need an exit phi) but are not updated
723 across loop iterations (and therefore don't have a
724 loop-header-phi).
726 slpeel_update_phi_nodes_for_guard1 is responsible for
727 creating loop-exit phis in GUARD_BB for names that have a
728 loop-header-phi. When such a phi is created we also record
729 the new name in its current definition. If this new name
730 exists, then guard_arg was set to this new name (see 1.2
731 above). Therefore, if guard_arg is not this new name, this
732 is an indication that an exit-phi in GUARD_BB was not yet
733 created, so we take care of it here. */
734 if (guard_arg == new_name2)
735 continue;
736 arg = guard_arg;
738 /* 3.2. Generate new phi node in GUARD_BB: */
739 new_phi = create_phi_node (SSA_NAME_VAR (PHI_RESULT (orig_phi)),
740 guard_edge->src);
742 /* 3.3. GUARD_BB has one incoming edge: */
743 gcc_assert (EDGE_COUNT (guard_edge->src->preds) == 1);
744 add_phi_arg (new_phi, arg, EDGE_PRED (guard_edge->src, 0));
746 /* 3.4. Update phi in successor of GUARD_BB: */
747 gcc_assert (PHI_ARG_DEF_FROM_EDGE (update_phi2, guard_edge)
748 == guard_arg);
749 SET_PHI_ARG_DEF (update_phi2, guard_edge->dest_idx, PHI_RESULT (new_phi));
752 set_phi_nodes (new_merge_bb, phi_reverse (phi_nodes (new_merge_bb)));
756 /* Make the LOOP iterate NITERS times. This is done by adding a new IV
757    that starts at zero, increases by one, and has NITERS as its limit.
759 Assumption: the exit-condition of LOOP is the last stmt in the loop. */
761 void
762 slpeel_make_loop_iterate_ntimes (struct loop *loop, tree niters)
764 tree indx_before_incr, indx_after_incr, cond_stmt, cond;
765 tree orig_cond;
766 edge exit_edge = single_exit (loop);
767 block_stmt_iterator loop_cond_bsi;
768 block_stmt_iterator incr_bsi;
769 bool insert_after;
770 tree init = build_int_cst (TREE_TYPE (niters), 0);
771 tree step = build_int_cst (TREE_TYPE (niters), 1);
772 LOC loop_loc;
774 orig_cond = get_loop_exit_condition (loop);
775 gcc_assert (orig_cond);
776 loop_cond_bsi = bsi_for_stmt (orig_cond);
778 standard_iv_increment_position (loop, &incr_bsi, &insert_after);
779 create_iv (init, step, NULL_TREE, loop,
780 &incr_bsi, insert_after, &indx_before_incr, &indx_after_incr);
782 if (exit_edge->flags & EDGE_TRUE_VALUE) /* 'then' edge exits the loop. */
783 cond = build2 (GE_EXPR, boolean_type_node, indx_after_incr, niters);
784 else /* 'then' edge loops back. */
785 cond = build2 (LT_EXPR, boolean_type_node, indx_after_incr, niters);
787 cond_stmt = build3 (COND_EXPR, TREE_TYPE (orig_cond), cond,
788 NULL_TREE, NULL_TREE);
789 bsi_insert_before (&loop_cond_bsi, cond_stmt, BSI_SAME_STMT);
791 /* Remove old loop exit test: */
792 bsi_remove (&loop_cond_bsi, true);
794 loop_loc = find_loop_location (loop);
795 if (dump_file && (dump_flags & TDF_DETAILS))
797 if (loop_loc != UNKNOWN_LOC)
798 fprintf (dump_file, "\nloop at %s:%d: ",
799 LOC_FILE (loop_loc), LOC_LINE (loop_loc));
800 print_generic_expr (dump_file, cond_stmt, TDF_SLIM);
803 loop->nb_iterations = niters;
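/* [Editorial illustration - hypothetical source-level view, not part of the
   original file.]  At the source level, the effect of
   slpeel_make_loop_iterate_ntimes is that the loop's original exit test is
   replaced by a test on the fresh IV, which counts from zero to NITERS:  */

static void
example_iterate_ntimes (int *a, const int *b, const int *c, int niters)
{
  int indx = 0;
  do
    {
      a[indx] = b[indx] + c[indx];   /* original loop body (unchanged) */
      indx = indx + 1;               /* the new IV: starts at 0, steps by 1 */
    }
  while (indx < niters);             /* new exit test; GE_EXPR or LT_EXPR is
                                        chosen to match which COND_EXPR edge
                                        exits the loop */
}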
807 /* Given LOOP this function generates a new copy of it and puts it
808 on E which is either the entry or exit of LOOP. */
810 static struct loop *
811 slpeel_tree_duplicate_loop_to_edge_cfg (struct loop *loop, edge e)
813 struct loop *new_loop;
814 basic_block *new_bbs, *bbs;
815 bool at_exit;
816 bool was_imm_dom;
817 basic_block exit_dest;
818 tree phi, phi_arg;
819 edge exit, new_exit;
821 at_exit = (e == single_exit (loop));
822 if (!at_exit && e != loop_preheader_edge (loop))
823 return NULL;
825 bbs = get_loop_body (loop);
827 /* Check whether duplication is possible. */
828 if (!can_copy_bbs_p (bbs, loop->num_nodes))
830 free (bbs);
831 return NULL;
834 /* Generate new loop structure. */
835 new_loop = duplicate_loop (loop, loop_outer (loop));
836 if (!new_loop)
838 free (bbs);
839 return NULL;
842 exit_dest = single_exit (loop)->dest;
843 was_imm_dom = (get_immediate_dominator (CDI_DOMINATORS,
844 exit_dest) == loop->header ?
845 true : false);
847 new_bbs = XNEWVEC (basic_block, loop->num_nodes);
849 exit = single_exit (loop);
850 copy_bbs (bbs, loop->num_nodes, new_bbs,
851 &exit, 1, &new_exit, NULL,
852 e->src);
854 /* Duplicating phi args at exit bbs as coming
855 also from exit of duplicated loop. */
856 for (phi = phi_nodes (exit_dest); phi; phi = PHI_CHAIN (phi))
858 phi_arg = PHI_ARG_DEF_FROM_EDGE (phi, single_exit (loop));
859 if (phi_arg)
861 edge new_loop_exit_edge;
863 if (EDGE_SUCC (new_loop->header, 0)->dest == new_loop->latch)
864 new_loop_exit_edge = EDGE_SUCC (new_loop->header, 1);
865 else
866 new_loop_exit_edge = EDGE_SUCC (new_loop->header, 0);
868 add_phi_arg (phi, phi_arg, new_loop_exit_edge);
872 if (at_exit) /* Add the loop copy at exit. */
874 redirect_edge_and_branch_force (e, new_loop->header);
875 set_immediate_dominator (CDI_DOMINATORS, new_loop->header, e->src);
876 if (was_imm_dom)
877 set_immediate_dominator (CDI_DOMINATORS, exit_dest, new_loop->header);
879 else /* Add the copy at entry. */
881 edge new_exit_e;
882 edge entry_e = loop_preheader_edge (loop);
883 basic_block preheader = entry_e->src;
885 if (!flow_bb_inside_loop_p (new_loop,
886 EDGE_SUCC (new_loop->header, 0)->dest))
887 new_exit_e = EDGE_SUCC (new_loop->header, 0);
888 else
889 new_exit_e = EDGE_SUCC (new_loop->header, 1);
891 redirect_edge_and_branch_force (new_exit_e, loop->header);
892 set_immediate_dominator (CDI_DOMINATORS, loop->header,
893 new_exit_e->src);
895 /* We have to add phi args to the loop->header here as coming
896 from new_exit_e edge. */
897 for (phi = phi_nodes (loop->header); phi; phi = PHI_CHAIN (phi))
899 phi_arg = PHI_ARG_DEF_FROM_EDGE (phi, entry_e);
900 if (phi_arg)
901 add_phi_arg (phi, phi_arg, new_exit_e);
904 redirect_edge_and_branch_force (entry_e, new_loop->header);
905 set_immediate_dominator (CDI_DOMINATORS, new_loop->header, preheader);
908 free (new_bbs);
909 free (bbs);
911 return new_loop;
915 /* Given the condition statement COND, put it as the last statement
916    of GUARD_BB; EXIT_BB is the basic block reached when the loop is skipped,
917    and DOM_BB becomes its immediate dominator. Assumes that EXIT_BB lies on the single exit of the guarded loop.
918 Returns the skip edge. */
920 static edge
921 slpeel_add_loop_guard (basic_block guard_bb, tree cond, basic_block exit_bb,
922 basic_block dom_bb)
924 block_stmt_iterator bsi;
925 edge new_e, enter_e;
926 tree cond_stmt;
928 enter_e = EDGE_SUCC (guard_bb, 0);
929 enter_e->flags &= ~EDGE_FALLTHRU;
930 enter_e->flags |= EDGE_FALSE_VALUE;
931 bsi = bsi_last (guard_bb);
933 cond_stmt = build3 (COND_EXPR, void_type_node, cond,
934 NULL_TREE, NULL_TREE);
935 bsi_insert_after (&bsi, cond_stmt, BSI_NEW_STMT);
936 /* Add new edge to connect guard block to the merge/loop-exit block. */
937 new_e = make_edge (guard_bb, exit_bb, EDGE_TRUE_VALUE);
938 set_immediate_dominator (CDI_DOMINATORS, exit_bb, dom_bb);
939 return new_e;
943 /* This function verifies that LOOP satisfies the following restrictions:
944 (1) it is innermost
945 (2) it consists of exactly 2 basic blocks - header, and an empty latch.
946 (3) it is single entry, single exit
947 (4) its exit condition is the last stmt in the header
948 (5) E is the entry/exit edge of LOOP.
951 bool
952 slpeel_can_duplicate_loop_p (struct loop *loop, edge e)
954 edge exit_e = single_exit (loop);
955 edge entry_e = loop_preheader_edge (loop);
956 tree orig_cond = get_loop_exit_condition (loop);
957 block_stmt_iterator loop_exit_bsi = bsi_last (exit_e->src);
959 if (need_ssa_update_p ())
960 return false;
962 if (loop->inner
963       /* All loops have an outer scope; the only case in which loop->outer is
964          NULL is for the function itself. */
965 || !loop_outer (loop)
966 || loop->num_nodes != 2
967 || !empty_block_p (loop->latch)
968 || !single_exit (loop)
969 /* Verify that new loop exit condition can be trivially modified. */
970 || (!orig_cond || orig_cond != bsi_stmt (loop_exit_bsi))
971 || (e != exit_e && e != entry_e))
972 return false;
974 return true;
977 #ifdef ENABLE_CHECKING
978 void
979 slpeel_verify_cfg_after_peeling (struct loop *first_loop,
980 struct loop *second_loop)
982 basic_block loop1_exit_bb = single_exit (first_loop)->dest;
983 basic_block loop2_entry_bb = loop_preheader_edge (second_loop)->src;
984 basic_block loop1_entry_bb = loop_preheader_edge (first_loop)->src;
986 /* A guard that controls whether the second_loop is to be executed or skipped
987      is placed in first_loop->exit. first_loop->exit therefore has two
988 successors - one is the preheader of second_loop, and the other is a bb
989 after second_loop.
991 gcc_assert (EDGE_COUNT (loop1_exit_bb->succs) == 2);
993   /* 1. Verify that one of the successors of first_loop->exit is the preheader
994 of second_loop. */
996 /* The preheader of new_loop is expected to have two predecessors:
997 first_loop->exit and the block that precedes first_loop. */
999 gcc_assert (EDGE_COUNT (loop2_entry_bb->preds) == 2
1000 && ((EDGE_PRED (loop2_entry_bb, 0)->src == loop1_exit_bb
1001 && EDGE_PRED (loop2_entry_bb, 1)->src == loop1_entry_bb)
1002 || (EDGE_PRED (loop2_entry_bb, 1)->src == loop1_exit_bb
1003 && EDGE_PRED (loop2_entry_bb, 0)->src == loop1_entry_bb)));
1005   /* Verify that the other successor of first_loop->exit is after the
1006 second_loop. */
1007 /* TODO */
1009 #endif
1011 /* Function slpeel_tree_peel_loop_to_edge.
1013 Peel the first (last) iterations of LOOP into a new prolog (epilog) loop
1014 that is placed on the entry (exit) edge E of LOOP. After this transformation
1015    we have two loops, one after the other - first-loop iterates FIRST_NITERS
1016 times, and second-loop iterates the remainder NITERS - FIRST_NITERS times.
1018 Input:
1019 - LOOP: the loop to be peeled.
1020 - E: the exit or entry edge of LOOP.
1021 If it is the entry edge, we peel the first iterations of LOOP. In this
1022 case first-loop is LOOP, and second-loop is the newly created loop.
1023 If it is the exit edge, we peel the last iterations of LOOP. In this
1024 case, first-loop is the newly created loop, and second-loop is LOOP.
1025    - NITERS: the total number of iterations that LOOP executes.
1026 - FIRST_NITERS: the number of iterations that the first-loop should iterate.
1027    - UPDATE_FIRST_LOOP_COUNT: specifies whether this function is responsible
1028 for updating the loop bound of the first-loop to FIRST_NITERS. If it
1029 is false, the caller of this function may want to take care of this
1030 (this can be useful if we don't want new stmts added to first-loop).
1032 Output:
1033 The function returns a pointer to the new loop-copy, or NULL if it failed
1034 to perform the transformation.
1036 The function generates two if-then-else guards: one before the first loop,
1037 and the other before the second loop:
1038 The first guard is:
1039 if (FIRST_NITERS == 0) then skip the first loop,
1040 and go directly to the second loop.
1041 The second guard is:
1042 if (FIRST_NITERS == NITERS) then skip the second loop.
1044 FORNOW only simple loops are supported (see slpeel_can_duplicate_loop_p).
1045 FORNOW the resulting code will not be in loop-closed-ssa form.
1048 struct loop*
1049 slpeel_tree_peel_loop_to_edge (struct loop *loop,
1050 edge e, tree first_niters,
1051 tree niters, bool update_first_loop_count,
1052 unsigned int th)
1054 struct loop *new_loop = NULL, *first_loop, *second_loop;
1055 edge skip_e;
1056 tree pre_condition;
1057 bitmap definitions;
1058 basic_block bb_before_second_loop, bb_after_second_loop;
1059 basic_block bb_before_first_loop;
1060 basic_block bb_between_loops;
1061 basic_block new_exit_bb;
1062 edge exit_e = single_exit (loop);
1063 LOC loop_loc;
1065 if (!slpeel_can_duplicate_loop_p (loop, e))
1066 return NULL;
1068 /* We have to initialize cfg_hooks. Then, when calling
1069 cfg_hooks->split_edge, the function tree_split_edge
1070 is actually called and, when calling cfg_hooks->duplicate_block,
1071 the function tree_duplicate_bb is called. */
1072 tree_register_cfg_hooks ();
1075 /* 1. Generate a copy of LOOP and put it on E (E is the entry/exit of LOOP).
1076 Resulting CFG would be:
1078 first_loop:
1079 do {
1080 } while ...
1082 second_loop:
1083 do {
1084 } while ...
1086 orig_exit_bb:
1089 if (!(new_loop = slpeel_tree_duplicate_loop_to_edge_cfg (loop, e)))
1091 loop_loc = find_loop_location (loop);
1092 if (dump_file && (dump_flags & TDF_DETAILS))
1094 if (loop_loc != UNKNOWN_LOC)
1095 fprintf (dump_file, "\n%s:%d: note: ",
1096 LOC_FILE (loop_loc), LOC_LINE (loop_loc));
1097 fprintf (dump_file, "tree_duplicate_loop_to_edge_cfg failed.\n");
1099 return NULL;
1102 if (e == exit_e)
1104 /* NEW_LOOP was placed after LOOP. */
1105 first_loop = loop;
1106 second_loop = new_loop;
1108 else
1110 /* NEW_LOOP was placed before LOOP. */
1111 first_loop = new_loop;
1112 second_loop = loop;
1115 definitions = ssa_names_to_replace ();
1116 slpeel_update_phis_for_duplicate_loop (loop, new_loop, e == exit_e);
1117 rename_variables_in_loop (new_loop);
1120 /* 2. Add the guard that controls whether the first loop is executed.
1121 Resulting CFG would be:
1123 bb_before_first_loop:
1124 if (FIRST_NITERS == 0) GOTO bb_before_second_loop
1125 GOTO first-loop
1127 first_loop:
1128 do {
1129 } while ...
1131 bb_before_second_loop:
1133 second_loop:
1134 do {
1135 } while ...
1137 orig_exit_bb:
1140 bb_before_first_loop = split_edge (loop_preheader_edge (first_loop));
1141 bb_before_second_loop = split_edge (single_exit (first_loop));
1143 pre_condition =
1144 fold_build2 (LE_EXPR, boolean_type_node, first_niters,
1145 build_int_cst (TREE_TYPE (first_niters), th));
1147 skip_e = slpeel_add_loop_guard (bb_before_first_loop, pre_condition,
1148 bb_before_second_loop, bb_before_first_loop);
1149 slpeel_update_phi_nodes_for_guard1 (skip_e, first_loop,
1150 first_loop == new_loop,
1151 &new_exit_bb, &definitions);
1154 /* 3. Add the guard that controls whether the second loop is executed.
1155 Resulting CFG would be:
1157 bb_before_first_loop:
1158 if (FIRST_NITERS == 0) GOTO bb_before_second_loop (skip first loop)
1159 GOTO first-loop
1161 first_loop:
1162 do {
1163 } while ...
1165 bb_between_loops:
1166 if (FIRST_NITERS == NITERS) GOTO bb_after_second_loop (skip second loop)
1167 GOTO bb_before_second_loop
1169 bb_before_second_loop:
1171 second_loop:
1172 do {
1173 } while ...
1175 bb_after_second_loop:
1177 orig_exit_bb:
1180 bb_between_loops = new_exit_bb;
1181 bb_after_second_loop = split_edge (single_exit (second_loop));
1183 pre_condition =
1184 fold_build2 (EQ_EXPR, boolean_type_node, first_niters, niters);
1185 skip_e = slpeel_add_loop_guard (bb_between_loops, pre_condition,
1186 bb_after_second_loop, bb_before_first_loop);
1187 slpeel_update_phi_nodes_for_guard2 (skip_e, second_loop,
1188 second_loop == new_loop, &new_exit_bb);
1190 /* 4. Make first-loop iterate FIRST_NITERS times, if requested.
1192 if (update_first_loop_count)
1193 slpeel_make_loop_iterate_ntimes (first_loop, first_niters);
1195 BITMAP_FREE (definitions);
1196 delete_update_ssa ();
1198 return new_loop;
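/* [Editorial illustration - hypothetical source-level view, not part of the
   original file.]  The guarded two-loop structure produced by
   slpeel_tree_peel_loop_to_edge corresponds roughly to the following source
   code: the first guard skips the first loop when it has no iterations to
   perform, and the second guard skips the second loop when nothing remains.  */

static void
example_peeled_form (int *a, const int *b, const int *c,
                     int niters, int first_niters)
{
  int i = 0;

  if (first_niters > 0)              /* guard 1: skip first-loop if empty  */
    for (; i < first_niters; i++)    /* first-loop (e.g. a peeled prolog)  */
      a[i] = b[i] + c[i];

  if (first_niters != niters)        /* guard 2: skip second-loop if done  */
    for (; i < niters; i++)          /* second-loop (remaining iterations) */
      a[i] = b[i] + c[i];
}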
1201 /* Function find_loop_location.
1203 Extract the location of the loop in the source code.
1204 If the loop is not well formed for vectorization, an estimated
1205 location is calculated.
1206    Return the loop location if it can be determined, and UNKNOWN_LOC otherwise. */
1208 LOC
1209 find_loop_location (struct loop *loop)
1211 tree node = NULL_TREE;
1212 basic_block bb;
1213 block_stmt_iterator si;
1215 if (!loop)
1216 return UNKNOWN_LOC;
1218 node = get_loop_exit_condition (loop);
1220 if (node && CAN_HAVE_LOCATION_P (node) && EXPR_HAS_LOCATION (node)
1221 && EXPR_FILENAME (node) && EXPR_LINENO (node))
1222 return EXPR_LOC (node);
1224   /* If we got here, the loop is probably not "well formed";
1225      try to estimate the loop location. */
1227 if (!loop->header)
1228 return UNKNOWN_LOC;
1230 bb = loop->header;
1232 for (si = bsi_start (bb); !bsi_end_p (si); bsi_next (&si))
1234 node = bsi_stmt (si);
1235 if (node && CAN_HAVE_LOCATION_P (node) && EXPR_HAS_LOCATION (node))
1236 return EXPR_LOC (node);
1239 return UNKNOWN_LOC;
1243 /*************************************************************************
1244 Vectorization Debug Information.
1245 *************************************************************************/
1247 /* Function vect_set_verbosity_level.
1249 Called from toplev.c upon detection of the
1250 -ftree-vectorizer-verbose=N option. */
1252 void
1253 vect_set_verbosity_level (const char *val)
1255 unsigned int vl;
1257 vl = atoi (val);
1258 if (vl < MAX_VERBOSITY_LEVEL)
1259 vect_verbosity_level = vl;
1260 else
1261 vect_verbosity_level = MAX_VERBOSITY_LEVEL - 1;
1265 /* Function vect_set_dump_settings.
1267 Fix the verbosity level of the vectorizer if the
1268 requested level was not set explicitly using the flag
1269 -ftree-vectorizer-verbose=N.
1270 Decide where to print the debugging information (dump_file/stderr).
1271 If the user defined the verbosity level, but there is no dump file,
1272 print to stderr, otherwise print to the dump file. */
1274 static void
1275 vect_set_dump_settings (void)
1277 vect_dump = dump_file;
1279 /* Check if the verbosity level was defined by the user: */
1280 if (vect_verbosity_level != MAX_VERBOSITY_LEVEL)
1282 /* If there is no dump file, print to stderr. */
1283 if (!dump_file)
1284 vect_dump = stderr;
1285 return;
1288 /* User didn't specify verbosity level: */
1289 if (dump_file && (dump_flags & TDF_DETAILS))
1290 vect_verbosity_level = REPORT_DETAILS;
1291 else if (dump_file && (dump_flags & TDF_STATS))
1292 vect_verbosity_level = REPORT_UNVECTORIZED_LOOPS;
1293 else
1294 vect_verbosity_level = REPORT_NONE;
1296 gcc_assert (dump_file || vect_verbosity_level == REPORT_NONE);
1300 /* Function vect_print_dump_info.
1302 For vectorization debug dumps. */
1304 bool
1305 vect_print_dump_info (enum verbosity_levels vl)
1307 if (vl > vect_verbosity_level)
1308 return false;
1310 if (!current_function_decl || !vect_dump)
1311 return false;
1313 if (vect_loop_location == UNKNOWN_LOC)
1314 fprintf (vect_dump, "\n%s:%d: note: ",
1315 DECL_SOURCE_FILE (current_function_decl),
1316 DECL_SOURCE_LINE (current_function_decl));
1317 else
1318 fprintf (vect_dump, "\n%s:%d: note: ",
1319 LOC_FILE (vect_loop_location), LOC_LINE (vect_loop_location));
1321 return true;
1325 /*************************************************************************
1326 Vectorization Utilities.
1327 *************************************************************************/
1329 /* Function new_stmt_vec_info.
1331 Create and initialize a new stmt_vec_info struct for STMT. */
1333 stmt_vec_info
1334 new_stmt_vec_info (tree stmt, loop_vec_info loop_vinfo)
1336 stmt_vec_info res;
1337 res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));
1339 STMT_VINFO_TYPE (res) = undef_vec_info_type;
1340 STMT_VINFO_STMT (res) = stmt;
1341 STMT_VINFO_LOOP_VINFO (res) = loop_vinfo;
1342 STMT_VINFO_RELEVANT (res) = 0;
1343 STMT_VINFO_LIVE_P (res) = false;
1344 STMT_VINFO_VECTYPE (res) = NULL;
1345 STMT_VINFO_VEC_STMT (res) = NULL;
1346 STMT_VINFO_IN_PATTERN_P (res) = false;
1347 STMT_VINFO_RELATED_STMT (res) = NULL;
1348 STMT_VINFO_DATA_REF (res) = NULL;
1349 if (TREE_CODE (stmt) == PHI_NODE)
1350 STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
1351 else
1352 STMT_VINFO_DEF_TYPE (res) = vect_loop_def;
1353 STMT_VINFO_SAME_ALIGN_REFS (res) = VEC_alloc (dr_p, heap, 5);
1354 STMT_VINFO_INSIDE_OF_LOOP_COST (res) = 0;
1355 STMT_VINFO_OUTSIDE_OF_LOOP_COST (res) = 0;
1356 DR_GROUP_FIRST_DR (res) = NULL_TREE;
1357 DR_GROUP_NEXT_DR (res) = NULL_TREE;
1358 DR_GROUP_SIZE (res) = 0;
1359 DR_GROUP_STORE_COUNT (res) = 0;
1360 DR_GROUP_GAP (res) = 0;
1361 DR_GROUP_SAME_DR_STMT (res) = NULL_TREE;
1362 DR_GROUP_READ_WRITE_DEPENDENCE (res) = false;
1364 return res;
1368 /* Function new_loop_vec_info.
1370 Create and initialize a new loop_vec_info struct for LOOP, as well as
1371 stmt_vec_info structs for all the stmts in LOOP. */
1373 loop_vec_info
1374 new_loop_vec_info (struct loop *loop)
1376 loop_vec_info res;
1377 basic_block *bbs;
1378 block_stmt_iterator si;
1379 unsigned int i;
1381 res = (loop_vec_info) xcalloc (1, sizeof (struct _loop_vec_info));
1383 bbs = get_loop_body (loop);
1385 /* Create stmt_info for all stmts in the loop. */
1386 for (i = 0; i < loop->num_nodes; i++)
1388 basic_block bb = bbs[i];
1389 tree phi;
1391 for (phi = phi_nodes (bb); phi; phi = PHI_CHAIN (phi))
1393 stmt_ann_t ann = get_stmt_ann (phi);
1394 set_stmt_info (ann, new_stmt_vec_info (phi, res));
1397 for (si = bsi_start (bb); !bsi_end_p (si); bsi_next (&si))
1399 tree stmt = bsi_stmt (si);
1400 stmt_ann_t ann;
1402 ann = stmt_ann (stmt);
1403 set_stmt_info (ann, new_stmt_vec_info (stmt, res));
1407 LOOP_VINFO_LOOP (res) = loop;
1408 LOOP_VINFO_BBS (res) = bbs;
1409 LOOP_VINFO_EXIT_COND (res) = NULL;
1410 LOOP_VINFO_NITERS (res) = NULL;
1411 LOOP_VINFO_VECTORIZABLE_P (res) = 0;
1412 LOOP_PEELING_FOR_ALIGNMENT (res) = 0;
1413 LOOP_VINFO_VECT_FACTOR (res) = 0;
1414 LOOP_VINFO_DATAREFS (res) = VEC_alloc (data_reference_p, heap, 10);
1415 LOOP_VINFO_DDRS (res) = VEC_alloc (ddr_p, heap, 10 * 10);
1416 LOOP_VINFO_UNALIGNED_DR (res) = NULL;
1417 LOOP_VINFO_MAY_MISALIGN_STMTS (res)
1418 = VEC_alloc (tree, heap, PARAM_VALUE (PARAM_VECT_MAX_VERSION_CHECKS));
1420 return res;
1424 /* Function destroy_loop_vec_info.
1426 Free LOOP_VINFO struct, as well as all the stmt_vec_info structs of all the
1427 stmts in the loop. */
1429 void
1430 destroy_loop_vec_info (loop_vec_info loop_vinfo)
1432 struct loop *loop;
1433 basic_block *bbs;
1434 int nbbs;
1435 block_stmt_iterator si;
1436 int j;
1438 if (!loop_vinfo)
1439 return;
1441 loop = LOOP_VINFO_LOOP (loop_vinfo);
1443 bbs = LOOP_VINFO_BBS (loop_vinfo);
1444 nbbs = loop->num_nodes;
1446 for (j = 0; j < nbbs; j++)
1448 basic_block bb = bbs[j];
1449 tree phi;
1450 stmt_vec_info stmt_info;
1452 for (phi = phi_nodes (bb); phi; phi = PHI_CHAIN (phi))
1454 stmt_ann_t ann = stmt_ann (phi);
1456 stmt_info = vinfo_for_stmt (phi);
1457 free (stmt_info);
1458 set_stmt_info (ann, NULL);
1461 for (si = bsi_start (bb); !bsi_end_p (si); )
1463 tree stmt = bsi_stmt (si);
1464 stmt_ann_t ann = stmt_ann (stmt);
1465 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1467 if (stmt_info)
1469 /* Check if this is a "pattern stmt" (introduced by the
1470 vectorizer during the pattern recognition pass). */
1471 bool remove_stmt_p = false;
1472 tree orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
1473 if (orig_stmt)
1475 stmt_vec_info orig_stmt_info = vinfo_for_stmt (orig_stmt);
1476 if (orig_stmt_info
1477 && STMT_VINFO_IN_PATTERN_P (orig_stmt_info))
1478 remove_stmt_p = true;
1481 /* Free stmt_vec_info. */
1482 VEC_free (dr_p, heap, STMT_VINFO_SAME_ALIGN_REFS (stmt_info));
1483 free (stmt_info);
1484 set_stmt_info (ann, NULL);
1486 /* Remove dead "pattern stmts". */
1487 if (remove_stmt_p)
1488 bsi_remove (&si, true);
1490 bsi_next (&si);
1494 free (LOOP_VINFO_BBS (loop_vinfo));
1495 free_data_refs (LOOP_VINFO_DATAREFS (loop_vinfo));
1496 free_dependence_relations (LOOP_VINFO_DDRS (loop_vinfo));
1497 VEC_free (tree, heap, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo));
1499 free (loop_vinfo);
1500 loop->aux = NULL;
1504 /* Function vect_can_force_dr_alignment_p.
1506    Returns whether the alignment of DECL can be forced so that it is aligned
1507    on an ALIGNMENT-bit boundary. */
1509 bool
1510 vect_can_force_dr_alignment_p (tree decl, unsigned int alignment)
1512 if (TREE_CODE (decl) != VAR_DECL)
1513 return false;
1515 if (DECL_EXTERNAL (decl))
1516 return false;
1518 if (TREE_ASM_WRITTEN (decl))
1519 return false;
1521 if (TREE_STATIC (decl))
1522 return (alignment <= MAX_OFILE_ALIGNMENT);
1523 else
1524     /* This is not 100% correct. The absolutely correct stack alignment
1525 is STACK_BOUNDARY. We're supposed to hope, but not assume, that
1526 PREFERRED_STACK_BOUNDARY is honored by all translation units.
1527 However, until someone implements forced stack alignment, SSE
1528 isn't really usable without this. */
1529 return (alignment <= PREFERRED_STACK_BOUNDARY);
1533 /* Function get_vectype_for_scalar_type.
1535 Returns the vector type corresponding to SCALAR_TYPE as supported
1536 by the target. */
1538 tree
1539 get_vectype_for_scalar_type (tree scalar_type)
1541 enum machine_mode inner_mode = TYPE_MODE (scalar_type);
1542 int nbytes = GET_MODE_SIZE (inner_mode);
1543 int nunits;
1544 tree vectype;
1546 if (nbytes == 0 || nbytes >= UNITS_PER_SIMD_WORD)
1547 return NULL_TREE;
1549 /* FORNOW: Only a single vector size per target (UNITS_PER_SIMD_WORD)
1550 is expected. */
1551 nunits = UNITS_PER_SIMD_WORD / nbytes;
1553 vectype = build_vector_type (scalar_type, nunits);
1554 if (vect_print_dump_info (REPORT_DETAILS))
1556 fprintf (vect_dump, "get vectype with %d units of type ", nunits);
1557 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
1560 if (!vectype)
1561 return NULL_TREE;
1563 if (vect_print_dump_info (REPORT_DETAILS))
1565 fprintf (vect_dump, "vectype: ");
1566 print_generic_expr (vect_dump, vectype, TDF_SLIM);
1569 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
1570 && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
1572 if (vect_print_dump_info (REPORT_DETAILS))
1573 fprintf (vect_dump, "mode not supported by target.");
1574 return NULL_TREE;
1577 return vectype;
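/* [Editorial usage sketch - hypothetical, not part of the original file.]
   For example, on a target with UNITS_PER_SIMD_WORD == 16, asking for the
   vector type of 'short' (2 bytes) yields an 8-unit vector type, or
   NULL_TREE if the resulting vector mode is not supported:  */

static tree
example_vectype_for_short (void)
{
  return get_vectype_for_scalar_type (short_integer_type_node);
}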
1581 /* Function vect_supportable_dr_alignment
1583 Return whether the data reference DR is supported with respect to its
1584 alignment. */
1586 enum dr_alignment_support
1587 vect_supportable_dr_alignment (struct data_reference *dr)
1589 tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (DR_STMT (dr)));
1590 enum machine_mode mode = (int) TYPE_MODE (vectype);
1592 if (aligned_access_p (dr))
1593 return dr_aligned;
1595 /* Possibly unaligned access. */
1597 if (DR_IS_READ (dr))
1599 if (vec_realign_load_optab->handlers[mode].insn_code != CODE_FOR_nothing
1600 && (!targetm.vectorize.builtin_mask_for_load
1601 || targetm.vectorize.builtin_mask_for_load ()))
1602 return dr_unaligned_software_pipeline;
1604 if (movmisalign_optab->handlers[mode].insn_code != CODE_FOR_nothing)
1605 /* Can't software pipeline the loads, but can at least do them. */
1606 return dr_unaligned_supported;
1609 /* Unsupported. */
1610 return dr_unaligned_unsupported;
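/* [Editorial usage sketch - hypothetical helper, not part of the original
   file.]  A caller that only needs to know whether the access can be handled
   at all can treat the three supported answers uniformly; they differ only
   in how the access will eventually be code-generated:  */

static bool
example_dr_alignment_ok_p (struct data_reference *dr)
{
  switch (vect_supportable_dr_alignment (dr))
    {
    case dr_aligned:
    case dr_unaligned_supported:
    case dr_unaligned_software_pipeline:
      return true;
    default:
      return false;
    }
}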
1614 /* Function vect_is_simple_use.
1616 Input:
1617 LOOP - the loop that is being vectorized.
1618 OPERAND - operand of a stmt in LOOP.
1619 DEF - the defining stmt in case OPERAND is an SSA_NAME.
1621 Returns whether a stmt with OPERAND can be vectorized.
1622 Supportable operands are constants, loop invariants, and operands that are
1623 defined by the current iteration of the loop. Unsupportable operands are
1624 those that are defined by a previous iteration of the loop (as is the case
1625 in reduction/induction computations). */
1627 bool
1628 vect_is_simple_use (tree operand, loop_vec_info loop_vinfo, tree *def_stmt,
1629 tree *def, enum vect_def_type *dt)
1631 basic_block bb;
1632 stmt_vec_info stmt_vinfo;
1633 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1635 *def_stmt = NULL_TREE;
1636 *def = NULL_TREE;
1638 if (vect_print_dump_info (REPORT_DETAILS))
1640 fprintf (vect_dump, "vect_is_simple_use: operand ");
1641 print_generic_expr (vect_dump, operand, TDF_SLIM);
1644 if (TREE_CODE (operand) == INTEGER_CST || TREE_CODE (operand) == REAL_CST)
1646 *dt = vect_constant_def;
1647 return true;
1649 if (is_gimple_min_invariant (operand))
1651 *def = operand;
1652 *dt = vect_invariant_def;
1653 return true;
1656 if (TREE_CODE (operand) != SSA_NAME)
1658 if (vect_print_dump_info (REPORT_DETAILS))
1659 fprintf (vect_dump, "not ssa-name.");
1660 return false;
1663 *def_stmt = SSA_NAME_DEF_STMT (operand);
1664 if (*def_stmt == NULL_TREE )
1666 if (vect_print_dump_info (REPORT_DETAILS))
1667 fprintf (vect_dump, "no def_stmt.");
1668 return false;
1671 if (vect_print_dump_info (REPORT_DETAILS))
1673 fprintf (vect_dump, "def_stmt: ");
1674 print_generic_expr (vect_dump, *def_stmt, TDF_SLIM);
1677   /* An empty stmt is expected only in the case of a function argument.
1678      (Otherwise we expect a PHI_NODE or a GIMPLE_MODIFY_STMT). */
1679 if (IS_EMPTY_STMT (*def_stmt))
1681 tree arg = TREE_OPERAND (*def_stmt, 0);
1682 if (is_gimple_min_invariant (arg))
1684 *def = operand;
1685 *dt = vect_invariant_def;
1686 return true;
1689 if (vect_print_dump_info (REPORT_DETAILS))
1690 fprintf (vect_dump, "Unexpected empty stmt.");
1691 return false;
1694 bb = bb_for_stmt (*def_stmt);
1695 if (!flow_bb_inside_loop_p (loop, bb))
1696 *dt = vect_invariant_def;
1697 else
1699 stmt_vinfo = vinfo_for_stmt (*def_stmt);
1700 *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
1703 if (*dt == vect_unknown_def_type)
1705 if (vect_print_dump_info (REPORT_DETAILS))
1706 fprintf (vect_dump, "Unsupported pattern.");
1707 return false;
1710 if (vect_print_dump_info (REPORT_DETAILS))
1711 fprintf (vect_dump, "type of def: %d.",*dt);
1713 switch (TREE_CODE (*def_stmt))
1715 case PHI_NODE:
1716 *def = PHI_RESULT (*def_stmt);
1717 gcc_assert (*dt == vect_induction_def || *dt == vect_reduction_def
1718 || *dt == vect_invariant_def);
1719 break;
1721 case GIMPLE_MODIFY_STMT:
1722 *def = GIMPLE_STMT_OPERAND (*def_stmt, 0);
1723 break;
1725 default:
1726 if (vect_print_dump_info (REPORT_DETAILS))
1727 fprintf (vect_dump, "unsupported defining stmt: ");
1728 return false;
1731 return true;
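/* [Editorial usage sketch - hypothetical helper, not part of the original
   file.]  A typical caller passes an operand of the stmt being analyzed and
   then inspects the returned def-type; for instance, to accept only operands
   that do not come from a reduction or induction:  */

static bool
example_simple_loop_operand_p (tree operand, loop_vec_info loop_vinfo)
{
  tree def_stmt, def;
  enum vect_def_type dt;

  if (!vect_is_simple_use (operand, loop_vinfo, &def_stmt, &def, &dt))
    return false;

  return (dt == vect_constant_def
          || dt == vect_invariant_def
          || dt == vect_loop_def);
}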
1735 /* Function supportable_widening_operation
1737 Check whether an operation represented by the code CODE is a
1738 widening operation that is supported by the target platform in
1739 vector form (i.e., when operating on arguments of type VECTYPE).
1741 Widening operations we currently support are NOP (CONVERT), FLOAT
1742 and WIDEN_MULT. This function checks if these operations are supported
1743 by the target platform either directly (via vector tree-codes), or via
1744 target builtins.
1746 Output:
1747 - CODE1 and CODE2 are codes of vector operations to be used when
1748 vectorizing the operation, if available.
1749 - DECL1 and DECL2 are decls of target builtin functions to be used
1750 when vectorizing the operation, if available. In this case,
1751 CODE1 and CODE2 are CALL_EXPR. */
1753 bool
1754 supportable_widening_operation (enum tree_code code, tree stmt, tree vectype,
1755 tree *decl1, tree *decl2,
1756 enum tree_code *code1, enum tree_code *code2)
1758 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1759 bool ordered_p;
1760 enum machine_mode vec_mode;
1761 enum insn_code icode1, icode2;
1762 optab optab1, optab2;
1763 tree expr = GIMPLE_STMT_OPERAND (stmt, 1);
1764 tree type = TREE_TYPE (expr);
1765 tree wide_vectype = get_vectype_for_scalar_type (type);
1766 enum tree_code c1, c2;
1768 /* The result of a vectorized widening operation usually requires two vectors
1769    (because the widened results do not fit into one vector). The generated
1770    vector results would normally be expected to appear in the same
1771    order as in the original scalar computation, i.e., if 8 results are
1772 generated in each vector iteration, they are to be organized as follows:
1773 vect1: [res1,res2,res3,res4], vect2: [res5,res6,res7,res8].
1775 However, in the special case that the result of the widening operation is
1776 used in a reduction computation only, the order doesn't matter (because
1777 when vectorizing a reduction we change the order of the computation).
1778 Some targets can take advantage of this and generate more efficient code.
1779    For example, targets like Altivec that support widen_mult using a sequence
1780    of {mult_even,mult_odd} generate the following vectors:
1781 vect1: [res1,res3,res5,res7], vect2: [res2,res4,res6,res8]. */
1783 if (STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction)
1784 ordered_p = false;
1785 else
1786 ordered_p = true;
1788 if (!ordered_p
1789 && code == WIDEN_MULT_EXPR
1790 && targetm.vectorize.builtin_mul_widen_even
1791 && targetm.vectorize.builtin_mul_widen_even (vectype)
1792 && targetm.vectorize.builtin_mul_widen_odd
1793 && targetm.vectorize.builtin_mul_widen_odd (vectype))
1795 if (vect_print_dump_info (REPORT_DETAILS))
1796 fprintf (vect_dump, "Unordered widening operation detected.");
1798 *code1 = *code2 = CALL_EXPR;
1799 *decl1 = targetm.vectorize.builtin_mul_widen_even (vectype);
1800 *decl2 = targetm.vectorize.builtin_mul_widen_odd (vectype);
1801 return true;
1804 switch (code)
1806 case WIDEN_MULT_EXPR:
1807 if (BYTES_BIG_ENDIAN)
1809 c1 = VEC_WIDEN_MULT_HI_EXPR;
1810 c2 = VEC_WIDEN_MULT_LO_EXPR;
1812 else
1814 c2 = VEC_WIDEN_MULT_HI_EXPR;
1815 c1 = VEC_WIDEN_MULT_LO_EXPR;
1817 break;
1819 case NOP_EXPR:
1820 case CONVERT_EXPR:
1821 if (BYTES_BIG_ENDIAN)
1823 c1 = VEC_UNPACK_HI_EXPR;
1824 c2 = VEC_UNPACK_LO_EXPR;
1826 else
1828 c2 = VEC_UNPACK_HI_EXPR;
1829 c1 = VEC_UNPACK_LO_EXPR;
1831 break;
1833 case FLOAT_EXPR:
1834 if (BYTES_BIG_ENDIAN)
1836 c1 = VEC_UNPACK_FLOAT_HI_EXPR;
1837 c2 = VEC_UNPACK_FLOAT_LO_EXPR;
1839 else
1841 c2 = VEC_UNPACK_FLOAT_HI_EXPR;
1842 c1 = VEC_UNPACK_FLOAT_LO_EXPR;
1844 break;
1846 case FIX_TRUNC_EXPR:
1847 /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
1848 VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
1849 computing the operation. */
1850 return false;
1852 default:
1853 gcc_unreachable ();
1856 if (code == FIX_TRUNC_EXPR)
1858 /* The signedness is determined from the output operand. */
1859 optab1 = optab_for_tree_code (c1, type);
1860 optab2 = optab_for_tree_code (c2, type);
1862 else
1864 optab1 = optab_for_tree_code (c1, vectype);
1865 optab2 = optab_for_tree_code (c2, vectype);
1868 if (!optab1 || !optab2)
1869 return false;
1871 vec_mode = TYPE_MODE (vectype);
1872 if ((icode1 = optab1->handlers[(int) vec_mode].insn_code) == CODE_FOR_nothing
1873 || insn_data[icode1].operand[0].mode != TYPE_MODE (wide_vectype)
1874 || (icode2 = optab2->handlers[(int) vec_mode].insn_code)
1875 == CODE_FOR_nothing
1876 || insn_data[icode2].operand[0].mode != TYPE_MODE (wide_vectype))
1877 return false;
1879 *code1 = c1;
1880 *code2 = c2;
1881 return true;
1885 /* Function supportable_narrowing_operation
1887 Check whether an operation represented by the code CODE is a
1888 narrowing operation that is supported by the target platform in
1889 vector form (i.e., when operating on arguments of type VECTYPE).
1891 Narrowing operations we currently support are NOP (CONVERT) and
1892 FIX_TRUNC. This function checks if these operations are supported by
1893 the target platform directly via vector tree-codes.
1895 Output:
1896 - CODE1 is the code of a vector operation to be used when
1897 vectorizing the operation, if available. */
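/* For illustration (a hypothetical example): a narrowing conversion such as

     int b[N]; short a[N];
     for (i = 0; i < N; i++)
       a[i] = (short) b[i];

   would use VEC_PACK_TRUNC_EXPR, packing two vectors of ints into one vector
   of shorts, provided the target supplies the corresponding optab for the
   vector mode (checked below).  */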
1899 bool
1900 supportable_narrowing_operation (enum tree_code code,
1901 tree stmt, tree vectype,
1902 enum tree_code *code1)
1904 enum machine_mode vec_mode;
1905 enum insn_code icode1;
1906 optab optab1;
1907 tree expr = GIMPLE_STMT_OPERAND (stmt, 1);
1908 tree type = TREE_TYPE (expr);
1909 tree narrow_vectype = get_vectype_for_scalar_type (type);
1910 enum tree_code c1;
1912 switch (code)
1914 case NOP_EXPR:
1915 case CONVERT_EXPR:
1916 c1 = VEC_PACK_TRUNC_EXPR;
1917 break;
1919 case FIX_TRUNC_EXPR:
1920 c1 = VEC_PACK_FIX_TRUNC_EXPR;
1921 break;
1923 case FLOAT_EXPR:
1924 /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
1925 tree code and optabs used for computing the operation. */
1926 return false;
1928 default:
1929 gcc_unreachable ();
1932 if (code == FIX_TRUNC_EXPR)
1933 /* The signedness is determined from the output operand. */
1934 optab1 = optab_for_tree_code (c1, type);
1935 else
1936 optab1 = optab_for_tree_code (c1, vectype);
1938 if (!optab1)
1939 return false;
1941 vec_mode = TYPE_MODE (vectype);
1942 if ((icode1 = optab1->handlers[(int) vec_mode].insn_code) == CODE_FOR_nothing
1943 || insn_data[icode1].operand[0].mode != TYPE_MODE (narrow_vectype))
1944 return false;
1946 *code1 = c1;
1947 return true;
1951 /* Function reduction_code_for_scalar_code
1953 Input:
1954 CODE - tree_code of a reduction operation.
1956 Output:
1957 REDUC_CODE - the corresponding tree-code to be used to reduce the
1958 vector of partial results into a single scalar result (which
1959 will also reside in a vector).
1961 Return TRUE if a corresponding REDUC_CODE was found, FALSE otherwise. */
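/* For illustration (a hypothetical example): for a sum reduction

     for (i = 0; i < N; i++)
       sum += a[i];

   the vectorized loop accumulates partial sums in a vector, and the scalar
   PLUS_EXPR maps to REDUC_PLUS_EXPR, which adds the elements of that vector
   into a single result (still held in a vector element) after the loop.  */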
1963 bool
1964 reduction_code_for_scalar_code (enum tree_code code,
1965 enum tree_code *reduc_code)
1967 switch (code)
1969 case MAX_EXPR:
1970 *reduc_code = REDUC_MAX_EXPR;
1971 return true;
1973 case MIN_EXPR:
1974 *reduc_code = REDUC_MIN_EXPR;
1975 return true;
1977 case PLUS_EXPR:
1978 *reduc_code = REDUC_PLUS_EXPR;
1979 return true;
1981 default:
1982 return false;
1987 /* Function vect_is_simple_reduction
1989 Detect a cross-iteration def-use cycle that represents a simple
1990 reduction computation. We look for the following pattern:
1992 loop_header:
1993 a1 = phi < a0, a2 >
1994 a3 = ...
1995 a2 = operation (a3, a1)
1997 such that:
1998 1. operation is commutative and associative and it is safe to
1999 change the order of the computation.
2000 2. no uses for a2 in the loop (a2 is used out of the loop)
2001 3. no uses of a1 in the loop besides the reduction operation.
2003 Condition 1 is tested here.
2004 Conditions 2,3 are tested in vect_mark_stmts_to_be_vectorized. */
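/* For illustration (a hypothetical example, with made-up SSA names): the
   source loop

     for (i = 0; i < N; i++)
       sum += a[i];

   matches the pattern above with a1 = phi <sum_0, sum_2>, a3 = a[i],
   and a2 = a3 + a1.  */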
2006 tree
2007 vect_is_simple_reduction (struct loop *loop, tree phi)
2009 edge latch_e = loop_latch_edge (loop);
2010 tree loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
2011 tree def_stmt, def1, def2;
2012 enum tree_code code;
2013 int op_type;
2014 tree operation, op1, op2;
2015 tree type;
2016 int nloop_uses;
2017 tree name;
2018 imm_use_iterator imm_iter;
2019 use_operand_p use_p;
2021 name = PHI_RESULT (phi);
2022 nloop_uses = 0;
2023 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
2025 tree use_stmt = USE_STMT (use_p);
2026 if (flow_bb_inside_loop_p (loop, bb_for_stmt (use_stmt))
2027 && vinfo_for_stmt (use_stmt)
2028 && !is_pattern_stmt_p (vinfo_for_stmt (use_stmt)))
2029 nloop_uses++;
2030 if (nloop_uses > 1)
2032 if (vect_print_dump_info (REPORT_DETAILS))
2033 fprintf (vect_dump, "reduction used in loop.");
2034 return NULL_TREE;
2038 if (TREE_CODE (loop_arg) != SSA_NAME)
2040 if (vect_print_dump_info (REPORT_DETAILS))
2042 fprintf (vect_dump, "reduction: not ssa_name: ");
2043 print_generic_expr (vect_dump, loop_arg, TDF_SLIM);
2045 return NULL_TREE;
2048 def_stmt = SSA_NAME_DEF_STMT (loop_arg);
2049 if (!def_stmt)
2051 if (vect_print_dump_info (REPORT_DETAILS))
2052 fprintf (vect_dump, "reduction: no def_stmt.");
2053 return NULL_TREE;
2056 if (TREE_CODE (def_stmt) != GIMPLE_MODIFY_STMT)
2058 if (vect_print_dump_info (REPORT_DETAILS))
2059 print_generic_expr (vect_dump, def_stmt, TDF_SLIM);
2060 return NULL_TREE;
2063 name = GIMPLE_STMT_OPERAND (def_stmt, 0);
2064 nloop_uses = 0;
2065 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
2067 tree use_stmt = USE_STMT (use_p);
2068 if (flow_bb_inside_loop_p (loop, bb_for_stmt (use_stmt))
2069 && vinfo_for_stmt (use_stmt)
2070 && !is_pattern_stmt_p (vinfo_for_stmt (use_stmt)))
2071 nloop_uses++;
2072 if (nloop_uses > 1)
2074 if (vect_print_dump_info (REPORT_DETAILS))
2075 fprintf (vect_dump, "reduction used in loop.");
2076 return NULL_TREE;
2080 operation = GIMPLE_STMT_OPERAND (def_stmt, 1);
2081 code = TREE_CODE (operation);
2082 if (!commutative_tree_code (code) || !associative_tree_code (code))
2084 if (vect_print_dump_info (REPORT_DETAILS))
2086 fprintf (vect_dump, "reduction: not commutative/associative: ");
2087 print_generic_expr (vect_dump, operation, TDF_SLIM);
2089 return NULL_TREE;
2092 op_type = TREE_OPERAND_LENGTH (operation);
2093 if (op_type != binary_op)
2095 if (vect_print_dump_info (REPORT_DETAILS))
2097 fprintf (vect_dump, "reduction: not binary operation: ");
2098 print_generic_expr (vect_dump, operation, TDF_SLIM);
2100 return NULL_TREE;
2103 op1 = TREE_OPERAND (operation, 0);
2104 op2 = TREE_OPERAND (operation, 1);
2105 if (TREE_CODE (op1) != SSA_NAME || TREE_CODE (op2) != SSA_NAME)
2107 if (vect_print_dump_info (REPORT_DETAILS))
2109 fprintf (vect_dump, "reduction: uses not ssa_names: ");
2110 print_generic_expr (vect_dump, operation, TDF_SLIM);
2112 return NULL_TREE;
2115 /* Check that it's ok to change the order of the computation. */
2116 type = TREE_TYPE (operation);
2117 if (TYPE_MAIN_VARIANT (type) != TYPE_MAIN_VARIANT (TREE_TYPE (op1))
2118 || TYPE_MAIN_VARIANT (type) != TYPE_MAIN_VARIANT (TREE_TYPE (op2)))
2120 if (vect_print_dump_info (REPORT_DETAILS))
2122 fprintf (vect_dump, "reduction: multiple types: operation type: ");
2123 print_generic_expr (vect_dump, type, TDF_SLIM);
2124 fprintf (vect_dump, ", operands types: ");
2125 print_generic_expr (vect_dump, TREE_TYPE (op1), TDF_SLIM);
2126 fprintf (vect_dump, ",");
2127 print_generic_expr (vect_dump, TREE_TYPE (op2), TDF_SLIM);
2129 return NULL_TREE;
2132 /* CHECKME: check for !flag_finite_math_only too? */
2133 if (SCALAR_FLOAT_TYPE_P (type) && !flag_unsafe_math_optimizations)
2135 /* Changing the order of operations changes the semantics. */
2136 if (vect_print_dump_info (REPORT_DETAILS))
2138 fprintf (vect_dump, "reduction: unsafe fp math optimization: ");
2139 print_generic_expr (vect_dump, operation, TDF_SLIM);
2141 return NULL_TREE;
2143 else if (INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type))
2145 /* Changing the order of operations changes the semantics. */
2146 if (vect_print_dump_info (REPORT_DETAILS))
2148 fprintf (vect_dump, "reduction: unsafe int math optimization: ");
2149 print_generic_expr (vect_dump, operation, TDF_SLIM);
2151 return NULL_TREE;
2154 /* Reduction is safe. We're dealing with one of the following:
2155 1) integer arithmetic and no trapv
2156 2) floating point arithmetic, and special flags permit this optimization. */
2158 def1 = SSA_NAME_DEF_STMT (op1);
2159 def2 = SSA_NAME_DEF_STMT (op2);
2160 if (!def1 || !def2 || IS_EMPTY_STMT (def1) || IS_EMPTY_STMT (def2))
2162 if (vect_print_dump_info (REPORT_DETAILS))
2164 fprintf (vect_dump, "reduction: no defs for operands: ");
2165 print_generic_expr (vect_dump, operation, TDF_SLIM);
2167 return NULL_TREE;
2171 /* Check that one def is the reduction def, defined by PHI, and that
2172 the other def is either defined in the loop by a GIMPLE_MODIFY_STMT
2173 or is an induction (defined by some phi node). */
2175 if (def2 == phi
2176 && flow_bb_inside_loop_p (loop, bb_for_stmt (def1))
2177 && (TREE_CODE (def1) == GIMPLE_MODIFY_STMT
2178 || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1)) == vect_induction_def))
2180 if (vect_print_dump_info (REPORT_DETAILS))
2182 fprintf (vect_dump, "detected reduction:");
2183 print_generic_expr (vect_dump, operation, TDF_SLIM);
2185 return def_stmt;
2187 else if (def1 == phi
2188 && flow_bb_inside_loop_p (loop, bb_for_stmt (def2))
2189 && (TREE_CODE (def2) == GIMPLE_MODIFY_STMT
2190 || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2)) == vect_induction_def))
2192 /* Swap operands (just for simplicity - so that the rest of the code
2193 can assume that the reduction variable is always the last (second)
2194 argument). */
2195 if (vect_print_dump_info (REPORT_DETAILS))
2197 fprintf (vect_dump, "detected reduction: need to swap operands:");
2198 print_generic_expr (vect_dump, operation, TDF_SLIM);
2200 swap_tree_operands (def_stmt, &TREE_OPERAND (operation, 0),
2201 &TREE_OPERAND (operation, 1));
2202 return def_stmt;
2204 else
2206 if (vect_print_dump_info (REPORT_DETAILS))
2208 fprintf (vect_dump, "reduction: unknown pattern.");
2209 print_generic_expr (vect_dump, operation, TDF_SLIM);
2211 return NULL_TREE;
2216 /* Function vect_is_simple_iv_evolution.
2218 FORNOW: A simple evolution of an induction variable in the loop is
2219 considered a polynomial evolution with a constant step. */
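/* For illustration (a hypothetical example, using the scev chrec notation):
   for the induction variable in

     for (i = 0; i < N; i += 4)
       ...

   the access function of I is {0, +, 4}_loop, so INIT is 0 and STEP is 4.
   A step that is not an INTEGER_CST is rejected below.  */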
2221 bool
2222 vect_is_simple_iv_evolution (unsigned loop_nb, tree access_fn, tree * init,
2223 tree * step)
2225 tree init_expr;
2226 tree step_expr;
2227 tree evolution_part = evolution_part_in_loop_num (access_fn, loop_nb);
2229 /* When there is no evolution in this loop, the evolution function
2230 is not "simple". */
2231 if (evolution_part == NULL_TREE)
2232 return false;
2234 /* When the evolution is a polynomial of degree >= 2,
2235 the evolution function is not "simple". */
2236 if (tree_is_chrec (evolution_part))
2237 return false;
2239 step_expr = evolution_part;
2240 init_expr = unshare_expr (initial_condition_in_loop_num (access_fn, loop_nb));
2242 if (vect_print_dump_info (REPORT_DETAILS))
2244 fprintf (vect_dump, "step: ");
2245 print_generic_expr (vect_dump, step_expr, TDF_SLIM);
2246 fprintf (vect_dump, ", init: ");
2247 print_generic_expr (vect_dump, init_expr, TDF_SLIM);
2250 *init = init_expr;
2251 *step = step_expr;
2253 if (TREE_CODE (step_expr) != INTEGER_CST)
2255 if (vect_print_dump_info (REPORT_DETAILS))
2256 fprintf (vect_dump, "step unknown.");
2257 return false;
2260 return true;
2264 /* Function vectorize_loops.
2266 Entry Point to loop vectorization phase. */
2268 unsigned
2269 vectorize_loops (void)
2271 unsigned int i;
2272 unsigned int num_vectorized_loops = 0;
2273 unsigned int vect_loops_num;
2274 loop_iterator li;
2275 struct loop *loop;
2277 vect_loops_num = number_of_loops ();
2279 /* Bail out if there are no loops. */
2280 if (vect_loops_num <= 1)
2281 return 0;
2283 /* Fix the verbosity level if not defined explicitly by the user. */
2284 vect_set_dump_settings ();
2286 /* Allocate the bitmap that records which virtual variables need
2287 to be renamed. */
2288 vect_memsyms_to_rename = BITMAP_ALLOC (NULL);
2290 /* ----------- Analyze loops. ----------- */
2292 /* If some loop was duplicated, it gets a bigger number
2293 than all previously defined loops. This fact allows us to run
2294 only over the initial loops, skipping the newly generated ones. */
2295 FOR_EACH_LOOP (li, loop, 0)
2297 loop_vec_info loop_vinfo;
2299 vect_loop_location = find_loop_location (loop);
2300 loop_vinfo = vect_analyze_loop (loop);
2301 loop->aux = loop_vinfo;
2303 if (!loop_vinfo || !LOOP_VINFO_VECTORIZABLE_P (loop_vinfo))
2304 continue;
2306 vect_transform_loop (loop_vinfo);
2307 num_vectorized_loops++;
2309 vect_loop_location = UNKNOWN_LOC;
2311 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS)
2312 || (vect_print_dump_info (REPORT_VECTORIZED_LOOPS)
2313 && num_vectorized_loops > 0))
2314 fprintf (vect_dump, "vectorized %u loops in function.\n",
2315 num_vectorized_loops);
2317 /* ----------- Finalize. ----------- */
2319 BITMAP_FREE (vect_memsyms_to_rename);
2321 for (i = 1; i < vect_loops_num; i++)
2323 loop_vec_info loop_vinfo;
2325 loop = get_loop (i);
2326 if (!loop)
2327 continue;
2328 loop_vinfo = loop->aux;
2329 destroy_loop_vec_info (loop_vinfo);
2330 loop->aux = NULL;
2333 return num_vectorized_loops > 0 ? TODO_cleanup_cfg : 0;
2336 /* Increase alignment of global arrays to improve vectorization potential.
2337 TODO:
2338 - Consider also structs that have an array field.
2339 - Use ipa analysis to prune arrays that can't be vectorized?
2340 This should involve global alignment analysis and in the future also
2341 array padding. */
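/* For illustration (a hypothetical example): given a global

     static short a[256];

   whose declared alignment is smaller than that of the vector type chosen
   for its elements (e.g. 16 bytes for V8HI), DECL_ALIGN is raised to the
   vector type's alignment, provided vect_can_force_dr_alignment_p allows it,
   so that vectorized accesses to A can be aligned.  */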
2343 static unsigned int
2344 increase_alignment (void)
2346 struct varpool_node *vnode;
2348 /* Increase the alignment of all global arrays for vectorization. */
2349 for (vnode = varpool_nodes_queue;
2350 vnode;
2351 vnode = vnode->next_needed)
2353 tree vectype, decl = vnode->decl;
2354 unsigned int alignment;
2356 if (TREE_CODE (TREE_TYPE (decl)) != ARRAY_TYPE)
2357 continue;
2358 vectype = get_vectype_for_scalar_type (TREE_TYPE (TREE_TYPE (decl)));
2359 if (!vectype)
2360 continue;
2361 alignment = TYPE_ALIGN (vectype);
2362 if (DECL_ALIGN (decl) >= alignment)
2363 continue;
2365 if (vect_can_force_dr_alignment_p (decl, alignment))
2367 DECL_ALIGN (decl) = TYPE_ALIGN (vectype);
2368 DECL_USER_ALIGN (decl) = 1;
2369 if (dump_file)
2371 fprintf (dump_file, "Increasing alignment of decl: ");
2372 print_generic_expr (dump_file, decl, TDF_SLIM);
2376 return 0;
2379 static bool
2380 gate_increase_alignment (void)
2382 return flag_section_anchors && flag_tree_vectorize;
2385 struct tree_opt_pass pass_ipa_increase_alignment =
2387 "increase_alignment", /* name */
2388 gate_increase_alignment, /* gate */
2389 increase_alignment, /* execute */
2390 NULL, /* sub */
2391 NULL, /* next */
2392 0, /* static_pass_number */
2393 0, /* tv_id */
2394 0, /* properties_required */
2395 0, /* properties_provided */
2396 0, /* properties_destroyed */
2397 0, /* todo_flags_start */
2398 0, /* todo_flags_finish */
2399 0 /* letter */