/* Loop Vectorization
   Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com> and
   Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "ggc.h"
#include "tree.h"
#include "basic-block.h"
#include "diagnostic.h"
#include "tree-flow.h"
#include "tree-dump.h"
#include "cfgloop.h"
#include "cfglayout.h"
#include "expr.h"
#include "recog.h"
#include "optabs.h"
#include "params.h"
#include "toplev.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
#include "tree-vectorizer.h"
/* Loop Vectorization Pass.

   This pass tries to vectorize loops.

   For example, the vectorizer transforms the following simple loop:

        short a[N]; short b[N]; short c[N]; int i;

        for (i=0; i<N; i++){
          a[i] = b[i] + c[i];
        }

   as if it was manually vectorized by rewriting the source code into:

        typedef int __attribute__((mode(V8HI))) v8hi;
        short a[N]; short b[N]; short c[N]; int i;
        v8hi *pa = (v8hi*)a, *pb = (v8hi*)b, *pc = (v8hi*)c;
        v8hi va, vb, vc;

        for (i=0; i<N/8; i++){
          vb = pb[i];
          vc = pc[i];
          va = vb + vc;
          pa[i] = va;
        }

   The main entry to this pass is vectorize_loops(), in which
   the vectorizer applies a set of analyses on a given set of loops,
   followed by the actual vectorization transformation for the loops that
   had successfully passed the analysis phase.

   Throughout this pass we make a distinction between two types of
   data: scalars (which are represented by SSA_NAMES), and memory references
   ("data-refs").  These two types of data require different handling both
   during analysis and transformation.  The types of data-refs that the
   vectorizer currently supports are ARRAY_REFS whose base is an array DECL
   (not a pointer), and INDIRECT_REFS through pointers; both array and pointer
   accesses are required to have a simple (consecutive) access pattern.

   Analysis phase:
   ===============
   The driver for the analysis phase is vect_analyze_loop().
   It applies a set of analyses, some of which rely on the scalar evolution
   analyzer (scev) developed by Sebastian Pop.

   During the analysis phase the vectorizer records some information
   per stmt in a "stmt_vec_info" struct which is attached to each stmt in the
   loop, as well as general information about the loop as a whole, which is
   recorded in a "loop_vec_info" struct attached to each loop.

   Transformation phase:
   =====================
   The loop transformation phase scans all the stmts in the loop, and
   creates a vector stmt (or a sequence of stmts) for each scalar stmt S in
   the loop that needs to be vectorized.  It inserts the vector code sequence
   just before the scalar stmt S, and records a pointer to the vector code
   in STMT_VINFO_VEC_STMT (stmt_info) (stmt_info is the stmt_vec_info struct
   attached to S).  This pointer will be used for the vectorization of
   following stmts which use the def of stmt S.  Stmt S is removed if it
   writes to memory; otherwise, we rely on dead code elimination for
   removing it.

   For example, say stmt S1 was vectorized into stmt VS1:

   VS1: vb = px[i];
   S1:  b = x[i];    STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
   S2:  a = b;

   To vectorize stmt S2, the vectorizer first finds the stmt that defines
   the operand 'b' (S1), and gets the relevant vector def 'vb' from the
   vector stmt VS1 pointed to by STMT_VINFO_VEC_STMT (stmt_info (S1)).  The
   resulting sequence would be:

   VS1: vb = px[i];
   S1:  b = x[i];       STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
   VS2: va = vb;
   S2:  a = b;          STMT_VINFO_VEC_STMT (stmt_info (S2)) = VS2

   Operands that are not SSA_NAMEs are data-refs that appear in
   load/store operations (like 'x[i]' in S1), and are handled differently.

   Target modeling:
   =================
   Currently the only target specific information that is used is the
   size of the vector (in bytes) - "UNITS_PER_SIMD_WORD".  Targets that can
   support different sizes of vectors, for now will need to specify one value
   for "UNITS_PER_SIMD_WORD".  More flexibility will be added in the future.

   Since we only vectorize operations whose vector form can be
   expressed using existing tree codes, to verify that an operation is
   supported, the vectorizer checks the relevant optab at the relevant
   machine_mode (e.g., optab_handler (add_optab, V8HImode)->insn_code).  If
   the value found is CODE_FOR_nothing, then there's no target support, and
   we can't vectorize the stmt.
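
   For instance, a minimal sketch of such a support check (mirroring the
   optab_handler call quoted above) for a V8HImode addition would be:

     if (optab_handler (add_optab, V8HImode)->insn_code == CODE_FOR_nothing)
       ... no target support: the stmt cannot be vectorized ...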

   For additional information on this project see:
   http://gcc.gnu.org/projects/tree-ssa/vectorization.html
*/

/* Function vect_determine_vectorization_factor

   Determine the vectorization factor (VF).  VF is the number of data elements
   that are operated upon in parallel in a single iteration of the vectorized
   loop.  For example, when vectorizing a loop that operates on 4-byte
   elements, on a target with vector size (VS) 16 bytes, the VF is set to 4,
   since 4 elements can fit in a single vector register.

   We currently support vectorization of loops in which all types operated
   upon are of the same size.  Therefore this function currently sets VF
   according to the size of the types operated upon, and fails if there are
   multiple sizes in the loop.

   VF is also the factor by which the loop iterations are strip-mined, e.g.:
   original loop:
        for (i=0; i<N; i++){
          a[i] = b[i] + c[i];
        }

   vectorized loop:
        for (i=0; i<N; i+=VF){
          a[i:VF] = b[i:VF] + c[i:VF];
        }
*/

static bool
vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  unsigned int vectorization_factor = 0;
  tree scalar_type;
  gimple phi;
  tree vectype;
  unsigned int nunits;
  stmt_vec_info stmt_info;
  int i;
  HOST_WIDE_INT dummy;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_determine_vectorization_factor ===");

  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];

      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        {
          phi = gsi_stmt (si);
          stmt_info = vinfo_for_stmt (phi);
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "==> examining phi: ");
              print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
            }

          gcc_assert (stmt_info);

          if (STMT_VINFO_RELEVANT_P (stmt_info))
            {
              gcc_assert (!STMT_VINFO_VECTYPE (stmt_info));
              scalar_type = TREE_TYPE (PHI_RESULT (phi));

              if (vect_print_dump_info (REPORT_DETAILS))
                {
                  fprintf (vect_dump, "get vectype for scalar type: ");
                  print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
                }

              vectype = get_vectype_for_scalar_type (scalar_type);
              if (!vectype)
                {
                  if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                    {
                      fprintf (vect_dump,
                               "not vectorized: unsupported data-type ");
                      print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
                    }
                  return false;
                }
              STMT_VINFO_VECTYPE (stmt_info) = vectype;

              if (vect_print_dump_info (REPORT_DETAILS))
                {
                  fprintf (vect_dump, "vectype: ");
                  print_generic_expr (vect_dump, vectype, TDF_SLIM);
                }

              nunits = TYPE_VECTOR_SUBPARTS (vectype);
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "nunits = %d", nunits);

              if (!vectorization_factor
                  || (nunits > vectorization_factor))
                vectorization_factor = nunits;
            }
        }

      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
        {
          tree vf_vectype;
          gimple stmt = gsi_stmt (si);
          stmt_info = vinfo_for_stmt (stmt);

          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "==> examining statement: ");
              print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
            }

          gcc_assert (stmt_info);

          /* Skip stmts which do not need to be vectorized.  */
          if (!STMT_VINFO_RELEVANT_P (stmt_info)
              && !STMT_VINFO_LIVE_P (stmt_info))
            {
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "skip.");
              continue;
            }

          if (gimple_get_lhs (stmt) == NULL_TREE)
            {
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                {
                  fprintf (vect_dump, "not vectorized: irregular stmt.");
                  print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
                }
              return false;
            }

          if (VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))))
            {
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                {
                  fprintf (vect_dump, "not vectorized: vector stmt in loop:");
                  print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
                }
              return false;
            }

          if (STMT_VINFO_VECTYPE (stmt_info))
            {
              /* The only case when a vectype had been already set is for
                 stmts that contain a dataref, or for "pattern-stmts" (stmts
                 generated by the vectorizer to represent/replace a certain
                 idiom).  */
              gcc_assert (STMT_VINFO_DATA_REF (stmt_info)
                          || is_pattern_stmt_p (stmt_info));
              vectype = STMT_VINFO_VECTYPE (stmt_info);
            }
          else
            {
              gcc_assert (!STMT_VINFO_DATA_REF (stmt_info)
                          && !is_pattern_stmt_p (stmt_info));

              scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
              if (vect_print_dump_info (REPORT_DETAILS))
                {
                  fprintf (vect_dump, "get vectype for scalar type: ");
                  print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
                }

              vectype = get_vectype_for_scalar_type (scalar_type);
              if (!vectype)
                {
                  if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                    {
                      fprintf (vect_dump,
                               "not vectorized: unsupported data-type ");
                      print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
                    }
                  return false;
                }

              STMT_VINFO_VECTYPE (stmt_info) = vectype;
            }

          /* The vectorization factor is according to the smallest
             scalar type (or the largest vector size, but we only
             support one vector size per loop).  */
          scalar_type = vect_get_smallest_scalar_type (stmt, &dummy,
                                                       &dummy);
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "get vectype for scalar type: ");
              print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
            }

          vf_vectype = get_vectype_for_scalar_type (scalar_type);
          if (!vf_vectype)
            {
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                {
                  fprintf (vect_dump,
                           "not vectorized: unsupported data-type ");
                  print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
                }
              return false;
            }

          if ((GET_MODE_SIZE (TYPE_MODE (vectype))
               != GET_MODE_SIZE (TYPE_MODE (vf_vectype))))
            {
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                {
                  fprintf (vect_dump,
                           "not vectorized: different sized vector "
                           "types in statement, ");
                  print_generic_expr (vect_dump, vectype, TDF_SLIM);
                  fprintf (vect_dump, " and ");
                  print_generic_expr (vect_dump, vf_vectype, TDF_SLIM);
                }
              return false;
            }

          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "vectype: ");
              print_generic_expr (vect_dump, vf_vectype, TDF_SLIM);
            }

          nunits = TYPE_VECTOR_SUBPARTS (vf_vectype);
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "nunits = %d", nunits);

          if (!vectorization_factor
              || (nunits > vectorization_factor))
            vectorization_factor = nunits;
        }
    }

  /* TODO: Analyze cost.  Decide if worth while to vectorize.  */
  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "vectorization factor = %d", vectorization_factor);
  if (vectorization_factor <= 1)
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: unsupported data-type");
      return false;
    }
  LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;

  return true;
}

/* Function vect_is_simple_iv_evolution.

   FORNOW: A simple evolution of an induction variable in the loop is
   considered a polynomial evolution with constant step.  */
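
/* For example (a sketch of what the scalar evolution analyzer returns):
   for the induction

     i_1 = PHI <0, i_2>
     i_2 = i_1 + 3

   the access function is the chrec {0, +, 3}_loop, whose evolution part
   is the INTEGER_CST 3, so the evolution is "simple".  A second-degree
   chrec such as {0, +, {0, +, 1}_loop}_loop is rejected by the
   tree_is_chrec check below.  */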

static bool
vect_is_simple_iv_evolution (unsigned loop_nb, tree access_fn, tree * init,
                             tree * step)
{
  tree init_expr;
  tree step_expr;
  tree evolution_part = evolution_part_in_loop_num (access_fn, loop_nb);

  /* When there is no evolution in this loop, the evolution function
     is not "simple".  */
  if (evolution_part == NULL_TREE)
    return false;

  /* When the evolution is a polynomial of degree >= 2
     the evolution function is not "simple".  */
  if (tree_is_chrec (evolution_part))
    return false;

  step_expr = evolution_part;
  init_expr = unshare_expr (initial_condition_in_loop_num (access_fn, loop_nb));

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "step: ");
      print_generic_expr (vect_dump, step_expr, TDF_SLIM);
      fprintf (vect_dump, ", init: ");
      print_generic_expr (vect_dump, init_expr, TDF_SLIM);
    }

  *init = init_expr;
  *step = step_expr;

  if (TREE_CODE (step_expr) != INTEGER_CST)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "step unknown.");
      return false;
    }

  return true;
}

/* Function vect_analyze_scalar_cycles_1.

   Examine the cross iteration def-use cycles of scalar variables
   in LOOP.  LOOP_VINFO represents the loop that is now being
   considered for vectorization (can be LOOP, or an outer-loop
   enclosing LOOP).  */

static void
vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
{
  basic_block bb = loop->header;
  tree dumy;
  VEC(gimple,heap) *worklist = VEC_alloc (gimple, heap, 64);
  gimple_stmt_iterator gsi;
  bool double_reduc;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_analyze_scalar_cycles ===");

  /* First - identify all inductions.  Reduction detection assumes that all
     the inductions have been identified, therefore, this order must not be
     changed.  */
  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);
      tree access_fn = NULL;
      tree def = PHI_RESULT (phi);
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);

      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "Analyze phi: ");
          print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
        }

      /* Skip virtual phi's.  The data dependences that are associated with
         virtual defs/uses (i.e., memory accesses) are analyzed elsewhere.  */
      if (!is_gimple_reg (SSA_NAME_VAR (def)))
        continue;

      STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_unknown_def_type;

      /* Analyze the evolution function.  */
      access_fn = analyze_scalar_evolution (loop, def);
      if (access_fn && vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "Access function of PHI: ");
          print_generic_expr (vect_dump, access_fn, TDF_SLIM);
        }

      if (!access_fn
          || !vect_is_simple_iv_evolution (loop->num, access_fn, &dumy, &dumy))
        {
          VEC_safe_push (gimple, heap, worklist, phi);
          continue;
        }

      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "Detected induction.");
      STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_induction_def;
    }


  /* Second - identify all reductions and nested cycles.  */
  while (VEC_length (gimple, worklist) > 0)
    {
      gimple phi = VEC_pop (gimple, worklist);
      tree def = PHI_RESULT (phi);
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);
      gimple reduc_stmt;
      bool nested_cycle;

      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "Analyze phi: ");
          print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
        }

      gcc_assert (is_gimple_reg (SSA_NAME_VAR (def)));
      gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_unknown_def_type);

      nested_cycle = (loop != LOOP_VINFO_LOOP (loop_vinfo));
      reduc_stmt = vect_is_simple_reduction (loop_vinfo, phi, !nested_cycle,
                                             &double_reduc);
      if (reduc_stmt)
        {
          if (double_reduc)
            {
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "Detected double reduction.");

              STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_double_reduction_def;
              STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
                                                    vect_double_reduction_def;
            }
          else
            {
              if (nested_cycle)
                {
                  if (vect_print_dump_info (REPORT_DETAILS))
                    fprintf (vect_dump, "Detected vectorizable nested cycle.");

                  STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_nested_cycle;
                  STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
                                                             vect_nested_cycle;
                }
              else
                {
                  if (vect_print_dump_info (REPORT_DETAILS))
                    fprintf (vect_dump, "Detected reduction.");

                  STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_reduction_def;
                  STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
                                                           vect_reduction_def;
                }
            }
        }
      else
        if (vect_print_dump_info (REPORT_DETAILS))
          fprintf (vect_dump, "Unknown def-use cycle pattern.");
    }

  VEC_free (gimple, heap, worklist);
}

/* Function vect_analyze_scalar_cycles.

   Examine the cross iteration def-use cycles of scalar variables, by
   analyzing the loop-header PHIs of scalar variables; Classify each
   cycle as one of the following: invariant, induction, reduction, unknown.
   We do that for the loop represented by LOOP_VINFO, and also for its
   inner-loop, if it exists.

   Examples for scalar cycles:

   Example1: reduction:

              loop1:
              for (i=0; i<N; i++)
                 sum += a[i];

   Example2: induction:

              loop2:
              for (i=0; i<N; i++)
                 a[i] = i;  */

static void
vect_analyze_scalar_cycles (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);

  vect_analyze_scalar_cycles_1 (loop_vinfo, loop);

  /* When vectorizing an outer-loop, the inner-loop is executed sequentially.
     Reductions in such inner-loop therefore have different properties than
     the reductions in the nest that gets vectorized:
     1. When vectorized, they are executed in the same order as in the
        original scalar loop, so we can't change the order of computation
        when vectorizing them.
     2. FIXME: Inner-loop reductions can be used in the inner-loop, so the
        current checks are too strict.  */
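
  /* For example (sketch): when considering the outer loop of

       for (i = 0; i < N; i++)        <-- candidate for vectorization
         for (j = 0; j < M; j++)      <-- executed sequentially
           s += a[i][j];

     the accumulation into 's' keeps its original evaluation order, which
     is why vect_analyze_scalar_cycles_1 classifies it as a nested cycle
     rather than as an ordinary reduction.  */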

  if (loop->inner)
    vect_analyze_scalar_cycles_1 (loop_vinfo, loop->inner);
}

/* Function vect_get_loop_niters.

   Determine how many iterations the loop executes.
   If an expression that represents the number of iterations
   can be constructed, place it in NUMBER_OF_ITERATIONS.
   Return the loop exit condition.  */
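
/* For instance (sketch): for a countable loop of the form

     for (i = 0; i < n; i++)
       ...

   the expression placed in NUMBER_OF_ITERATIONS is 'n' (the number of
   times the exit condition is executed), and the statement returned is
   the GIMPLE_COND testing 'i < n'.  */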

static gimple
vect_get_loop_niters (struct loop *loop, tree *number_of_iterations)
{
  tree niters;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== get_loop_niters ===");

  niters = number_of_exit_cond_executions (loop);

  if (niters != NULL_TREE
      && niters != chrec_dont_know)
    {
      *number_of_iterations = niters;

      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "==> get_loop_niters:");
          print_generic_expr (vect_dump, *number_of_iterations, TDF_SLIM);
        }
    }

  return get_loop_exit_condition (loop);
}

/* Function bb_in_loop_p

   Used as predicate for dfs order traversal of the loop bbs.  */

static bool
bb_in_loop_p (const_basic_block bb, const void *data)
{
  const struct loop *const loop = (const struct loop *)data;
  if (flow_bb_inside_loop_p (loop, bb))
    return true;
  return false;
}

/* Function new_loop_vec_info.

   Create and initialize a new loop_vec_info struct for LOOP, as well as
   stmt_vec_info structs for all the stmts in LOOP.  */

static loop_vec_info
new_loop_vec_info (struct loop *loop)
{
  loop_vec_info res;
  basic_block *bbs;
  gimple_stmt_iterator si;
  unsigned int i, nbbs;

  res = (loop_vec_info) xcalloc (1, sizeof (struct _loop_vec_info));
  LOOP_VINFO_LOOP (res) = loop;

  bbs = get_loop_body (loop);

  /* Create/Update stmt_info for all stmts in the loop.  */
  for (i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = bbs[i];

      /* BBs in a nested inner-loop will have been already processed (because
         we will have called vect_analyze_loop_form for any nested inner-loop).
         Therefore, for stmts in an inner-loop we just want to update the
         STMT_VINFO_LOOP_VINFO field of their stmt_info to point to the new
         loop_info of the outer-loop we are currently considering to vectorize
         (instead of the loop_info of the inner-loop).
         For stmts in other BBs we need to create a stmt_info from scratch.  */
      if (bb->loop_father != loop)
        {
          /* Inner-loop bb.  */
          gcc_assert (loop->inner && bb->loop_father == loop->inner);
          for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple phi = gsi_stmt (si);
              stmt_vec_info stmt_info = vinfo_for_stmt (phi);
              loop_vec_info inner_loop_vinfo =
                STMT_VINFO_LOOP_VINFO (stmt_info);
              gcc_assert (loop->inner == LOOP_VINFO_LOOP (inner_loop_vinfo));
              STMT_VINFO_LOOP_VINFO (stmt_info) = res;
            }
          for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple stmt = gsi_stmt (si);
              stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
              loop_vec_info inner_loop_vinfo =
                STMT_VINFO_LOOP_VINFO (stmt_info);
              gcc_assert (loop->inner == LOOP_VINFO_LOOP (inner_loop_vinfo));
              STMT_VINFO_LOOP_VINFO (stmt_info) = res;
            }
        }
      else
        {
          /* bb in current nest.  */
          for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple phi = gsi_stmt (si);
              gimple_set_uid (phi, 0);
              set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, res, NULL));
            }

          for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple stmt = gsi_stmt (si);
              gimple_set_uid (stmt, 0);
              set_vinfo_for_stmt (stmt, new_stmt_vec_info (stmt, res, NULL));
            }
        }
    }

  /* CHECKME: We want to visit all BBs before their successors (except for
     latch blocks, for which this assertion wouldn't hold).  In the simple
     case of the loop forms we allow, a dfs order of the BBs would be the
     same as reversed postorder traversal, so we are safe.  */

  free (bbs);
  bbs = XCNEWVEC (basic_block, loop->num_nodes);
  nbbs = dfs_enumerate_from (loop->header, 0, bb_in_loop_p,
                             bbs, loop->num_nodes, loop);
  gcc_assert (nbbs == loop->num_nodes);

  LOOP_VINFO_BBS (res) = bbs;
  LOOP_VINFO_NITERS (res) = NULL;
  LOOP_VINFO_NITERS_UNCHANGED (res) = NULL;
  LOOP_VINFO_COST_MODEL_MIN_ITERS (res) = 0;
  LOOP_VINFO_VECTORIZABLE_P (res) = 0;
  LOOP_PEELING_FOR_ALIGNMENT (res) = 0;
  LOOP_VINFO_VECT_FACTOR (res) = 0;
  LOOP_VINFO_DATAREFS (res) = VEC_alloc (data_reference_p, heap, 10);
  LOOP_VINFO_DDRS (res) = VEC_alloc (ddr_p, heap, 10 * 10);
  LOOP_VINFO_UNALIGNED_DR (res) = NULL;
  LOOP_VINFO_MAY_MISALIGN_STMTS (res) =
    VEC_alloc (gimple, heap,
               PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIGNMENT_CHECKS));
  LOOP_VINFO_MAY_ALIAS_DDRS (res) =
    VEC_alloc (ddr_p, heap,
               PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS));
  LOOP_VINFO_STRIDED_STORES (res) = VEC_alloc (gimple, heap, 10);
  LOOP_VINFO_SLP_INSTANCES (res) = VEC_alloc (slp_instance, heap, 10);
  LOOP_VINFO_SLP_UNROLLING_FACTOR (res) = 1;

  return res;
}

/* Function destroy_loop_vec_info.

   Free LOOP_VINFO struct, as well as all the stmt_vec_info structs of all
   the stmts in the loop.  */

void
destroy_loop_vec_info (loop_vec_info loop_vinfo, bool clean_stmts)
{
  struct loop *loop;
  basic_block *bbs;
  int nbbs;
  gimple_stmt_iterator si;
  int j;
  VEC (slp_instance, heap) *slp_instances;
  slp_instance instance;

  if (!loop_vinfo)
    return;

  loop = LOOP_VINFO_LOOP (loop_vinfo);

  bbs = LOOP_VINFO_BBS (loop_vinfo);
  nbbs = loop->num_nodes;

  if (!clean_stmts)
    {
      free (LOOP_VINFO_BBS (loop_vinfo));
      free_data_refs (LOOP_VINFO_DATAREFS (loop_vinfo));
      free_dependence_relations (LOOP_VINFO_DDRS (loop_vinfo));
      VEC_free (gimple, heap, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo));

      free (loop_vinfo);
      loop->aux = NULL;
      return;
    }

  for (j = 0; j < nbbs; j++)
    {
      basic_block bb = bbs[j];
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        free_stmt_vec_info (gsi_stmt (si));

      for (si = gsi_start_bb (bb); !gsi_end_p (si); )
        {
          gimple stmt = gsi_stmt (si);
          stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

          if (stmt_info)
            {
              /* Check if this is a "pattern stmt" (introduced by the
                 vectorizer during the pattern recognition pass).  */
              bool remove_stmt_p = false;
              gimple orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
              if (orig_stmt)
                {
                  stmt_vec_info orig_stmt_info = vinfo_for_stmt (orig_stmt);
                  if (orig_stmt_info
                      && STMT_VINFO_IN_PATTERN_P (orig_stmt_info))
                    remove_stmt_p = true;
                }

              /* Free stmt_vec_info.  */
              free_stmt_vec_info (stmt);

              /* Remove dead "pattern stmts".  */
              if (remove_stmt_p)
                gsi_remove (&si, true);
            }
          gsi_next (&si);
        }
    }

  free (LOOP_VINFO_BBS (loop_vinfo));
  free_data_refs (LOOP_VINFO_DATAREFS (loop_vinfo));
  free_dependence_relations (LOOP_VINFO_DDRS (loop_vinfo));
  VEC_free (gimple, heap, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo));
  VEC_free (ddr_p, heap, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo));
  slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
  for (j = 0; VEC_iterate (slp_instance, slp_instances, j, instance); j++)
    vect_free_slp_instance (instance);

  VEC_free (slp_instance, heap, LOOP_VINFO_SLP_INSTANCES (loop_vinfo));
  VEC_free (gimple, heap, LOOP_VINFO_STRIDED_STORES (loop_vinfo));

  free (loop_vinfo);
  loop->aux = NULL;
}

/* Function vect_analyze_loop_1.

   Apply a set of analyses on LOOP, and create a loop_vec_info struct
   for it.  The different analyses will record information in the
   loop_vec_info struct.  This is a subset of the analyses applied in
   vect_analyze_loop, to be applied on an inner-loop nested in the loop
   that is now considered for (outer-loop) vectorization.  */

static loop_vec_info
vect_analyze_loop_1 (struct loop *loop)
{
  loop_vec_info loop_vinfo;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "===== analyze_loop_nest_1 =====");

  /* Check the CFG characteristics of the loop (nesting, entry/exit, etc.)  */

  loop_vinfo = vect_analyze_loop_form (loop);
  if (!loop_vinfo)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad inner-loop form.");
      return NULL;
    }

  return loop_vinfo;
}

/* Function vect_analyze_loop_form.

   Verify that certain CFG restrictions hold, including:
   - the loop has a pre-header
   - the loop has a single entry and exit
   - the loop exit condition is simple enough, and the number of iterations
     can be analyzed (a countable loop).  */

loop_vec_info
vect_analyze_loop_form (struct loop *loop)
{
  loop_vec_info loop_vinfo;
  gimple loop_cond;
  tree number_of_iterations = NULL;
  loop_vec_info inner_loop_vinfo = NULL;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_analyze_loop_form ===");

  /* Different restrictions apply when we are considering an inner-most loop,
     vs. an outer (nested) loop.
     (FORNOW. May want to relax some of these restrictions in the future).  */

  if (!loop->inner)
    {
      /* Inner-most loop.  We currently require that the number of BBs is
         exactly 2 (the header and latch).  Vectorizable inner-most loops
         look like this:

                        (pre-header)
                           |
                          header <--------+
                           | |            |
                           | +--> latch --+
                           |
                        (exit-bb)  */

      if (loop->num_nodes != 2)
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: control flow in loop.");
          return NULL;
        }

      if (empty_block_p (loop->header))
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: empty loop.");
          return NULL;
        }
    }
  else
    {
      struct loop *innerloop = loop->inner;
      edge entryedge;

      /* Nested loop.  We currently require that the loop is doubly-nested,
         contains a single inner loop, and the number of BBs is exactly 5.
         Vectorizable outer-loops look like this:

                        (pre-header)
                           |
                          header <---+
                           |         |
                          inner-loop |
                           |         |
                          tail ------+
                           |
                        (exit-bb)

         The inner-loop has the properties expected of inner-most loops
         as described above.  */

      if ((loop->inner)->inner || (loop->inner)->next)
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: multiple nested loops.");
          return NULL;
        }

      /* Analyze the inner-loop.  */
      inner_loop_vinfo = vect_analyze_loop_1 (loop->inner);
      if (!inner_loop_vinfo)
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: Bad inner loop.");
          return NULL;
        }

      if (!expr_invariant_in_loop_p (loop,
                                     LOOP_VINFO_NITERS (inner_loop_vinfo)))
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump,
                     "not vectorized: inner-loop count not invariant.");
          destroy_loop_vec_info (inner_loop_vinfo, true);
          return NULL;
        }

      if (loop->num_nodes != 5)
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: control flow in loop.");
          destroy_loop_vec_info (inner_loop_vinfo, true);
          return NULL;
        }

      gcc_assert (EDGE_COUNT (innerloop->header->preds) == 2);
      entryedge = EDGE_PRED (innerloop->header, 0);
      if (EDGE_PRED (innerloop->header, 0)->src == innerloop->latch)
        entryedge = EDGE_PRED (innerloop->header, 1);

      if (entryedge->src != loop->header
          || !single_exit (innerloop)
          || single_exit (innerloop)->dest != EDGE_PRED (loop->latch, 0)->src)
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: unsupported outerloop form.");
          destroy_loop_vec_info (inner_loop_vinfo, true);
          return NULL;
        }

      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "Considering outer-loop vectorization.");
    }

  if (!single_exit (loop)
      || EDGE_COUNT (loop->header->preds) != 2)
    {
      if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
        {
          if (!single_exit (loop))
            fprintf (vect_dump, "not vectorized: multiple exits.");
          else if (EDGE_COUNT (loop->header->preds) != 2)
            fprintf (vect_dump, "not vectorized: too many incoming edges.");
        }
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  /* We assume that the loop exit condition is at the end of the loop, i.e.,
     that the loop is represented as a do-while (with a proper if-guard
     before the loop if needed), where the loop header contains all the
     executable statements, and the latch is empty.  */
  if (!empty_block_p (loop->latch)
      || !gimple_seq_empty_p (phi_nodes (loop->latch)))
    {
      if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
        fprintf (vect_dump, "not vectorized: unexpected loop form.");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  /* Make sure there exists a single-predecessor exit bb:  */
  if (!single_pred_p (single_exit (loop)->dest))
    {
      edge e = single_exit (loop);
      if (!(e->flags & EDGE_ABNORMAL))
        {
          split_loop_exit_edge (e);
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "split exit edge.");
        }
      else
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: abnormal loop exit edge.");
          if (inner_loop_vinfo)
            destroy_loop_vec_info (inner_loop_vinfo, true);
          return NULL;
        }
    }

  loop_cond = vect_get_loop_niters (loop, &number_of_iterations);
  if (!loop_cond)
    {
      if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
        fprintf (vect_dump, "not vectorized: complicated exit condition.");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  if (!number_of_iterations)
    {
      if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
        fprintf (vect_dump,
                 "not vectorized: number of iterations cannot be computed.");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  if (chrec_contains_undetermined (number_of_iterations))
    {
      if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
        fprintf (vect_dump, "Infinite number of iterations.");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  if (!NITERS_KNOWN_P (number_of_iterations))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "Symbolic number of iterations is ");
          print_generic_expr (vect_dump, number_of_iterations, TDF_DETAILS);
        }
    }
  else if (TREE_INT_CST_LOW (number_of_iterations) == 0)
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: number of iterations = 0.");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, false);
      return NULL;
    }

  loop_vinfo = new_loop_vec_info (loop);
  LOOP_VINFO_NITERS (loop_vinfo) = number_of_iterations;
  LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = number_of_iterations;

  STMT_VINFO_TYPE (vinfo_for_stmt (loop_cond)) = loop_exit_ctrl_vec_info_type;

  /* CHECKME: May want to keep it around in the future.  */
  if (inner_loop_vinfo)
    destroy_loop_vec_info (inner_loop_vinfo, false);

  gcc_assert (!loop->aux);
  loop->aux = loop_vinfo;
  return loop_vinfo;
}

/* Function vect_analyze_loop_operations.

   Scan the loop stmts and make sure they are all vectorizable.  */

static bool
vect_analyze_loop_operations (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  unsigned int vectorization_factor = 0;
  int i;
  gimple phi;
  stmt_vec_info stmt_info;
  bool need_to_vectorize = false;
  int min_profitable_iters;
  int min_scalar_loop_bound;
  unsigned int th;
  bool only_slp_in_loop = true, ok;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_analyze_loop_operations ===");

  gcc_assert (LOOP_VINFO_VECT_FACTOR (loop_vinfo));
  vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);

  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];

      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        {
          phi = gsi_stmt (si);
          ok = true;

          stmt_info = vinfo_for_stmt (phi);
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "examining phi: ");
              print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
            }

          if (! is_loop_header_bb_p (bb))
            {
              /* Inner-loop loop-closed exit phi in outer-loop vectorization
                 (i.e. a phi in the tail of the outer-loop).
                 FORNOW: we currently don't support the case that these phis
                 are not used in the outerloop (unless it is double reduction,
                 i.e., this phi is vect_reduction_def), because this case
                 would require us to actually do something here.  */
              if ((!STMT_VINFO_RELEVANT_P (stmt_info)
                   || STMT_VINFO_LIVE_P (stmt_info))
                  && STMT_VINFO_DEF_TYPE (stmt_info)
                     != vect_double_reduction_def)
                {
                  if (vect_print_dump_info (REPORT_DETAILS))
                    fprintf (vect_dump,
                             "Unsupported loop-closed phi in outer-loop.");
                  return false;
                }
              continue;
            }

          gcc_assert (stmt_info);

          if (STMT_VINFO_LIVE_P (stmt_info))
            {
              /* FORNOW: not yet supported.  */
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                fprintf (vect_dump, "not vectorized: value used after loop.");
              return false;
            }

          if (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope
              && STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def)
            {
              /* A scalar-dependence cycle that we don't support.  */
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                fprintf (vect_dump, "not vectorized: scalar dependence cycle.");
              return false;
            }

          if (STMT_VINFO_RELEVANT_P (stmt_info))
            {
              need_to_vectorize = true;
              if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def)
                ok = vectorizable_induction (phi, NULL, NULL);
            }

          if (!ok)
            {
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                {
                  fprintf (vect_dump,
                           "not vectorized: relevant phi not supported: ");
                  print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
                }
              return false;
            }
        }

      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
        {
          gimple stmt = gsi_stmt (si);
          stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

          gcc_assert (stmt_info);

          if (!vect_analyze_stmt (stmt, &need_to_vectorize, NULL))
            return false;

          if ((STMT_VINFO_RELEVANT_P (stmt_info)
               || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
              && !PURE_SLP_STMT (stmt_info))
            /* STMT needs both SLP and loop-based vectorization.  */
            only_slp_in_loop = false;
        }
    } /* bbs */

  /* All operations in the loop are either irrelevant (deal with loop
     control, or dead), or only used outside the loop and can be moved
     out of the loop (e.g. invariants, inductions).  The loop can be
     optimized away by scalar optimizations.  We're better off not
     touching this loop.  */
  if (!need_to_vectorize)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump,
                 "All the computation can be taken out of the loop.");
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump,
                 "not vectorized: redundant loop. no profit to vectorize.");
      return false;
    }

  /* If all the stmts in the loop can be SLPed, we perform only SLP, and the
     vectorization factor of the loop is the unrolling factor required by the
     SLP instances.  If that unrolling factor is 1, we say that we perform
     pure SLP on the loop; cross-iteration parallelism is not exploited.  */
  if (only_slp_in_loop)
    vectorization_factor = LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo);
  else
    vectorization_factor = least_common_multiple (vectorization_factor,
                                LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo));

  LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;

  if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      && vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump,
             "vectorization_factor = %d, niters = " HOST_WIDE_INT_PRINT_DEC,
             vectorization_factor, LOOP_VINFO_INT_NITERS (loop_vinfo));

  if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      && (LOOP_VINFO_INT_NITERS (loop_vinfo) < vectorization_factor))
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: iteration count too small.");
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "not vectorized: iteration count smaller than "
                 "vectorization factor.");
      return false;
    }

  /* Analyze cost.  Decide if worth while to vectorize.  */

  /* Once VF is set, SLP costs should be updated since the number of created
     vector stmts depends on VF.  */
  vect_update_slp_costs_according_to_vf (loop_vinfo);

  min_profitable_iters = vect_estimate_min_profitable_iters (loop_vinfo);
  LOOP_VINFO_COST_MODEL_MIN_ITERS (loop_vinfo) = min_profitable_iters;

  if (min_profitable_iters < 0)
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: vectorization not profitable.");
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "not vectorized: vector version will never be "
                 "profitable.");
      return false;
    }

  min_scalar_loop_bound = ((PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND)
                            * vectorization_factor) - 1);

  /* Use the cost model only if it is more conservative than the user
     specified threshold.  */

  th = (unsigned) min_scalar_loop_bound;
  if (min_profitable_iters
      && (!min_scalar_loop_bound
          || min_profitable_iters > min_scalar_loop_bound))
    th = (unsigned) min_profitable_iters;

  if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      && LOOP_VINFO_INT_NITERS (loop_vinfo) <= th)
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: vectorization not "
                 "profitable.");
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "not vectorized: iteration count smaller than "
                 "user specified loop bound parameter or minimum "
                 "profitable iterations (whichever is more conservative).");
      return false;
    }

  if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      || LOOP_VINFO_INT_NITERS (loop_vinfo) % vectorization_factor != 0
      || LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "epilog loop required.");
      if (!vect_can_advance_ivs_p (loop_vinfo))
        {
          if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
            fprintf (vect_dump,
                     "not vectorized: can't create epilog loop 1.");
          return false;
        }
      if (!slpeel_can_duplicate_loop_p (loop, single_exit (loop)))
        {
          if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
            fprintf (vect_dump,
                     "not vectorized: can't create epilog loop 2.");
          return false;
        }
    }

  return true;
}

/* Function vect_analyze_loop.

   Apply a set of analyses on LOOP, and create a loop_vec_info struct
   for it.  The different analyses will record information in the
   loop_vec_info struct.  */
loop_vec_info
vect_analyze_loop (struct loop *loop)
{
  bool ok;
  loop_vec_info loop_vinfo;
  int max_vf = MAX_VECTORIZATION_FACTOR;
  int min_vf = 2;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "===== analyze_loop_nest =====");

  if (loop_outer (loop)
      && loop_vec_info_for_loop (loop_outer (loop))
      && LOOP_VINFO_VECTORIZABLE_P (loop_vec_info_for_loop (loop_outer (loop))))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "outer-loop already vectorized.");
      return NULL;
    }

  /* Check the CFG characteristics of the loop (nesting, entry/exit, etc.)  */

  loop_vinfo = vect_analyze_loop_form (loop);
  if (!loop_vinfo)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad loop form.");
      return NULL;
    }

  /* Find all data references in the loop (which correspond to vdefs/vuses)
     and analyze their evolution in the loop.  Also adjust the minimal
     vectorization factor according to the loads and stores.

     FORNOW: Handle only simple array references whose
     alignment can be forced, and aligned pointer-references.  */

  ok = vect_analyze_data_refs (loop_vinfo, NULL, &min_vf);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data references.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  /* Classify all cross-iteration scalar data-flow cycles.
     Cross-iteration cycles caused by virtual phis are analyzed
     separately.  */

  vect_analyze_scalar_cycles (loop_vinfo);

  vect_pattern_recog (loop_vinfo);

  /* Data-flow analysis to detect stmts that do not need to be vectorized.  */

  ok = vect_mark_stmts_to_be_vectorized (loop_vinfo);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "unexpected pattern.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  /* Analyze data dependences between the data-refs in the loop
     and adjust the maximum vectorization factor according to
     the dependences.
     FORNOW: fail at the first data dependence that we encounter.  */

  ok = vect_analyze_data_ref_dependences (loop_vinfo, NULL, &max_vf);
  if (!ok
      || max_vf < min_vf)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data dependence.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  ok = vect_determine_vectorization_factor (loop_vinfo);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "can't determine vectorization factor.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }
  if (max_vf < LOOP_VINFO_VECT_FACTOR (loop_vinfo))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data dependence.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  /* Analyze the alignment of the data-refs in the loop.
     Fail if a data reference is found that cannot be vectorized.  */

  ok = vect_analyze_data_refs_alignment (loop_vinfo, NULL);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data alignment.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  /* Analyze the access patterns of the data-refs in the loop (consecutive,
     complex, etc.).  FORNOW: Only handle consecutive access pattern.  */

  ok = vect_analyze_data_ref_accesses (loop_vinfo, NULL);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data access.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  /* Prune the list of ddrs to be tested at run-time by versioning for alias.
     It is important to call pruning after vect_analyze_data_ref_accesses,
     since we use grouping information gathered by interleaving analysis.  */
  ok = vect_prune_runtime_alias_test_list (loop_vinfo);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "too long list of versioning for alias "
                 "run-time tests.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  /* Check the SLP opportunities in the loop, analyze and build SLP trees.  */
  ok = vect_analyze_slp (loop_vinfo, NULL);
  if (ok)
    {
      /* Decide which possible SLP instances to SLP.  */
      vect_make_slp_decision (loop_vinfo);

      /* Find stmts that need to be both vectorized and SLPed.  */
      vect_detect_hybrid_slp (loop_vinfo);
    }

  /* This pass will decide on using loop versioning and/or loop peeling in
     order to enhance the alignment of data references in the loop.  */

  ok = vect_enhance_data_refs_alignment (loop_vinfo);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data alignment.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  /* Scan all the operations in the loop and make sure they are
     vectorizable.  */

  ok = vect_analyze_loop_operations (loop_vinfo);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad operation or unsupported loop bound.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  LOOP_VINFO_VECTORIZABLE_P (loop_vinfo) = 1;

  return loop_vinfo;
}

/* Function reduction_code_for_scalar_code

   Input:
   CODE - tree_code of a reduction operation.

   Output:
   REDUC_CODE - the corresponding tree-code to be used to reduce the
      vector of partial results into a single scalar result (which
      will also reside in a vector), or ERROR_MARK if the operation is
      a supported reduction operation but does not have such a tree-code.

   Return FALSE if CODE currently cannot be vectorized as a reduction.  */

static bool
reduction_code_for_scalar_code (enum tree_code code,
                                enum tree_code *reduc_code)
{
  switch (code)
    {
      case MAX_EXPR:
        *reduc_code = REDUC_MAX_EXPR;
        return true;

      case MIN_EXPR:
        *reduc_code = REDUC_MIN_EXPR;
        return true;

      case PLUS_EXPR:
        *reduc_code = REDUC_PLUS_EXPR;
        return true;

      case MULT_EXPR:
      case MINUS_EXPR:
      case BIT_IOR_EXPR:
      case BIT_XOR_EXPR:
      case BIT_AND_EXPR:
        *reduc_code = ERROR_MARK;
        return true;

      default:
        return false;
    }
}

/* Error reporting helper for vect_is_simple_reduction below.  GIMPLE
   statement STMT is printed with a message MSG.  */

static void
report_vect_op (gimple stmt, const char *msg)
{
  fprintf (vect_dump, "%s", msg);
  print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
}

/* Function vect_is_simple_reduction

   (1) Detect a cross-iteration def-use cycle that represents a simple
   reduction computation.  We look for the following pattern:

   loop_header:
     a1 = phi < a0, a2 >
     a3 = ...
     a2 = operation (a3, a1)

   such that:
   1. operation is commutative and associative and it is safe to
      change the order of the computation (if CHECK_REDUCTION is true)
   2. no uses for a2 in the loop (a2 is used out of the loop)
   3. no uses of a1 in the loop besides the reduction operation.

   Condition 1 is tested here.
   Conditions 2,3 are tested in vect_mark_stmts_to_be_vectorized.

   (2) Detect a cross-iteration def-use cycle in nested loops, i.e.,
   nested cycles, if CHECK_REDUCTION is false.

   (3) Detect cycles of phi nodes in outer-loop vectorization, i.e., double
   reductions:

     a1 = phi < a0, a2 >
     inner loop (def of a3)
     a2 = phi < a3 >  */
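
/* For instance (a source-level sketch of the double-reduction pattern of
   case (3) above; the variable names are illustrative only):

     s = s0;
     for (i = 0; i < N; i++)        outer loop: a1 = phi <a0, a2>
       {
         t = s;
         for (j = 0; j < M; j++)    inner loop: computes a3
           t = t + x[i][j];
         s = t;                     loop-closed value: a2 = phi <a3>
       }

   Here the outer-loop phi for 's' forms a cycle through the phi that
   closes the inner loop, which is what case (3) detects.  */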

gimple
vect_is_simple_reduction (loop_vec_info loop_info, gimple phi,
                          bool check_reduction, bool *double_reduc)
{
  struct loop *loop = (gimple_bb (phi))->loop_father;
  struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
  edge latch_e = loop_latch_edge (loop);
  tree loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
  gimple def_stmt, def1 = NULL, def2 = NULL;
  enum tree_code code;
  tree op1, op2, op3 = NULL_TREE, op4 = NULL_TREE;
  tree type;
  int nloop_uses;
  tree name;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  bool phi_def;

  *double_reduc = false;

  /* If CHECK_REDUCTION is true, we assume inner-most loop vectorization,
     otherwise, we assume outer loop vectorization.  */
  gcc_assert ((check_reduction && loop == vect_loop)
              || (!check_reduction && flow_loop_nested_p (vect_loop, loop)));

  name = PHI_RESULT (phi);
  nloop_uses = 0;
  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
    {
      gimple use_stmt = USE_STMT (use_p);
      if (is_gimple_debug (use_stmt))
        continue;
      if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))
          && vinfo_for_stmt (use_stmt)
          && !is_pattern_stmt_p (vinfo_for_stmt (use_stmt)))
        nloop_uses++;
      if (nloop_uses > 1)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "reduction used in loop.");
          return NULL;
        }
    }

  if (TREE_CODE (loop_arg) != SSA_NAME)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "reduction: not ssa_name: ");
          print_generic_expr (vect_dump, loop_arg, TDF_SLIM);
        }
      return NULL;
    }

  def_stmt = SSA_NAME_DEF_STMT (loop_arg);
  if (!def_stmt)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "reduction: no def_stmt.");
      return NULL;
    }

  if (!is_gimple_assign (def_stmt) && gimple_code (def_stmt) != GIMPLE_PHI)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        print_gimple_stmt (vect_dump, def_stmt, 0, TDF_SLIM);
      return NULL;
    }

  if (is_gimple_assign (def_stmt))
    {
      name = gimple_assign_lhs (def_stmt);
      phi_def = false;
    }
  else
    {
      name = PHI_RESULT (def_stmt);
      phi_def = true;
    }

  nloop_uses = 0;
  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
    {
      gimple use_stmt = USE_STMT (use_p);
      if (is_gimple_debug (use_stmt))
        continue;
      if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))
          && vinfo_for_stmt (use_stmt)
          && !is_pattern_stmt_p (vinfo_for_stmt (use_stmt)))
        nloop_uses++;
      if (nloop_uses > 1)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "reduction used in loop.");
          return NULL;
        }
    }

  /* If DEF_STMT is a phi node itself, we expect it to have a single argument
     defined in the inner loop.  */
  if (phi_def)
    {
      op1 = PHI_ARG_DEF (def_stmt, 0);

      if (gimple_phi_num_args (def_stmt) != 1
          || TREE_CODE (op1) != SSA_NAME)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "unsupported phi node definition.");

          return NULL;
        }

      def1 = SSA_NAME_DEF_STMT (op1);
      if (flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
          && loop->inner
          && flow_bb_inside_loop_p (loop->inner, gimple_bb (def1))
          && is_gimple_assign (def1))
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            report_vect_op (def_stmt, "detected double reduction: ");

          *double_reduc = true;
          return def_stmt;
        }

      return NULL;
    }

  code = gimple_assign_rhs_code (def_stmt);

  if (check_reduction
      && (!commutative_tree_code (code) || !associative_tree_code (code)))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        report_vect_op (def_stmt, "reduction: not commutative/associative: ");
      return NULL;
    }

  if (get_gimple_rhs_class (code) != GIMPLE_BINARY_RHS)
    {
      if (code != COND_EXPR)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            report_vect_op (def_stmt, "reduction: not binary operation: ");

          return NULL;
        }

      op3 = TREE_OPERAND (gimple_assign_rhs1 (def_stmt), 0);
      if (COMPARISON_CLASS_P (op3))
        {
          op4 = TREE_OPERAND (op3, 1);
          op3 = TREE_OPERAND (op3, 0);
        }

      op1 = TREE_OPERAND (gimple_assign_rhs1 (def_stmt), 1);
      op2 = TREE_OPERAND (gimple_assign_rhs1 (def_stmt), 2);

      if (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op2) != SSA_NAME)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            report_vect_op (def_stmt, "reduction: uses not ssa_names: ");

          return NULL;
        }
    }
  else
    {
      op1 = gimple_assign_rhs1 (def_stmt);
      op2 = gimple_assign_rhs2 (def_stmt);

      if (TREE_CODE (op1) != SSA_NAME || TREE_CODE (op2) != SSA_NAME)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            report_vect_op (def_stmt, "reduction: uses not ssa_names: ");

          return NULL;
        }
    }

  type = TREE_TYPE (gimple_assign_lhs (def_stmt));
  if ((TREE_CODE (op1) == SSA_NAME
       && !types_compatible_p (type, TREE_TYPE (op1)))
      || (TREE_CODE (op2) == SSA_NAME
          && !types_compatible_p (type, TREE_TYPE (op2)))
      || (op3 && TREE_CODE (op3) == SSA_NAME
          && !types_compatible_p (type, TREE_TYPE (op3)))
      || (op4 && TREE_CODE (op4) == SSA_NAME
          && !types_compatible_p (type, TREE_TYPE (op4))))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "reduction: multiple types: operation type: ");
          print_generic_expr (vect_dump, type, TDF_SLIM);
          fprintf (vect_dump, ", operands types: ");
          print_generic_expr (vect_dump, TREE_TYPE (op1), TDF_SLIM);
          fprintf (vect_dump, ",");
          print_generic_expr (vect_dump, TREE_TYPE (op2), TDF_SLIM);
          if (op3)
            {
              fprintf (vect_dump, ",");
              print_generic_expr (vect_dump, TREE_TYPE (op3), TDF_SLIM);
            }

          if (op4)
            {
              fprintf (vect_dump, ",");
              print_generic_expr (vect_dump, TREE_TYPE (op4), TDF_SLIM);
            }
        }

      return NULL;
    }

  /* Check that it's ok to change the order of the computation.
     Generally, when vectorizing a reduction we change the order of the
     computation.  This may change the behavior of the program in some
     cases, so we need to check that this is ok.  One exception is when
     vectorizing an outer-loop: the inner-loop is executed sequentially,
     and therefore vectorizing reductions in the inner-loop during
     outer-loop vectorization is safe.  */
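
  /* For example (sketch): vectorizing a float summation with VF = 4
     effectively rewrites

       s = a[0] + a[1] + a[2] + a[3] + ... ;

     into four partial sums combined at the end, i.e.

       s = (a[0] + a[4] + ...) + (a[1] + a[5] + ...) + ... ;

     which may round differently; hence the flag_associative_math check
     below for floating-point types.  */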
1834 /* CHECKME: check for !flag_finite_math_only too? */
1835 if (SCALAR_FLOAT_TYPE_P (type) && !flag_associative_math
1836 && check_reduction)
1838 /* Changing the order of operations changes the semantics. */
1839 if (vect_print_dump_info (REPORT_DETAILS))
1840 report_vect_op (def_stmt, "reduction: unsafe fp math optimization: ");
1841 return NULL;
1843 else if (INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type)
1844 && check_reduction)
1846 /* Changing the order of operations changes the semantics. */
1847 if (vect_print_dump_info (REPORT_DETAILS))
1848 report_vect_op (def_stmt, "reduction: unsafe int math optimization: ");
1849 return NULL;
1851 else if (SAT_FIXED_POINT_TYPE_P (type) && check_reduction)
1853 /* Changing the order of operations changes the semantics. */
1854 if (vect_print_dump_info (REPORT_DETAILS))
1855 report_vect_op (def_stmt,
1856 "reduction: unsafe fixed-point math optimization: ");
1857 return NULL;
1860 /* Reduction is safe. We're dealing with one of the following:
1861 1) integer arithmetic and no trapv
1862 2) floating point arithmetic, and special flags permit this optimization
1863 3) nested cycle (i.e., outer loop vectorization). */
1864 if (TREE_CODE (op1) == SSA_NAME)
1865 def1 = SSA_NAME_DEF_STMT (op1);
1867 if (TREE_CODE (op2) == SSA_NAME)
1868 def2 = SSA_NAME_DEF_STMT (op2);
1870 if (code != COND_EXPR
1871 && (!def1 || !def2 || gimple_nop_p (def1) || gimple_nop_p (def2)))
1873 if (vect_print_dump_info (REPORT_DETAILS))
1874 report_vect_op (def_stmt, "reduction: no defs for operands: ");
1875 return NULL;
1878 /* Check that one def is the reduction def, defined by PHI, and that
1879 the other def is either defined in the loop ("vect_internal_def")
1880 or is an induction (defined by a loop-header phi-node). */
1882 if (def2 && def2 == phi
1883 && (code == COND_EXPR
1884 || (def1 && flow_bb_inside_loop_p (loop, gimple_bb (def1))
1885 && (is_gimple_assign (def1)
1886 || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1))
1887 == vect_induction_def
1888 || (gimple_code (def1) == GIMPLE_PHI
1889 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1))
1890 == vect_internal_def
1891 && !is_loop_header_bb_p (gimple_bb (def1)))))))
1893 if (vect_print_dump_info (REPORT_DETAILS))
1894 report_vect_op (def_stmt, "detected reduction: ");
1895 return def_stmt;
1897 else if (def1 && def1 == phi
1898 && (code == COND_EXPR
1899 || (def2 && flow_bb_inside_loop_p (loop, gimple_bb (def2))
1900 && (is_gimple_assign (def2)
1901 || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2))
1902 == vect_induction_def
1903 || (gimple_code (def2) == GIMPLE_PHI
1904 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2))
1905 == vect_internal_def
1906 && !is_loop_header_bb_p (gimple_bb (def2)))))))
1908 if (check_reduction)
1910 /* Swap operands (just for simplicity - so that the rest of the code
1911 can assume that the reduction variable is always the last (second)
1912 argument). */
1913 if (vect_print_dump_info (REPORT_DETAILS))
1914 report_vect_op (def_stmt,
1915 "detected reduction: need to swap operands: ");
1917 swap_tree_operands (def_stmt, gimple_assign_rhs1_ptr (def_stmt),
1918 gimple_assign_rhs2_ptr (def_stmt));
1920 else
1922 if (vect_print_dump_info (REPORT_DETAILS))
1923 report_vect_op (def_stmt, "detected reduction: ");
1926 return def_stmt;
1928 else
1930 if (vect_print_dump_info (REPORT_DETAILS))
1931 report_vect_op (def_stmt, "reduction: unknown pattern: ");
1933 return NULL;
1938 /* Function vect_estimate_min_profitable_iters
1940 Return the number of iterations required for the vector version of the
1941 loop to be profitable relative to the cost of the scalar version of the
1942 loop.
1944 TODO: Take profile info into account before making vectorization
1945 decisions, if available. */
1947 static int
1948 vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo)
1950 int i;
1951 int min_profitable_iters;
1952 int peel_iters_prologue;
1953 int peel_iters_epilogue;
1954 int vec_inside_cost = 0;
1955 int vec_outside_cost = 0;
1956 int scalar_single_iter_cost = 0;
1957 int scalar_outside_cost = 0;
1958 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1959 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1960 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
1961 int nbbs = loop->num_nodes;
1962 int byte_misalign = LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo);
1963 int peel_guard_costs = 0;
1964 int innerloop_iters = 0, factor;
1965 VEC (slp_instance, heap) *slp_instances;
1966 slp_instance instance;
1968 /* Cost model disabled. */
1969 if (!flag_vect_cost_model)
1971 if (vect_print_dump_info (REPORT_COST))
1972 fprintf (vect_dump, "cost model disabled.");
1973 return 0;
1976 /* Requires loop versioning tests to handle misalignment. */
1977 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
1979 /* FIXME: Make cost depend on complexity of individual check. */
1980 vec_outside_cost +=
1981 VEC_length (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo));
1982 if (vect_print_dump_info (REPORT_COST))
1983 fprintf (vect_dump, "cost model: Adding cost of checks for loop "
1984 "versioning to treat misalignment.\n");
1987 /* Requires loop versioning with alias checks. */
1988 if (LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
1990 /* FIXME: Make cost depend on complexity of individual check. */
1991 vec_outside_cost +=
1992 VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo));
1993 if (vect_print_dump_info (REPORT_COST))
1994 fprintf (vect_dump, "cost model: Adding cost of checks for loop "
1995 "versioning aliasing.\n");
1998 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
1999 || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
2000 vec_outside_cost += TARG_COND_TAKEN_BRANCH_COST;
2002 /* Count statements in scalar loop. Using this as scalar cost for a single
2003 iteration for now.
2005 TODO: Add outer loop support.
2007 TODO: Consider assigning different costs to different scalar
2008 statements. */
2010 /* FORNOW. */
2011 if (loop->inner)
2012 innerloop_iters = 50; /* FIXME */
2014 for (i = 0; i < nbbs; i++)
2016 gimple_stmt_iterator si;
2017 basic_block bb = bbs[i];
2019 if (bb->loop_father == loop->inner)
2020 factor = innerloop_iters;
2021 else
2022 factor = 1;
2024 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
2026 gimple stmt = gsi_stmt (si);
2027 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2028 /* Skip stmts that are not vectorized inside the loop. */
2029 if (!STMT_VINFO_RELEVANT_P (stmt_info)
2030 && (!STMT_VINFO_LIVE_P (stmt_info)
2031 || STMT_VINFO_DEF_TYPE (stmt_info) != vect_reduction_def))
2032 continue;
2033 scalar_single_iter_cost += cost_for_stmt (stmt) * factor;
2034 vec_inside_cost += STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info) * factor;
2035 /* FIXME: for stmts in the inner-loop in outer-loop vectorization,
2036 some of the "outside" costs are generated inside the outer-loop. */
2037 vec_outside_cost += STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info);
2041 /* Add extra cost for the peeled iterations executed in the prologue and
2042 epilogue loops.
2044 FORNOW: If we don't know the value of peel_iters for prologue or epilogue
2045 at compile-time - we assume it's vf/2 (the worst would be vf-1).
2047 TODO: Build an expression that represents peel_iters for prologue and
2048 epilogue to be used in a run-time test. */
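/* E.g. (illustrative): with VF = 8 and a misalignment that is unknown at
   compile time, both peel counts are assumed to be 8/2 = 4, so 4 + 4 = 8
   scalar iterations plus the branch guards are charged below. */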
2050 if (byte_misalign < 0)
2052 peel_iters_prologue = vf/2;
2053 if (vect_print_dump_info (REPORT_COST))
2054 fprintf (vect_dump, "cost model: "
2055 "prologue peel iters set to vf/2.");
2057 /* If peeling for alignment is unknown, the loop bound of the main
2058 loop becomes unknown. */
2059 peel_iters_epilogue = vf/2;
2060 if (vect_print_dump_info (REPORT_COST))
2061 fprintf (vect_dump, "cost model: "
2062 "epilogue peel iters set to vf/2 because "
2063 "peeling for alignment is unknown .");
2065 /* If peeled iterations are unknown, count a taken branch and a not taken
2066 branch per peeled loop. Even if scalar loop iterations are known,
2067 vector iterations are not known since peeled prologue iterations are
2068 not known. Hence guards remain the same. */
2069 peel_guard_costs += 2 * (TARG_COND_TAKEN_BRANCH_COST
2070 + TARG_COND_NOT_TAKEN_BRANCH_COST);
2072 else
2074 if (byte_misalign)
2076 struct data_reference *dr = LOOP_VINFO_UNALIGNED_DR (loop_vinfo);
2077 int element_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr))));
2078 tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (DR_STMT (dr)));
2079 int nelements = TYPE_VECTOR_SUBPARTS (vectype);
2081 peel_iters_prologue = nelements - (byte_misalign / element_size);
2083 else
2084 peel_iters_prologue = 0;
2086 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
2088 peel_iters_epilogue = vf/2;
2089 if (vect_print_dump_info (REPORT_COST))
2090 fprintf (vect_dump, "cost model: "
2091 "epilogue peel iters set to vf/2 because "
2092 "loop iterations are unknown .");
2094 /* If peeled iterations are known but the number of scalar loop
2095 iterations is unknown, count a taken branch per peeled loop. */
2096 peel_guard_costs += 2 * TARG_COND_TAKEN_BRANCH_COST;
2099 else
2101 int niters = LOOP_VINFO_INT_NITERS (loop_vinfo);
2102 peel_iters_prologue = niters < peel_iters_prologue ?
2103 niters : peel_iters_prologue;
2104 peel_iters_epilogue = (niters - peel_iters_prologue) % vf;
2108 vec_outside_cost += (peel_iters_prologue * scalar_single_iter_cost)
2109 + (peel_iters_epilogue * scalar_single_iter_cost)
2110 + peel_guard_costs;
2112 /* FORNOW: The scalar outside cost is incremented in one of the
2113 following ways:
2115 1. The vectorizer checks for alignment and aliasing and generates
2116 a condition that allows dynamic vectorization. A cost model
2117 check is ANDed with the versioning condition. Hence the scalar code
2118 path now has the added cost of the versioning check.
2120 if (cost > th & versioning_check)
2121 jmp to vector code
2123 Hence the run-time scalar cost is incremented by a not-taken branch cost.
2125 2. The vectorizer then checks if a prologue is required. If the
2126 cost model check was not done before during versioning, it has to
2127 be done before the prologue check.
2129 if (cost <= th)
2130 prologue = scalar_iters
2131 if (prologue == 0)
2132 jmp to vector code
2133 else
2134 execute prologue
2135 if (prologue == num_iters)
2136 go to exit
2138 Hence the run-time scalar cost is incremented by a taken branch,
2139 plus a not-taken branch, plus a taken branch cost.
2141 3. The vectorizer then checks if an epilogue is required. If the
2142 cost model check was not done before during prologue check, it
2143 has to be done with the epilogue check.
2145 if (prologue == 0)
2146 jmp to vector code
2147 else
2148 execute prologue
2149 if (prologue == num_iters)
2150 go to exit
2151 vector code:
2152 if ((cost <= th) | (scalar_iters-prologue-epilogue == 0))
2153 jmp to epilogue
2155 Hence the run-time scalar cost should be incremented by 2 taken
2156 branches.
2158 TODO: The back end may reorder the BBs differently and reverse
2159 conditions/branch directions. Change the estimates below to
2160 something more reasonable. */
2162 /* If the number of iterations is known and we do not do versioning, we can
2163 decide whether to vectorize at compile time. Hence the scalar version
2164 does not carry cost model guard costs. */
2165 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
2166 || LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
2167 || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
2169 /* Cost model check occurs at versioning. */
2170 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
2171 || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
2172 scalar_outside_cost += TARG_COND_NOT_TAKEN_BRANCH_COST;
2173 else
2175 /* Cost model check occurs at prologue generation. */
2176 if (LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0)
2177 scalar_outside_cost += 2 * TARG_COND_TAKEN_BRANCH_COST
2178 + TARG_COND_NOT_TAKEN_BRANCH_COST;
2179 /* Cost model check occurs at epilogue generation. */
2180 else
2181 scalar_outside_cost += 2 * TARG_COND_TAKEN_BRANCH_COST;
2185 /* Add SLP costs. */
2186 slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
2187 for (i = 0; VEC_iterate (slp_instance, slp_instances, i, instance); i++)
2189 vec_outside_cost += SLP_INSTANCE_OUTSIDE_OF_LOOP_COST (instance);
2190 vec_inside_cost += SLP_INSTANCE_INSIDE_OF_LOOP_COST (instance);
2193 /* Calculate number of iterations required to make the vector version
2194 profitable, relative to the loop bodies only. The following condition
2195 must hold true:
2196 SIC * niters + SOC > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC
2197 where
2198 SIC = scalar iteration cost, VIC = vector iteration cost,
2199 VOC = vector outside cost, VF = vectorization factor,
2200 PL_ITERS = prologue iterations, EP_ITERS= epilogue iterations
2201 SOC = scalar outside cost for run time cost model check. */
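/* Worked example with illustrative numbers: SIC = 4, VIC = 6, VF = 4,
   SOC = 0, VOC = 20 and no peeling give
   min_profitable_iters = (20 * 4) / (4 * 4 - 6) = 80 / 10 = 8;
   since 4 * 4 * 8 = 128 <= 6 * 8 + 20 * 4 = 128, the correction below
   bumps the result to 9. */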
2203 if ((scalar_single_iter_cost * vf) > vec_inside_cost)
2205 if (vec_outside_cost <= 0)
2206 min_profitable_iters = 1;
2207 else
2209 min_profitable_iters = ((vec_outside_cost - scalar_outside_cost) * vf
2210 - vec_inside_cost * peel_iters_prologue
2211 - vec_inside_cost * peel_iters_epilogue)
2212 / ((scalar_single_iter_cost * vf)
2213 - vec_inside_cost);
2215 if ((scalar_single_iter_cost * vf * min_profitable_iters)
2216 <= ((vec_inside_cost * min_profitable_iters)
2217 + ((vec_outside_cost - scalar_outside_cost) * vf)))
2218 min_profitable_iters++;
2221 /* The vector version will never be profitable. */
2222 else
2224 if (vect_print_dump_info (REPORT_COST))
2225 fprintf (vect_dump, "cost model: the vector iteration cost = %d "
2226 "divided by the scalar iteration cost = %d "
2227 "is greater or equal to the vectorization factor = %d.",
2228 vec_inside_cost, scalar_single_iter_cost, vf);
2229 return -1;
2232 if (vect_print_dump_info (REPORT_COST))
2234 fprintf (vect_dump, "Cost model analysis: \n");
2235 fprintf (vect_dump, " Vector inside of loop cost: %d\n",
2236 vec_inside_cost);
2237 fprintf (vect_dump, " Vector outside of loop cost: %d\n",
2238 vec_outside_cost);
2239 fprintf (vect_dump, " Scalar iteration cost: %d\n",
2240 scalar_single_iter_cost);
2241 fprintf (vect_dump, " Scalar outside cost: %d\n", scalar_outside_cost);
2242 fprintf (vect_dump, " prologue iterations: %d\n",
2243 peel_iters_prologue);
2244 fprintf (vect_dump, " epilogue iterations: %d\n",
2245 peel_iters_epilogue);
2246 fprintf (vect_dump, " Calculated minimum iters for profitability: %d\n",
2247 min_profitable_iters);
2250 min_profitable_iters =
2251 min_profitable_iters < vf ? vf : min_profitable_iters;
2253 /* Because the guard we create is
2254 "if (niters <= min_profitable_iters) then skip the vectorized loop",
2255 decrement by one so the vector loop runs for all profitable counts. */
2256 min_profitable_iters--;
2258 if (vect_print_dump_info (REPORT_COST))
2259 fprintf (vect_dump, " Profitability threshold = %d\n",
2260 min_profitable_iters);
2262 return min_profitable_iters;
2266 /* TODO: There is a close dependency between the vect_model_*_cost and
2267 vectorizable_* functions. Design better to avoid maintenance issues. */
2269 /* Function vect_model_reduction_cost.
2271 Models cost for a reduction operation, including the vector ops
2272 generated within the strip-mine loop, the initial definition before
2273 the loop, and the epilogue code that must be generated. */
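/* For instance (assumed costs, illustration only): a PLUS reduction with
   ncopies = 1 and a target-supported REDUC_CODE is modeled as one vector
   stmt inside the loop, plus an outside cost of one scalar-to-vector copy
   (the initial definition), one vector stmt (the reduction) and one
   vector-to-scalar extract - exactly the terms added below. */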
2275 static bool
2276 vect_model_reduction_cost (stmt_vec_info stmt_info, enum tree_code reduc_code,
2277 int ncopies)
2279 int outer_cost = 0;
2280 enum tree_code code;
2281 optab optab;
2282 tree vectype;
2283 gimple stmt, orig_stmt;
2284 tree reduction_op;
2285 enum machine_mode mode;
2286 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2287 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2290 /* Cost of reduction op inside loop. */
2291 STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info) += ncopies * TARG_VEC_STMT_COST;
2293 stmt = STMT_VINFO_STMT (stmt_info);
2295 switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
2297 case GIMPLE_SINGLE_RHS:
2298 gcc_assert (TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt)) == ternary_op);
2299 reduction_op = TREE_OPERAND (gimple_assign_rhs1 (stmt), 2);
2300 break;
2301 case GIMPLE_UNARY_RHS:
2302 reduction_op = gimple_assign_rhs1 (stmt);
2303 break;
2304 case GIMPLE_BINARY_RHS:
2305 reduction_op = gimple_assign_rhs2 (stmt);
2306 break;
2307 default:
2308 gcc_unreachable ();
2311 vectype = get_vectype_for_scalar_type (TREE_TYPE (reduction_op));
2312 if (!vectype)
2314 if (vect_print_dump_info (REPORT_COST))
2316 fprintf (vect_dump, "unsupported data-type ");
2317 print_generic_expr (vect_dump, TREE_TYPE (reduction_op), TDF_SLIM);
2319 return false;
2322 mode = TYPE_MODE (vectype);
2323 orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
2325 if (!orig_stmt)
2326 orig_stmt = STMT_VINFO_STMT (stmt_info);
2328 code = gimple_assign_rhs_code (orig_stmt);
2330 /* Add in cost for initial definition. */
2331 outer_cost += TARG_SCALAR_TO_VEC_COST;
2333 /* Determine cost of epilogue code.
2335 We have a reduction operator that will reduce the vector in one statement.
2336 Also requires scalar extract. */
2338 if (!nested_in_vect_loop_p (loop, orig_stmt))
2340 if (reduc_code != ERROR_MARK)
2341 outer_cost += TARG_VEC_STMT_COST + TARG_VEC_TO_SCALAR_COST;
2342 else
2344 int vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1);
2345 tree bitsize =
2346 TYPE_SIZE (TREE_TYPE (gimple_assign_lhs (orig_stmt)));
2347 int element_bitsize = tree_low_cst (bitsize, 1);
2348 int nelements = vec_size_in_bits / element_bitsize;
2350 optab = optab_for_tree_code (code, vectype, optab_default);
2352 /* We have a whole vector shift available. */
2353 if (VECTOR_MODE_P (mode)
2354 && optab_handler (optab, mode)->insn_code != CODE_FOR_nothing
2355 && optab_handler (vec_shr_optab, mode)->insn_code != CODE_FOR_nothing)
2356 /* Final reduction via vector shifts and the reduction operator. Also
2357 requires scalar extract. */
2358 outer_cost += ((exact_log2(nelements) * 2) * TARG_VEC_STMT_COST
2359 + TARG_VEC_TO_SCALAR_COST);
2360 else
2361 /* Use extracts and reduction op for final reduction. For N elements,
2362 we have N extracts and N-1 reduction ops. */
2363 outer_cost += ((nelements + nelements - 1) * TARG_VEC_STMT_COST);
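/* E.g. for nelements = 4 (illustrative): the whole-vector-shift scheme
   costs exact_log2 (4) * 2 = 4 vector stmts plus one extract, while the
   scalar scheme costs 4 extracts plus 4 - 1 = 3 reduction ops, i.e. 7
   vector-stmt-cost units. */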
2367 STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info) = outer_cost;
2369 if (vect_print_dump_info (REPORT_COST))
2370 fprintf (vect_dump, "vect_model_reduction_cost: inside_cost = %d, "
2371 "outside_cost = %d .", STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info),
2372 STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info));
2374 return true;
2378 /* Function vect_model_induction_cost.
2380 Models cost for induction operations. */
2382 static void
2383 vect_model_induction_cost (stmt_vec_info stmt_info, int ncopies)
2385 /* loop cost for vec_loop. */
2386 STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info) = ncopies * TARG_VEC_STMT_COST;
2387 /* prologue cost for vec_init and vec_step. */
2388 STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info) = 2 * TARG_SCALAR_TO_VEC_COST;
2390 if (vect_print_dump_info (REPORT_COST))
2391 fprintf (vect_dump, "vect_model_induction_cost: inside_cost = %d, "
2392 "outside_cost = %d .", STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info),
2393 STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info));
2397 /* Function get_initial_def_for_induction
2399 Input:
2400 STMT - a stmt that performs an induction operation in the loop.
2401 IV_PHI - the initial value of the induction variable
2403 Output:
2404 Return a vector variable, initialized with the first VF values of
2405 the induction variable. E.g., for an iv with IV_PHI='X' and
2406 evolution S, for a vector of 4 units, we want to return:
2407 [X, X + S, X + 2*S, X + 3*S]. */
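/* For instance (illustrative numbers): X = 3, S = 2 and VF = nunits = 4
   yield vec_init = [3, 5, 7, 9] and, in the non-nested case,
   vec_step = [VF*S, VF*S, VF*S, VF*S] = [8, 8, 8, 8]. */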
2409 static tree
2410 get_initial_def_for_induction (gimple iv_phi)
2412 stmt_vec_info stmt_vinfo = vinfo_for_stmt (iv_phi);
2413 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
2414 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2415 tree scalar_type = TREE_TYPE (gimple_phi_result (iv_phi));
2416 tree vectype;
2417 int nunits;
2418 edge pe = loop_preheader_edge (loop);
2419 struct loop *iv_loop;
2420 basic_block new_bb;
2421 tree vec, vec_init, vec_step, t;
2422 tree access_fn;
2423 tree new_var;
2424 tree new_name;
2425 gimple init_stmt, induction_phi, new_stmt;
2426 tree induc_def, vec_def, vec_dest;
2427 tree init_expr, step_expr;
2428 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2429 int i;
2430 bool ok;
2431 int ncopies;
2432 tree expr;
2433 stmt_vec_info phi_info = vinfo_for_stmt (iv_phi);
2434 bool nested_in_vect_loop = false;
2435 gimple_seq stmts = NULL;
2436 imm_use_iterator imm_iter;
2437 use_operand_p use_p;
2438 gimple exit_phi;
2439 edge latch_e;
2440 tree loop_arg;
2441 gimple_stmt_iterator si;
2442 basic_block bb = gimple_bb (iv_phi);
2443 tree stepvectype;
2445 vectype = get_vectype_for_scalar_type (scalar_type);
2446 gcc_assert (vectype);
2447 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2448 ncopies = vf / nunits;
2450 gcc_assert (phi_info);
2451 gcc_assert (ncopies >= 1);
2453 /* Find the first insertion point in the BB. */
2454 si = gsi_after_labels (bb);
2456 if (INTEGRAL_TYPE_P (scalar_type))
2457 step_expr = build_int_cst (scalar_type, 0);
2458 else if (POINTER_TYPE_P (scalar_type))
2459 step_expr = build_int_cst (sizetype, 0);
2460 else
2461 step_expr = build_real (scalar_type, dconst0);
2463 /* Is phi in an inner-loop, while vectorizing an enclosing outer-loop? */
2464 if (nested_in_vect_loop_p (loop, iv_phi))
2466 nested_in_vect_loop = true;
2467 iv_loop = loop->inner;
2469 else
2470 iv_loop = loop;
2471 gcc_assert (iv_loop == (gimple_bb (iv_phi))->loop_father);
2473 latch_e = loop_latch_edge (iv_loop);
2474 loop_arg = PHI_ARG_DEF_FROM_EDGE (iv_phi, latch_e);
2476 access_fn = analyze_scalar_evolution (iv_loop, PHI_RESULT (iv_phi));
2477 gcc_assert (access_fn);
2478 ok = vect_is_simple_iv_evolution (iv_loop->num, access_fn,
2479 &init_expr, &step_expr);
2480 gcc_assert (ok);
2481 pe = loop_preheader_edge (iv_loop);
2483 /* Create the vector that holds the initial_value of the induction. */
2484 if (nested_in_vect_loop)
2486 /* iv_loop is nested in the loop to be vectorized. init_expr has already
2487 been created during vectorization of previous stmts; we obtain it from
2488 the STMT_VINFO_VEC_STMT of the defining stmt. */
2489 tree iv_def = PHI_ARG_DEF_FROM_EDGE (iv_phi,
2490 loop_preheader_edge (iv_loop));
2491 vec_init = vect_get_vec_def_for_operand (iv_def, iv_phi, NULL);
2493 else
2495 /* iv_loop is the loop to be vectorized. Create:
2496 vec_init = [X, X+S, X+2*S, X+3*S] (S = step_expr, X = init_expr) */
2497 new_var = vect_get_new_vect_var (scalar_type, vect_scalar_var, "var_");
2498 add_referenced_var (new_var);
2500 new_name = force_gimple_operand (init_expr, &stmts, false, new_var);
2501 if (stmts)
2503 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
2504 gcc_assert (!new_bb);
2507 t = NULL_TREE;
2508 t = tree_cons (NULL_TREE, init_expr, t);
2509 for (i = 1; i < nunits; i++)
2511 /* Create: new_name_i = new_name + step_expr */
2512 enum tree_code code = POINTER_TYPE_P (scalar_type)
2513 ? POINTER_PLUS_EXPR : PLUS_EXPR;
2514 init_stmt = gimple_build_assign_with_ops (code, new_var,
2515 new_name, step_expr);
2516 new_name = make_ssa_name (new_var, init_stmt);
2517 gimple_assign_set_lhs (init_stmt, new_name);
2519 new_bb = gsi_insert_on_edge_immediate (pe, init_stmt);
2520 gcc_assert (!new_bb);
2522 if (vect_print_dump_info (REPORT_DETAILS))
2524 fprintf (vect_dump, "created new init_stmt: ");
2525 print_gimple_stmt (vect_dump, init_stmt, 0, TDF_SLIM);
2527 t = tree_cons (NULL_TREE, new_name, t);
2529 /* Create a vector from [new_name_0, new_name_1, ..., new_name_nunits-1] */
2530 vec = build_constructor_from_list (vectype, nreverse (t));
2531 vec_init = vect_init_vector (iv_phi, vec, vectype, NULL);
2535 /* Create the vector that holds the step of the induction. */
2536 if (nested_in_vect_loop)
2537 /* iv_loop is nested in the loop to be vectorized. Generate:
2538 vec_step = [S, S, S, S] */
2539 new_name = step_expr;
2540 else
2542 /* iv_loop is the loop to be vectorized. Generate:
2543 vec_step = [VF*S, VF*S, VF*S, VF*S] */
2544 expr = build_int_cst (TREE_TYPE (step_expr), vf);
2545 new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
2546 expr, step_expr);
2549 t = NULL_TREE;
2550 for (i = 0; i < nunits; i++)
2551 t = tree_cons (NULL_TREE, unshare_expr (new_name), t);
2552 gcc_assert (CONSTANT_CLASS_P (new_name));
2553 stepvectype = get_vectype_for_scalar_type (TREE_TYPE (new_name));
2554 gcc_assert (stepvectype);
2555 vec = build_vector (stepvectype, t);
2556 vec_step = vect_init_vector (iv_phi, vec, stepvectype, NULL);
2559 /* Create the following def-use cycle:
2560 loop prolog:
2561 vec_init = ...
2562 vec_step = ...
2563 loop:
2564 vec_iv = PHI <vec_init, vec_loop>
2566 STMT
2568 vec_loop = vec_iv + vec_step; */
2570 /* Create the induction-phi that defines the induction-operand. */
2571 vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_");
2572 add_referenced_var (vec_dest);
2573 induction_phi = create_phi_node (vec_dest, iv_loop->header);
2574 set_vinfo_for_stmt (induction_phi,
2575 new_stmt_vec_info (induction_phi, loop_vinfo, NULL));
2576 induc_def = PHI_RESULT (induction_phi);
2578 /* Create the iv update inside the loop */
2579 new_stmt = gimple_build_assign_with_ops (PLUS_EXPR, vec_dest,
2580 induc_def, vec_step);
2581 vec_def = make_ssa_name (vec_dest, new_stmt);
2582 gimple_assign_set_lhs (new_stmt, vec_def);
2583 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
2584 set_vinfo_for_stmt (new_stmt, new_stmt_vec_info (new_stmt, loop_vinfo,
2585 NULL));
2587 /* Set the arguments of the phi node: */
2588 add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION);
2589 add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
2590 UNKNOWN_LOCATION);
2593 /* In case the vectorization factor (VF) is bigger than the number
2594 of elements that we can fit in a vectype (nunits), we have to generate
2595 more than one vector stmt - i.e., we need to "unroll" the
2596 vector stmt by a factor VF/nunits. For more details see documentation
2597 in vectorizable_operation. */
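/* E.g. (illustrative): VF = 8 with a 4-element vectype gives ncopies = 2;
   each extra copy is obtained by adding a step vector of
   [nunits*S, ..., nunits*S] = [4*S, 4*S, 4*S, 4*S] to the previous
   vector IV, as done below. */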
2599 if (ncopies > 1)
2601 stmt_vec_info prev_stmt_vinfo;
2602 /* FORNOW. This restriction should be relaxed. */
2603 gcc_assert (!nested_in_vect_loop);
2605 /* Create the vector that holds the step of the induction. */
2606 expr = build_int_cst (TREE_TYPE (step_expr), nunits);
2607 new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
2608 expr, step_expr);
2609 t = NULL_TREE;
2610 for (i = 0; i < nunits; i++)
2611 t = tree_cons (NULL_TREE, unshare_expr (new_name), t);
2612 gcc_assert (CONSTANT_CLASS_P (new_name));
2613 vec = build_vector (stepvectype, t);
2614 vec_step = vect_init_vector (iv_phi, vec, stepvectype, NULL);
2616 vec_def = induc_def;
2617 prev_stmt_vinfo = vinfo_for_stmt (induction_phi);
2618 for (i = 1; i < ncopies; i++)
2620 /* vec_i = vec_prev + vec_step */
2621 new_stmt = gimple_build_assign_with_ops (PLUS_EXPR, vec_dest,
2622 vec_def, vec_step);
2623 vec_def = make_ssa_name (vec_dest, new_stmt);
2624 gimple_assign_set_lhs (new_stmt, vec_def);
2626 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
2627 set_vinfo_for_stmt (new_stmt,
2628 new_stmt_vec_info (new_stmt, loop_vinfo, NULL));
2629 STMT_VINFO_RELATED_STMT (prev_stmt_vinfo) = new_stmt;
2630 prev_stmt_vinfo = vinfo_for_stmt (new_stmt);
2634 if (nested_in_vect_loop)
2636 /* Find the loop-closed exit-phi of the induction, and record
2637 the final vector of induction results: */
2638 exit_phi = NULL;
2639 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
2641 if (!flow_bb_inside_loop_p (iv_loop, gimple_bb (USE_STMT (use_p))))
2643 exit_phi = USE_STMT (use_p);
2644 break;
2647 if (exit_phi)
2649 stmt_vec_info stmt_vinfo = vinfo_for_stmt (exit_phi);
2650 /* FORNOW. Currently not supporting the case that an inner-loop induction
2651 is not used in the outer-loop (i.e. only outside the outer-loop). */
2652 gcc_assert (STMT_VINFO_RELEVANT_P (stmt_vinfo)
2653 && !STMT_VINFO_LIVE_P (stmt_vinfo));
2655 STMT_VINFO_VEC_STMT (stmt_vinfo) = new_stmt;
2656 if (vect_print_dump_info (REPORT_DETAILS))
2658 fprintf (vect_dump, "vector of inductions after inner-loop:");
2659 print_gimple_stmt (vect_dump, new_stmt, 0, TDF_SLIM);
2665 if (vect_print_dump_info (REPORT_DETAILS))
2667 fprintf (vect_dump, "transform induction: created def-use cycle: ");
2668 print_gimple_stmt (vect_dump, induction_phi, 0, TDF_SLIM);
2669 fprintf (vect_dump, "\n");
2670 print_gimple_stmt (vect_dump, SSA_NAME_DEF_STMT (vec_def), 0, TDF_SLIM);
2673 STMT_VINFO_VEC_STMT (phi_info) = induction_phi;
2674 return induc_def;
2678 /* Function get_initial_def_for_reduction
2680 Input:
2681 STMT - a stmt that performs a reduction operation in the loop.
2682 INIT_VAL - the initial value of the reduction variable
2684 Output:
2685 ADJUSTMENT_DEF - a tree that holds a value to be added to the final result
2686 of the reduction (used for adjusting the epilog - see below).
2687 Return a vector variable, initialized according to the operation that STMT
2688 performs. This vector will be used as the initial value of the
2689 vector of partial results.
2691 Option1 (adjust in epilog): Initialize the vector as follows:
2692 add/bit or/xor: [0,0,...,0,0]
2693 mult/bit and: [1,1,...,1,1]
2694 min/max/cond_expr: [init_val,init_val,..,init_val,init_val]
2695 and when necessary (e.g. add/mult case) let the caller know
2696 that it needs to adjust the result by init_val.
2698 Option2: Initialize the vector as follows:
2699 add/bit or/xor: [init_val,0,0,...,0]
2700 mult/bit and: [init_val,1,1,...,1]
2701 min/max/cond_expr: [init_val,init_val,...,init_val]
2702 and no adjustments are needed.
2704 For example, for the following code:
2706 s = init_val;
2707 for (i=0;i<n;i++)
2708 s = s + a[i];
2710 STMT is 's = s + a[i]', and the reduction variable is 's'.
2711 For a vector of 4 units, we want to return either [0,0,0,init_val],
2712 or [0,0,0,0] and let the caller know that it needs to adjust
2713 the result at the end by 'init_val'.
2715 FORNOW, we use the 'adjust in epilog' scheme (Option1) when
2716 ADJUSTMENT_DEF is not NULL, because the initialization vector is then
2717 simpler (same element in all entries), and Option2 otherwise.
2719 A cost model should help decide between these two schemes. */
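/* For a product reduction s = s * a[i] with init_val = 5 (illustrative),
   Option1 would use [1,1,...,1] and ask the caller to multiply the final
   result by 5, while Option2 would use [5,1,...,1] directly. */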
2721 tree
2722 get_initial_def_for_reduction (gimple stmt, tree init_val,
2723 tree *adjustment_def)
2725 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
2726 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
2727 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2728 tree scalar_type = TREE_TYPE (init_val);
2729 tree vectype = get_vectype_for_scalar_type (scalar_type);
2730 int nunits;
2731 enum tree_code code = gimple_assign_rhs_code (stmt);
2732 tree def_for_init;
2733 tree init_def;
2734 tree t = NULL_TREE;
2735 int i;
2736 bool nested_in_vect_loop = false;
2737 tree init_value;
2738 REAL_VALUE_TYPE real_init_val = dconst0;
2739 int int_init_val = 0;
2740 gimple def_stmt = NULL;
2742 gcc_assert (vectype);
2743 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2745 gcc_assert (POINTER_TYPE_P (scalar_type) || INTEGRAL_TYPE_P (scalar_type)
2746 || SCALAR_FLOAT_TYPE_P (scalar_type));
2748 if (nested_in_vect_loop_p (loop, stmt))
2749 nested_in_vect_loop = true;
2750 else
2751 gcc_assert (loop == (gimple_bb (stmt))->loop_father);
2753 /* In case of double reduction we only create a vector variable to be put
2754 in the reduction phi node. The actual statement creation is done in
2755 vect_create_epilog_for_reduction. */
2756 if (adjustment_def && nested_in_vect_loop
2757 && TREE_CODE (init_val) == SSA_NAME
2758 && (def_stmt = SSA_NAME_DEF_STMT (init_val))
2759 && gimple_code (def_stmt) == GIMPLE_PHI
2760 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
2761 && vinfo_for_stmt (def_stmt)
2762 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
2763 == vect_double_reduction_def)
2765 *adjustment_def = NULL;
2766 return vect_create_destination_var (init_val, vectype);
2769 if (TREE_CONSTANT (init_val))
2771 if (SCALAR_FLOAT_TYPE_P (scalar_type))
2772 init_value = build_real (scalar_type, TREE_REAL_CST (init_val));
2773 else
2774 init_value = build_int_cst (scalar_type, TREE_INT_CST_LOW (init_val));
2776 else
2777 init_value = init_val;
2779 switch (code)
2781 case WIDEN_SUM_EXPR:
2782 case DOT_PROD_EXPR:
2783 case PLUS_EXPR:
2784 case MINUS_EXPR:
2785 case BIT_IOR_EXPR:
2786 case BIT_XOR_EXPR:
2787 case MULT_EXPR:
2788 case BIT_AND_EXPR:
2789 /* ADJUSTMENT_DEF is NULL when called from
2790 vect_create_epilog_for_reduction to vectorize double reduction. */
2791 if (adjustment_def)
2793 if (nested_in_vect_loop)
2794 *adjustment_def = vect_get_vec_def_for_operand (init_val, stmt,
2795 NULL);
2796 else
2797 *adjustment_def = init_val;
2800 if (code == MULT_EXPR || code == BIT_AND_EXPR)
2802 real_init_val = dconst1;
2803 int_init_val = 1;
2806 if (SCALAR_FLOAT_TYPE_P (scalar_type))
2807 def_for_init = build_real (scalar_type, real_init_val);
2808 else
2809 def_for_init = build_int_cst (scalar_type, int_init_val);
2811 /* Create the '0' or '1' elements for all but the first position. */
2812 for (i = nunits - 2; i >= 0; --i)
2813 t = tree_cons (NULL_TREE, def_for_init, t);
2815 /* Option1: the first element is '0' or '1' as well. */
2816 if (adjustment_def)
2818 t = tree_cons (NULL_TREE, def_for_init, t);
2819 init_def = build_vector (vectype, t);
2820 break;
2823 /* Option2: the first element is INIT_VAL. */
2824 t = tree_cons (NULL_TREE, init_value, t);
2825 if (TREE_CONSTANT (init_val))
2826 init_def = build_vector (vectype, t);
2827 else
2828 init_def = build_constructor_from_list (vectype, t);
2830 break;
2832 case MIN_EXPR:
2833 case MAX_EXPR:
2834 case COND_EXPR:
2835 if (adjustment_def)
2837 *adjustment_def = NULL_TREE;
2838 init_def = vect_get_vec_def_for_operand (init_val, stmt, NULL);
2839 break;
2842 for (i = nunits - 1; i >= 0; --i)
2843 t = tree_cons (NULL_TREE, init_value, t);
2845 if (TREE_CONSTANT (init_val))
2846 init_def = build_vector (vectype, t);
2847 else
2848 init_def = build_constructor_from_list (vectype, t);
2850 break;
2852 default:
2853 gcc_unreachable ();
2856 return init_def;
2860 /* Function vect_create_epilog_for_reduction
2862 Create code at the loop-epilog to finalize the result of a reduction
2863 computation.
2865 VECT_DEF is a vector of partial results.
2866 REDUC_CODE is the tree-code for the epilog reduction.
2867 NCOPIES is > 1 in case the vectorization factor (VF) is bigger than the
2868 number of elements that we can fit in a vectype (nunits). In this case
2869 we have to generate more than one vector stmt - i.e., we need to "unroll"
2870 the vector stmt by a factor VF/nunits. For more details see documentation
2871 in vectorizable_operation.
2872 STMT is the scalar reduction stmt that is being vectorized.
2873 REDUCTION_PHI is the phi-node that carries the reduction computation.
2874 REDUC_INDEX is the index of the operand in the right hand side of the
2875 statement that is defined by REDUCTION_PHI.
2876 DOUBLE_REDUC is TRUE if double reduction phi nodes should be handled.
2878 This function:
2879 1. Creates the reduction def-use cycle: sets the arguments for
2880 REDUCTION_PHI:
2881 The loop-entry argument is the vectorized initial-value of the reduction.
2882 The loop-latch argument is VECT_DEF - the vector of partial sums.
2883 2. "Reduces" the vector of partial results VECT_DEF into a single result,
2884 by applying the operation specified by REDUC_CODE if available, or by
2885 other means (whole-vector shifts or a scalar loop).
2886 The function also creates a new phi node at the loop exit to preserve
2887 loop-closed form, as illustrated below.
2889 The flow at the entry to this function:
2891 loop:
2892 vec_def = phi <null, null> # REDUCTION_PHI
2893 VECT_DEF = vector_stmt # vectorized form of STMT
2894 s_loop = scalar_stmt # (scalar) STMT
2895 loop_exit:
2896 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
2897 use <s_out0>
2898 use <s_out0>
2900 The above is transformed by this function into:
2902 loop:
2903 vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI
2904 VECT_DEF = vector_stmt # vectorized form of STMT
2905 s_loop = scalar_stmt # (scalar) STMT
2906 loop_exit:
2907 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
2908 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
2909 v_out2 = reduce <v_out1>
2910 s_out3 = extract_field <v_out2, 0>
2911 s_out4 = adjust_result <s_out3>
2912 use <s_out4>
2913 use <s_out4>
2916 static void
2917 vect_create_epilog_for_reduction (tree vect_def, gimple stmt,
2918 int ncopies,
2919 enum tree_code reduc_code,
2920 gimple reduction_phi,
2921 int reduc_index,
2922 bool double_reduc)
2924 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2925 stmt_vec_info prev_phi_info;
2926 tree vectype;
2927 enum machine_mode mode;
2928 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2929 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo), *outer_loop = NULL;
2930 basic_block exit_bb;
2931 tree scalar_dest;
2932 tree scalar_type;
2933 gimple new_phi = NULL, phi;
2934 gimple_stmt_iterator exit_gsi;
2935 tree vec_dest;
2936 tree new_temp = NULL_TREE;
2937 tree new_name;
2938 gimple epilog_stmt = NULL;
2939 tree new_scalar_dest, new_dest;
2940 gimple exit_phi;
2941 tree bitsize, bitpos;
2942 enum tree_code code = gimple_assign_rhs_code (stmt);
2943 tree adjustment_def;
2944 tree vec_initial_def, def;
2945 tree orig_name;
2946 imm_use_iterator imm_iter;
2947 use_operand_p use_p;
2948 bool extract_scalar_result = false;
2949 tree reduction_op, expr;
2950 gimple orig_stmt;
2951 gimple use_stmt;
2952 bool nested_in_vect_loop = false;
2953 VEC(gimple,heap) *phis = NULL;
2954 enum vect_def_type dt = vect_unknown_def_type;
2955 int j, i;
2957 if (nested_in_vect_loop_p (loop, stmt))
2959 outer_loop = loop;
2960 loop = loop->inner;
2961 nested_in_vect_loop = true;
2964 switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
2966 case GIMPLE_SINGLE_RHS:
2967 gcc_assert (TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt))
2968 == ternary_op);
2969 reduction_op = TREE_OPERAND (gimple_assign_rhs1 (stmt), reduc_index);
2970 break;
2971 case GIMPLE_UNARY_RHS:
2972 reduction_op = gimple_assign_rhs1 (stmt);
2973 break;
2974 case GIMPLE_BINARY_RHS:
2975 reduction_op = reduc_index ?
2976 gimple_assign_rhs2 (stmt) : gimple_assign_rhs1 (stmt);
2977 break;
2978 default:
2979 gcc_unreachable ();
2982 vectype = get_vectype_for_scalar_type (TREE_TYPE (reduction_op));
2983 gcc_assert (vectype);
2984 mode = TYPE_MODE (vectype);
2986 /*** 1. Create the reduction def-use cycle ***/
2988 /* For the case of reduction, vect_get_vec_def_for_operand returns
2989 the scalar def before the loop, that defines the initial value
2990 of the reduction variable. */
2991 vec_initial_def = vect_get_vec_def_for_operand (reduction_op, stmt,
2992 &adjustment_def);
2994 phi = reduction_phi;
2995 def = vect_def;
2996 for (j = 0; j < ncopies; j++)
2998 /* 1.1 set the loop-entry arg of the reduction-phi: */
2999 add_phi_arg (phi, vec_initial_def, loop_preheader_edge (loop),
3000 UNKNOWN_LOCATION);
3002 /* 1.2 set the loop-latch arg for the reduction-phi: */
3003 if (j > 0)
3004 def = vect_get_vec_def_for_stmt_copy (dt, def);
3005 add_phi_arg (phi, def, loop_latch_edge (loop), UNKNOWN_LOCATION);
3007 if (vect_print_dump_info (REPORT_DETAILS))
3009 fprintf (vect_dump, "transform reduction: created def-use cycle: ");
3010 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
3011 fprintf (vect_dump, "\n");
3012 print_gimple_stmt (vect_dump, SSA_NAME_DEF_STMT (def), 0, TDF_SLIM);
3015 phi = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi));
3018 /*** 2. Create epilog code
3019 The reduction epilog code operates across the elements of the vector
3020 of partial results computed by the vectorized loop.
3021 The reduction epilog code consists of:
3022 step 1: compute the scalar result in a vector (v_out2)
3023 step 2: extract the scalar result (s_out3) from the vector (v_out2)
3024 step 3: adjust the scalar result (s_out3) if needed.
3026 Step 1 can be accomplished using one of the following three schemes:
3027 (scheme 1) using reduc_code, if available.
3028 (scheme 2) using whole-vector shifts, if available.
3029 (scheme 3) using a scalar loop. In this case steps 1+2 above are
3030 combined.
3032 The overall epilog code looks like this:
3034 s_out0 = phi <s_loop> # original EXIT_PHI
3035 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
3036 v_out2 = reduce <v_out1> # step 1
3037 s_out3 = extract_field <v_out2, 0> # step 2
3038 s_out4 = adjust_result <s_out3> # step 3
3040 (step 3 is optional, and steps 1 and 2 may be combined).
3041 Lastly, the uses of s_out0 are replaced by s_out4.
3043 ***/
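/* E.g. (illustrative) for an add-reduction of a 4 x 32-bit vector
   v_out1 = [a,b,c,d] under scheme 2:
   t = vec_shift <v_out1, 64 bits> = [c,d,_,_]
   v = v_out1 + t = [a+c, b+d, _, _]
   t = vec_shift <v, 32 bits> = [b+d, _, _, _]
   v_out2 = v + t, whose element 0 holds a+b+c+d,
   and step 2 extracts that element. */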
3045 /* 2.1 Create new loop-exit-phi to preserve loop-closed form:
3046 v_out1 = phi <v_loop> */
3048 exit_bb = single_exit (loop)->dest;
3049 def = vect_def;
3050 prev_phi_info = NULL;
3051 for (j = 0; j < ncopies; j++)
3053 phi = create_phi_node (SSA_NAME_VAR (vect_def), exit_bb);
3054 set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, loop_vinfo, NULL));
3055 if (j == 0)
3056 new_phi = phi;
3057 else
3059 def = vect_get_vec_def_for_stmt_copy (dt, def);
3060 STMT_VINFO_RELATED_STMT (prev_phi_info) = phi;
3062 SET_PHI_ARG_DEF (phi, single_exit (loop)->dest_idx, def);
3063 prev_phi_info = vinfo_for_stmt (phi);
3066 exit_gsi = gsi_after_labels (exit_bb);
3068 /* 2.2 Get the relevant tree-code to use in the epilog for schemes 2,3
3069 (i.e. when reduc_code is not available) and in the final adjustment
3070 code (if needed). Also get the original scalar reduction variable as
3071 defined in the loop. In case STMT is a "pattern-stmt" (i.e. - it
3072 represents a reduction pattern), the tree-code and scalar-def are
3073 taken from the original stmt that the pattern-stmt (STMT) replaces.
3074 Otherwise (it is a regular reduction) - the tree-code and scalar-def
3075 are taken from STMT. */
3077 orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
3078 if (!orig_stmt)
3080 /* Regular reduction */
3081 orig_stmt = stmt;
3083 else
3085 /* Reduction pattern */
3086 stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt);
3087 gcc_assert (STMT_VINFO_IN_PATTERN_P (stmt_vinfo));
3088 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo) == stmt);
3091 code = gimple_assign_rhs_code (orig_stmt);
3092 scalar_dest = gimple_assign_lhs (orig_stmt);
3093 scalar_type = TREE_TYPE (scalar_dest);
3094 new_scalar_dest = vect_create_destination_var (scalar_dest, NULL);
3095 bitsize = TYPE_SIZE (scalar_type);
3097 /* For MINUS_EXPR the initial vector is [init_val,0,...,0]; therefore
3098 the partial results are added, not subtracted. */
3099 if (code == MINUS_EXPR)
3100 code = PLUS_EXPR;
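/* E.g. (illustrative) for s = s - a[i] with four lanes: lane 0 holds
   init_val - a[0] - a[4] - ..., lane 1 holds -a[1] - a[5] - ..., and
   the scalar result is the PLUS of all lanes. */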
3102 /* In case this is a reduction in an inner-loop while vectorizing an outer
3103 loop - we don't need to extract a single scalar result at the end of the
3104 inner-loop (unless it is a double reduction, i.e., the use of the reduction is
3105 outside the outer-loop). The final vector of partial results will be used
3106 in the vectorized outer-loop, or reduced to a scalar result at the end of
3107 the outer-loop. */
3108 if (nested_in_vect_loop && !double_reduc)
3109 goto vect_finalize_reduction;
3111 /* The epilogue is created for the outer-loop, i.e., for the loop being
3112 vectorized. */
3113 if (double_reduc)
3114 loop = outer_loop;
3116 /* FORNOW */
3117 gcc_assert (ncopies == 1);
3119 /* 2.3 Create the reduction code, using one of the three schemes described
3120 above. */
3122 if (reduc_code != ERROR_MARK)
3124 tree tmp;
3126 /*** Case 1: Create:
3127 v_out2 = reduc_expr <v_out1> */
3129 if (vect_print_dump_info (REPORT_DETAILS))
3130 fprintf (vect_dump, "Reduce using direct vector reduction.");
3132 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3133 tmp = build1 (reduc_code, vectype, PHI_RESULT (new_phi));
3134 epilog_stmt = gimple_build_assign (vec_dest, tmp);
3135 new_temp = make_ssa_name (vec_dest, epilog_stmt);
3136 gimple_assign_set_lhs (epilog_stmt, new_temp);
3137 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3139 extract_scalar_result = true;
3141 else
3143 enum tree_code shift_code = ERROR_MARK;
3144 bool have_whole_vector_shift = true;
3145 int bit_offset;
3146 int element_bitsize = tree_low_cst (bitsize, 1);
3147 int vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1);
3148 tree vec_temp;
3150 if (optab_handler (vec_shr_optab, mode)->insn_code != CODE_FOR_nothing)
3151 shift_code = VEC_RSHIFT_EXPR;
3152 else
3153 have_whole_vector_shift = false;
3155 /* Regardless of whether we have a whole vector shift, if we're
3156 emulating the operation via tree-vect-generic, we don't want
3157 to use it. Only the first round of the reduction is likely
3158 to still be profitable via emulation. */
3159 /* ??? It might be better to emit a reduction tree code here, so that
3160 tree-vect-generic can expand the first round via bit tricks. */
3161 if (!VECTOR_MODE_P (mode))
3162 have_whole_vector_shift = false;
3163 else
3165 optab optab = optab_for_tree_code (code, vectype, optab_default);
3166 if (optab_handler (optab, mode)->insn_code == CODE_FOR_nothing)
3167 have_whole_vector_shift = false;
3170 if (have_whole_vector_shift)
3172 /*** Case 2: Create:
3173 for (offset = VS/2; offset >= element_size; offset/=2)
3175 Create: va' = vec_shift <va, offset>
3176 Create: va = vop <va, va'>
3177 } */
3179 if (vect_print_dump_info (REPORT_DETAILS))
3180 fprintf (vect_dump, "Reduce using vector shifts");
3182 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3183 new_temp = PHI_RESULT (new_phi);
3185 for (bit_offset = vec_size_in_bits/2;
3186 bit_offset >= element_bitsize;
3187 bit_offset /= 2)
3189 tree bitpos = size_int (bit_offset);
3191 epilog_stmt = gimple_build_assign_with_ops (shift_code, vec_dest,
3192 new_temp, bitpos);
3193 new_name = make_ssa_name (vec_dest, epilog_stmt);
3194 gimple_assign_set_lhs (epilog_stmt, new_name);
3195 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3197 epilog_stmt = gimple_build_assign_with_ops (code, vec_dest,
3198 new_name, new_temp);
3199 new_temp = make_ssa_name (vec_dest, epilog_stmt);
3200 gimple_assign_set_lhs (epilog_stmt, new_temp);
3201 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3204 extract_scalar_result = true;
3206 else
3208 tree rhs;
3210 /*** Case 3: Create:
3211 s = extract_field <v_out2, 0>
3212 for (offset = element_size;
3213 offset < vector_size;
3214 offset += element_size;)
3216 Create: s' = extract_field <v_out2, offset>
3217 Create: s = op <s, s'>
3218 } */
3220 if (vect_print_dump_info (REPORT_DETAILS))
3221 fprintf (vect_dump, "Reduce using scalar code.");
3223 vec_temp = PHI_RESULT (new_phi);
3224 vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1);
3225 rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp, bitsize,
3226 bitsize_zero_node);
3227 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
3228 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
3229 gimple_assign_set_lhs (epilog_stmt, new_temp);
3230 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3232 for (bit_offset = element_bitsize;
3233 bit_offset < vec_size_in_bits;
3234 bit_offset += element_bitsize)
3236 tree bitpos = bitsize_int (bit_offset);
3237 tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp, bitsize,
3238 bitpos);
3240 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
3241 new_name = make_ssa_name (new_scalar_dest, epilog_stmt);
3242 gimple_assign_set_lhs (epilog_stmt, new_name);
3243 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3245 epilog_stmt = gimple_build_assign_with_ops (code,
3246 new_scalar_dest,
3247 new_name, new_temp);
3248 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
3249 gimple_assign_set_lhs (epilog_stmt, new_temp);
3250 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3253 extract_scalar_result = false;
3257 /* 2.4 Extract the final scalar result. Create:
3258 s_out3 = extract_field <v_out2, bitpos> */
3260 if (extract_scalar_result)
3262 tree rhs;
3264 gcc_assert (!nested_in_vect_loop || double_reduc);
3265 if (vect_print_dump_info (REPORT_DETAILS))
3266 fprintf (vect_dump, "extract scalar result");
3268 if (BYTES_BIG_ENDIAN)
3269 bitpos = size_binop (MULT_EXPR,
3270 bitsize_int (TYPE_VECTOR_SUBPARTS (vectype) - 1),
3271 TYPE_SIZE (scalar_type));
3272 else
3273 bitpos = bitsize_zero_node;
3275 rhs = build3 (BIT_FIELD_REF, scalar_type, new_temp, bitsize, bitpos);
3276 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
3277 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
3278 gimple_assign_set_lhs (epilog_stmt, new_temp);
3279 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3282 vect_finalize_reduction:
3284 if (double_reduc)
3285 loop = loop->inner;
3287 /* 2.5 Adjust the final result by the initial value of the reduction
3288 variable. (When such adjustment is not needed, then
3289 'adjustment_def' is zero). For example, if code is PLUS we create:
3290 new_temp = loop_exit_def + adjustment_def */
3292 if (adjustment_def)
3294 if (nested_in_vect_loop)
3296 gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) == VECTOR_TYPE);
3297 expr = build2 (code, vectype, PHI_RESULT (new_phi), adjustment_def);
3298 new_dest = vect_create_destination_var (scalar_dest, vectype);
3300 else
3302 gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) != VECTOR_TYPE);
3303 expr = build2 (code, scalar_type, new_temp, adjustment_def);
3304 new_dest = vect_create_destination_var (scalar_dest, scalar_type);
3307 epilog_stmt = gimple_build_assign (new_dest, expr);
3308 new_temp = make_ssa_name (new_dest, epilog_stmt);
3309 gimple_assign_set_lhs (epilog_stmt, new_temp);
3310 SSA_NAME_DEF_STMT (new_temp) = epilog_stmt;
3311 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3315 /* 2.6 Handle the loop-exit phi */
3317 /* Replace uses of s_out0 with uses of s_out3:
3318 Find the loop-closed-use at the loop exit of the original scalar result.
3319 (The reduction result is expected to have two immediate uses - one at the
3320 latch block, and one at the loop exit). */
3321 phis = VEC_alloc (gimple, heap, 10);
3322 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
3324 if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p))))
3326 exit_phi = USE_STMT (use_p);
3327 VEC_quick_push (gimple, phis, exit_phi);
3331 /* We expect to have found an exit_phi because of loop-closed-ssa form. */
3332 gcc_assert (!VEC_empty (gimple, phis));
3334 for (i = 0; VEC_iterate (gimple, phis, i, exit_phi); i++)
3336 if (nested_in_vect_loop)
3338 stmt_vec_info stmt_vinfo = vinfo_for_stmt (exit_phi);
3339 gimple vect_phi;
3341 /* FORNOW. Currently not supporting the case that an inner-loop
3342 reduction is not used in the outer-loop (but only outside the
3343 outer-loop), unless it is double reduction. */
3344 gcc_assert ((STMT_VINFO_RELEVANT_P (stmt_vinfo)
3345 && !STMT_VINFO_LIVE_P (stmt_vinfo)) || double_reduc);
3347 epilog_stmt = adjustment_def ? epilog_stmt : new_phi;
3348 STMT_VINFO_VEC_STMT (stmt_vinfo) = epilog_stmt;
3349 set_vinfo_for_stmt (epilog_stmt,
3350 new_stmt_vec_info (epilog_stmt, loop_vinfo,
3351 NULL));
3352 if (adjustment_def)
3353 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (epilog_stmt)) =
3354 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (new_phi));
3356 if (!double_reduc
3357 || STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_double_reduction_def)
3358 continue;
3360 /* Handle double reduction:
3362 stmt1: s1 = phi <s0, s2> - double reduction phi (outer loop)
3363 stmt2: s3 = phi <s1, s4> - (regular) reduction phi (inner loop)
3364 stmt3: s4 = use (s3) - (regular) reduction stmt (inner loop)
3365 stmt4: s2 = phi <s4> - double reduction stmt (outer loop)
3367 At that point the regular reduction (stmt2 and stmt3) is already
3368 vectorized, as well as the exit phi node, stmt4.
3369 Here we vectorize the phi node of double reduction, stmt1, and
3370 update all relevant statements. */
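/* Illustrative source form of such a double reduction:
     s = init;
     for (i = 0; i < n; i++)      <-- outer loop (stmt1, stmt4)
       for (j = 0; j < m; j++)    <-- inner loop (stmt2)
         s = s + a[i][j];         <-- stmt3
   */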
3372 /* Go through all the uses of s2 to find double reduction phi node,
3373 i.e., stmt1 above. */
3374 orig_name = PHI_RESULT (exit_phi);
3375 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
3377 stmt_vec_info use_stmt_vinfo = vinfo_for_stmt (use_stmt);
3378 stmt_vec_info new_phi_vinfo;
3379 tree vect_phi_init, preheader_arg, vect_phi_res, init_def;
3380 basic_block bb = gimple_bb (use_stmt);
3381 gimple use;
3383 /* Check that USE_STMT is really a double reduction phi node. */
3384 if (gimple_code (use_stmt) != GIMPLE_PHI
3385 || gimple_phi_num_args (use_stmt) != 2
3386 || !use_stmt_vinfo
3387 || STMT_VINFO_DEF_TYPE (use_stmt_vinfo)
3388 != vect_double_reduction_def
3389 || bb->loop_father != outer_loop)
3390 continue;
3392 /* Create vector phi node for double reduction:
3393 vs1 = phi <vs0, vs2>
3394 vs1 was created previously in this function by a call to
3395 vect_get_vec_def_for_operand and is stored in vec_initial_def;
3396 vs2 is defined by EPILOG_STMT, the vectorized EXIT_PHI;
3397 vs0 is created here. */
3399 /* Create vector phi node. */
3400 vect_phi = create_phi_node (vec_initial_def, bb);
3401 new_phi_vinfo = new_stmt_vec_info (vect_phi,
3402 loop_vec_info_for_loop (outer_loop), NULL);
3403 set_vinfo_for_stmt (vect_phi, new_phi_vinfo);
3405 /* Create vs0 - initial def of the double reduction phi. */
3406 preheader_arg = PHI_ARG_DEF_FROM_EDGE (use_stmt,
3407 loop_preheader_edge (outer_loop));
3408 init_def = get_initial_def_for_reduction (stmt, preheader_arg,
3409 NULL);
3410 vect_phi_init = vect_init_vector (use_stmt, init_def, vectype,
3411 NULL);
3413 /* Update phi node arguments with vs0 and vs2. */
3414 add_phi_arg (vect_phi, vect_phi_init,
3415 loop_preheader_edge (outer_loop), UNKNOWN_LOCATION);
3416 add_phi_arg (vect_phi, PHI_RESULT (epilog_stmt),
3417 loop_latch_edge (outer_loop), UNKNOWN_LOCATION);
3418 if (vect_print_dump_info (REPORT_DETAILS))
3420 fprintf (vect_dump, "created double reduction phi node: ");
3421 print_gimple_stmt (vect_dump, vect_phi, 0, TDF_SLIM);
3424 vect_phi_res = PHI_RESULT (vect_phi);
3426 /* Replace the use, i.e., set the correct vs1 in the regular
3427 reduction phi node. FORNOW, NCOPIES is always 1, so the loop
3428 is redundant. */
3429 use = reduction_phi;
3430 for (j = 0; j < ncopies; j++)
3432 edge pr_edge = loop_preheader_edge (loop);
3433 SET_PHI_ARG_DEF (use, pr_edge->dest_idx, vect_phi_res);
3434 use = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (use));
3439 /* Replace the uses: */
3440 orig_name = PHI_RESULT (exit_phi);
3441 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
3442 FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
3443 SET_USE (use_p, new_temp);
3446 VEC_free (gimple, heap, phis);
3450 /* Function vectorizable_reduction.
3452 Check if STMT performs a reduction operation that can be vectorized.
3453 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3454 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
3455 Return FALSE if not a vectorizable STMT, TRUE otherwise.
3457 This function also handles reduction idioms (patterns) that have been
3458 recognized in advance during vect_pattern_recog. In this case, STMT may be
3459 of this form:
3460 X = pattern_expr (arg0, arg1, ..., X)
3461 and its STMT_VINFO_RELATED_STMT points to the last stmt in the original
3462 sequence that had been detected and replaced by the pattern-stmt (STMT).
3464 In some cases of reduction patterns, the type of the reduction variable X is
3465 different from the type of the other arguments of STMT.
3466 In such cases, the vectype that is used when transforming STMT into a vector
3467 stmt is different from the vectype that is used to determine the
3468 vectorization factor, because it consists of a different number of elements
3469 than the actual number of elements that are being operated upon in parallel.
3471 For example, consider an accumulation of shorts into an int accumulator.
3472 On some targets it's possible to vectorize this pattern operating on 8
3473 shorts at a time (hence, the vectype for purposes of determining the
3474 vectorization factor should be V8HI); on the other hand, the vectype that
3475 is used to create the vector form is actually V4SI (the type of the result).
3477 Upon entry to this function, STMT_VINFO_VECTYPE records the vectype that
3478 indicates the actual level of parallelism (V8HI in the example), so
3479 that the right vectorization factor would be derived. This vectype
3480 corresponds to the type of arguments to the reduction stmt, and should *NOT*
3481 be used to create the vectorized stmt. The right vectype for the vectorized
3482 stmt is obtained from the type of the result X:
3483 get_vectype_for_scalar_type (TREE_TYPE (X))
3485 This means that, contrary to "regular" reductions (or "regular" stmts in
3486 general), the following equation:
3487 STMT_VINFO_VECTYPE == get_vectype_for_scalar_type (TREE_TYPE (X))
3488 does *NOT* necessarily hold for reduction patterns. */
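/* E.g. the short-into-int accumulation mentioned above looks like
   (illustrative):
     short a[N]; int sum = 0;
     for (i = 0; i < N; i++)
       sum += (int) a[i];
   Here the vectype recorded for the VF computation is V8HI, while the
   vectorized stmt itself is created with V4SI
   (= get_vectype_for_scalar_type (TREE_TYPE (sum))). */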
3490 bool
3491 vectorizable_reduction (gimple stmt, gimple_stmt_iterator *gsi,
3492 gimple *vec_stmt)
3494 tree vec_dest;
3495 tree scalar_dest;
3496 tree loop_vec_def0 = NULL_TREE, loop_vec_def1 = NULL_TREE;
3497 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3498 tree vectype_out = STMT_VINFO_VECTYPE (stmt_info);
3499 tree vectype_in = NULL_TREE;
3500 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3501 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
3502 enum tree_code code, orig_code, epilog_reduc_code;
3503 enum machine_mode vec_mode;
3504 int op_type;
3505 optab optab, reduc_optab;
3506 tree new_temp = NULL_TREE;
3507 tree def;
3508 gimple def_stmt;
3509 enum vect_def_type dt;
3510 gimple new_phi = NULL;
3511 tree scalar_type;
3512 bool is_simple_use;
3513 gimple orig_stmt;
3514 stmt_vec_info orig_stmt_info;
3515 tree expr = NULL_TREE;
3516 int i;
3517 int ncopies;
3518 int epilog_copies;
3519 stmt_vec_info prev_stmt_info, prev_phi_info;
3520 gimple first_phi = NULL;
3521 bool single_defuse_cycle = false;
3522 tree reduc_def = NULL_TREE;
3523 gimple new_stmt = NULL;
3524 int j;
3525 tree ops[3];
3526 bool nested_cycle = false, found_nested_cycle_def = false;
3527 gimple reduc_def_stmt = NULL;
3528 /* The default is that the reduction variable is the last operand in the statement. */
3529 int reduc_index = 2;
3530 bool double_reduc = false, dummy;
3531 basic_block def_bb;
3532 struct loop * def_stmt_loop, *outer_loop = NULL;
3533 tree def_arg;
3534 gimple def_arg_stmt;
3536 if (nested_in_vect_loop_p (loop, stmt))
3538 outer_loop = loop;
3539 loop = loop->inner;
3540 nested_cycle = true;
3543 /* FORNOW: SLP not supported. */
3544 if (STMT_SLP_TYPE (stmt_info))
3545 return false;
3547 /* 1. Is this a vectorizable reduction? */
3548 /* Not supportable if the reduction variable is used in the loop. */
3549 if (STMT_VINFO_RELEVANT (stmt_info) > vect_used_in_outer)
3550 return false;
3552 /* Reductions that are not used even in an enclosing outer-loop
3553 are expected to be "live" (used out of the loop). */
3554 if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_scope
3555 && !STMT_VINFO_LIVE_P (stmt_info))
3556 return false;
3558 /* Make sure it was already recognized as a reduction computation. */
3559 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_reduction_def
3560 && STMT_VINFO_DEF_TYPE (stmt_info) != vect_nested_cycle)
3561 return false;
3563 /* 2. Has this been recognized as a reduction pattern?
3565 Check if STMT represents a pattern that has been recognized
3566 in earlier analysis stages. For stmts that represent a pattern,
3567 the STMT_VINFO_RELATED_STMT field records the last stmt in
3568 the original sequence that constitutes the pattern. */
3570 orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
3571 if (orig_stmt)
3573 orig_stmt_info = vinfo_for_stmt (orig_stmt);
3574 gcc_assert (STMT_VINFO_RELATED_STMT (orig_stmt_info) == stmt);
3575 gcc_assert (STMT_VINFO_IN_PATTERN_P (orig_stmt_info));
3576 gcc_assert (!STMT_VINFO_IN_PATTERN_P (stmt_info));
3579 /* 3. Check the operands of the operation. All operands but the last are
3580 defined inside the loop body. The last operand is the reduction variable,
3581 which is defined by the loop-header-phi. */
3583 gcc_assert (is_gimple_assign (stmt));
3585 /* Flatten RHS */
3586 switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
3588 case GIMPLE_SINGLE_RHS:
3589 op_type = TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt));
3590 if (op_type == ternary_op)
3592 tree rhs = gimple_assign_rhs1 (stmt);
3593 ops[0] = TREE_OPERAND (rhs, 0);
3594 ops[1] = TREE_OPERAND (rhs, 1);
3595 ops[2] = TREE_OPERAND (rhs, 2);
3596 code = TREE_CODE (rhs);
3598 else
3599 return false;
3600 break;
3602 case GIMPLE_BINARY_RHS:
3603 code = gimple_assign_rhs_code (stmt);
3604 op_type = TREE_CODE_LENGTH (code);
3605 gcc_assert (op_type == binary_op);
3606 ops[0] = gimple_assign_rhs1 (stmt);
3607 ops[1] = gimple_assign_rhs2 (stmt);
3608 break;
3610 case GIMPLE_UNARY_RHS:
3611 return false;
3613 default:
3614 gcc_unreachable ();
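/* Worked example (illustrative only; added for clarity): for a plain
   binary reduction
     acc_1 = x_2 + acc_3
   the GIMPLE_BINARY_RHS arm above yields
     code   = PLUS_EXPR,  op_type = binary_op,
     ops[0] = x_2    (defined inside the loop),
     ops[1] = acc_3  (the reduction variable, defined by the
                      loop-header phi),
   and reduc_index keeps its default value, marking the reduction
   operand as the last one (see the operand scan below). */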
3617 scalar_dest = gimple_assign_lhs (stmt);
3618 scalar_type = TREE_TYPE (scalar_dest);
3619 if (!POINTER_TYPE_P (scalar_type) && !INTEGRAL_TYPE_P (scalar_type)
3620 && !SCALAR_FLOAT_TYPE_P (scalar_type))
3621 return false;
3623 /* All uses but the last are expected to be defined in the loop.
3624 The last use is the reduction variable. In the case of a nested cycle this
3625 assumption does not hold: we use reduc_index to record the index of the
3626 reduction variable. */
3627 for (i = 0; i < op_type-1; i++)
3629 tree tem;
3631 /* The condition of COND_EXPR is checked in vectorizable_condition(). */
3632 if (i == 0 && code == COND_EXPR)
3633 continue;
3635 is_simple_use = vect_is_simple_use_1 (ops[i], loop_vinfo, NULL,
3636 &def_stmt, &def, &dt, &tem);
3637 if (!vectype_in)
3638 vectype_in = tem;
3639 gcc_assert (is_simple_use);
3640 if (dt != vect_internal_def
3641 && dt != vect_external_def
3642 && dt != vect_constant_def
3643 && dt != vect_induction_def
3644 && !(dt == vect_nested_cycle && nested_cycle))
3645 return false;
3647 if (dt == vect_nested_cycle)
3649 found_nested_cycle_def = true;
3650 reduc_def_stmt = def_stmt;
3651 reduc_index = i;
3655 is_simple_use = vect_is_simple_use (ops[i], loop_vinfo, NULL, &def_stmt,
3656 &def, &dt);
3657 gcc_assert (is_simple_use);
3658 gcc_assert (dt == vect_reduction_def
3659 || dt == vect_nested_cycle
3660 || ((dt == vect_internal_def || dt == vect_external_def
3661 || dt == vect_constant_def || dt == vect_induction_def)
3662 && nested_cycle && found_nested_cycle_def));
3663 if (!found_nested_cycle_def)
3664 reduc_def_stmt = def_stmt;
3666 gcc_assert (gimple_code (reduc_def_stmt) == GIMPLE_PHI);
3667 if (orig_stmt)
3668 gcc_assert (orig_stmt == vect_is_simple_reduction (loop_vinfo,
3669 reduc_def_stmt,
3670 !nested_cycle,
3671 &dummy));
3672 else
3673 gcc_assert (stmt == vect_is_simple_reduction (loop_vinfo, reduc_def_stmt,
3674 !nested_cycle, &dummy));
3676 if (STMT_VINFO_LIVE_P (vinfo_for_stmt (reduc_def_stmt)))
3677 return false;
3680 ncopies = (LOOP_VINFO_VECT_FACTOR (loop_vinfo)
3681 / TYPE_VECTOR_SUBPARTS (vectype_in));
3682 gcc_assert (ncopies >= 1);
3684 vec_mode = TYPE_MODE (vectype_in);
3686 if (code == COND_EXPR)
3688 if (!vectorizable_condition (stmt, gsi, NULL, ops[reduc_index], 0))
3690 if (vect_print_dump_info (REPORT_DETAILS))
3691 fprintf (vect_dump, "unsupported condition in reduction");
3693 return false;
3696 else
3698 /* 4. Supportable by target? */
3700 /* 4.1. Check support for the operation in the loop. */
3701 optab = optab_for_tree_code (code, vectype_in, optab_default);
3702 if (!optab)
3704 if (vect_print_dump_info (REPORT_DETAILS))
3705 fprintf (vect_dump, "no optab.");
3707 return false;
3710 if (optab_handler (optab, vec_mode)->insn_code == CODE_FOR_nothing)
3712 if (vect_print_dump_info (REPORT_DETAILS))
3713 fprintf (vect_dump, "op not supported by target.");
3715 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
3716 || LOOP_VINFO_VECT_FACTOR (loop_vinfo)
3717 < vect_min_worthwhile_factor (code))
3718 return false;
3720 if (vect_print_dump_info (REPORT_DETAILS))
3721 fprintf (vect_dump, "proceeding using word mode.");
3724 /* Worthwhile without SIMD support? */
3725 if (!VECTOR_MODE_P (TYPE_MODE (vectype_in))
3726 && LOOP_VINFO_VECT_FACTOR (loop_vinfo)
3727 < vect_min_worthwhile_factor (code))
3729 if (vect_print_dump_info (REPORT_DETAILS))
3730 fprintf (vect_dump, "not worthwhile without SIMD support.");
3732 return false;
3736 /* 4.2. Check support for the epilog operation.
3738 If STMT represents a reduction pattern, then the type of the
3739 reduction variable may be different from the type of the rest
3740 of the arguments. For example, consider the case of accumulation
3741 of shorts into an int accumulator. The original code:
3742 S1: int_a = (int) short_a;
3743 orig_stmt-> S2: int_acc = plus <int_a, int_acc>;
3745 was replaced with:
3746 STMT: int_acc = widen_sum <short_a, int_acc>
3748 This means that:
3749 1. The tree-code that is used to create the vector operation in the
3750 epilog code (that reduces the partial results) is not the
3751 tree-code of STMT, but is rather the tree-code of the original
3752 stmt from the pattern that STMT is replacing. I.e., in the example
3753 above we want to use 'widen_sum' in the loop, but 'plus' in the
3754 epilog.
3755 2. The type (mode) we use to check available target support
3756 for the vector operation to be created in the *epilog*, is
3757 determined by the type of the reduction variable (in the example
3758 above we'd check this: plus_optab[vect_int_mode]).
3759 However, the type (mode) we use to check available target support
3760 for the vector operation to be created *inside the loop*, is
3761 determined by the type of the other arguments to STMT (in the
3762 example we'd check this: widen_sum_optab[vect_short_mode]).
3764 This is contrary to "regular" reductions, in which the types of all
3765 the arguments are the same as the type of the reduction variable.
3766 For "regular" reductions we can therefore use the same vector type
3767 (and also the same tree-code) when generating the epilog code and
3768 when generating the code inside the loop. */
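/* Illustrative sketch of the two queries implied above (added for
   clarity): for the widen-sum example the loop-body check uses the
   argument type,
     optab_for_tree_code (WIDEN_SUM_EXPR, <V8HI vectype>, ...),
   whereas the epilog check below uses the result type,
     reduction_code_for_scalar_code (PLUS_EXPR, &c) -> REDUC_PLUS_EXPR
     optab_for_tree_code (REDUC_PLUS_EXPR, <V4SI vectype>, ...). */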
3770 if (orig_stmt)
3772 /* This is a reduction pattern: get the vectype from the type of the
3773 reduction variable, and get the tree-code from orig_stmt. */
3774 orig_code = gimple_assign_rhs_code (orig_stmt);
3775 gcc_assert (vectype_out);
3776 vec_mode = TYPE_MODE (vectype_out);
3778 else
3780 /* Regular reduction: the same vectype and tree-code that are used for
3781 the vector code inside the loop can also be used for the epilog code. */
3782 orig_code = code;
3785 if (nested_cycle)
3787 def_bb = gimple_bb (reduc_def_stmt);
3788 def_stmt_loop = def_bb->loop_father;
3789 def_arg = PHI_ARG_DEF_FROM_EDGE (reduc_def_stmt,
3790 loop_preheader_edge (def_stmt_loop));
3791 if (TREE_CODE (def_arg) == SSA_NAME
3792 && (def_arg_stmt = SSA_NAME_DEF_STMT (def_arg))
3793 && gimple_code (def_arg_stmt) == GIMPLE_PHI
3794 && flow_bb_inside_loop_p (outer_loop, gimple_bb (def_arg_stmt))
3795 && vinfo_for_stmt (def_arg_stmt)
3796 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_arg_stmt))
3797 == vect_double_reduction_def)
3798 double_reduc = true;
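/* Illustrative example of the double-reduction shape detected above
   (added for clarity):
     outer header: s_out = phi (init, s_next)
     inner header: s_in  = phi (s_out, s_upd)    <-- reduc_def_stmt
     inner body:   s_upd = s_in + a[i][j];
     outer latch:  s_next = s_upd;
   here the inner phi's preheader argument (s_out) is itself defined
   by an outer-loop phi marked vect_double_reduction_def. */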
3801 epilog_reduc_code = ERROR_MARK;
3802 if (reduction_code_for_scalar_code (orig_code, &epilog_reduc_code))
3804 reduc_optab = optab_for_tree_code (epilog_reduc_code, vectype_out,
3805 optab_default);
3806 if (!reduc_optab)
3808 if (vect_print_dump_info (REPORT_DETAILS))
3809 fprintf (vect_dump, "no optab for reduction.");
3811 epilog_reduc_code = ERROR_MARK;
3814 if (reduc_optab
3815 && optab_handler (reduc_optab, vec_mode)->insn_code
3816 == CODE_FOR_nothing)
3818 if (vect_print_dump_info (REPORT_DETAILS))
3819 fprintf (vect_dump, "reduc op not supported by target.");
3821 epilog_reduc_code = ERROR_MARK;
3824 else
3826 if (!nested_cycle || double_reduc)
3828 if (vect_print_dump_info (REPORT_DETAILS))
3829 fprintf (vect_dump, "no reduc code for scalar code.");
3831 return false;
3835 if (double_reduc && ncopies > 1)
3837 if (vect_print_dump_info (REPORT_DETAILS))
3838 fprintf (vect_dump, "multiple types in double reduction");
3840 return false;
3843 if (!vec_stmt) /* transformation not required. */
3845 STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
3846 if (!vect_model_reduction_cost (stmt_info, epilog_reduc_code, ncopies))
3847 return false;
3848 return true;
3851 /** Transform. **/
3853 if (vect_print_dump_info (REPORT_DETAILS))
3854 fprintf (vect_dump, "transform reduction.");
3856 /* FORNOW: Multiple types are not supported for condition. */
3857 if (code == COND_EXPR)
3858 gcc_assert (ncopies == 1);
3860 /* Create the destination vector */
3861 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
3863 /* In case the vectorization factor (VF) is bigger than the number
3864 of elements that we can fit in a vectype (nunits), we have to generate
3865 more than one vector stmt, i.e., we need to "unroll" the
3866 vector stmt by a factor VF/nunits. For more details see documentation
3867 in vectorizable_operation. */
3869 /* If the reduction is used in an outer loop we need to generate
3870 VF intermediate results, like so (e.g. for ncopies=2):
3871 r0 = phi (init, r0)
3872 r1 = phi (init, r1)
3873 r0 = x0 + r0;
3874 r1 = x1 + r1;
3875 (i.e. we generate VF results in 2 registers).
3876 In this case we have a separate def-use cycle for each copy, and therefore
3877 for each copy we get the vector def for the reduction variable from the
3878 respective phi node created for this copy.
3880 Otherwise (the reduction is unused in the loop nest), we can combine
3881 together intermediate results, like so (e.g. for ncopies=2):
3882 r = phi (init, r)
3883 r = x0 + r;
3884 r = x1 + r;
3885 (i.e. we generate VF/2 results in a single register).
3886 In this case for each copy we get the vector def for the reduction variable
3887 from the vectorized reduction operation generated in the previous iteration. */
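/* Concrete illustration (added for clarity), assuming VF = 8 and a
   V4SI vectype_in, so ncopies = 2:
     - reduction used only after the loop: single_defuse_cycle is set
       below, one phi feeds both copies, and epilog_copies = 1;
     - reduction also used in an outer loop: a separate phi and
       def-use cycle is built per copy, and epilog_copies = ncopies. */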
3890 if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_scope)
3892 single_defuse_cycle = true;
3893 epilog_copies = 1;
3895 else
3896 epilog_copies = ncopies;
3898 prev_stmt_info = NULL;
3899 prev_phi_info = NULL;
3900 for (j = 0; j < ncopies; j++)
3902 if (j == 0 || !single_defuse_cycle)
3904 /* Create the reduction-phi that defines the reduction-operand. */
3905 new_phi = create_phi_node (vec_dest, loop->header);
3906 set_vinfo_for_stmt (new_phi, new_stmt_vec_info (new_phi, loop_vinfo,
3907 NULL));
3908 /* Get the vector def for the reduction variable from the phi
3909 node. */
3910 reduc_def = PHI_RESULT (new_phi);
3913 if (code == COND_EXPR)
3915 first_phi = new_phi;
3916 vectorizable_condition (stmt, gsi, vec_stmt, reduc_def, reduc_index);
3917 /* Multiple types are not supported for condition. */
3918 break;
3921 /* Handle uses. */
3922 if (j == 0)
3924 loop_vec_def0 = vect_get_vec_def_for_operand (ops[!reduc_index],
3925 stmt, NULL);
3926 if (op_type == ternary_op)
3928 if (reduc_index == 0)
3929 loop_vec_def1 = vect_get_vec_def_for_operand (ops[2], stmt,
3930 NULL);
3931 else
3932 loop_vec_def1 = vect_get_vec_def_for_operand (ops[1], stmt,
3933 NULL);
3936 /* Get the vector def for the reduction variable from the phi
3937 node. */
3938 first_phi = new_phi;
3940 else
3942 enum vect_def_type dt = vect_unknown_def_type; /* Dummy */
3943 loop_vec_def0 = vect_get_vec_def_for_stmt_copy (dt, loop_vec_def0);
3944 if (op_type == ternary_op)
3945 loop_vec_def1 = vect_get_vec_def_for_stmt_copy (dt, loop_vec_def1);
3947 if (single_defuse_cycle)
3948 reduc_def = gimple_assign_lhs (new_stmt);
3949 else
3950 reduc_def = PHI_RESULT (new_phi);
3952 STMT_VINFO_RELATED_STMT (prev_phi_info) = new_phi;
3955 /* Arguments are ready. Create the new vector stmt. */
3956 if (op_type == binary_op)
3958 if (reduc_index == 0)
3959 expr = build2 (code, vectype_out, reduc_def, loop_vec_def0);
3960 else
3961 expr = build2 (code, vectype_out, loop_vec_def0, reduc_def);
3963 else
3965 if (reduc_index == 0)
3966 expr = build3 (code, vectype_out, reduc_def, loop_vec_def0,
3967 loop_vec_def1);
3968 else
3970 if (reduc_index == 1)
3971 expr = build3 (code, vectype_out, loop_vec_def0, reduc_def,
3972 loop_vec_def1);
3973 else
3974 expr = build3 (code, vectype_out, loop_vec_def0, loop_vec_def1,
3975 reduc_def);
3979 new_stmt = gimple_build_assign (vec_dest, expr);
3980 new_temp = make_ssa_name (vec_dest, new_stmt);
3981 gimple_assign_set_lhs (new_stmt, new_temp);
3982 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3984 if (j == 0)
3985 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3986 else
3987 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3989 prev_stmt_info = vinfo_for_stmt (new_stmt);
3990 prev_phi_info = vinfo_for_stmt (new_phi);
3993 /* Finalize the reduction-phi (set its arguments) and create the
3994 epilog reduction code. */
3995 if (!single_defuse_cycle || code == COND_EXPR)
3996 new_temp = gimple_assign_lhs (*vec_stmt);
3998 vect_create_epilog_for_reduction (new_temp, stmt, epilog_copies,
3999 epilog_reduc_code, first_phi, reduc_index,
4000 double_reduc);
4001 return true;
4004 /* Function vect_min_worthwhile_factor.
4006 For a loop where we could vectorize the operation indicated by CODE,
4007 return the minimum vectorization factor that makes it worthwhile
4008 to use generic vectors. */
4009 int
4010 vect_min_worthwhile_factor (enum tree_code code)
4012 switch (code)
4014 case PLUS_EXPR:
4015 case MINUS_EXPR:
4016 case NEGATE_EXPR:
4017 return 4;
4019 case BIT_AND_EXPR:
4020 case BIT_IOR_EXPR:
4021 case BIT_XOR_EXPR:
4022 case BIT_NOT_EXPR:
4023 return 2;
4025 default:
4026 return INT_MAX;
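/* Usage sketch (illustrative): callers compare the loop's VF against
   this threshold before falling back to generic word-mode vectors,
   as in vectorizable_reduction above:
     if (LOOP_VINFO_VECT_FACTOR (loop_vinfo)
         < vect_min_worthwhile_factor (code))
       return false;
   so, e.g., a PLUS_EXPR reduction needs VF >= 4 to proceed without
   real SIMD support, while codes that fall through to the default
   (INT_MAX) never do. */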
4031 /* Function vectorizable_induction
4033 Check if PHI performs an induction computation that can be vectorized.
4034 If VEC_STMT is also passed, vectorize the induction PHI: create a vectorized
4035 phi to replace it, put it in VEC_STMT, and add it to the same basic block.
4036 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
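/* Illustrative example (added for clarity): for the scalar induction
     i_1 = phi (0, i_2);  i_2 = i_1 + 1;
   with VF = 4 the vectorized phi built here looks like
     vi_1 = phi ({0,1,2,3}, vi_2);  vi_2 = vi_1 + {4,4,4,4};
   i.e. an initial vector of the first VF values and a step vector of
   VF times the scalar step (see get_initial_def_for_induction). */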
4038 bool
4039 vectorizable_induction (gimple phi, gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
4040 gimple *vec_stmt)
4042 stmt_vec_info stmt_info = vinfo_for_stmt (phi);
4043 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4044 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4045 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
4046 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
4047 int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
4048 tree vec_def;
4050 gcc_assert (ncopies >= 1);
4051 /* FORNOW. This restriction should be relaxed. */
4052 if (nested_in_vect_loop_p (loop, phi) && ncopies > 1)
4054 if (vect_print_dump_info (REPORT_DETAILS))
4055 fprintf (vect_dump, "multiple types in nested loop.");
4056 return false;
4059 if (!STMT_VINFO_RELEVANT_P (stmt_info))
4060 return false;
4062 /* FORNOW: SLP not supported. */
4063 if (STMT_SLP_TYPE (stmt_info))
4064 return false;
4066 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def);
4068 if (gimple_code (phi) != GIMPLE_PHI)
4069 return false;
4071 if (!vec_stmt) /* transformation not required. */
4073 STMT_VINFO_TYPE (stmt_info) = induc_vec_info_type;
4074 if (vect_print_dump_info (REPORT_DETAILS))
4075 fprintf (vect_dump, "=== vectorizable_induction ===");
4076 vect_model_induction_cost (stmt_info, ncopies);
4077 return true;
4080 /** Transform. **/
4082 if (vect_print_dump_info (REPORT_DETAILS))
4083 fprintf (vect_dump, "transform induction phi.");
4085 vec_def = get_initial_def_for_induction (phi);
4086 *vec_stmt = SSA_NAME_DEF_STMT (vec_def);
4087 return true;
4090 /* Function vectorizable_live_operation.
4092 STMT computes a value that is used outside the loop. Check if
4093 it can be supported. */
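/* Illustrative example (added for clarity): in
     for (i = 0; i < n; i++) { ... x_1 = a_2 + b_3; ... }
     use (x_1);                        <-- x_1 is "live" after the loop
   where a_2 and b_3 are loop-invariant, the scalar stmt can stay in
   place unvectorized and its final value is used after the loop; that
   is the only shape accepted below. */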
4095 bool
4096 vectorizable_live_operation (gimple stmt,
4097 gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
4098 gimple *vec_stmt ATTRIBUTE_UNUSED)
4100 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4101 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4102 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
4103 int i;
4104 int op_type;
4105 tree op;
4106 tree def;
4107 gimple def_stmt;
4108 enum vect_def_type dt;
4109 enum tree_code code;
4110 enum gimple_rhs_class rhs_class;
4112 gcc_assert (STMT_VINFO_LIVE_P (stmt_info));
4114 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def)
4115 return false;
4117 if (!is_gimple_assign (stmt))
4118 return false;
4120 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
4121 return false;
4123 /* FORNOW. CHECKME. */
4124 if (nested_in_vect_loop_p (loop, stmt))
4125 return false;
4127 code = gimple_assign_rhs_code (stmt);
4128 op_type = TREE_CODE_LENGTH (code);
4129 rhs_class = get_gimple_rhs_class (code);
4130 gcc_assert (rhs_class != GIMPLE_UNARY_RHS || op_type == unary_op);
4131 gcc_assert (rhs_class != GIMPLE_BINARY_RHS || op_type == binary_op);
4133 /* FORNOW: support only if all uses are invariant. This means
4134 that the scalar operations can remain in place, unvectorized.
4135 The original last scalar value that they compute will be used. */
4137 for (i = 0; i < op_type; i++)
4139 if (rhs_class == GIMPLE_SINGLE_RHS)
4140 op = TREE_OPERAND (gimple_op (stmt, 1), i);
4141 else
4142 op = gimple_op (stmt, i + 1);
4143 if (op
4144 && !vect_is_simple_use (op, loop_vinfo, NULL, &def_stmt, &def, &dt))
4146 if (vect_print_dump_info (REPORT_DETAILS))
4147 fprintf (vect_dump, "use not simple.");
4148 return false;
4151 if (dt != vect_external_def && dt != vect_constant_def)
4152 return false;
4155 /* No transformation is required for the cases we currently support. */
4156 return true;
4159 /* Kill any debug uses outside LOOP of SSA names defined in STMT. */
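/* Illustrative example (added for clarity): given
     in LOOP:     x_1 = ...;           <-- about to be vectorized away
     after LOOP:  # DEBUG d => x_1     <-- debug bind outside LOOP
   the bind's value is reset below, so the debugger reports d as
   optimized out instead of referencing a scalar SSA name that the
   vectorizer removes. */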
4161 static void
4162 vect_loop_kill_debug_uses (struct loop *loop, gimple stmt)
4164 ssa_op_iter op_iter;
4165 imm_use_iterator imm_iter;
4166 def_operand_p def_p;
4167 gimple ustmt;
4169 FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
4171 FOR_EACH_IMM_USE_STMT (ustmt, imm_iter, DEF_FROM_PTR (def_p))
4173 basic_block bb;
4175 if (!is_gimple_debug (ustmt))
4176 continue;
4178 bb = gimple_bb (ustmt);
4180 if (!flow_bb_inside_loop_p (loop, bb))
4182 if (gimple_debug_bind_p (ustmt))
4184 if (vect_print_dump_info (REPORT_DETAILS))
4185 fprintf (vect_dump, "killing debug use");
4187 gimple_debug_bind_reset_value (ustmt);
4188 update_stmt (ustmt);
4190 else
4191 gcc_unreachable ();
4197 /* Function vect_transform_loop.
4199 The analysis phase has determined that the loop is vectorizable.
4200 Vectorize the loop: create vectorized stmts to replace the scalar
4201 stmts in the loop, and update the loop exit condition. */
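/* Summary of the transformation order below (added for clarity):
   peel a prologue for alignment if needed, version the loop when
   alignment/alias checks must be done at run time, peel an epilog
   when the iteration count is unknown or not divisible by VF, walk
   each basic block vectorizing phis and stmts, and finally make the
   vector loop iterate RATIO = n/VF times
   (slpeel_make_loop_iterate_ntimes). */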
4203 void
4204 vect_transform_loop (loop_vec_info loop_vinfo)
4206 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
4207 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
4208 int nbbs = loop->num_nodes;
4209 gimple_stmt_iterator si;
4210 int i;
4211 tree ratio = NULL;
4212 int vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
4213 bool strided_store;
4214 bool slp_scheduled = false;
4215 unsigned int nunits;
4216 tree cond_expr = NULL_TREE;
4217 gimple_seq cond_expr_stmt_list = NULL;
4218 bool do_peeling_for_loop_bound;
4220 if (vect_print_dump_info (REPORT_DETAILS))
4221 fprintf (vect_dump, "=== vec_transform_loop ===");
4223 /* Peel the loop if there are data refs with unknown alignment.
4224 Only one store with unknown alignment is allowed. */
4226 if (LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo))
4227 vect_do_peeling_for_alignment (loop_vinfo);
4229 do_peeling_for_loop_bound
4230 = (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
4231 || (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
4232 && LOOP_VINFO_INT_NITERS (loop_vinfo) % vectorization_factor != 0));
4234 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
4235 || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
4236 vect_loop_versioning (loop_vinfo,
4237 !do_peeling_for_loop_bound,
4238 &cond_expr, &cond_expr_stmt_list);
4240 /* If the loop has a symbolic number of iterations 'n' (i.e., it is not a
4241 compile-time constant), or 'n' is a constant that is not divisible by the
4242 vectorization factor, then an epilog loop needs to be created.
4243 We therefore duplicate the loop: the original loop will be vectorized,
4244 and will compute the first (n/VF) iterations. The second copy of the loop
4245 will remain scalar and will compute the remaining (n%VF) iterations.
4246 (VF is the vectorization factor). */
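/* Concrete illustration (added for clarity): with n = 103 and VF = 8,
   the vectorized loop executes floor (103/8) = 12 iterations covering
   96 elements, and the scalar epilog loop executes the remaining
   103 % 8 = 7 iterations. */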
4248 if (do_peeling_for_loop_bound)
4249 vect_do_peeling_for_loop_bound (loop_vinfo, &ratio,
4250 cond_expr, cond_expr_stmt_list);
4251 else
4252 ratio = build_int_cst (TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo)),
4253 LOOP_VINFO_INT_NITERS (loop_vinfo) / vectorization_factor);
4255 /* 1) Make sure the loop header has exactly two entries
4256 2) Make sure we have a preheader basic block. */
4258 gcc_assert (EDGE_COUNT (loop->header->preds) == 2);
4260 split_edge (loop_preheader_edge (loop));
4262 /* FORNOW: the vectorizer supports only loops whose body consists
4263 of one basic block (header + empty latch). When the vectorizer
4264 supports more involved loop forms, the order in which the BBs are
4265 traversed will need to be reconsidered. */
4267 for (i = 0; i < nbbs; i++)
4269 basic_block bb = bbs[i];
4270 stmt_vec_info stmt_info;
4271 gimple phi;
4273 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
4275 phi = gsi_stmt (si);
4276 if (vect_print_dump_info (REPORT_DETAILS))
4278 fprintf (vect_dump, "------>vectorizing phi: ");
4279 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
4281 stmt_info = vinfo_for_stmt (phi);
4282 if (!stmt_info)
4283 continue;
4285 if (MAY_HAVE_DEBUG_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
4286 vect_loop_kill_debug_uses (loop, phi);
4288 if (!STMT_VINFO_RELEVANT_P (stmt_info)
4289 && !STMT_VINFO_LIVE_P (stmt_info))
4290 continue;
4292 if ((TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info))
4293 != (unsigned HOST_WIDE_INT) vectorization_factor)
4294 && vect_print_dump_info (REPORT_DETAILS))
4295 fprintf (vect_dump, "multiple-types.");
4297 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def)
4299 if (vect_print_dump_info (REPORT_DETAILS))
4300 fprintf (vect_dump, "transform phi.");
4301 vect_transform_stmt (phi, NULL, NULL, NULL, NULL);
4305 for (si = gsi_start_bb (bb); !gsi_end_p (si);)
4307 gimple stmt = gsi_stmt (si);
4308 bool is_store;
4310 if (vect_print_dump_info (REPORT_DETAILS))
4312 fprintf (vect_dump, "------>vectorizing statement: ");
4313 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4316 stmt_info = vinfo_for_stmt (stmt);
4318 /* vector stmts created in the outer-loop during vectorization of
4319 stmts in an inner-loop may not have a stmt_info, and do not
4320 need to be vectorized. */
4321 if (!stmt_info)
4323 gsi_next (&si);
4324 continue;
4327 if (MAY_HAVE_DEBUG_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
4328 vect_loop_kill_debug_uses (loop, stmt);
4330 if (!STMT_VINFO_RELEVANT_P (stmt_info)
4331 && !STMT_VINFO_LIVE_P (stmt_info))
4333 gsi_next (&si);
4334 continue;
4337 gcc_assert (STMT_VINFO_VECTYPE (stmt_info));
4338 nunits =
4339 (unsigned int) TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
4340 if (!STMT_SLP_TYPE (stmt_info)
4341 && nunits != (unsigned int) vectorization_factor
4342 && vect_print_dump_info (REPORT_DETAILS))
4343 /* For SLP, VF is set according to the unrolling factor, and not to
4344 the vector size; hence for SLP this print is not valid. */
4345 fprintf (vect_dump, "multiple-types.");
4347 /* SLP. Schedule all the SLP instances when the first SLP stmt is
4348 reached. */
4349 if (STMT_SLP_TYPE (stmt_info))
4351 if (!slp_scheduled)
4353 slp_scheduled = true;
4355 if (vect_print_dump_info (REPORT_DETAILS))
4356 fprintf (vect_dump, "=== scheduling SLP instances ===");
4358 vect_schedule_slp (loop_vinfo, NULL);
4361 /* Hybrid SLP stmts must be vectorized in addition to SLP. */
4362 if (!vinfo_for_stmt (stmt) || PURE_SLP_STMT (stmt_info))
4364 gsi_next (&si);
4365 continue;
4369 /* -------- vectorize statement ------------ */
4370 if (vect_print_dump_info (REPORT_DETAILS))
4371 fprintf (vect_dump, "transform statement.");
4373 strided_store = false;
4374 is_store = vect_transform_stmt (stmt, &si, &strided_store, NULL, NULL);
4375 if (is_store)
4377 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
4379 /* Interleaving. If IS_STORE is TRUE, the vectorization of the
4380 interleaving chain was completed; free all the stores in
4381 the chain. */
4382 vect_remove_stores (DR_GROUP_FIRST_DR (stmt_info));
4383 gsi_remove (&si, true);
4384 continue;
4386 else
4388 /* Free the attached stmt_vec_info and remove the stmt. */
4389 free_stmt_vec_info (stmt);
4390 gsi_remove (&si, true);
4391 continue;
4394 gsi_next (&si);
4395 } /* stmts in BB */
4396 } /* BBs in loop */
4398 slpeel_make_loop_iterate_ntimes (loop, ratio);
4400 /* The memory tags and pointers in vectorized statements need to
4401 have their SSA forms updated. FIXME: why can't this be delayed
4402 until all the loops have been transformed? */
4403 update_ssa (TODO_update_ssa);
4405 if (vect_print_dump_info (REPORT_VECTORIZED_LOCATIONS))
4406 fprintf (vect_dump, "LOOP VECTORIZED.");
4407 if (loop->inner && vect_print_dump_info (REPORT_VECTORIZED_LOCATIONS))
4408 fprintf (vect_dump, "OUTER LOOP VECTORIZED.");