/* Loop Vectorization
   Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com> and
   Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "ggc.h"
#include "tree.h"
#include "basic-block.h"
#include "tree-pretty-print.h"
#include "gimple-pretty-print.h"
#include "tree-flow.h"
#include "tree-dump.h"
#include "cfgloop.h"
#include "cfglayout.h"
#include "expr.h"
#include "recog.h"
#include "optabs.h"
#include "params.h"
#include "diagnostic-core.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
#include "tree-vectorizer.h"
#include "target.h"
/* Loop Vectorization Pass.

   This pass tries to vectorize loops.

   For example, the vectorizer transforms the following simple loop:

        short a[N]; short b[N]; short c[N]; int i;

        for (i=0; i<N; i++){
          a[i] = b[i] + c[i];
        }

   as if it was manually vectorized by rewriting the source code into:

        typedef int __attribute__((mode(V8HI))) v8hi;
        short a[N]; short b[N]; short c[N]; int i;
        v8hi *pa = (v8hi*)a, *pb = (v8hi*)b, *pc = (v8hi*)c;
        v8hi va, vb, vc;

        for (i=0; i<N/8; i++){
          vb = pb[i];
          vc = pc[i];
          va = vb + vc;
          pa[i] = va;
        }

   The main entry to this pass is vectorize_loops(), in which
   the vectorizer applies a set of analyses on a given set of loops,
   followed by the actual vectorization transformation for the loops that
   had successfully passed the analysis phase.
   Throughout this pass we make a distinction between two types of
   data: scalars (which are represented by SSA_NAMES), and memory references
   ("data-refs").  These two types of data require different handling both
   during analysis and transformation.  The types of data-refs that the
   vectorizer currently supports are ARRAY_REFS whose base is an array DECL
   (not a pointer), and INDIRECT_REFS through pointers; both array and pointer
   accesses are required to have a simple (consecutive) access pattern.

   Analysis phase:
   ===============
   The driver for the analysis phase is vect_analyze_loop().
   It applies a set of analyses, some of which rely on the scalar evolution
   analyzer (scev) developed by Sebastian Pop.

   During the analysis phase the vectorizer records some information
   per stmt in a "stmt_vec_info" struct which is attached to each stmt in the
   loop, as well as general information about the loop as a whole, which is
   recorded in a "loop_vec_info" struct attached to each loop.

   Transformation phase:
   =====================
   The loop transformation phase scans all the stmts in the loop, and
   creates a vector stmt (or a sequence of stmts) for each scalar stmt S in
   the loop that needs to be vectorized.  It inserts the vector code sequence
   just before the scalar stmt S, and records a pointer to the vector code
   in STMT_VINFO_VEC_STMT (stmt_info) (stmt_info is the stmt_vec_info struct
   attached to S).  This pointer will be used for the vectorization of following
   stmts which use the def of stmt S.  Stmt S is removed if it writes to memory;
   otherwise, we rely on dead code elimination for removing it.

   For example, say stmt S1 was vectorized into stmt VS1:

   VS1: vb = px[i];
   S1:  b = x[i];       STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
   S2:  a = b;

   To vectorize stmt S2, the vectorizer first finds the stmt that defines
   the operand 'b' (S1), and gets the relevant vector def 'vb' from the
   vector stmt VS1 pointed to by STMT_VINFO_VEC_STMT (stmt_info (S1)).  The
   resulting sequence would be:

   VS1: vb = px[i];
   S1:  b = x[i];       STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
   VS2: va = vb;
   S2:  a = b;          STMT_VINFO_VEC_STMT (stmt_info (S2)) = VS2

   Operands that are not SSA_NAMEs are data-refs that appear in
   load/store operations (like 'x[i]' in S1), and are handled differently.

   Target modeling:
   =================
   Currently the only target specific information that is used is the
   size of the vector (in bytes) - "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD".
   Targets that can support different vector sizes will, for now, need
   to specify one value for "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD".  More
   flexibility will be added in the future.

   Since we only vectorize operations whose vector form can be
   expressed using existing tree codes, to verify that an operation is
   supported, the vectorizer checks the relevant optab at the relevant
   machine_mode (e.g, optab_handler (add_optab, V8HImode)).  If
   the value found is CODE_FOR_nothing, then there's no target support, and
   we can't vectorize the stmt.

   For additional information on this project see:
   http://gcc.gnu.org/projects/tree-ssa/vectorization.html
*/
/* Function vect_determine_vectorization_factor

   Determine the vectorization factor (VF).  VF is the number of data elements
   that are operated upon in parallel in a single iteration of the vectorized
   loop.  For example, when vectorizing a loop that operates on 4-byte
   elements, on a target with a vector size (VS) of 16 bytes, the VF is set
   to 4, since 4 elements can fit in a single vector register.

   We currently support vectorization of loops in which all types operated upon
   are of the same size.  Therefore this function currently sets VF according to
   the size of the types operated upon, and fails if there are multiple sizes
   in the loop.

   VF is also the factor by which the loop iterations are strip-mined, e.g.:
   original loop:
        for (i=0; i<N; i++){
          a[i] = b[i] + c[i];
        }

   vectorized loop:
        for (i=0; i<N; i+=VF){
          a[i:VF] = b[i:VF] + c[i:VF];
        }
*/
static bool
vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  unsigned int vectorization_factor = 0;
  tree scalar_type;
  gimple phi;
  tree vectype;
  unsigned int nunits;
  stmt_vec_info stmt_info;
  int i;
  HOST_WIDE_INT dummy;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_determine_vectorization_factor ===");

  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];

      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        {
          phi = gsi_stmt (si);
          stmt_info = vinfo_for_stmt (phi);
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "==> examining phi: ");
              print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
            }

          gcc_assert (stmt_info);

          if (STMT_VINFO_RELEVANT_P (stmt_info))
            {
              gcc_assert (!STMT_VINFO_VECTYPE (stmt_info));
              scalar_type = TREE_TYPE (PHI_RESULT (phi));

              if (vect_print_dump_info (REPORT_DETAILS))
                {
                  fprintf (vect_dump, "get vectype for scalar type: ");
                  print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
                }

              vectype = get_vectype_for_scalar_type (scalar_type);
              if (!vectype)
                {
                  if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                    {
                      fprintf (vect_dump,
                               "not vectorized: unsupported data-type ");
                      print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
                    }
                  return false;
                }
              STMT_VINFO_VECTYPE (stmt_info) = vectype;

              if (vect_print_dump_info (REPORT_DETAILS))
                {
                  fprintf (vect_dump, "vectype: ");
                  print_generic_expr (vect_dump, vectype, TDF_SLIM);
                }

              nunits = TYPE_VECTOR_SUBPARTS (vectype);
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "nunits = %d", nunits);
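
              /* Keep the maximum number of vector elements seen so far: the
                 smallest scalar type in the loop determines the VF.  */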
              if (!vectorization_factor
                  || (nunits > vectorization_factor))
                vectorization_factor = nunits;
            }
        }

      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
        {
          tree vf_vectype;
          gimple stmt = gsi_stmt (si);
          stmt_info = vinfo_for_stmt (stmt);

          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "==> examining statement: ");
              print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
            }

          gcc_assert (stmt_info);

          /* Skip stmts which do not need to be vectorized.  */
          if (!STMT_VINFO_RELEVANT_P (stmt_info)
              && !STMT_VINFO_LIVE_P (stmt_info))
            {
              if (is_pattern_stmt_p (stmt_info))
                {
                  /* We are not going to vectorize this pattern statement,
                     therefore, remove it.  */
                  gimple_stmt_iterator tmp_gsi = gsi_for_stmt (stmt);
                  STMT_VINFO_RELATED_STMT (stmt_info) = NULL;
                  gsi_remove (&tmp_gsi, true);
                  free_stmt_vec_info (stmt);
                }

              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "skip.");
              continue;
            }

          if (gimple_get_lhs (stmt) == NULL_TREE)
            {
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                {
                  fprintf (vect_dump, "not vectorized: irregular stmt.");
                  print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
                }
              return false;
            }

          if (VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))))
            {
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                {
                  fprintf (vect_dump, "not vectorized: vector stmt in loop:");
                  print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
                }
              return false;
            }

          if (STMT_VINFO_VECTYPE (stmt_info))
            {
              /* The only case when a vectype had been already set is for stmts
                 that contain a dataref, or for "pattern-stmts" (stmts generated
                 by the vectorizer to represent/replace a certain idiom).  */
              gcc_assert (STMT_VINFO_DATA_REF (stmt_info)
                          || is_pattern_stmt_p (stmt_info));
              vectype = STMT_VINFO_VECTYPE (stmt_info);
            }
          else
            {
              gcc_assert (!STMT_VINFO_DATA_REF (stmt_info)
                          && !is_pattern_stmt_p (stmt_info));

              scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
              if (vect_print_dump_info (REPORT_DETAILS))
                {
                  fprintf (vect_dump, "get vectype for scalar type: ");
                  print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
                }
              vectype = get_vectype_for_scalar_type (scalar_type);
              if (!vectype)
                {
                  if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                    {
                      fprintf (vect_dump,
                               "not vectorized: unsupported data-type ");
                      print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
                    }
                  return false;
                }

              STMT_VINFO_VECTYPE (stmt_info) = vectype;
            }

          /* The vectorization factor is according to the smallest
             scalar type (or the largest vector size, but we only
             support one vector size per loop).  */
          scalar_type = vect_get_smallest_scalar_type (stmt, &dummy,
                                                       &dummy);
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "get vectype for scalar type: ");
              print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
            }
          vf_vectype = get_vectype_for_scalar_type (scalar_type);
          if (!vf_vectype)
            {
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                {
                  fprintf (vect_dump,
                           "not vectorized: unsupported data-type ");
                  print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
                }
              return false;
            }

          if ((GET_MODE_SIZE (TYPE_MODE (vectype))
               != GET_MODE_SIZE (TYPE_MODE (vf_vectype))))
            {
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                {
                  fprintf (vect_dump,
                           "not vectorized: different sized vector "
                           "types in statement, ");
                  print_generic_expr (vect_dump, vectype, TDF_SLIM);
                  fprintf (vect_dump, " and ");
                  print_generic_expr (vect_dump, vf_vectype, TDF_SLIM);
                }
              return false;
            }

          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "vectype: ");
              print_generic_expr (vect_dump, vf_vectype, TDF_SLIM);
            }

          nunits = TYPE_VECTOR_SUBPARTS (vf_vectype);
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "nunits = %d", nunits);

          if (!vectorization_factor
              || (nunits > vectorization_factor))
            vectorization_factor = nunits;
        }
    }

  /* TODO: Analyze cost.  Decide if worth while to vectorize.  */
  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "vectorization factor = %d", vectorization_factor);
  if (vectorization_factor <= 1)
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: unsupported data-type");
      return false;
    }
  LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;

  return true;
}
/* Function vect_is_simple_iv_evolution.

   FORNOW: A simple evolution of an induction variable in the loop is
   considered a polynomial evolution with constant step.  */
static bool
vect_is_simple_iv_evolution (unsigned loop_nb, tree access_fn, tree * init,
                             tree * step)
{
  tree init_expr;
  tree step_expr;
  tree evolution_part = evolution_part_in_loop_num (access_fn, loop_nb);

  /* When there is no evolution in this loop, the evolution function
     is not "simple".  */
  if (evolution_part == NULL_TREE)
    return false;

  /* When the evolution is a polynomial of degree >= 2
     the evolution function is not "simple".  */
  if (tree_is_chrec (evolution_part))
    return false;
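
  /* Otherwise the evolution is affine: the evolution part is the step, and
     the initial condition in the loop gives the init value.  */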
  step_expr = evolution_part;
  init_expr = unshare_expr (initial_condition_in_loop_num (access_fn, loop_nb));

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "step: ");
      print_generic_expr (vect_dump, step_expr, TDF_SLIM);
      fprintf (vect_dump, ",  init: ");
      print_generic_expr (vect_dump, init_expr, TDF_SLIM);
    }

  *init = init_expr;
  *step = step_expr;

  if (TREE_CODE (step_expr) != INTEGER_CST)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "step unknown.");
      return false;
    }

  return true;
}
/* Function vect_analyze_scalar_cycles_1.

   Examine the cross iteration def-use cycles of scalar variables
   in LOOP.  LOOP_VINFO represents the loop that is now being
   considered for vectorization (can be LOOP, or an outer-loop
   enclosing LOOP).  */

static void
vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
{
  basic_block bb = loop->header;
  tree dummy;
  VEC(gimple,heap) *worklist = VEC_alloc (gimple, heap, 64);
  gimple_stmt_iterator gsi;
  bool double_reduc;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_analyze_scalar_cycles ===");

  /* First - identify all inductions.  Reduction detection assumes that all the
     inductions have been identified, therefore, this order must not be
     changed.  */
  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);
      tree access_fn = NULL;
      tree def = PHI_RESULT (phi);
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);

      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "Analyze phi: ");
          print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
        }

      /* Skip virtual phi's.  The data dependences that are associated with
         virtual defs/uses (i.e., memory accesses) are analyzed elsewhere.  */
      if (!is_gimple_reg (SSA_NAME_VAR (def)))
        continue;

      STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_unknown_def_type;

      /* Analyze the evolution function.  */
      access_fn = analyze_scalar_evolution (loop, def);
      if (access_fn)
        STRIP_NOPS (access_fn);
      if (access_fn && vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "Access function of PHI: ");
          print_generic_expr (vect_dump, access_fn, TDF_SLIM);
        }

      if (!access_fn
          || !vect_is_simple_iv_evolution (loop->num, access_fn,
                                           &dummy, &dummy))
        {
          VEC_safe_push (gimple, heap, worklist, phi);
          continue;
        }

      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "Detected induction.");
      STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_induction_def;
    }

  /* Second - identify all reductions and nested cycles.  */
  while (VEC_length (gimple, worklist) > 0)
    {
      gimple phi = VEC_pop (gimple, worklist);
      tree def = PHI_RESULT (phi);
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);
      gimple reduc_stmt;
      bool nested_cycle;

      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "Analyze phi: ");
          print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
        }

      gcc_assert (is_gimple_reg (SSA_NAME_VAR (def)));
      gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_unknown_def_type);

      nested_cycle = (loop != LOOP_VINFO_LOOP (loop_vinfo));
      reduc_stmt = vect_force_simple_reduction (loop_vinfo, phi, !nested_cycle,
                                                &double_reduc);
      if (reduc_stmt)
        {
          if (double_reduc)
            {
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "Detected double reduction.");

              STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_double_reduction_def;
              STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
                                                    vect_double_reduction_def;
            }
          else
            {
              if (nested_cycle)
                {
                  if (vect_print_dump_info (REPORT_DETAILS))
                    fprintf (vect_dump, "Detected vectorizable nested cycle.");

                  STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_nested_cycle;
                  STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
                                                             vect_nested_cycle;
                }
              else
                {
                  if (vect_print_dump_info (REPORT_DETAILS))
                    fprintf (vect_dump, "Detected reduction.");

                  STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_reduction_def;
                  STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
                                                           vect_reduction_def;
                  /* Store the reduction cycles for possible vectorization in
                     loop-aware SLP.  */
                  VEC_safe_push (gimple, heap,
                                 LOOP_VINFO_REDUCTIONS (loop_vinfo),
                                 reduc_stmt);
                }
            }
        }
      else
        if (vect_print_dump_info (REPORT_DETAILS))
          fprintf (vect_dump, "Unknown def-use cycle pattern.");
    }

  VEC_free (gimple, heap, worklist);
}
/* Function vect_analyze_scalar_cycles.

   Examine the cross iteration def-use cycles of scalar variables, by
   analyzing the loop-header PHIs of scalar variables.  Classify each
   cycle as one of the following: invariant, induction, reduction, unknown.
   We do that for the loop represented by LOOP_VINFO, and also for its
   inner-loop, if it exists.

   Examples for scalar cycles:

   Example1: reduction:

              loop1:
              for (i=0; i<N; i++)
                 sum += a[i];

   Example2: induction:

              loop2:
              for (i=0; i<N; i++)
                 a[i] = i;  */
static void
vect_analyze_scalar_cycles (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);

  vect_analyze_scalar_cycles_1 (loop_vinfo, loop);

  /* When vectorizing an outer-loop, the inner-loop is executed sequentially.
     Reductions in such inner-loop therefore have different properties than
     the reductions in the nest that gets vectorized:
     1. When vectorized, they are executed in the same order as in the original
        scalar loop, so we can't change the order of computation when
        vectorizing them.
     2. FIXME: Inner-loop reductions can be used in the inner-loop, so the
        current checks are too strict.  */

  if (loop->inner)
    vect_analyze_scalar_cycles_1 (loop_vinfo, loop->inner);
}
/* Function vect_get_loop_niters.

   Determine how many iterations the loop is executed.
   If an expression that represents the number of iterations
   can be constructed, place it in NUMBER_OF_ITERATIONS.
   Return the loop exit condition.  */

static gimple
vect_get_loop_niters (struct loop *loop, tree *number_of_iterations)
{
  tree niters;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== get_loop_niters ===");

  niters = number_of_exit_cond_executions (loop);

  if (niters != NULL_TREE
      && niters != chrec_dont_know)
    {
      *number_of_iterations = niters;

      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "==> get_loop_niters:");
          print_generic_expr (vect_dump, *number_of_iterations, TDF_SLIM);
        }
    }

  return get_loop_exit_condition (loop);
}
/* Function bb_in_loop_p

   Used as predicate for dfs order traversal of the loop bbs.  */

static bool
bb_in_loop_p (const_basic_block bb, const void *data)
{
  const struct loop *const loop = (const struct loop *)data;
  return flow_bb_inside_loop_p (loop, bb);
}
/* Function new_loop_vec_info.

   Create and initialize a new loop_vec_info struct for LOOP, as well as
   stmt_vec_info structs for all the stmts in LOOP.  */

static loop_vec_info
new_loop_vec_info (struct loop *loop)
{
  loop_vec_info res;
  basic_block *bbs;
  gimple_stmt_iterator si;
  unsigned int i, nbbs;

  res = (loop_vec_info) xcalloc (1, sizeof (struct _loop_vec_info));
  LOOP_VINFO_LOOP (res) = loop;

  bbs = get_loop_body (loop);

  /* Create/Update stmt_info for all stmts in the loop.  */
  for (i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = bbs[i];

      /* BBs in a nested inner-loop will have been already processed (because
         we will have called vect_analyze_loop_form for any nested inner-loop).
         Therefore, for stmts in an inner-loop we just want to update the
         STMT_VINFO_LOOP_VINFO field of their stmt_info to point to the new
         loop_info of the outer-loop we are currently considering to vectorize
         (instead of the loop_info of the inner-loop).
         For stmts in other BBs we need to create a stmt_info from scratch.  */
      if (bb->loop_father != loop)
        {
          /* Inner-loop bb.  */
          gcc_assert (loop->inner && bb->loop_father == loop->inner);
          for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple phi = gsi_stmt (si);
              stmt_vec_info stmt_info = vinfo_for_stmt (phi);
              loop_vec_info inner_loop_vinfo =
                STMT_VINFO_LOOP_VINFO (stmt_info);
              gcc_assert (loop->inner == LOOP_VINFO_LOOP (inner_loop_vinfo));
              STMT_VINFO_LOOP_VINFO (stmt_info) = res;
            }
          for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple stmt = gsi_stmt (si);
              stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
              loop_vec_info inner_loop_vinfo =
                STMT_VINFO_LOOP_VINFO (stmt_info);
              gcc_assert (loop->inner == LOOP_VINFO_LOOP (inner_loop_vinfo));
              STMT_VINFO_LOOP_VINFO (stmt_info) = res;
            }
        }
      else
        {
          /* bb in current nest.  */
          for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple phi = gsi_stmt (si);
              gimple_set_uid (phi, 0);
              set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, res, NULL));
            }

          for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple stmt = gsi_stmt (si);
              gimple_set_uid (stmt, 0);
              set_vinfo_for_stmt (stmt, new_stmt_vec_info (stmt, res, NULL));
            }
        }
    }

  /* CHECKME: We want to visit all BBs before their successors (except for
     latch blocks, for which this assertion wouldn't hold).  In the simple
     case of the loop forms we allow, a dfs order of the BBs would be the same
     as reversed postorder traversal, so we are safe.  */

  free (bbs);
  bbs = XCNEWVEC (basic_block, loop->num_nodes);
  nbbs = dfs_enumerate_from (loop->header, 0, bb_in_loop_p,
                             bbs, loop->num_nodes, loop);
  gcc_assert (nbbs == loop->num_nodes);

  LOOP_VINFO_BBS (res) = bbs;
  LOOP_VINFO_NITERS (res) = NULL;
  LOOP_VINFO_NITERS_UNCHANGED (res) = NULL;
  LOOP_VINFO_COST_MODEL_MIN_ITERS (res) = 0;
  LOOP_VINFO_VECTORIZABLE_P (res) = 0;
  LOOP_PEELING_FOR_ALIGNMENT (res) = 0;
  LOOP_VINFO_VECT_FACTOR (res) = 0;
  LOOP_VINFO_LOOP_NEST (res) = VEC_alloc (loop_p, heap, 3);
  LOOP_VINFO_DATAREFS (res) = VEC_alloc (data_reference_p, heap, 10);
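  /* One dependence relation per pair of data references, hence 10 * 10
     slots for the (up to 10) data-refs allocated above.  */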
  LOOP_VINFO_DDRS (res) = VEC_alloc (ddr_p, heap, 10 * 10);
  LOOP_VINFO_UNALIGNED_DR (res) = NULL;
  LOOP_VINFO_MAY_MISALIGN_STMTS (res) =
    VEC_alloc (gimple, heap,
               PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIGNMENT_CHECKS));
  LOOP_VINFO_MAY_ALIAS_DDRS (res) =
    VEC_alloc (ddr_p, heap,
               PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS));
  LOOP_VINFO_STRIDED_STORES (res) = VEC_alloc (gimple, heap, 10);
  LOOP_VINFO_REDUCTIONS (res) = VEC_alloc (gimple, heap, 10);
  LOOP_VINFO_REDUCTION_CHAINS (res) = VEC_alloc (gimple, heap, 10);
  LOOP_VINFO_SLP_INSTANCES (res) = VEC_alloc (slp_instance, heap, 10);
  LOOP_VINFO_SLP_UNROLLING_FACTOR (res) = 1;
  LOOP_VINFO_PEELING_HTAB (res) = NULL;
  LOOP_VINFO_PEELING_FOR_GAPS (res) = false;

  return res;
}
/* Function destroy_loop_vec_info.

   Free LOOP_VINFO struct, as well as all the stmt_vec_info structs of all the
   stmts in the loop.  */

void
destroy_loop_vec_info (loop_vec_info loop_vinfo, bool clean_stmts)
{
  struct loop *loop;
  basic_block *bbs;
  int nbbs;
  gimple_stmt_iterator si;
  int j;
  VEC (slp_instance, heap) *slp_instances;
  slp_instance instance;

  if (!loop_vinfo)
    return;

  loop = LOOP_VINFO_LOOP (loop_vinfo);

  bbs = LOOP_VINFO_BBS (loop_vinfo);
  nbbs = loop->num_nodes;
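
  /* If the stmt_vec_infos are to be kept (CLEAN_STMTS is false), free only
     the loop-level data and leave the per-stmt info in place.  */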
  if (!clean_stmts)
    {
      free (LOOP_VINFO_BBS (loop_vinfo));
      free_data_refs (LOOP_VINFO_DATAREFS (loop_vinfo));
      free_dependence_relations (LOOP_VINFO_DDRS (loop_vinfo));
      VEC_free (loop_p, heap, LOOP_VINFO_LOOP_NEST (loop_vinfo));
      VEC_free (gimple, heap, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo));
      VEC_free (ddr_p, heap, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo));

      free (loop_vinfo);
      loop->aux = NULL;
      return;
    }

  for (j = 0; j < nbbs; j++)
    {
      basic_block bb = bbs[j];
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        free_stmt_vec_info (gsi_stmt (si));

      for (si = gsi_start_bb (bb); !gsi_end_p (si); )
        {
          gimple stmt = gsi_stmt (si);
          stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

          if (stmt_info)
            {
              /* Check if this is a "pattern stmt" (introduced by the
                 vectorizer during the pattern recognition pass).  */
              bool remove_stmt_p = false;
              gimple orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);

              if (orig_stmt)
                {
                  stmt_vec_info orig_stmt_info = vinfo_for_stmt (orig_stmt);
                  if (orig_stmt_info
                      && STMT_VINFO_IN_PATTERN_P (orig_stmt_info))
                    remove_stmt_p = true;
                }

              /* Free stmt_vec_info.  */
              free_stmt_vec_info (stmt);

              /* Remove dead "pattern stmts".  */
              if (remove_stmt_p)
                gsi_remove (&si, true);
            }
          gsi_next (&si);
        }
    }

  free (LOOP_VINFO_BBS (loop_vinfo));
  free_data_refs (LOOP_VINFO_DATAREFS (loop_vinfo));
  free_dependence_relations (LOOP_VINFO_DDRS (loop_vinfo));
  VEC_free (loop_p, heap, LOOP_VINFO_LOOP_NEST (loop_vinfo));
  VEC_free (gimple, heap, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo));
  VEC_free (ddr_p, heap, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo));
  slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
  FOR_EACH_VEC_ELT (slp_instance, slp_instances, j, instance)
    vect_free_slp_instance (instance);

  VEC_free (slp_instance, heap, LOOP_VINFO_SLP_INSTANCES (loop_vinfo));
  VEC_free (gimple, heap, LOOP_VINFO_STRIDED_STORES (loop_vinfo));
  VEC_free (gimple, heap, LOOP_VINFO_REDUCTIONS (loop_vinfo));
  VEC_free (gimple, heap, LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo));

  if (LOOP_VINFO_PEELING_HTAB (loop_vinfo))
    htab_delete (LOOP_VINFO_PEELING_HTAB (loop_vinfo));

  free (loop_vinfo);
  loop->aux = NULL;
}
/* Function vect_analyze_loop_1.

   Apply a set of analyses on LOOP, and create a loop_vec_info struct
   for it.  The different analyses will record information in the
   loop_vec_info struct.  This is a subset of the analyses applied in
   vect_analyze_loop, to be applied on an inner-loop nested in the loop
   that is now considered for (outer-loop) vectorization.  */

static loop_vec_info
vect_analyze_loop_1 (struct loop *loop)
{
  loop_vec_info loop_vinfo;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "===== analyze_loop_nest_1 =====");

  /* Check the CFG characteristics of the loop (nesting, entry/exit, etc.).  */

  loop_vinfo = vect_analyze_loop_form (loop);
  if (!loop_vinfo)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad inner-loop form.");
      return NULL;
    }

  return loop_vinfo;
}
/* Function vect_analyze_loop_form.

   Verify that certain CFG restrictions hold, including:
   - the loop has a pre-header
   - the loop has a single entry and exit
   - the loop exit condition is simple enough, and the number of iterations
     can be analyzed (a countable loop).  */

loop_vec_info
vect_analyze_loop_form (struct loop *loop)
{
  loop_vec_info loop_vinfo;
  gimple loop_cond;
  tree number_of_iterations = NULL;
  loop_vec_info inner_loop_vinfo = NULL;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_analyze_loop_form ===");

  /* Different restrictions apply when we are considering an inner-most loop,
     vs. an outer (nested) loop.
     (FORNOW. May want to relax some of these restrictions in the future).  */

  if (!loop->inner)
    {
      /* Inner-most loop.  We currently require that the number of BBs is
         exactly 2 (the header and latch).  Vectorizable inner-most loops
         look like this:

                        (pre-header)
                           |
                          header <--------+
                           | |            |
                           | +--> latch --+
                           |
                        (exit-bb)  */

      if (loop->num_nodes != 2)
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: control flow in loop.");
          return NULL;
        }

      if (empty_block_p (loop->header))
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: empty loop.");
          return NULL;
        }
    }
  else
    {
      struct loop *innerloop = loop->inner;
      edge entryedge;

      /* Nested loop.  We currently require that the loop is doubly-nested,
         contains a single inner loop, and the number of BBs is exactly 5.
         Vectorizable outer-loops look like this:

                        (pre-header)
                           |
                          header <---+
                           |         |
                          inner-loop |
                           |         |
                          tail ------+
                           |
                        (exit-bb)

         The inner-loop has the properties expected of inner-most loops
         as described above.  */

      if ((loop->inner)->inner || (loop->inner)->next)
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: multiple nested loops.");
          return NULL;
        }

      /* Analyze the inner-loop.  */
      inner_loop_vinfo = vect_analyze_loop_1 (loop->inner);
      if (!inner_loop_vinfo)
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: Bad inner loop.");
          return NULL;
        }

      if (!expr_invariant_in_loop_p (loop,
                                     LOOP_VINFO_NITERS (inner_loop_vinfo)))
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump,
                     "not vectorized: inner-loop count not invariant.");
          destroy_loop_vec_info (inner_loop_vinfo, true);
          return NULL;
        }

      if (loop->num_nodes != 5)
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: control flow in loop.");
          destroy_loop_vec_info (inner_loop_vinfo, true);
          return NULL;
        }

      gcc_assert (EDGE_COUNT (innerloop->header->preds) == 2);
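
      /* The inner-loop header has two predecessors: the back edge from its
         own latch and the entry edge from the outer-loop header; pick the
         entry edge.  */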
      entryedge = EDGE_PRED (innerloop->header, 0);
      if (EDGE_PRED (innerloop->header, 0)->src == innerloop->latch)
        entryedge = EDGE_PRED (innerloop->header, 1);

      if (entryedge->src != loop->header
          || !single_exit (innerloop)
          || single_exit (innerloop)->dest != EDGE_PRED (loop->latch, 0)->src)
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: unsupported outerloop form.");
          destroy_loop_vec_info (inner_loop_vinfo, true);
          return NULL;
        }

      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "Considering outer-loop vectorization.");
    }

  if (!single_exit (loop)
      || EDGE_COUNT (loop->header->preds) != 2)
    {
      if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
        {
          if (!single_exit (loop))
            fprintf (vect_dump, "not vectorized: multiple exits.");
          else if (EDGE_COUNT (loop->header->preds) != 2)
            fprintf (vect_dump, "not vectorized: too many incoming edges.");
        }
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  /* We assume that the loop exit condition is at the end of the loop, i.e.,
     that the loop is represented as a do-while (with a proper if-guard
     before the loop if needed), where the loop header contains all the
     executable statements, and the latch is empty.  */
  if (!empty_block_p (loop->latch)
      || !gimple_seq_empty_p (phi_nodes (loop->latch)))
    {
      if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
        fprintf (vect_dump, "not vectorized: unexpected loop form.");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  /* Make sure there exists a single-predecessor exit bb:  */
  if (!single_pred_p (single_exit (loop)->dest))
    {
      edge e = single_exit (loop);
      if (!(e->flags & EDGE_ABNORMAL))
        {
          split_loop_exit_edge (e);
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "split exit edge.");
        }
      else
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: abnormal loop exit edge.");
          if (inner_loop_vinfo)
            destroy_loop_vec_info (inner_loop_vinfo, true);
          return NULL;
        }
    }

  loop_cond = vect_get_loop_niters (loop, &number_of_iterations);
  if (!loop_cond)
    {
      if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
        fprintf (vect_dump, "not vectorized: complicated exit condition.");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  if (!number_of_iterations)
    {
      if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
        fprintf (vect_dump,
                 "not vectorized: number of iterations cannot be computed.");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  if (chrec_contains_undetermined (number_of_iterations))
    {
      if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
        fprintf (vect_dump, "Infinite number of iterations.");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  if (!NITERS_KNOWN_P (number_of_iterations))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "Symbolic number of iterations is ");
          print_generic_expr (vect_dump, number_of_iterations, TDF_DETAILS);
        }
    }
  else if (TREE_INT_CST_LOW (number_of_iterations) == 0)
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: number of iterations = 0.");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, false);
      return NULL;
    }

  loop_vinfo = new_loop_vec_info (loop);
  LOOP_VINFO_NITERS (loop_vinfo) = number_of_iterations;
  LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = number_of_iterations;

  STMT_VINFO_TYPE (vinfo_for_stmt (loop_cond)) = loop_exit_ctrl_vec_info_type;

  /* CHECKME: May want to keep it around in the future.  */
  if (inner_loop_vinfo)
    destroy_loop_vec_info (inner_loop_vinfo, false);

  gcc_assert (!loop->aux);
  loop->aux = loop_vinfo;
  return loop_vinfo;
}
/* Get cost by calling cost target builtin.  */

static inline int
vect_get_cost (enum vect_cost_for_stmt type_of_cost)
{
  tree dummy_type = NULL;
  int dummy = 0;

  return targetm.vectorize.builtin_vectorization_cost (type_of_cost,
                                                       dummy_type, dummy);
}
/* Function vect_analyze_loop_operations.

   Scan the loop stmts and make sure they are all vectorizable.  */

static bool
vect_analyze_loop_operations (loop_vec_info loop_vinfo, bool slp)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  unsigned int vectorization_factor = 0;
  int i;
  gimple phi;
  stmt_vec_info stmt_info;
  bool need_to_vectorize = false;
  int min_profitable_iters;
  int min_scalar_loop_bound;
  unsigned int th;
  bool only_slp_in_loop = true, ok;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_analyze_loop_operations ===");

  gcc_assert (LOOP_VINFO_VECT_FACTOR (loop_vinfo));
  vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  if (slp)
    {
      /* If all the stmts in the loop can be SLPed, we perform only SLP, and
         the vectorization factor of the loop is the unrolling factor required
         by the SLP instances.  If that unrolling factor is 1, we say that we
         perform pure SLP on the loop - cross iteration parallelism is not
         exploited.  */
      for (i = 0; i < nbbs; i++)
        {
          basic_block bb = bbs[i];
          for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple stmt = gsi_stmt (si);
              stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
              gcc_assert (stmt_info);
              if ((STMT_VINFO_RELEVANT_P (stmt_info)
                   || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
                  && !PURE_SLP_STMT (stmt_info))
                /* STMT needs both SLP and loop-based vectorization.  */
                only_slp_in_loop = false;
            }
        }

      if (only_slp_in_loop)
        vectorization_factor = LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo);
      else
        vectorization_factor = least_common_multiple (vectorization_factor,
                                LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo));

      LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "Updating vectorization factor to %d ",
                 vectorization_factor);
    }
  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];

      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        {
          phi = gsi_stmt (si);
          ok = true;

          stmt_info = vinfo_for_stmt (phi);
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "examining phi: ");
              print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
            }

          /* Inner-loop loop-closed exit phi in outer-loop vectorization
             (i.e., a phi in the tail of the outer-loop).  */
          if (! is_loop_header_bb_p (bb))
            {
              /* FORNOW: we currently don't support the case that these phis
                 are not used in the outerloop (unless it is double reduction,
                 i.e., this phi is vect_reduction_def), because this case
                 requires us to actually do something here.  */
              if ((!STMT_VINFO_RELEVANT_P (stmt_info)
                   || STMT_VINFO_LIVE_P (stmt_info))
                  && STMT_VINFO_DEF_TYPE (stmt_info)
                     != vect_double_reduction_def)
                {
                  if (vect_print_dump_info (REPORT_DETAILS))
                    fprintf (vect_dump,
                             "Unsupported loop-closed phi in outer-loop.");
                  return false;
                }

              /* If PHI is used in the outer loop, we check that its operand
                 is defined in the inner loop.  */
              if (STMT_VINFO_RELEVANT_P (stmt_info))
                {
                  tree phi_op;
                  gimple op_def_stmt;

                  if (gimple_phi_num_args (phi) != 1)
                    return false;

                  phi_op = PHI_ARG_DEF (phi, 0);
                  if (TREE_CODE (phi_op) != SSA_NAME)
                    return false;

                  op_def_stmt = SSA_NAME_DEF_STMT (phi_op);
                  if (!op_def_stmt || !vinfo_for_stmt (op_def_stmt))
                    return false;

                  if (STMT_VINFO_RELEVANT (vinfo_for_stmt (op_def_stmt))
                        != vect_used_in_outer
                      && STMT_VINFO_RELEVANT (vinfo_for_stmt (op_def_stmt))
                           != vect_used_in_outer_by_reduction)
                    return false;
                }

              continue;
            }

          gcc_assert (stmt_info);

          if (STMT_VINFO_LIVE_P (stmt_info))
            {
              /* FORNOW: not yet supported.  */
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                fprintf (vect_dump, "not vectorized: value used after loop.");
              return false;
            }

          if (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope
              && STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def)
            {
              /* A scalar-dependence cycle that we don't support.  */
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                fprintf (vect_dump, "not vectorized: scalar dependence cycle.");
              return false;
            }

          if (STMT_VINFO_RELEVANT_P (stmt_info))
            {
              need_to_vectorize = true;
              if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def)
                ok = vectorizable_induction (phi, NULL, NULL);
            }

          if (!ok)
            {
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                {
                  fprintf (vect_dump,
                           "not vectorized: relevant phi not supported: ");
                  print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
                }
              return false;
            }
        }

      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
        {
          gimple stmt = gsi_stmt (si);
          if (!vect_analyze_stmt (stmt, &need_to_vectorize, NULL))
            return false;
        }
    } /* bbs */

  /* All operations in the loop are either irrelevant (deal with loop
     control, or dead), or only used outside the loop and can be moved
     out of the loop (e.g. invariants, inductions).  The loop can be
     optimized away by scalar optimizations.  We're better off not
     touching this loop.  */
  if (!need_to_vectorize)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump,
                 "All the computation can be taken out of the loop.");
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump,
                 "not vectorized: redundant loop. no profit to vectorize.");
      return false;
    }

  if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      && vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump,
             "vectorization_factor = %d, niters = " HOST_WIDE_INT_PRINT_DEC,
             vectorization_factor, LOOP_VINFO_INT_NITERS (loop_vinfo));

  if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      && (LOOP_VINFO_INT_NITERS (loop_vinfo) < vectorization_factor))
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: iteration count too small.");
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "not vectorized: iteration count smaller than "
                 "vectorization factor.");
      return false;
    }

  /* Analyze cost.  Decide if worth while to vectorize.  */

  /* Once VF is set, SLP costs should be updated since the number of created
     vector stmts depends on VF.  */
  vect_update_slp_costs_according_to_vf (loop_vinfo);

  min_profitable_iters = vect_estimate_min_profitable_iters (loop_vinfo);
  LOOP_VINFO_COST_MODEL_MIN_ITERS (loop_vinfo) = min_profitable_iters;

  if (min_profitable_iters < 0)
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: vectorization not profitable.");
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "not vectorized: vector version will never be "
                 "profitable.");
      return false;
    }
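
  /* The static threshold from --param min-vect-loop-bound: with a known
     iteration count, the loop is only vectorized if it runs for more than
     min-vect-loop-bound * VF - 1 scalar iterations.  */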
  min_scalar_loop_bound = ((PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND)
                            * vectorization_factor) - 1);

  /* Use the cost model only if it is more conservative than the
     user-specified threshold.  */

  th = (unsigned) min_scalar_loop_bound;
  if (min_profitable_iters
      && (!min_scalar_loop_bound
          || min_profitable_iters > min_scalar_loop_bound))
    th = (unsigned) min_profitable_iters;

  if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      && LOOP_VINFO_INT_NITERS (loop_vinfo) <= th)
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: vectorization not "
                 "profitable.");
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "not vectorized: iteration count smaller than "
                 "user specified loop bound parameter or minimum "
                 "profitable iterations (whichever is more conservative).");
      return false;
    }

  if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      || LOOP_VINFO_INT_NITERS (loop_vinfo) % vectorization_factor != 0
      || LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "epilog loop required.");
      if (!vect_can_advance_ivs_p (loop_vinfo))
        {
          if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
            fprintf (vect_dump,
                     "not vectorized: can't create epilog loop 1.");
          return false;
        }
      if (!slpeel_can_duplicate_loop_p (loop, single_exit (loop)))
        {
          if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
            fprintf (vect_dump,
                     "not vectorized: can't create epilog loop 2.");
          return false;
        }
    }

  return true;
}
/* Function vect_analyze_loop_2.

   Apply a set of analyses on LOOP, and create a loop_vec_info struct
   for it.  The different analyses will record information in the
   loop_vec_info struct.  */
static bool
vect_analyze_loop_2 (loop_vec_info loop_vinfo)
{
  bool ok, dummy, slp = false;
  int max_vf = MAX_VECTORIZATION_FACTOR;
  int min_vf = 2;

  /* Find all data references in the loop (which correspond to vdefs/vuses)
     and analyze their evolution in the loop.  Also adjust the minimal
     vectorization factor according to the loads and stores.

     FORNOW: Handle only simple, array references, whose
     alignment can be forced, and aligned pointer-references.  */

  ok = vect_analyze_data_refs (loop_vinfo, NULL, &min_vf);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data references.");
      return false;
    }

  /* Classify all cross-iteration scalar data-flow cycles.
     Cross-iteration cycles caused by virtual phis are analyzed separately.  */

  vect_analyze_scalar_cycles (loop_vinfo);

  vect_pattern_recog (loop_vinfo);

  /* Data-flow analysis to detect stmts that do not need to be vectorized.  */

  ok = vect_mark_stmts_to_be_vectorized (loop_vinfo);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "unexpected pattern.");
      return false;
    }

  /* Analyze data dependences between the data-refs in the loop
     and adjust the maximum vectorization factor according to
     the dependences.
     FORNOW: fail at the first data dependence that we encounter.  */

  ok = vect_analyze_data_ref_dependences (loop_vinfo, NULL, &max_vf, &dummy);
  if (!ok
      || max_vf < min_vf)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data dependence.");
      return false;
    }

  ok = vect_determine_vectorization_factor (loop_vinfo);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "can't determine vectorization factor.");
      return false;
    }
  if (max_vf < LOOP_VINFO_VECT_FACTOR (loop_vinfo))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data dependence.");
      return false;
    }

  /* Analyze the alignment of the data-refs in the loop.
     Fail if a data reference is found that cannot be vectorized.  */

  ok = vect_analyze_data_refs_alignment (loop_vinfo, NULL);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data alignment.");
      return false;
    }

  /* Analyze the access patterns of the data-refs in the loop (consecutive,
     complex, etc.).  FORNOW: Only handle consecutive access pattern.  */

  ok = vect_analyze_data_ref_accesses (loop_vinfo, NULL);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data access.");
      return false;
    }

  /* Prune the list of ddrs to be tested at run-time by versioning for alias.
     It is important to call pruning after vect_analyze_data_ref_accesses,
     since we use grouping information gathered by interleaving analysis.  */
  ok = vect_prune_runtime_alias_test_list (loop_vinfo);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "too long list of versioning for alias "
                            "run-time tests.");
      return false;
    }

  /* This pass will decide on using loop versioning and/or loop peeling in
     order to enhance the alignment of data references in the loop.  */

  ok = vect_enhance_data_refs_alignment (loop_vinfo);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data alignment.");
      return false;
    }

  /* Check the SLP opportunities in the loop, analyze and build SLP trees.  */
  ok = vect_analyze_slp (loop_vinfo, NULL);
  if (ok)
    {
      /* Decide which possible SLP instances to SLP.  */
      slp = vect_make_slp_decision (loop_vinfo);

      /* Find stmts that need to be both vectorized and SLPed.  */
      vect_detect_hybrid_slp (loop_vinfo);
    }
  else
    return false;

  /* Scan all the operations in the loop and make sure they are
     vectorizable.  */

  ok = vect_analyze_loop_operations (loop_vinfo, slp);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad operation or unsupported loop bound.");
      return false;
    }

  return true;
}
/* Function vect_analyze_loop.

   Apply a set of analyses on LOOP, and create a loop_vec_info struct
   for it.  The different analyses will record information in the
   loop_vec_info struct.  */
loop_vec_info
vect_analyze_loop (struct loop *loop)
{
  loop_vec_info loop_vinfo;
  unsigned int vector_sizes;

  /* Autodetect first vector size we try.  */
  current_vector_size = 0;
  vector_sizes = targetm.vectorize.autovectorize_vector_sizes ();

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "===== analyze_loop_nest =====");

  if (loop_outer (loop)
      && loop_vec_info_for_loop (loop_outer (loop))
      && LOOP_VINFO_VECTORIZABLE_P (loop_vec_info_for_loop (loop_outer (loop))))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "outer-loop already vectorized.");
      return NULL;
    }

  while (1)
    {
      /* Check the CFG characteristics of the loop (nesting, entry/exit).  */
      loop_vinfo = vect_analyze_loop_form (loop);
      if (!loop_vinfo)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "bad loop form.");
          return NULL;
        }

      if (vect_analyze_loop_2 (loop_vinfo))
        {
          LOOP_VINFO_VECTORIZABLE_P (loop_vinfo) = 1;

          return loop_vinfo;
        }

      destroy_loop_vec_info (loop_vinfo, true);

      vector_sizes &= ~current_vector_size;
      if (vector_sizes == 0
          || current_vector_size == 0)
        return NULL;

      /* Try the next biggest vector size.  */
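      /* VECTOR_SIZES is a bitmask of the remaining supported (power-of-two)
         vector sizes in bytes; 1 << floor_log2 picks the largest of them.  */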
      current_vector_size = 1 << floor_log2 (vector_sizes);
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "***** Re-trying analysis with "
                 "vector size %d\n", current_vector_size);
    }
}
/* Function reduction_code_for_scalar_code

   Input:
   CODE - tree_code of a reduction operation.

   Output:
   REDUC_CODE - the corresponding tree-code to be used to reduce the
      vector of partial results into a single scalar result (which
      will also reside in a vector) or ERROR_MARK if the operation is
      a supported reduction operation, but does not have such a tree-code.

   Return FALSE if CODE currently cannot be vectorized as a reduction.  */
static bool
reduction_code_for_scalar_code (enum tree_code code,
                                enum tree_code *reduc_code)
{
  switch (code)
    {
      case MAX_EXPR:
        *reduc_code = REDUC_MAX_EXPR;
        return true;

      case MIN_EXPR:
        *reduc_code = REDUC_MIN_EXPR;
        return true;

      case PLUS_EXPR:
        *reduc_code = REDUC_PLUS_EXPR;
        return true;
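
      /* These codes are reducible but have no dedicated reduction tree-code;
         the epilogue then reduces the vector of partial results by other
         means (e.g. whole-vector shifts or scalar extraction).  */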
      case MULT_EXPR:
      case MINUS_EXPR:
      case BIT_IOR_EXPR:
      case BIT_XOR_EXPR:
      case BIT_AND_EXPR:
        *reduc_code = ERROR_MARK;
        return true;

      default:
        return false;
    }
}
/* Error reporting helper for vect_is_simple_reduction below.  GIMPLE statement
   STMT is printed with a message MSG.  */

static void
report_vect_op (gimple stmt, const char *msg)
{
  fprintf (vect_dump, "%s", msg);
  print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
}
/* Detect SLP reduction of the form:

   #a1 = phi <a5, a0>
   a2 = operation (a1)
   a3 = operation (a2)
   a4 = operation (a3)
   a5 = operation (a4)

   #a = phi <a5>

   PHI is the reduction phi node (#a1 = phi <a5, a0> above)
   FIRST_STMT is the first reduction stmt in the chain
   (a2 = operation (a1)).

   Return TRUE if a reduction chain was detected.  */
static bool
vect_is_slp_reduction (loop_vec_info loop_info, gimple phi, gimple first_stmt)
{
  struct loop *loop = (gimple_bb (phi))->loop_father;
  struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
  enum tree_code code;
  gimple current_stmt = NULL, loop_use_stmt = NULL, first, next_stmt;
  stmt_vec_info use_stmt_info, current_stmt_info;
  tree lhs;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  int nloop_uses, size = 0;
  bool found = false;

  if (loop != vect_loop)
    return false;

  lhs = PHI_RESULT (phi);
  code = gimple_assign_rhs_code (first_stmt);
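
  /* Walk the def-use chain starting from the PHI result: each statement must
     be the single in-loop use of the previous one and perform the same
     operation CODE, until we arrive back at the reduction PHI.  */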
  while (1)
    {
      nloop_uses = 0;
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
        {
          gimple use_stmt = USE_STMT (use_p);
          if (is_gimple_debug (use_stmt))
            continue;

          /* Check if we got back to the reduction phi.  */
          if (use_stmt == phi)
            {
              loop_use_stmt = use_stmt;
              found = true;
              break;
            }

          if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))
              && vinfo_for_stmt (use_stmt)
              && !STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (use_stmt)))
            {
              loop_use_stmt = use_stmt;
              nloop_uses++;
            }

          if (nloop_uses > 1)
            return false;
        }

      if (found)
        break;

      /* We reached a statement with no loop uses.  */
      if (nloop_uses == 0)
        return false;

      /* This is a loop exit phi, and we haven't reached the reduction phi.  */
      if (gimple_code (loop_use_stmt) == GIMPLE_PHI)
        return false;

      if (!is_gimple_assign (loop_use_stmt)
          || code != gimple_assign_rhs_code (loop_use_stmt)
          || !flow_bb_inside_loop_p (loop, gimple_bb (loop_use_stmt)))
        return false;

      /* Insert USE_STMT into reduction chain.  */
      use_stmt_info = vinfo_for_stmt (loop_use_stmt);
      if (current_stmt)
        {
          current_stmt_info = vinfo_for_stmt (current_stmt);
          GROUP_NEXT_ELEMENT (current_stmt_info) = loop_use_stmt;
          GROUP_FIRST_ELEMENT (use_stmt_info)
            = GROUP_FIRST_ELEMENT (current_stmt_info);
        }
      else
        GROUP_FIRST_ELEMENT (use_stmt_info) = loop_use_stmt;

      lhs = gimple_assign_lhs (loop_use_stmt);
      current_stmt = loop_use_stmt;
      size++;
    }

  if (!found || loop_use_stmt != phi || size < 2)
    return false;

  /* Swap the operands, if needed, to make the reduction operand be the second
     operand.  */
  lhs = PHI_RESULT (phi);
  next_stmt = GROUP_FIRST_ELEMENT (vinfo_for_stmt (current_stmt));
  while (next_stmt)
    {
      if (gimple_assign_rhs2 (next_stmt) == lhs)
        {
          tree op = gimple_assign_rhs1 (next_stmt);
          gimple def_stmt = NULL;

          if (TREE_CODE (op) == SSA_NAME)
            def_stmt = SSA_NAME_DEF_STMT (op);

          /* Check that the other def is either defined in the loop
             ("vect_internal_def"), or it's an induction (defined by a
             loop-header phi-node).  */
          if (def_stmt
              && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
              && (is_gimple_assign (def_stmt)
                  || is_gimple_call (def_stmt)
                  || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
                      == vect_induction_def
                  || (gimple_code (def_stmt) == GIMPLE_PHI
                      && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
                          == vect_internal_def
                      && !is_loop_header_bb_p (gimple_bb (def_stmt)))))
            {
              lhs = gimple_assign_lhs (next_stmt);
              next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
              continue;
            }

          return false;
        }
      else
        {
          tree op = gimple_assign_rhs2 (next_stmt);
          gimple def_stmt = NULL;

          if (TREE_CODE (op) == SSA_NAME)
            def_stmt = SSA_NAME_DEF_STMT (op);

          /* Check that the other def is either defined in the loop
             ("vect_internal_def"), or it's an induction (defined by a
             loop-header phi-node).  */
          if (def_stmt
              && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
              && (is_gimple_assign (def_stmt)
                  || is_gimple_call (def_stmt)
                  || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
                      == vect_induction_def
                  || (gimple_code (def_stmt) == GIMPLE_PHI
                      && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
                          == vect_internal_def
                      && !is_loop_header_bb_p (gimple_bb (def_stmt)))))
            {
              if (vect_print_dump_info (REPORT_DETAILS))
                {
                  fprintf (vect_dump, "swapping oprnds: ");
                  print_gimple_stmt (vect_dump, next_stmt, 0, TDF_SLIM);
                }

              swap_tree_operands (next_stmt,
                                  gimple_assign_rhs1_ptr (next_stmt),
                                  gimple_assign_rhs2_ptr (next_stmt));
              mark_symbols_for_renaming (next_stmt);
            }
          else
            return false;
        }

      lhs = gimple_assign_lhs (next_stmt);
      next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
    }

  /* Save the chain for further analysis in SLP detection.  */
  first = GROUP_FIRST_ELEMENT (vinfo_for_stmt (current_stmt));
  VEC_safe_push (gimple, heap, LOOP_VINFO_REDUCTION_CHAINS (loop_info), first);
  GROUP_SIZE (vinfo_for_stmt (first)) = size;

  return true;
}
1878 /* Function vect_is_simple_reduction_1
1880 (1) Detect a cross-iteration def-use cycle that represents a simple
1881 reduction computation. We look for the following pattern:
1883 loop_header:
1884 a1 = phi < a0, a2 >
1885 a3 = ...
1886 a2 = operation (a3, a1)
1888 such that:
1889 1. operation is commutative and associative and it is safe to
1890 change the order of the computation (if CHECK_REDUCTION is true)
1891 2. no uses for a2 in the loop (a2 is used out of the loop)
1892 3. no uses of a1 in the loop besides the reduction operation
1893 4. no uses of a1 outside the loop.
1895 Conditions 1,4 are tested here.
1896 Conditions 2,3 are tested in vect_mark_stmts_to_be_vectorized.
1898 (2) Detect a cross-iteration def-use cycle in nested loops, i.e.,
1899 nested cycles, if CHECK_REDUCTION is false.
1901 (3) Detect cycles of phi nodes in outer-loop vectorization, i.e., double
1902 reductions:
1904 a1 = phi < a0, a2 >
1905 inner loop (def of a3)
1906 a2 = phi < a3 >
1908 If MODIFY is true, it also tries to rework the code in-place to enable
1909 detection of more reduction patterns. For the time being we rewrite
1910 "res -= RHS" into "res += -RHS" when it seems worthwhile.
1911 */
1913 static gimple
1914 vect_is_simple_reduction_1 (loop_vec_info loop_info, gimple phi,
1915 bool check_reduction, bool *double_reduc,
1916 bool modify)
1918 struct loop *loop = (gimple_bb (phi))->loop_father;
1919 struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
1920 edge latch_e = loop_latch_edge (loop);
1921 tree loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
1922 gimple def_stmt, def1 = NULL, def2 = NULL;
1923 enum tree_code orig_code, code;
1924 tree op1, op2, op3 = NULL_TREE, op4 = NULL_TREE;
1925 tree type;
1926 int nloop_uses;
1927 tree name;
1928 imm_use_iterator imm_iter;
1929 use_operand_p use_p;
1930 bool phi_def;
1932 *double_reduc = false;
1934 /* If CHECK_REDUCTION is true, we assume inner-most loop vectorization;
1935 otherwise, we assume outer loop vectorization. */
1936 gcc_assert ((check_reduction && loop == vect_loop)
1937 || (!check_reduction && flow_loop_nested_p (vect_loop, loop)));
1939 name = PHI_RESULT (phi);
1940 nloop_uses = 0;
1941 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
1943 gimple use_stmt = USE_STMT (use_p);
1944 if (is_gimple_debug (use_stmt))
1945 continue;
1947 if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
1949 if (vect_print_dump_info (REPORT_DETAILS))
1950 fprintf (vect_dump, "intermediate value used outside loop.");
1952 return NULL;
1955 if (vinfo_for_stmt (use_stmt)
1956 && !is_pattern_stmt_p (vinfo_for_stmt (use_stmt)))
1957 nloop_uses++;
1958 if (nloop_uses > 1)
1960 if (vect_print_dump_info (REPORT_DETAILS))
1961 fprintf (vect_dump, "reduction used in loop.");
1962 return NULL;
1966 if (TREE_CODE (loop_arg) != SSA_NAME)
1968 if (vect_print_dump_info (REPORT_DETAILS))
1970 fprintf (vect_dump, "reduction: not ssa_name: ");
1971 print_generic_expr (vect_dump, loop_arg, TDF_SLIM);
1973 return NULL;
1976 def_stmt = SSA_NAME_DEF_STMT (loop_arg);
1977 if (!def_stmt)
1979 if (vect_print_dump_info (REPORT_DETAILS))
1980 fprintf (vect_dump, "reduction: no def_stmt.");
1981 return NULL;
1984 if (!is_gimple_assign (def_stmt) && gimple_code (def_stmt) != GIMPLE_PHI)
1986 if (vect_print_dump_info (REPORT_DETAILS))
1987 print_gimple_stmt (vect_dump, def_stmt, 0, TDF_SLIM);
1988 return NULL;
1991 if (is_gimple_assign (def_stmt))
1993 name = gimple_assign_lhs (def_stmt);
1994 phi_def = false;
1996 else
1998 name = PHI_RESULT (def_stmt);
1999 phi_def = true;
2002 nloop_uses = 0;
2003 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
2005 gimple use_stmt = USE_STMT (use_p);
2006 if (is_gimple_debug (use_stmt))
2007 continue;
2008 if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))
2009 && vinfo_for_stmt (use_stmt)
2010 && !is_pattern_stmt_p (vinfo_for_stmt (use_stmt)))
2011 nloop_uses++;
2012 if (nloop_uses > 1)
2014 if (vect_print_dump_info (REPORT_DETAILS))
2015 fprintf (vect_dump, "reduction used in loop.");
2016 return NULL;
2020 /* If DEF_STMT is a phi node itself, we expect it to have a single argument
2021 defined in the inner loop. */
2022 if (phi_def)
2024 op1 = PHI_ARG_DEF (def_stmt, 0);
2026 if (gimple_phi_num_args (def_stmt) != 1
2027 || TREE_CODE (op1) != SSA_NAME)
2029 if (vect_print_dump_info (REPORT_DETAILS))
2030 fprintf (vect_dump, "unsupported phi node definition.");
2032 return NULL;
2035 def1 = SSA_NAME_DEF_STMT (op1);
2036 if (flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
2037 && loop->inner
2038 && flow_bb_inside_loop_p (loop->inner, gimple_bb (def1))
2039 && is_gimple_assign (def1))
2041 if (vect_print_dump_info (REPORT_DETAILS))
2042 report_vect_op (def_stmt, "detected double reduction: ");
2044 *double_reduc = true;
2045 return def_stmt;
2048 return NULL;
2051 code = orig_code = gimple_assign_rhs_code (def_stmt);
2053 /* We can handle "res -= x[i]", which is non-associative, by
2054 simply rewriting it into "res += -x[i]". Avoid changing the
2055 gimple instruction for the first simple tests and only do this
2056 if we're allowed to change code at all. */
2057 if (code == MINUS_EXPR
2058 && modify
2059 && (op1 = gimple_assign_rhs1 (def_stmt))
2060 && TREE_CODE (op1) == SSA_NAME
2061 && SSA_NAME_DEF_STMT (op1) == phi)
2062 code = PLUS_EXPR;
2064 if (check_reduction
2065 && (!commutative_tree_code (code) || !associative_tree_code (code)))
2067 if (vect_print_dump_info (REPORT_DETAILS))
2068 report_vect_op (def_stmt, "reduction: not commutative/associative: ");
2069 return NULL;
2072 if (get_gimple_rhs_class (code) != GIMPLE_BINARY_RHS)
2074 if (code != COND_EXPR)
2076 if (vect_print_dump_info (REPORT_DETAILS))
2077 report_vect_op (def_stmt, "reduction: not binary operation: ");
2079 return NULL;
2082 op3 = TREE_OPERAND (gimple_assign_rhs1 (def_stmt), 0);
2083 if (COMPARISON_CLASS_P (op3))
2085 op4 = TREE_OPERAND (op3, 1);
2086 op3 = TREE_OPERAND (op3, 0);
2089 op1 = TREE_OPERAND (gimple_assign_rhs1 (def_stmt), 1);
2090 op2 = TREE_OPERAND (gimple_assign_rhs1 (def_stmt), 2);
2092 if (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op2) != SSA_NAME)
2094 if (vect_print_dump_info (REPORT_DETAILS))
2095 report_vect_op (def_stmt, "reduction: uses not ssa_names: ");
2097 return NULL;
2100 else
2102 op1 = gimple_assign_rhs1 (def_stmt);
2103 op2 = gimple_assign_rhs2 (def_stmt);
2105 if (TREE_CODE (op1) != SSA_NAME || TREE_CODE (op2) != SSA_NAME)
2107 if (vect_print_dump_info (REPORT_DETAILS))
2108 report_vect_op (def_stmt, "reduction: uses not ssa_names: ");
2110 return NULL;
2114 type = TREE_TYPE (gimple_assign_lhs (def_stmt));
2115 if ((TREE_CODE (op1) == SSA_NAME
2116 && !types_compatible_p (type,TREE_TYPE (op1)))
2117 || (TREE_CODE (op2) == SSA_NAME
2118 && !types_compatible_p (type, TREE_TYPE (op2)))
2119 || (op3 && TREE_CODE (op3) == SSA_NAME
2120 && !types_compatible_p (type, TREE_TYPE (op3)))
2121 || (op4 && TREE_CODE (op4) == SSA_NAME
2122 && !types_compatible_p (type, TREE_TYPE (op4))))
2124 if (vect_print_dump_info (REPORT_DETAILS))
2126 fprintf (vect_dump, "reduction: multiple types: operation type: ");
2127 print_generic_expr (vect_dump, type, TDF_SLIM);
2128 fprintf (vect_dump, ", operands types: ");
2129 print_generic_expr (vect_dump, TREE_TYPE (op1), TDF_SLIM);
2130 fprintf (vect_dump, ",");
2131 print_generic_expr (vect_dump, TREE_TYPE (op2), TDF_SLIM);
2132 if (op3)
2134 fprintf (vect_dump, ",");
2135 print_generic_expr (vect_dump, TREE_TYPE (op3), TDF_SLIM);
2138 if (op4)
2140 fprintf (vect_dump, ",");
2141 print_generic_expr (vect_dump, TREE_TYPE (op4), TDF_SLIM);
2145 return NULL;
2148 /* Check that it's ok to change the order of the computation.
2149 Generally, when vectorizing a reduction we change the order of the
2150 computation. This may change the behavior of the program in some
2151 cases, so we need to check that this is ok. One exception is when
2152 vectorizing an outer-loop: the inner-loop is executed sequentially,
2153 and therefore vectorizing reductions in the inner-loop during
2154 outer-loop vectorization is safe. */
2156 /* CHECKME: check for !flag_finite_math_only too? */
2157 if (SCALAR_FLOAT_TYPE_P (type) && !flag_associative_math
2158 && check_reduction)
2160 /* Changing the order of operations changes the semantics. */
2161 if (vect_print_dump_info (REPORT_DETAILS))
2162 report_vect_op (def_stmt, "reduction: unsafe fp math optimization: ");
2163 return NULL;
2165 else if (INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type)
2166 && check_reduction)
2168 /* Changing the order of operations changes the semantics. */
2169 if (vect_print_dump_info (REPORT_DETAILS))
2170 report_vect_op (def_stmt, "reduction: unsafe int math optimization: ");
2171 return NULL;
2173 else if (SAT_FIXED_POINT_TYPE_P (type) && check_reduction)
2175 /* Changing the order of operations changes the semantics. */
2176 if (vect_print_dump_info (REPORT_DETAILS))
2177 report_vect_op (def_stmt,
2178 "reduction: unsafe fixed-point math optimization: ");
2179 return NULL;
2182 /* If we detected "res -= x[i]" earlier, rewrite it into
2183 "res += -x[i]" now. If this turns out to be useless reassoc
2184 will clean it up again. */
2185 if (orig_code == MINUS_EXPR)
2187 tree rhs = gimple_assign_rhs2 (def_stmt);
2188 tree negrhs = make_ssa_name (SSA_NAME_VAR (rhs), NULL);
2189 gimple negate_stmt = gimple_build_assign_with_ops (NEGATE_EXPR, negrhs,
2190 rhs, NULL);
2191 gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
2192 set_vinfo_for_stmt (negate_stmt, new_stmt_vec_info (negate_stmt,
2193 loop_info, NULL));
2194 gsi_insert_before (&gsi, negate_stmt, GSI_NEW_STMT);
2195 gimple_assign_set_rhs2 (def_stmt, negrhs);
2196 gimple_assign_set_rhs_code (def_stmt, PLUS_EXPR);
2197 update_stmt (def_stmt);
2200 /* Reduction is safe. We're dealing with one of the following:
2201 1) integer arithmetic and no trapv
2202 2) floating point arithmetic, and special flags permit this optimization
2203 3) nested cycle (i.e., outer loop vectorization). */
2204 if (TREE_CODE (op1) == SSA_NAME)
2205 def1 = SSA_NAME_DEF_STMT (op1);
2207 if (TREE_CODE (op2) == SSA_NAME)
2208 def2 = SSA_NAME_DEF_STMT (op2);
2210 if (code != COND_EXPR
2211 && (!def1 || !def2 || gimple_nop_p (def1) || gimple_nop_p (def2)))
2213 if (vect_print_dump_info (REPORT_DETAILS))
2214 report_vect_op (def_stmt, "reduction: no defs for operands: ");
2215 return NULL;
2218 /* Check that one def is the reduction def, defined by PHI,
2219 the other def is either defined in the loop ("vect_internal_def"),
2220 or it's an induction (defined by a loop-header phi-node). */
2222 if (def2 && def2 == phi
2223 && (code == COND_EXPR
2224 || (def1 && flow_bb_inside_loop_p (loop, gimple_bb (def1))
2225 && (is_gimple_assign (def1)
2226 || is_gimple_call (def1)
2227 || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1))
2228 == vect_induction_def
2229 || (gimple_code (def1) == GIMPLE_PHI
2230 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1))
2231 == vect_internal_def
2232 && !is_loop_header_bb_p (gimple_bb (def1)))))))
2234 if (vect_print_dump_info (REPORT_DETAILS))
2235 report_vect_op (def_stmt, "detected reduction: ");
2236 return def_stmt;
2239 if (def1 && def1 == phi
2240 && (code == COND_EXPR
2241 || (def2 && flow_bb_inside_loop_p (loop, gimple_bb (def2))
2242 && (is_gimple_assign (def2)
2243 || is_gimple_call (def2)
2244 || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2))
2245 == vect_induction_def
2246 || (gimple_code (def2) == GIMPLE_PHI
2247 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2))
2248 == vect_internal_def
2249 && !is_loop_header_bb_p (gimple_bb (def2)))))))
2251 if (check_reduction)
2253 /* Swap operands (just for simplicity - so that the rest of the code
2254 can assume that the reduction variable is always the last (second)
2255 argument). */
2256 if (vect_print_dump_info (REPORT_DETAILS))
2257 report_vect_op (def_stmt,
2258 "detected reduction: need to swap operands: ");
2260 swap_tree_operands (def_stmt, gimple_assign_rhs1_ptr (def_stmt),
2261 gimple_assign_rhs2_ptr (def_stmt));
2263 else
2265 if (vect_print_dump_info (REPORT_DETAILS))
2266 report_vect_op (def_stmt, "detected reduction: ");
2269 return def_stmt;
2272 /* Try to find SLP reduction chain. */
2273 if (check_reduction && vect_is_slp_reduction (loop_info, phi, def_stmt))
2275 if (vect_print_dump_info (REPORT_DETAILS))
2276 report_vect_op (def_stmt, "reduction: detected reduction chain: ");
2278 return def_stmt;
2281 if (vect_print_dump_info (REPORT_DETAILS))
2282 report_vect_op (def_stmt, "reduction: unknown pattern: ");
2284 return NULL;
2287 /* Wrapper around vect_is_simple_reduction_1, that won't modify code
2288 in-place. Arguments as there. */
2290 static gimple
2291 vect_is_simple_reduction (loop_vec_info loop_info, gimple phi,
2292 bool check_reduction, bool *double_reduc)
2294 return vect_is_simple_reduction_1 (loop_info, phi, check_reduction,
2295 double_reduc, false);
2298 /* Wrapper around vect_is_simple_reduction_1, which will modify code
2299 in-place if it enables detection of more reductions. Arguments
2300 as there. */
2302 gimple
2303 vect_force_simple_reduction (loop_vec_info loop_info, gimple phi,
2304 bool check_reduction, bool *double_reduc)
2306 return vect_is_simple_reduction_1 (loop_info, phi, check_reduction,
2307 double_reduc, true);
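/* A typical call site (a sketch; the actual caller lives in the
   scalar-cycle analysis code): each loop-header PHI node is tested with

     reduc_stmt = vect_force_simple_reduction (loop_vinfo, phi,
                                               !nested_cycle, &double_reduc);

   and a non-NULL result marks PHI as a reduction (or double-reduction)
   PHI node. */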
2310 /* Calculate the cost of one scalar iteration of the loop. */
2311 int
2312 vect_get_single_scalar_iteration_cost (loop_vec_info loop_vinfo)
2314 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2315 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
2316 int nbbs = loop->num_nodes, factor, scalar_single_iter_cost = 0;
2317 int innerloop_iters, i, stmt_cost;
2319 /* Count statements in scalar loop. Using this as scalar cost for a single
2320 iteration for now.
2322 TODO: Add outer loop support.
2324 TODO: Consider assigning different costs to different scalar
2325 statements. */
2327 /* FORNOW. */
2328 innerloop_iters = 1;
2329 if (loop->inner)
2330 innerloop_iters = 50; /* FIXME */
2332 for (i = 0; i < nbbs; i++)
2334 gimple_stmt_iterator si;
2335 basic_block bb = bbs[i];
2337 if (bb->loop_father == loop->inner)
2338 factor = innerloop_iters;
2339 else
2340 factor = 1;
2342 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
2344 gimple stmt = gsi_stmt (si);
2345 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2347 if (!is_gimple_assign (stmt) && !is_gimple_call (stmt))
2348 continue;
2350 /* Skip stmts that are not vectorized inside the loop. */
2351 if (stmt_info
2352 && !STMT_VINFO_RELEVANT_P (stmt_info)
2353 && (!STMT_VINFO_LIVE_P (stmt_info)
2354 || STMT_VINFO_DEF_TYPE (stmt_info) != vect_reduction_def))
2355 continue;
2357 if (STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt)))
2359 if (DR_IS_READ (STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt))))
2360 stmt_cost = vect_get_cost (scalar_load);
2361 else
2362 stmt_cost = vect_get_cost (scalar_store);
2364 else
2365 stmt_cost = vect_get_cost (scalar_stmt);
2367 scalar_single_iter_cost += stmt_cost * factor;
2370 return scalar_single_iter_cost;
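/* For example, a body with one relevant load, one arithmetic stmt and one
   store costs vect_get_cost (scalar_load) + vect_get_cost (scalar_stmt)
   + vect_get_cost (scalar_store) per iteration, each weighted by a factor
   of 1 when there is no inner loop. */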
2373 /* Calculate cost of peeling the loop PEEL_ITERS_PROLOGUE times. */
2374 int
2375 vect_get_known_peeling_cost (loop_vec_info loop_vinfo, int peel_iters_prologue,
2376 int *peel_iters_epilogue,
2377 int scalar_single_iter_cost)
2379 int peel_guard_costs = 0;
2380 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2382 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
2384 *peel_iters_epilogue = vf/2;
2385 if (vect_print_dump_info (REPORT_COST))
2386 fprintf (vect_dump, "cost model: "
2387 "epilogue peel iters set to vf/2 because "
2388 "loop iterations are unknown .");
2390 /* If the peeled iterations are known but the number of scalar loop
2391 iterations is unknown, count a taken branch per peeled loop. */
2392 peel_guard_costs = 2 * vect_get_cost (cond_branch_taken);
2394 else
2396 int niters = LOOP_VINFO_INT_NITERS (loop_vinfo);
2397 peel_iters_prologue = niters < peel_iters_prologue ?
2398 niters : peel_iters_prologue;
2399 *peel_iters_epilogue = (niters - peel_iters_prologue) % vf;
2400 /* If we need to peel for gaps but no epilogue peeling is otherwise
2401 required, we have to peel VF iterations. */
2402 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) && !*peel_iters_epilogue)
2403 *peel_iters_epilogue = vf;
2406 return (peel_iters_prologue * scalar_single_iter_cost)
2407 + (*peel_iters_epilogue * scalar_single_iter_cost)
2408 + peel_guard_costs;
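/* Worked example with illustrative numbers: for VF = 4, a known NITERS of
   10, PEEL_ITERS_PROLOGUE = 1, SCALAR_SINGLE_ITER_COST = 1 and no peeling
   for gaps, the epilogue gets (10 - 1) % 4 = 1 iteration, no guard
   branches are counted, and the returned cost is 1 * 1 + 1 * 1 + 0 = 2. */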
2411 /* Function vect_estimate_min_profitable_iters
2413 Return the number of iterations required for the vector version of the
2414 loop to be profitable relative to the cost of the scalar version of the
2415 loop.
2417 TODO: Take profile info into account before making vectorization
2418 decisions, if available. */
2420 int
2421 vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo)
2423 int i;
2424 int min_profitable_iters;
2425 int peel_iters_prologue;
2426 int peel_iters_epilogue;
2427 int vec_inside_cost = 0;
2428 int vec_outside_cost = 0;
2429 int scalar_single_iter_cost = 0;
2430 int scalar_outside_cost = 0;
2431 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2432 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2433 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
2434 int nbbs = loop->num_nodes;
2435 int npeel = LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo);
2436 int peel_guard_costs = 0;
2437 int innerloop_iters = 0, factor;
2438 VEC (slp_instance, heap) *slp_instances;
2439 slp_instance instance;
2441 /* Cost model disabled. */
2442 if (!flag_vect_cost_model)
2444 if (vect_print_dump_info (REPORT_COST))
2445 fprintf (vect_dump, "cost model disabled.");
2446 return 0;
2449 /* Requires loop versioning tests to handle misalignment. */
2450 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
2452 /* FIXME: Make cost depend on complexity of individual check. */
2453 vec_outside_cost +=
2454 VEC_length (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo));
2455 if (vect_print_dump_info (REPORT_COST))
2456 fprintf (vect_dump, "cost model: Adding cost of checks for loop "
2457 "versioning to treat misalignment.\n");
2460 /* Requires loop versioning with alias checks. */
2461 if (LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
2463 /* FIXME: Make cost depend on complexity of individual check. */
2464 vec_outside_cost +=
2465 VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo));
2466 if (vect_print_dump_info (REPORT_COST))
2467 fprintf (vect_dump, "cost model: Adding cost of checks for loop "
2468 "versioning aliasing.\n");
2471 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
2472 || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
2473 vec_outside_cost += vect_get_cost (cond_branch_taken);
2475 /* Accumulate the inside-of-loop and outside-of-loop costs of the
2476 statements that will be vectorized.
2478 TODO: Add outer loop support. */
2483 /* FORNOW. */
2484 if (loop->inner)
2485 innerloop_iters = 50; /* FIXME */
2487 for (i = 0; i < nbbs; i++)
2489 gimple_stmt_iterator si;
2490 basic_block bb = bbs[i];
2492 if (bb->loop_father == loop->inner)
2493 factor = innerloop_iters;
2494 else
2495 factor = 1;
2497 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
2499 gimple stmt = gsi_stmt (si);
2500 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2501 /* Skip stmts that are not vectorized inside the loop. */
2502 if (!STMT_VINFO_RELEVANT_P (stmt_info)
2503 && (!STMT_VINFO_LIVE_P (stmt_info)
2504 || STMT_VINFO_DEF_TYPE (stmt_info) != vect_reduction_def))
2505 continue;
2506 vec_inside_cost += STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info) * factor;
2507 /* FIXME: for stmts in the inner-loop in outer-loop vectorization,
2508 some of the "outside" costs are generated inside the outer-loop. */
2509 vec_outside_cost += STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info);
2513 scalar_single_iter_cost = vect_get_single_scalar_iteration_cost (loop_vinfo);
2515 /* Add additional cost for the peeled instructions in prologue and epilogue
2516 loop.
2518 FORNOW: If we don't know the value of peel_iters for prologue or epilogue
2519 at compile time, we assume it's vf/2 (the worst case would be vf-1).
2521 TODO: Build an expression that represents peel_iters for prologue and
2522 epilogue to be used in a run-time test. */
2524 if (npeel < 0)
2526 peel_iters_prologue = vf/2;
2527 if (vect_print_dump_info (REPORT_COST))
2528 fprintf (vect_dump, "cost model: "
2529 "prologue peel iters set to vf/2.");
2531 /* If peeling for alignment is unknown, the loop bound of the main loop
2532 becomes unknown. */
2533 peel_iters_epilogue = vf/2;
2534 if (vect_print_dump_info (REPORT_COST))
2535 fprintf (vect_dump, "cost model: "
2536 "epilogue peel iters set to vf/2 because "
2537 "peeling for alignment is unknown .");
2539 /* If peeled iterations are unknown, count a taken branch and a not taken
2540 branch per peeled loop. Even if scalar loop iterations are known,
2541 vector iterations are not known since peeled prologue iterations are
2542 not known. Hence guards remain the same. */
2543 peel_guard_costs += 2 * (vect_get_cost (cond_branch_taken)
2544 + vect_get_cost (cond_branch_not_taken));
2545 vec_outside_cost += (peel_iters_prologue * scalar_single_iter_cost)
2546 + (peel_iters_epilogue * scalar_single_iter_cost)
2547 + peel_guard_costs;
2549 else
2551 peel_iters_prologue = npeel;
2552 vec_outside_cost += vect_get_known_peeling_cost (loop_vinfo,
2553 peel_iters_prologue, &peel_iters_epilogue,
2554 scalar_single_iter_cost);
2557 /* FORNOW: The scalar outside cost is incremented in one of the
2558 following ways:
2560 1. The vectorizer checks for alignment and aliasing and generates
2561 a condition that allows dynamic vectorization. A cost model
2562 check is ANDed with the versioning condition. Hence the scalar code
2563 path now has the added cost of the versioning check.
2565 if (cost > th & versioning_check)
2566 jmp to vector code
2568 Hence the run-time scalar cost is incremented by a not-taken branch cost.
2570 2. The vectorizer then checks if a prologue is required. If the
2571 cost model check was not done before during versioning, it has to
2572 be done before the prologue check.
2574 if (cost <= th)
2575 prologue = scalar_iters
2576 if (prologue == 0)
2577 jmp to vector code
2578 else
2579 execute prologue
2580 if (prologue == num_iters)
2581 go to exit
2583 Hence the run-time scalar cost is incremented by a taken branch,
2584 plus a not-taken branch, plus a taken branch cost.
2586 3. The vectorizer then checks if an epilogue is required. If the
2587 cost model check was not done before during prologue check, it
2588 has to be done with the epilogue check.
2590 if (prologue == 0)
2591 jmp to vector code
2592 else
2593 execute prologue
2594 if (prologue == num_iters)
2595 go to exit
2596 vector code:
2597 if ((cost <= th) | (scalar_iters-prologue-epilogue == 0))
2598 jmp to epilogue
2600 Hence the run-time scalar cost should be incremented by 2 taken
2601 branches.
2603 TODO: The back end may order the BBs differently and reverse
2604 conditions/branch directions. Change the estimates below to
2605 something more reasonable. */
2607 /* If the number of iterations is known and we do not do versioning, we can
2608 decide whether to vectorize at compile time. Hence the scalar version
2609 does not carry cost model guard costs. */
2610 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
2611 || LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
2612 || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
2614 /* Cost model check occurs at versioning. */
2615 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
2616 || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
2617 scalar_outside_cost += vect_get_cost (cond_branch_not_taken);
2618 else
2620 /* Cost model check occurs at prologue generation. */
2621 if (LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0)
2622 scalar_outside_cost += 2 * vect_get_cost (cond_branch_taken)
2623 + vect_get_cost (cond_branch_not_taken);
2624 /* Cost model check occurs at epilogue generation. */
2625 else
2626 scalar_outside_cost += 2 * vect_get_cost (cond_branch_taken);
2630 /* Add SLP costs. */
2631 slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
2632 FOR_EACH_VEC_ELT (slp_instance, slp_instances, i, instance)
2634 vec_outside_cost += SLP_INSTANCE_OUTSIDE_OF_LOOP_COST (instance);
2635 vec_inside_cost += SLP_INSTANCE_INSIDE_OF_LOOP_COST (instance);
2638 /* Calculate number of iterations required to make the vector version
2639 profitable, relative to the loop bodies only. The following condition
2640 must hold true:
2641 SIC * niters + SOC > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC
2642 where
2643 SIC = scalar iteration cost, VIC = vector iteration cost,
2644 VOC = vector outside cost, VF = vectorization factor,
2645 PL_ITERS = prologue iterations, EP_ITERS = epilogue iterations,
2646 SOC = scalar outside cost for run time cost model check. */
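/* As an illustration with made-up costs: SIC = 2, VIC = 4, VF = 4,
   VOC = 12, SOC = 0 and no peeling give
     niters > (VOC - SOC) * VF / (SIC * VF - VIC) = 48 / 4 = 12,
   so the vector loop starts paying off at 13 iterations. */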
2648 if ((scalar_single_iter_cost * vf) > vec_inside_cost)
2650 if (vec_outside_cost <= 0)
2651 min_profitable_iters = 1;
2652 else
2654 min_profitable_iters = ((vec_outside_cost - scalar_outside_cost) * vf
2655 - vec_inside_cost * peel_iters_prologue
2656 - vec_inside_cost * peel_iters_epilogue)
2657 / ((scalar_single_iter_cost * vf)
2658 - vec_inside_cost);
2660 if ((scalar_single_iter_cost * vf * min_profitable_iters)
2661 <= ((vec_inside_cost * min_profitable_iters)
2662 + ((vec_outside_cost - scalar_outside_cost) * vf)))
2663 min_profitable_iters++;
2666 /* vector version will never be profitable. */
2667 else
2669 if (vect_print_dump_info (REPORT_COST))
2670 fprintf (vect_dump, "cost model: the vector iteration cost = %d "
2671 "divided by the scalar iteration cost = %d "
2672 "is greater or equal to the vectorization factor = %d.",
2673 vec_inside_cost, scalar_single_iter_cost, vf);
2674 return -1;
2677 if (vect_print_dump_info (REPORT_COST))
2679 fprintf (vect_dump, "Cost model analysis: \n");
2680 fprintf (vect_dump, " Vector inside of loop cost: %d\n",
2681 vec_inside_cost);
2682 fprintf (vect_dump, " Vector outside of loop cost: %d\n",
2683 vec_outside_cost);
2684 fprintf (vect_dump, " Scalar iteration cost: %d\n",
2685 scalar_single_iter_cost);
2686 fprintf (vect_dump, " Scalar outside cost: %d\n", scalar_outside_cost);
2687 fprintf (vect_dump, " prologue iterations: %d\n",
2688 peel_iters_prologue);
2689 fprintf (vect_dump, " epilogue iterations: %d\n",
2690 peel_iters_epilogue);
2691 fprintf (vect_dump, " Calculated minimum iters for profitability: %d\n",
2692 min_profitable_iters);
2695 min_profitable_iters =
2696 min_profitable_iters < vf ? vf : min_profitable_iters;
2698 /* Because the condition we create is:
2699 if (niters <= min_profitable_iters)
2700 then skip the vectorized loop. */
2701 min_profitable_iters--;
2703 if (vect_print_dump_info (REPORT_COST))
2704 fprintf (vect_dump, " Profitability threshold = %d\n",
2705 min_profitable_iters);
2707 return min_profitable_iters;
2711 /* TODO: There is a close dependency between the vect_model_*_cost and the
2712 vectorizable_* functions. Improve the design to avoid maintenance issues. */
2714 /* Function vect_model_reduction_cost.
2716 Models cost for a reduction operation, including the vector ops
2717 generated within the strip-mine loop, the initial definition before
2718 the loop, and the epilogue code that must be generated. */
2720 static bool
2721 vect_model_reduction_cost (stmt_vec_info stmt_info, enum tree_code reduc_code,
2722 int ncopies)
2724 int outer_cost = 0;
2725 enum tree_code code;
2726 optab optab;
2727 tree vectype;
2728 gimple stmt, orig_stmt;
2729 tree reduction_op;
2730 enum machine_mode mode;
2731 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2732 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2735 /* Cost of reduction op inside loop. */
2736 STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info)
2737 += ncopies * vect_get_cost (vector_stmt);
2739 stmt = STMT_VINFO_STMT (stmt_info);
2741 switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
2743 case GIMPLE_SINGLE_RHS:
2744 gcc_assert (TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt)) == ternary_op);
2745 reduction_op = TREE_OPERAND (gimple_assign_rhs1 (stmt), 2);
2746 break;
2747 case GIMPLE_UNARY_RHS:
2748 reduction_op = gimple_assign_rhs1 (stmt);
2749 break;
2750 case GIMPLE_BINARY_RHS:
2751 reduction_op = gimple_assign_rhs2 (stmt);
2752 break;
2753 case GIMPLE_TERNARY_RHS:
2754 reduction_op = gimple_assign_rhs3 (stmt);
2755 break;
2756 default:
2757 gcc_unreachable ();
2760 vectype = get_vectype_for_scalar_type (TREE_TYPE (reduction_op));
2761 if (!vectype)
2763 if (vect_print_dump_info (REPORT_COST))
2765 fprintf (vect_dump, "unsupported data-type ");
2766 print_generic_expr (vect_dump, TREE_TYPE (reduction_op), TDF_SLIM);
2768 return false;
2771 mode = TYPE_MODE (vectype);
2772 orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
2774 if (!orig_stmt)
2775 orig_stmt = STMT_VINFO_STMT (stmt_info);
2777 code = gimple_assign_rhs_code (orig_stmt);
2779 /* Add in cost for initial definition. */
2780 outer_cost += vect_get_cost (scalar_to_vec);
2782 /* Determine cost of epilogue code.
2784 We have a reduction operator that will reduce the vector in one statement.
2785 Also requires scalar extract. */
2787 if (!nested_in_vect_loop_p (loop, orig_stmt))
2789 if (reduc_code != ERROR_MARK)
2790 outer_cost += vect_get_cost (vector_stmt)
2791 + vect_get_cost (vec_to_scalar);
2792 else
2794 int vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1);
2795 tree bitsize =
2796 TYPE_SIZE (TREE_TYPE (gimple_assign_lhs (orig_stmt)));
2797 int element_bitsize = tree_low_cst (bitsize, 1);
2798 int nelements = vec_size_in_bits / element_bitsize;
2800 optab = optab_for_tree_code (code, vectype, optab_default);
2802 /* We have a whole vector shift available. */
2803 if (VECTOR_MODE_P (mode)
2804 && optab_handler (optab, mode) != CODE_FOR_nothing
2805 && optab_handler (vec_shr_optab, mode) != CODE_FOR_nothing)
2806 /* Final reduction via vector shifts and the reduction operator. Also
2807 requires scalar extract. */
2808 outer_cost += ((exact_log2 (nelements) * 2)
2809 * vect_get_cost (vector_stmt)
2810 + vect_get_cost (vec_to_scalar));
2811 else
2812 /* Use extracts and reduction op for final reduction. For N elements,
2813 we have N extracts and N-1 reduction ops. */
2814 outer_cost += ((nelements + nelements - 1)
2815 * vect_get_cost (vector_stmt));
2819 STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info) = outer_cost;
2821 if (vect_print_dump_info (REPORT_COST))
2822 fprintf (vect_dump, "vect_model_reduction_cost: inside_cost = %d, "
2823 "outside_cost = %d .", STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info),
2824 STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info));
2826 return true;
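/* E.g., assuming a V4SI reduction with no direct reduc_code but with a
   whole-vector shift available: nelements = 4, so the epilogue costs
   exact_log2 (4) * 2 = 4 vector stmts plus one vec_to_scalar extract, on
   top of the scalar_to_vec for the initial definition. */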
2830 /* Function vect_model_induction_cost.
2832 Models cost for induction operations. */
2834 static void
2835 vect_model_induction_cost (stmt_vec_info stmt_info, int ncopies)
2837 /* loop cost for vec_loop. */
2838 STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info)
2839 = ncopies * vect_get_cost (vector_stmt);
2840 /* prologue cost for vec_init and vec_step. */
2841 STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info)
2842 = 2 * vect_get_cost (scalar_to_vec);
2844 if (vect_print_dump_info (REPORT_COST))
2845 fprintf (vect_dump, "vect_model_induction_cost: inside_cost = %d, "
2846 "outside_cost = %d .", STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info),
2847 STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info));
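/* E.g., with ncopies = 2 the induction costs two vector stmts inside the
   loop plus two scalar_to_vec conversions in the prologue (one for
   vec_init, one for vec_step). */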
2851 /* Function get_initial_def_for_induction
2853 Input:
2854 STMT - a stmt that performs an induction operation in the loop.
2855 IV_PHI - the initial value of the induction variable
2857 Output:
2858 Return a vector variable, initialized with the first VF values of
2859 the induction variable. E.g., for an iv with IV_PHI='X' and
2860 evolution S, for a vector of 4 units, we want to return:
2861 [X, X + S, X + 2*S, X + 3*S]. */
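/* E.g., X = 0, S = 3 and a vectorization factor of 4 give
   vec_init = [0, 3, 6, 9]; the loop then advances the induction by
   vec_step = [VF*S, VF*S, VF*S, VF*S] = [12, 12, 12, 12] (see below). */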
2863 static tree
2864 get_initial_def_for_induction (gimple iv_phi)
2866 stmt_vec_info stmt_vinfo = vinfo_for_stmt (iv_phi);
2867 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
2868 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2869 tree scalar_type;
2870 tree vectype;
2871 int nunits;
2872 edge pe = loop_preheader_edge (loop);
2873 struct loop *iv_loop;
2874 basic_block new_bb;
2875 tree vec, vec_init, vec_step, t;
2876 tree access_fn;
2877 tree new_var;
2878 tree new_name;
2879 gimple init_stmt, induction_phi, new_stmt;
2880 tree induc_def, vec_def, vec_dest;
2881 tree init_expr, step_expr;
2882 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2883 int i;
2884 bool ok;
2885 int ncopies;
2886 tree expr;
2887 stmt_vec_info phi_info = vinfo_for_stmt (iv_phi);
2888 bool nested_in_vect_loop = false;
2889 gimple_seq stmts = NULL;
2890 imm_use_iterator imm_iter;
2891 use_operand_p use_p;
2892 gimple exit_phi;
2893 edge latch_e;
2894 tree loop_arg;
2895 gimple_stmt_iterator si;
2896 basic_block bb = gimple_bb (iv_phi);
2897 tree stepvectype;
2898 tree resvectype;
2900 /* Is phi in an inner-loop, while vectorizing an enclosing outer-loop? */
2901 if (nested_in_vect_loop_p (loop, iv_phi))
2903 nested_in_vect_loop = true;
2904 iv_loop = loop->inner;
2906 else
2907 iv_loop = loop;
2908 gcc_assert (iv_loop == (gimple_bb (iv_phi))->loop_father);
2910 latch_e = loop_latch_edge (iv_loop);
2911 loop_arg = PHI_ARG_DEF_FROM_EDGE (iv_phi, latch_e);
2913 access_fn = analyze_scalar_evolution (iv_loop, PHI_RESULT (iv_phi));
2914 gcc_assert (access_fn);
2915 STRIP_NOPS (access_fn);
2916 ok = vect_is_simple_iv_evolution (iv_loop->num, access_fn,
2917 &init_expr, &step_expr);
2918 gcc_assert (ok);
2919 pe = loop_preheader_edge (iv_loop);
2921 scalar_type = TREE_TYPE (init_expr);
2922 vectype = get_vectype_for_scalar_type (scalar_type);
2923 resvectype = get_vectype_for_scalar_type (TREE_TYPE (PHI_RESULT (iv_phi)));
2924 gcc_assert (vectype);
2925 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2926 ncopies = vf / nunits;
2928 gcc_assert (phi_info);
2929 gcc_assert (ncopies >= 1);
2931 /* Find the first insertion point in the BB. */
2932 si = gsi_after_labels (bb);
2934 /* Create the vector that holds the initial_value of the induction. */
2935 if (nested_in_vect_loop)
2937 /* iv_loop is nested in the loop to be vectorized. init_expr had already
2938 been created during vectorization of previous stmts. We obtain it
2939 from the STMT_VINFO_VEC_STMT of the defining stmt. */
2940 tree iv_def = PHI_ARG_DEF_FROM_EDGE (iv_phi,
2941 loop_preheader_edge (iv_loop));
2942 vec_init = vect_get_vec_def_for_operand (iv_def, iv_phi, NULL);
2944 else
2946 /* iv_loop is the loop to be vectorized. Create:
2947 vec_init = [X, X+S, X+2*S, X+3*S] (S = step_expr, X = init_expr) */
2948 new_var = vect_get_new_vect_var (scalar_type, vect_scalar_var, "var_");
2949 add_referenced_var (new_var);
2951 new_name = force_gimple_operand (init_expr, &stmts, false, new_var);
2952 if (stmts)
2954 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
2955 gcc_assert (!new_bb);
2958 t = NULL_TREE;
2959 t = tree_cons (NULL_TREE, new_name, t);
2960 for (i = 1; i < nunits; i++)
2962 /* Create: new_name_i = new_name + step_expr */
2963 enum tree_code code = POINTER_TYPE_P (scalar_type)
2964 ? POINTER_PLUS_EXPR : PLUS_EXPR;
2965 init_stmt = gimple_build_assign_with_ops (code, new_var,
2966 new_name, step_expr);
2967 new_name = make_ssa_name (new_var, init_stmt);
2968 gimple_assign_set_lhs (init_stmt, new_name);
2970 new_bb = gsi_insert_on_edge_immediate (pe, init_stmt);
2971 gcc_assert (!new_bb);
2973 if (vect_print_dump_info (REPORT_DETAILS))
2975 fprintf (vect_dump, "created new init_stmt: ");
2976 print_gimple_stmt (vect_dump, init_stmt, 0, TDF_SLIM);
2978 t = tree_cons (NULL_TREE, new_name, t);
2980 /* Create a vector from [new_name_0, new_name_1, ..., new_name_nunits-1] */
2981 vec = build_constructor_from_list (vectype, nreverse (t));
2982 vec_init = vect_init_vector (iv_phi, vec, vectype, NULL);
2986 /* Create the vector that holds the step of the induction. */
2987 if (nested_in_vect_loop)
2988 /* iv_loop is nested in the loop to be vectorized. Generate:
2989 vec_step = [S, S, S, S] */
2990 new_name = step_expr;
2991 else
2993 /* iv_loop is the loop to be vectorized. Generate:
2994 vec_step = [VF*S, VF*S, VF*S, VF*S] */
2995 expr = build_int_cst (TREE_TYPE (step_expr), vf);
2996 new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
2997 expr, step_expr);
3000 t = unshare_expr (new_name);
3001 gcc_assert (CONSTANT_CLASS_P (new_name));
3002 stepvectype = get_vectype_for_scalar_type (TREE_TYPE (new_name));
3003 gcc_assert (stepvectype);
3004 vec = build_vector_from_val (stepvectype, t);
3005 vec_step = vect_init_vector (iv_phi, vec, stepvectype, NULL);
3008 /* Create the following def-use cycle:
3009 loop prolog:
3010 vec_init = ...
3011 vec_step = ...
3012 loop:
3013 vec_iv = PHI <vec_init, vec_loop>
3015 STMT
3017 vec_loop = vec_iv + vec_step; */
3019 /* Create the induction-phi that defines the induction-operand. */
3020 vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_");
3021 add_referenced_var (vec_dest);
3022 induction_phi = create_phi_node (vec_dest, iv_loop->header);
3023 set_vinfo_for_stmt (induction_phi,
3024 new_stmt_vec_info (induction_phi, loop_vinfo, NULL));
3025 induc_def = PHI_RESULT (induction_phi);
3027 /* Create the iv update inside the loop */
3028 new_stmt = gimple_build_assign_with_ops (PLUS_EXPR, vec_dest,
3029 induc_def, vec_step);
3030 vec_def = make_ssa_name (vec_dest, new_stmt);
3031 gimple_assign_set_lhs (new_stmt, vec_def);
3032 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
3033 set_vinfo_for_stmt (new_stmt, new_stmt_vec_info (new_stmt, loop_vinfo,
3034 NULL));
3036 /* Set the arguments of the phi node: */
3037 add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION);
3038 add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
3039 UNKNOWN_LOCATION);
3042 /* In case the vectorization factor (VF) is bigger than the number
3043 of elements that we can fit in a vectype (nunits), we have to generate
3044 more than one vector stmt - i.e - we need to "unroll" the
3045 vector stmt by a factor VF/nunits. For more details see documentation
3046 in vectorizable_operation. */
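/* E.g., VF = 8 with nunits = 4 gives ncopies = 2: the second copy is
   advanced by [nunits*S, nunits*S, nunits*S, nunits*S] = [4*S, 4*S,
   4*S, 4*S] relative to the first. */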
3048 if (ncopies > 1)
3050 stmt_vec_info prev_stmt_vinfo;
3051 /* FORNOW. This restriction should be relaxed. */
3052 gcc_assert (!nested_in_vect_loop);
3054 /* Create the vector that holds the step of the induction. */
3055 expr = build_int_cst (TREE_TYPE (step_expr), nunits);
3056 new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
3057 expr, step_expr);
3058 t = unshare_expr (new_name);
3059 gcc_assert (CONSTANT_CLASS_P (new_name));
3060 vec = build_vector_from_val (stepvectype, t);
3061 vec_step = vect_init_vector (iv_phi, vec, stepvectype, NULL);
3063 vec_def = induc_def;
3064 prev_stmt_vinfo = vinfo_for_stmt (induction_phi);
3065 for (i = 1; i < ncopies; i++)
3067 /* vec_i = vec_prev + vec_step */
3068 new_stmt = gimple_build_assign_with_ops (PLUS_EXPR, vec_dest,
3069 vec_def, vec_step);
3070 vec_def = make_ssa_name (vec_dest, new_stmt);
3071 gimple_assign_set_lhs (new_stmt, vec_def);
3073 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
3074 if (!useless_type_conversion_p (resvectype, vectype))
3076 new_stmt = gimple_build_assign_with_ops
3077 (VIEW_CONVERT_EXPR,
3078 vect_get_new_vect_var (resvectype, vect_simple_var,
3079 "vec_iv_"),
3080 build1 (VIEW_CONVERT_EXPR, resvectype,
3081 gimple_assign_lhs (new_stmt)), NULL_TREE);
3082 gimple_assign_set_lhs (new_stmt,
3083 make_ssa_name
3084 (gimple_assign_lhs (new_stmt), new_stmt));
3085 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
3087 set_vinfo_for_stmt (new_stmt,
3088 new_stmt_vec_info (new_stmt, loop_vinfo, NULL));
3089 STMT_VINFO_RELATED_STMT (prev_stmt_vinfo) = new_stmt;
3090 prev_stmt_vinfo = vinfo_for_stmt (new_stmt);
3094 if (nested_in_vect_loop)
3096 /* Find the loop-closed exit-phi of the induction, and record
3097 the final vector of induction results: */
3098 exit_phi = NULL;
3099 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
3101 if (!flow_bb_inside_loop_p (iv_loop, gimple_bb (USE_STMT (use_p))))
3103 exit_phi = USE_STMT (use_p);
3104 break;
3107 if (exit_phi)
3109 stmt_vec_info stmt_vinfo = vinfo_for_stmt (exit_phi);
3110 /* FORNOW. We do not yet support the case in which an inner-loop
3111 induction is used only outside the outer-loop. */
3112 gcc_assert (STMT_VINFO_RELEVANT_P (stmt_vinfo)
3113 && !STMT_VINFO_LIVE_P (stmt_vinfo));
3115 STMT_VINFO_VEC_STMT (stmt_vinfo) = new_stmt;
3116 if (vect_print_dump_info (REPORT_DETAILS))
3118 fprintf (vect_dump, "vector of inductions after inner-loop:");
3119 print_gimple_stmt (vect_dump, new_stmt, 0, TDF_SLIM);
3125 if (vect_print_dump_info (REPORT_DETAILS))
3127 fprintf (vect_dump, "transform induction: created def-use cycle: ");
3128 print_gimple_stmt (vect_dump, induction_phi, 0, TDF_SLIM);
3129 fprintf (vect_dump, "\n");
3130 print_gimple_stmt (vect_dump, SSA_NAME_DEF_STMT (vec_def), 0, TDF_SLIM);
3133 STMT_VINFO_VEC_STMT (phi_info) = induction_phi;
3134 if (!useless_type_conversion_p (resvectype, vectype))
3136 new_stmt = gimple_build_assign_with_ops
3137 (VIEW_CONVERT_EXPR,
3138 vect_get_new_vect_var (resvectype, vect_simple_var, "vec_iv_"),
3139 build1 (VIEW_CONVERT_EXPR, resvectype, induc_def), NULL_TREE);
3140 induc_def = make_ssa_name (gimple_assign_lhs (new_stmt), new_stmt);
3141 gimple_assign_set_lhs (new_stmt, induc_def);
3142 si = gsi_start_bb (bb);
3143 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
3144 set_vinfo_for_stmt (new_stmt,
3145 new_stmt_vec_info (new_stmt, loop_vinfo, NULL));
3146 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (new_stmt))
3147 = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (induction_phi));
3150 return induc_def;
3154 /* Function get_initial_def_for_reduction
3156 Input:
3157 STMT - a stmt that performs a reduction operation in the loop.
3158 INIT_VAL - the initial value of the reduction variable
3160 Output:
3161 ADJUSTMENT_DEF - a tree that holds a value to be added to the final result
3162 of the reduction (used for adjusting the epilog - see below).
3163 Return a vector variable, initialized according to the operation that STMT
3164 performs. This vector will be used as the initial value of the
3165 vector of partial results.
3167 Option1 (adjust in epilog): Initialize the vector as follows:
3168 add/bit or/xor: [0,0,...,0,0]
3169 mult/bit and: [1,1,...,1,1]
3170 min/max/cond_expr: [init_val,init_val,..,init_val,init_val]
3171 and when necessary (e.g. add/mult case) let the caller know
3172 that it needs to adjust the result by init_val.
3174 Option2: Initialize the vector as follows:
3175 add/bit or/xor: [init_val,0,0,...,0]
3176 mult/bit and: [init_val,1,1,...,1]
3177 min/max/cond_expr: [init_val,init_val,...,init_val]
3178 and no adjustments are needed.
3180 For example, for the following code:
3182 s = init_val;
3183 for (i=0;i<n;i++)
3184 s = s + a[i];
3186 STMT is 's = s + a[i]', and the reduction variable is 's'.
3187 For a vector of 4 units, we want to return either [0,0,0,init_val],
3188 or [0,0,0,0] and let the caller know that it needs to adjust
3189 the result at the end by 'init_val'.
3191 FORNOW, Option1 (the 'adjust in epilog' scheme) is used when
3192 ADJUSTMENT_DEF is not NULL, because its initialization vector is simpler
3193 (the same element in all entries); Option2 is used otherwise.
3195 A cost model should help decide between these two schemes. */
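/* E.g., for "s *= a[i]" with init_val = 5 and a vector of 4 units, Option1
   returns [1, 1, 1, 1] and reports an adjustment by 5, while Option2 would
   return [5, 1, 1, 1] with no adjustment. */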
3197 tree
3198 get_initial_def_for_reduction (gimple stmt, tree init_val,
3199 tree *adjustment_def)
3201 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
3202 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
3203 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
3204 tree scalar_type = TREE_TYPE (init_val);
3205 tree vectype = get_vectype_for_scalar_type (scalar_type);
3206 int nunits;
3207 enum tree_code code = gimple_assign_rhs_code (stmt);
3208 tree def_for_init;
3209 tree init_def;
3210 tree t = NULL_TREE;
3211 int i;
3212 bool nested_in_vect_loop = false;
3213 tree init_value;
3214 REAL_VALUE_TYPE real_init_val = dconst0;
3215 int int_init_val = 0;
3216 gimple def_stmt = NULL;
3218 gcc_assert (vectype);
3219 nunits = TYPE_VECTOR_SUBPARTS (vectype);
3221 gcc_assert (POINTER_TYPE_P (scalar_type) || INTEGRAL_TYPE_P (scalar_type)
3222 || SCALAR_FLOAT_TYPE_P (scalar_type));
3224 if (nested_in_vect_loop_p (loop, stmt))
3225 nested_in_vect_loop = true;
3226 else
3227 gcc_assert (loop == (gimple_bb (stmt))->loop_father);
3229 /* In case of double reduction we only create a vector variable to be put
3230 in the reduction phi node. The actual statement creation is done in
3231 vect_create_epilog_for_reduction. */
3232 if (adjustment_def && nested_in_vect_loop
3233 && TREE_CODE (init_val) == SSA_NAME
3234 && (def_stmt = SSA_NAME_DEF_STMT (init_val))
3235 && gimple_code (def_stmt) == GIMPLE_PHI
3236 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
3237 && vinfo_for_stmt (def_stmt)
3238 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
3239 == vect_double_reduction_def)
3241 *adjustment_def = NULL;
3242 return vect_create_destination_var (init_val, vectype);
3245 if (TREE_CONSTANT (init_val))
3247 if (SCALAR_FLOAT_TYPE_P (scalar_type))
3248 init_value = build_real (scalar_type, TREE_REAL_CST (init_val));
3249 else
3250 init_value = build_int_cst (scalar_type, TREE_INT_CST_LOW (init_val));
3252 else
3253 init_value = init_val;
3255 switch (code)
3257 case WIDEN_SUM_EXPR:
3258 case DOT_PROD_EXPR:
3259 case PLUS_EXPR:
3260 case MINUS_EXPR:
3261 case BIT_IOR_EXPR:
3262 case BIT_XOR_EXPR:
3263 case MULT_EXPR:
3264 case BIT_AND_EXPR:
3265 /* ADJUSTMENT_DEF is NULL when called from
3266 vect_create_epilog_for_reduction to vectorize double reduction. */
3267 if (adjustment_def)
3269 if (nested_in_vect_loop)
3270 *adjustment_def = vect_get_vec_def_for_operand (init_val, stmt,
3271 NULL);
3272 else
3273 *adjustment_def = init_val;
3276 if (code == MULT_EXPR)
3278 real_init_val = dconst1;
3279 int_init_val = 1;
3282 if (code == BIT_AND_EXPR)
3283 int_init_val = -1;
3285 if (SCALAR_FLOAT_TYPE_P (scalar_type))
3286 def_for_init = build_real (scalar_type, real_init_val);
3287 else
3288 def_for_init = build_int_cst (scalar_type, int_init_val);
3290 /* Fill all elements but the first with '0' or '1'. */
3291 for (i = nunits - 2; i >= 0; --i)
3292 t = tree_cons (NULL_TREE, def_for_init, t);
3294 /* Option1: the first element is '0' or '1' as well. */
3295 if (adjustment_def)
3297 t = tree_cons (NULL_TREE, def_for_init, t);
3298 init_def = build_vector (vectype, t);
3299 break;
3302 /* Option2: the first element is INIT_VAL. */
3303 t = tree_cons (NULL_TREE, init_value, t);
3304 if (TREE_CONSTANT (init_val))
3305 init_def = build_vector (vectype, t);
3306 else
3307 init_def = build_constructor_from_list (vectype, t);
3309 break;
3311 case MIN_EXPR:
3312 case MAX_EXPR:
3313 case COND_EXPR:
3314 if (adjustment_def)
3316 *adjustment_def = NULL_TREE;
3317 init_def = vect_get_vec_def_for_operand (init_val, stmt, NULL);
3318 break;
3321 init_def = build_vector_from_val (vectype, init_value);
3322 break;
3324 default:
3325 gcc_unreachable ();
3328 return init_def;
3332 /* Function vect_create_epilog_for_reduction
3334 Create code at the loop-epilog to finalize the result of a reduction
3335 computation.
3337 VECT_DEFS is list of vector of partial results, i.e., the lhs's of vector
3338 reduction statements.
3339 STMT is the scalar reduction stmt that is being vectorized.
3340 NCOPIES is > 1 in case the vectorization factor (VF) is bigger than the
3341 number of elements that we can fit in a vectype (nunits). In this case
3342 we have to generate more than one vector stmt - i.e - we need to "unroll"
3343 the vector stmt by a factor VF/nunits. For more details see documentation
3344 in vectorizable_operation.
3345 REDUC_CODE is the tree-code for the epilog reduction.
3346 REDUCTION_PHIS is a list of the phi-nodes that carry the reduction
3347 computation.
3348 REDUC_INDEX is the index of the operand in the right hand side of the
3349 statement that is defined by REDUCTION_PHI.
3350 DOUBLE_REDUC is TRUE if double reduction phi nodes should be handled.
3351 SLP_NODE is an SLP node containing a group of reduction statements. The
3352 first one in this group is STMT.
3354 This function:
3355 1. Creates the reduction def-use cycles: sets the arguments for
3356 REDUCTION_PHIS:
3357 The loop-entry argument is the vectorized initial-value of the reduction.
3358 The loop-latch argument is taken from VECT_DEFS - the vector of partial
3359 sums.
3360 2. "Reduces" each vector of partial results VECT_DEFS into a single result,
3361 by applying the operation specified by REDUC_CODE if available, or by
3362 other means (whole-vector shifts or a scalar loop).
3363 The function also creates a new phi node at the loop exit to preserve
3364 loop-closed form, as illustrated below.
3366 The flow at the entry to this function:
3368 loop:
3369 vec_def = phi <null, null> # REDUCTION_PHI
3370 VECT_DEF = vector_stmt # vectorized form of STMT
3371 s_loop = scalar_stmt # (scalar) STMT
3372 loop_exit:
3373 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
3374 use <s_out0>
3375 use <s_out0>
3377 The above is transformed by this function into:
3379 loop:
3380 vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI
3381 VECT_DEF = vector_stmt # vectorized form of STMT
3382 s_loop = scalar_stmt # (scalar) STMT
3383 loop_exit:
3384 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
3385 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
3386 v_out2 = reduce <v_out1>
3387 s_out3 = extract_field <v_out2, 0>
3388 s_out4 = adjust_result <s_out3>
3389 use <s_out4>
3390 use <s_out4>
3391 */
3393 static void
3394 vect_create_epilog_for_reduction (VEC (tree, heap) *vect_defs, gimple stmt,
3395 int ncopies, enum tree_code reduc_code,
3396 VEC (gimple, heap) *reduction_phis,
3397 int reduc_index, bool double_reduc,
3398 slp_tree slp_node)
3400 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3401 stmt_vec_info prev_phi_info;
3402 tree vectype;
3403 enum machine_mode mode;
3404 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3405 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo), *outer_loop = NULL;
3406 basic_block exit_bb;
3407 tree scalar_dest;
3408 tree scalar_type;
3409 gimple new_phi = NULL, phi;
3410 gimple_stmt_iterator exit_gsi;
3411 tree vec_dest;
3412 tree new_temp = NULL_TREE, new_dest, new_name, new_scalar_dest;
3413 gimple epilog_stmt = NULL;
3414 enum tree_code code = gimple_assign_rhs_code (stmt);
3415 gimple exit_phi;
3416 tree bitsize, bitpos;
3417 tree adjustment_def = NULL;
3418 tree vec_initial_def = NULL;
3419 tree reduction_op, expr, def;
3420 tree orig_name, scalar_result;
3421 imm_use_iterator imm_iter, phi_imm_iter;
3422 use_operand_p use_p, phi_use_p;
3423 bool extract_scalar_result = false;
3424 gimple use_stmt, orig_stmt, reduction_phi = NULL;
3425 bool nested_in_vect_loop = false;
3426 VEC (gimple, heap) *new_phis = NULL;
3427 enum vect_def_type dt = vect_unknown_def_type;
3428 int j, i;
3429 VEC (tree, heap) *scalar_results = NULL;
3430 unsigned int group_size = 1, k, ratio;
3431 VEC (tree, heap) *vec_initial_defs = NULL;
3432 VEC (gimple, heap) *phis;
3433 bool slp_reduc = false;
3434 tree new_phi_result;
3436 if (slp_node)
3437 group_size = VEC_length (gimple, SLP_TREE_SCALAR_STMTS (slp_node));
3439 if (nested_in_vect_loop_p (loop, stmt))
3441 outer_loop = loop;
3442 loop = loop->inner;
3443 nested_in_vect_loop = true;
3444 gcc_assert (!slp_node);
3447 switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
3449 case GIMPLE_SINGLE_RHS:
3450 gcc_assert (TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt))
3451 == ternary_op);
3452 reduction_op = TREE_OPERAND (gimple_assign_rhs1 (stmt), reduc_index);
3453 break;
3454 case GIMPLE_UNARY_RHS:
3455 reduction_op = gimple_assign_rhs1 (stmt);
3456 break;
3457 case GIMPLE_BINARY_RHS:
3458 reduction_op = reduc_index ?
3459 gimple_assign_rhs2 (stmt) : gimple_assign_rhs1 (stmt);
3460 break;
3461 case GIMPLE_TERNARY_RHS:
3462 reduction_op = gimple_op (stmt, reduc_index + 1);
3463 break;
3464 default:
3465 gcc_unreachable ();
3468 vectype = get_vectype_for_scalar_type (TREE_TYPE (reduction_op));
3469 gcc_assert (vectype);
3470 mode = TYPE_MODE (vectype);
3472 /* 1. Create the reduction def-use cycle:
3473 Set the arguments of REDUCTION_PHIS, i.e., transform
3475 loop:
3476 vec_def = phi <null, null> # REDUCTION_PHI
3477 VECT_DEF = vector_stmt # vectorized form of STMT
3480 into:
3482 loop:
3483 vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI
3484 VECT_DEF = vector_stmt # vectorized form of STMT
3487 (in case of SLP, do it for all the phis). */
3489 /* Get the loop-entry arguments. */
3490 if (slp_node)
3491 vect_get_slp_defs (reduction_op, NULL_TREE, slp_node, &vec_initial_defs,
3492 NULL, reduc_index);
3493 else
3495 vec_initial_defs = VEC_alloc (tree, heap, 1);
3496 /* For the case of reduction, vect_get_vec_def_for_operand returns
3497 the scalar def before the loop, that defines the initial value
3498 of the reduction variable. */
3499 vec_initial_def = vect_get_vec_def_for_operand (reduction_op, stmt,
3500 &adjustment_def);
3501 VEC_quick_push (tree, vec_initial_defs, vec_initial_def);
3504 /* Set phi nodes arguments. */
3505 FOR_EACH_VEC_ELT (gimple, reduction_phis, i, phi)
3507 tree vec_init_def = VEC_index (tree, vec_initial_defs, i);
3508 tree def = VEC_index (tree, vect_defs, i);
3509 for (j = 0; j < ncopies; j++)
3511 /* Set the loop-entry arg of the reduction-phi. */
3512 add_phi_arg (phi, vec_init_def, loop_preheader_edge (loop),
3513 UNKNOWN_LOCATION);
3515 /* Set the loop-latch arg for the reduction-phi. */
3516 if (j > 0)
3517 def = vect_get_vec_def_for_stmt_copy (vect_unknown_def_type, def);
3519 add_phi_arg (phi, def, loop_latch_edge (loop), UNKNOWN_LOCATION);
3521 if (vect_print_dump_info (REPORT_DETAILS))
3523 fprintf (vect_dump, "transform reduction: created def-use"
3524 " cycle: ");
3525 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
3526 fprintf (vect_dump, "\n");
3527 print_gimple_stmt (vect_dump, SSA_NAME_DEF_STMT (def), 0,
3528 TDF_SLIM);
3531 phi = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi));
3535 VEC_free (tree, heap, vec_initial_defs);
3537 /* 2. Create epilog code.
3538 The reduction epilog code operates across the elements of the vector
3539 of partial results computed by the vectorized loop.
3540 The reduction epilog code consists of:
3542 step 1: compute the scalar result in a vector (v_out2)
3543 step 2: extract the scalar result (s_out3) from the vector (v_out2)
3544 step 3: adjust the scalar result (s_out3) if needed.
3546 Step 1 can be accomplished using one of the following three schemes:
3547 (scheme 1) using reduc_code, if available.
3548 (scheme 2) using whole-vector shifts, if available.
3549 (scheme 3) using a scalar loop. In this case steps 1+2 above are
3550 combined.
3552 The overall epilog code looks like this:
3554 s_out0 = phi <s_loop> # original EXIT_PHI
3555 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
3556 v_out2 = reduce <v_out1> # step 1
3557 s_out3 = extract_field <v_out2, 0> # step 2
3558 s_out4 = adjust_result <s_out3> # step 3
3560 (step 3 is optional, and steps 1 and 2 may be combined).
3561 Lastly, the uses of s_out0 are replaced by s_out4. */
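/* For instance, reducing [a0, a1, a2, a3] with PLUS under scheme 2 takes
   log2 (4) = 2 shift-and-add rounds: shift the vector by two elements and
   add, then by one element and add, leaving a0+a1+a2+a3 in a single lane
   (which lane is endian-dependent); step 2 then extracts that lane. */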
3564 /* 2.1 Create new loop-exit-phis to preserve loop-closed form:
3565 v_out1 = phi <VECT_DEF>
3566 Store them in NEW_PHIS. */
3568 exit_bb = single_exit (loop)->dest;
3569 prev_phi_info = NULL;
3570 new_phis = VEC_alloc (gimple, heap, VEC_length (tree, vect_defs));
3571 FOR_EACH_VEC_ELT (tree, vect_defs, i, def)
3573 for (j = 0; j < ncopies; j++)
3575 phi = create_phi_node (SSA_NAME_VAR (def), exit_bb);
3576 set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, loop_vinfo, NULL));
3577 if (j == 0)
3578 VEC_quick_push (gimple, new_phis, phi);
3579 else
3581 def = vect_get_vec_def_for_stmt_copy (dt, def);
3582 STMT_VINFO_RELATED_STMT (prev_phi_info) = phi;
3585 SET_PHI_ARG_DEF (phi, single_exit (loop)->dest_idx, def);
3586 prev_phi_info = vinfo_for_stmt (phi);
3590 /* The epilogue is created for the outer-loop, i.e., for the loop being
3591 vectorized. */
3592 if (double_reduc)
3594 loop = outer_loop;
3595 exit_bb = single_exit (loop)->dest;
3598 exit_gsi = gsi_after_labels (exit_bb);
3600 /* 2.2 Get the relevant tree-code to use in the epilog for schemes 2,3
3601 (i.e. when reduc_code is not available) and in the final adjustment
3602 code (if needed). Also get the original scalar reduction variable as
3603 defined in the loop. In case STMT is a "pattern-stmt" (i.e. - it
3604 represents a reduction pattern), the tree-code and scalar-def are
3605 taken from the original stmt that the pattern-stmt (STMT) replaces.
3606 Otherwise (it is a regular reduction) - the tree-code and scalar-def
3607 are taken from STMT. */
3609 orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
3610 if (!orig_stmt)
3612 /* Regular reduction */
3613 orig_stmt = stmt;
3615 else
3617 /* Reduction pattern */
3618 stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt);
3619 gcc_assert (STMT_VINFO_IN_PATTERN_P (stmt_vinfo));
3620 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo) == stmt);
3623 code = gimple_assign_rhs_code (orig_stmt);
3624 /* For MINUS_EXPR the initial vector is [init_val,0,...,0]; therefore,
3625 partial results are added, not subtracted. */
3626 if (code == MINUS_EXPR)
3627 code = PLUS_EXPR;
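      /* Illustrative example: for s -= a[i] with VF lanes and initial
         vector [init, 0, ..., 0], lane 0 ends up holding init minus its
         share of the elements and every other lane holds minus its share,
         so adding the lanes yields init - (a[0] + ... + a[n-1]), the
         scalar result.  */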
3629 scalar_dest = gimple_assign_lhs (orig_stmt);
3630 scalar_type = TREE_TYPE (scalar_dest);
3631 scalar_results = VEC_alloc (tree, heap, group_size);
3632 new_scalar_dest = vect_create_destination_var (scalar_dest, NULL);
3633 bitsize = TYPE_SIZE (scalar_type);
3635 /* In case this is a reduction in an inner-loop while vectorizing an outer
3636 loop - we don't need to extract a single scalar result at the end of the
3637 inner-loop (unless it is a double reduction, i.e., the reduction is used
3638 outside the outer-loop). The final vector of partial results will be used
3639 in the vectorized outer-loop, or reduced to a scalar result at the end of
3640 the outer-loop. */
3641 if (nested_in_vect_loop && !double_reduc)
3642 goto vect_finalize_reduction;
3644 /* SLP reduction without reduction chain, e.g.,
3645 # a1 = phi <a2, a0>
3646 # b1 = phi <b2, b0>
3647 a2 = operation (a1)
3648 b2 = operation (b1) */
3649 slp_reduc = (slp_node && !GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)));
3651 /* In case of reduction chain, e.g.,
3652 # a1 = phi <a3, a0>
3653 a2 = operation (a1)
3654 a3 = operation (a2),
3656 we may end up with more than one vector result. Here we reduce them to
3657 one vector. */
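  /* In source terms (illustrative only): an SLP reduction corresponds to
     independent accumulators, e.g.,

       for (i = 0; i < n; i++)
         {
           s1 += a[i];
           s2 += b[i];
         }

     while a reduction chain accumulates into one variable several times
     per iteration, e.g.,

       for (i = 0; i < n; i++)
         {
           s += a[i];
           s += b[i];
         }
  */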
3658 if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
3660 tree first_vect = PHI_RESULT (VEC_index (gimple, new_phis, 0));
3661 tree tmp;
3663 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3664 for (k = 1; k < VEC_length (gimple, new_phis); k++)
3666 gimple next_phi = VEC_index (gimple, new_phis, k);
3667 tree second_vect = PHI_RESULT (next_phi);
3668 gimple new_vec_stmt;
3670 tmp = build2 (code, vectype, first_vect, second_vect);
3671 new_vec_stmt = gimple_build_assign (vec_dest, tmp);
3672 first_vect = make_ssa_name (vec_dest, new_vec_stmt);
3673 gimple_assign_set_lhs (new_vec_stmt, first_vect);
3674 gsi_insert_before (&exit_gsi, new_vec_stmt, GSI_SAME_STMT);
3677 new_phi_result = first_vect;
3679 else
3680 new_phi_result = PHI_RESULT (VEC_index (gimple, new_phis, 0));
3682 /* 2.3 Create the reduction code, using one of the three schemes described
3683 above. In SLP we simply need to extract all the elements from the
3684 vector (without reducing them), so we use scalar extraction (case 3).
3685 if (reduc_code != ERROR_MARK && !slp_reduc)
3687 tree tmp;
3689 /*** Case 1: Create:
3690 v_out2 = reduc_expr <v_out1> */
3692 if (vect_print_dump_info (REPORT_DETAILS))
3693 fprintf (vect_dump, "Reduce using direct vector reduction.");
3695 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3696 tmp = build1 (reduc_code, vectype, new_phi_result);
3697 epilog_stmt = gimple_build_assign (vec_dest, tmp);
3698 new_temp = make_ssa_name (vec_dest, epilog_stmt);
3699 gimple_assign_set_lhs (epilog_stmt, new_temp);
3700 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3702 extract_scalar_result = true;
3704 else
3706 enum tree_code shift_code = ERROR_MARK;
3707 bool have_whole_vector_shift = true;
3708 int bit_offset;
3709 int element_bitsize = tree_low_cst (bitsize, 1);
3710 int vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1);
3711 tree vec_temp;
3713 if (optab_handler (vec_shr_optab, mode) != CODE_FOR_nothing)
3714 shift_code = VEC_RSHIFT_EXPR;
3715 else
3716 have_whole_vector_shift = false;
3718 /* Regardless of whether we have a whole vector shift, if we're
3719 emulating the operation via tree-vect-generic, we don't want
3720 to use it. Only the first round of the reduction is likely
3721 to still be profitable via emulation. */
3722 /* ??? It might be better to emit a reduction tree code here, so that
3723 tree-vect-generic can expand the first round via bit tricks. */
3724 if (!VECTOR_MODE_P (mode))
3725 have_whole_vector_shift = false;
3726 else
3728 optab optab = optab_for_tree_code (code, vectype, optab_default);
3729 if (optab_handler (optab, mode) == CODE_FOR_nothing)
3730 have_whole_vector_shift = false;
3733 if (have_whole_vector_shift && !slp_reduc)
3735 /*** Case 2: Create:
3736 for (offset = VS/2; offset >= element_size; offset/=2)
3738 Create: va' = vec_shift <va, offset>
3739 Create: va = vop <va, va'>
3740 } */
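	  /* Scalar sketch of the halving scheme (illustrative only; the code
	     below does this on whole vectors, with op being the reduction
	     operation):

	       for (half = NUNITS / 2; half >= 1; half /= 2)
		 for (k = 0; k < half; k++)
		   partial[k] = op (partial[k], partial[k + half]);

	     after log2 (NUNITS) rounds the result is in lane 0.  */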
3742 if (vect_print_dump_info (REPORT_DETAILS))
3743 fprintf (vect_dump, "Reduce using vector shifts");
3745 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3746 new_temp = new_phi_result;
3747 for (bit_offset = vec_size_in_bits/2;
3748 bit_offset >= element_bitsize;
3749 bit_offset /= 2)
3751 tree bitpos = size_int (bit_offset);
3753 epilog_stmt = gimple_build_assign_with_ops (shift_code,
3754 vec_dest, new_temp, bitpos);
3755 new_name = make_ssa_name (vec_dest, epilog_stmt);
3756 gimple_assign_set_lhs (epilog_stmt, new_name);
3757 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3759 epilog_stmt = gimple_build_assign_with_ops (code, vec_dest,
3760 new_name, new_temp);
3761 new_temp = make_ssa_name (vec_dest, epilog_stmt);
3762 gimple_assign_set_lhs (epilog_stmt, new_temp);
3763 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3766 extract_scalar_result = true;
3768 else
3770 tree rhs;
3772 /*** Case 3: Create:
3773 s = extract_field <v_out2, 0>
3774 for (offset = element_size;
3775 offset < vector_size;
3776 offset += element_size)
3778 Create: s' = extract_field <v_out2, offset>
3779 Create: s = op <s, s'> // For non SLP cases
3780 } */
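	  /* Scalar sketch of this scheme (illustrative only):

	       s = partial[0];
	       for (k = 1; k < NUNITS; k++)
		 s = op (s, partial[k]);

	     i.e., NUNITS - 1 scalar operations instead of log2 (NUNITS)
	     whole-vector shifts; in SLP the combining op is skipped and the
	     extracted values are kept separately.  */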
3782 if (vect_print_dump_info (REPORT_DETAILS))
3783 fprintf (vect_dump, "Reduce using scalar code. ");
3785 vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1);
3786 FOR_EACH_VEC_ELT (gimple, new_phis, i, new_phi)
3788 vec_temp = PHI_RESULT (new_phi);
3789 rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp, bitsize,
3790 bitsize_zero_node);
3791 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
3792 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
3793 gimple_assign_set_lhs (epilog_stmt, new_temp);
3794 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3796 /* In SLP we don't need to apply the reduction operation, so we just
3797 collect s' values in SCALAR_RESULTS. */
3798 if (slp_reduc)
3799 VEC_safe_push (tree, heap, scalar_results, new_temp);
3801 for (bit_offset = element_bitsize;
3802 bit_offset < vec_size_in_bits;
3803 bit_offset += element_bitsize)
3805 tree bitpos = bitsize_int (bit_offset);
3806 tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp,
3807 bitsize, bitpos);
3809 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
3810 new_name = make_ssa_name (new_scalar_dest, epilog_stmt);
3811 gimple_assign_set_lhs (epilog_stmt, new_name);
3812 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3814 if (slp_reduc)
3816 /* In SLP we don't need to apply the reduction operation, so
3817 we just collect s' values in SCALAR_RESULTS. */
3818 new_temp = new_name;
3819 VEC_safe_push (tree, heap, scalar_results, new_name);
3821 else
3823 epilog_stmt = gimple_build_assign_with_ops (code,
3824 new_scalar_dest, new_name, new_temp);
3825 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
3826 gimple_assign_set_lhs (epilog_stmt, new_temp);
3827 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3832 /* The only case where we need to reduce scalar results in SLP is
3833 unrolling. If the size of SCALAR_RESULTS is greater than
3834 GROUP_SIZE, we reduce them by combining elements modulo
3835 GROUP_SIZE. */
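	  /* E.g. (sketch): with GROUP_SIZE == 2 and SCALAR_RESULTS ==
	     {s0, s1, s2, s3} after unrolling, the loop below computes
	     s0' = op (s0, s2) and s1' = op (s1, s3), leaving GROUP_SIZE
	     combined results.  */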
3836 if (slp_reduc)
3838 tree res, first_res, new_res;
3839 gimple new_stmt;
3841 /* Reduce multiple scalar results in case of SLP unrolling. */
3842 for (j = group_size; VEC_iterate (tree, scalar_results, j, res);
3843 j++)
3845 first_res = VEC_index (tree, scalar_results, j % group_size);
3846 new_stmt = gimple_build_assign_with_ops (code,
3847 new_scalar_dest, first_res, res);
3848 new_res = make_ssa_name (new_scalar_dest, new_stmt);
3849 gimple_assign_set_lhs (new_stmt, new_res);
3850 gsi_insert_before (&exit_gsi, new_stmt, GSI_SAME_STMT);
3851 VEC_replace (tree, scalar_results, j % group_size, new_res);
3854 else
3855 /* Not SLP - we have one scalar to keep in SCALAR_RESULTS. */
3856 VEC_safe_push (tree, heap, scalar_results, new_temp);
3858 extract_scalar_result = false;
3862 /* 2.4 Extract the final scalar result. Create:
3863 s_out3 = extract_field <v_out2, bitpos> */
3865 if (extract_scalar_result)
3867 tree rhs;
3869 if (vect_print_dump_info (REPORT_DETAILS))
3870 fprintf (vect_dump, "extract scalar result");
3872 if (BYTES_BIG_ENDIAN)
3873 bitpos = size_binop (MULT_EXPR,
3874 bitsize_int (TYPE_VECTOR_SUBPARTS (vectype) - 1),
3875 TYPE_SIZE (scalar_type));
3876 else
3877 bitpos = bitsize_zero_node;
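	  /* E.g., for a V4SI vector on a big-endian target the result field
	     is at bit offset (4 - 1) * 32, on a little-endian target at bit
	     offset 0.  */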
3879 rhs = build3 (BIT_FIELD_REF, scalar_type, new_temp, bitsize, bitpos);
3880 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
3881 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
3882 gimple_assign_set_lhs (epilog_stmt, new_temp);
3883 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3884 VEC_safe_push (tree, heap, scalar_results, new_temp);
3887 vect_finalize_reduction:
3889 if (double_reduc)
3890 loop = loop->inner;
3892 /* 2.5 Adjust the final result by the initial value of the reduction
3893 variable. (When such adjustment is not needed,
3894 'adjustment_def' is zero). For example, if code is PLUS we create:
3895 new_temp = loop_exit_def + adjustment_def */
3897 if (adjustment_def)
3899 gcc_assert (!slp_reduc);
3900 if (nested_in_vect_loop)
3902 new_phi = VEC_index (gimple, new_phis, 0);
3903 gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) == VECTOR_TYPE);
3904 expr = build2 (code, vectype, PHI_RESULT (new_phi), adjustment_def);
3905 new_dest = vect_create_destination_var (scalar_dest, vectype);
3907 else
3909 new_temp = VEC_index (tree, scalar_results, 0);
3910 gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) != VECTOR_TYPE);
3911 expr = build2 (code, scalar_type, new_temp, adjustment_def);
3912 new_dest = vect_create_destination_var (scalar_dest, scalar_type);
3915 epilog_stmt = gimple_build_assign (new_dest, expr);
3916 new_temp = make_ssa_name (new_dest, epilog_stmt);
3917 gimple_assign_set_lhs (epilog_stmt, new_temp);
3918 SSA_NAME_DEF_STMT (new_temp) = epilog_stmt;
3919 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3920 if (nested_in_vect_loop)
3922 set_vinfo_for_stmt (epilog_stmt,
3923 new_stmt_vec_info (epilog_stmt, loop_vinfo,
3924 NULL));
3925 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (epilog_stmt)) =
3926 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (new_phi));
3928 if (!double_reduc)
3929 VEC_quick_push (tree, scalar_results, new_temp);
3930 else
3931 VEC_replace (tree, scalar_results, 0, new_temp);
3933 else
3934 VEC_replace (tree, scalar_results, 0, new_temp);
3936 VEC_replace (gimple, new_phis, 0, epilog_stmt);
3939 /* 2.6 Handle the loop-exit phis. Replace the uses of scalar loop-exit
3940 phis with new adjusted scalar results, i.e., replace use <s_out0>
3941 with use <s_out4>.
3943 Transform:
3944 loop_exit:
3945 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
3946 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
3947 v_out2 = reduce <v_out1>
3948 s_out3 = extract_field <v_out2, 0>
3949 s_out4 = adjust_result <s_out3>
3950 use <s_out0>
3951 use <s_out0>
3953 into:
3955 loop_exit:
3956 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
3957 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
3958 v_out2 = reduce <v_out1>
3959 s_out3 = extract_field <v_out2, 0>
3960 s_out4 = adjust_result <s_out3>
3961 use <s_out4>
3962 use <s_out4> */
3965 /* In an SLP reduction chain we reduce the vector results into one vector
3966 if necessary, hence we set GROUP_SIZE to 1 here. SCALAR_DEST is the LHS of
3967 the last stmt in the reduction chain, since we are looking for the loop
3968 exit phi node. */
3969 if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
3971 scalar_dest = gimple_assign_lhs (VEC_index (gimple,
3972 SLP_TREE_SCALAR_STMTS (slp_node),
3973 group_size - 1));
3974 group_size = 1;
3977 /* In SLP we may have several statements in NEW_PHIS and REDUCTION_PHIS (in
3978 case GROUP_SIZE is greater than the vectorization factor). Therefore, we
3979 need to match SCALAR_RESULTS with corresponding statements. The first
3980 (GROUP_SIZE / number of new vector stmts) scalar results correspond to
3981 the first vector stmt, etc.
3982 (RATIO is equal to (GROUP_SIZE / number of new vector stmts)). */
3983 if (group_size > VEC_length (gimple, new_phis))
3985 ratio = group_size / VEC_length (gimple, new_phis);
3986 gcc_assert (!(group_size % VEC_length (gimple, new_phis)));
3988 else
3989 ratio = 1;
3991 for (k = 0; k < group_size; k++)
3993 if (k % ratio == 0)
3995 epilog_stmt = VEC_index (gimple, new_phis, k / ratio);
3996 reduction_phi = VEC_index (gimple, reduction_phis, k / ratio);
3999 if (slp_reduc)
4001 gimple current_stmt = VEC_index (gimple,
4002 SLP_TREE_SCALAR_STMTS (slp_node), k);
4004 orig_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (current_stmt));
4005 /* SLP statements can't participate in patterns. */
4006 gcc_assert (!orig_stmt);
4007 scalar_dest = gimple_assign_lhs (current_stmt);
4010 phis = VEC_alloc (gimple, heap, 3);
4011 /* Find the loop-closed-use at the loop exit of the original scalar
4012 result. (The reduction result is expected to have two immediate uses -
4013 one at the latch block, and one at the loop exit). */
4014 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
4015 if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p))))
4016 VEC_safe_push (gimple, heap, phis, USE_STMT (use_p));
4018 /* We expect to have found an exit_phi because of loop-closed-ssa
4019 form. */
4020 gcc_assert (!VEC_empty (gimple, phis));
4022 FOR_EACH_VEC_ELT (gimple, phis, i, exit_phi)
4024 if (outer_loop)
4026 stmt_vec_info exit_phi_vinfo = vinfo_for_stmt (exit_phi);
4027 gimple vect_phi;
4029 /* FORNOW. Currently not supporting the case that an inner-loop
4030 reduction is not used in the outer-loop (but only outside the
4031 outer-loop), unless it is a double reduction. */
4032 gcc_assert ((STMT_VINFO_RELEVANT_P (exit_phi_vinfo)
4033 && !STMT_VINFO_LIVE_P (exit_phi_vinfo))
4034 || double_reduc);
4036 STMT_VINFO_VEC_STMT (exit_phi_vinfo) = epilog_stmt;
4037 if (!double_reduc
4038 || STMT_VINFO_DEF_TYPE (exit_phi_vinfo)
4039 != vect_double_reduction_def)
4040 continue;
4042 /* Handle double reduction:
4044 stmt1: s1 = phi <s0, s2> - double reduction phi (outer loop)
4045 stmt2: s3 = phi <s1, s4> - (regular) reduc phi (inner loop)
4046 stmt3: s4 = use (s3) - (regular) reduc stmt (inner loop)
4047 stmt4: s2 = phi <s4> - double reduction stmt (outer loop)
4049 At that point the regular reduction (stmt2 and stmt3) is
4050 already vectorized, as well as the exit phi node, stmt4.
4051 Here we vectorize the phi node of double reduction, stmt1, and
4052 update all relevant statements. */
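	      /* In source form a double reduction looks like (sketch):

		   s = init;
		   for (i = 0; i < n; i++)	outer loop
		     for (j = 0; j < m; j++)	vectorized inner loop
		       s += a[i][j];
		   use (s);			only after the outer loop
	      */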
4054 /* Go through all the uses of s2 to find double reduction phi
4055 node, i.e., stmt1 above. */
4056 orig_name = PHI_RESULT (exit_phi);
4057 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
4059 stmt_vec_info use_stmt_vinfo = vinfo_for_stmt (use_stmt);
4060 stmt_vec_info new_phi_vinfo;
4061 tree vect_phi_init, preheader_arg, vect_phi_res, init_def;
4062 basic_block bb = gimple_bb (use_stmt);
4063 gimple use;
4065 /* Check that USE_STMT is really a double reduction phi
4066 node. */
4067 if (gimple_code (use_stmt) != GIMPLE_PHI
4068 || gimple_phi_num_args (use_stmt) != 2
4069 || !use_stmt_vinfo
4070 || STMT_VINFO_DEF_TYPE (use_stmt_vinfo)
4071 != vect_double_reduction_def
4072 || bb->loop_father != outer_loop)
4073 continue;
4075 /* Create vector phi node for double reduction:
4076 vs1 = phi <vs0, vs2>
4077 vs1 was created previously in this function by a call to
4078 vect_get_vec_def_for_operand and is stored in
4079 vec_initial_def;
4080 vs2 is defined by EPILOG_STMT, the vectorized EXIT_PHI;
4081 vs0 is created here. */
4083 /* Create vector phi node. */
4084 vect_phi = create_phi_node (vec_initial_def, bb);
4085 new_phi_vinfo = new_stmt_vec_info (vect_phi,
4086 loop_vec_info_for_loop (outer_loop), NULL);
4087 set_vinfo_for_stmt (vect_phi, new_phi_vinfo);
4089 /* Create vs0 - initial def of the double reduction phi. */
4090 preheader_arg = PHI_ARG_DEF_FROM_EDGE (use_stmt,
4091 loop_preheader_edge (outer_loop));
4092 init_def = get_initial_def_for_reduction (stmt,
4093 preheader_arg, NULL);
4094 vect_phi_init = vect_init_vector (use_stmt, init_def,
4095 vectype, NULL);
4097 /* Update phi node arguments with vs0 and vs2. */
4098 add_phi_arg (vect_phi, vect_phi_init,
4099 loop_preheader_edge (outer_loop),
4100 UNKNOWN_LOCATION);
4101 add_phi_arg (vect_phi, PHI_RESULT (epilog_stmt),
4102 loop_latch_edge (outer_loop), UNKNOWN_LOCATION);
4103 if (vect_print_dump_info (REPORT_DETAILS))
4105 fprintf (vect_dump, "created double reduction phi "
4106 "node: ");
4107 print_gimple_stmt (vect_dump, vect_phi, 0, TDF_SLIM);
4110 vect_phi_res = PHI_RESULT (vect_phi);
4112 /* Replace the use, i.e., set the correct vs1 in the regular
4113 reduction phi node. FORNOW, NCOPIES is always 1, so the
4114 loop is redundant. */
4115 use = reduction_phi;
4116 for (j = 0; j < ncopies; j++)
4118 edge pr_edge = loop_preheader_edge (loop);
4119 SET_PHI_ARG_DEF (use, pr_edge->dest_idx, vect_phi_res);
4120 use = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (use));
4126 VEC_free (gimple, heap, phis);
4127 if (nested_in_vect_loop)
4129 if (double_reduc)
4130 loop = outer_loop;
4131 else
4132 continue;
4135 phis = VEC_alloc (gimple, heap, 3);
4136 /* Find the loop-closed-use at the loop exit of the original scalar
4137 result. (The reduction result is expected to have two immediate uses,
4138 one at the latch block, and one at the loop exit). For double
4139 reductions we are looking for exit phis of the outer loop. */
4140 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
4142 if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p))))
4143 VEC_safe_push (gimple, heap, phis, USE_STMT (use_p));
4144 else
4146 if (double_reduc && gimple_code (USE_STMT (use_p)) == GIMPLE_PHI)
4148 tree phi_res = PHI_RESULT (USE_STMT (use_p));
4150 FOR_EACH_IMM_USE_FAST (phi_use_p, phi_imm_iter, phi_res)
4152 if (!flow_bb_inside_loop_p (loop,
4153 gimple_bb (USE_STMT (phi_use_p))))
4154 VEC_safe_push (gimple, heap, phis,
4155 USE_STMT (phi_use_p));
4161 FOR_EACH_VEC_ELT (gimple, phis, i, exit_phi)
4163 /* Replace the uses: */
4164 orig_name = PHI_RESULT (exit_phi);
4165 scalar_result = VEC_index (tree, scalar_results, k);
4166 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
4167 FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
4168 SET_USE (use_p, scalar_result);
4171 VEC_free (gimple, heap, phis);
4174 VEC_free (tree, heap, scalar_results);
4175 VEC_free (gimple, heap, new_phis);
4179 /* Function vectorizable_reduction.
4181 Check if STMT performs a reduction operation that can be vectorized.
4182 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4183 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
4184 Return FALSE if not a vectorizable STMT, TRUE otherwise.
4186 This function also handles reduction idioms (patterns) that have been
4187 recognized in advance during vect_pattern_recog. In this case, STMT may be
4188 of this form:
4189 X = pattern_expr (arg0, arg1, ..., X)
4190 and its STMT_VINFO_RELATED_STMT points to the last stmt in the original
4191 sequence that had been detected and replaced by the pattern-stmt (STMT).
4193 In some cases of reduction patterns, the type of the reduction variable X is
4194 different than the type of the other arguments of STMT.
4195 In such cases, the vectype that is used when transforming STMT into a vector
4196 stmt is different than the vectype that is used to determine the
4197 vectorization factor, because it consists of a different number of elements
4198 than the actual number of elements that are being operated upon in parallel.
4200 For example, consider an accumulation of shorts into an int accumulator.
4201 On some targets it's possible to vectorize this pattern operating on 8
4202 shorts at a time (hence, the vectype for purposes of determining the
4203 vectorization factor should be V8HI); on the other hand, the vectype that
4204 is used to create the vector form is actually V4SI (the type of the result).
4206 Upon entry to this function, STMT_VINFO_VECTYPE records the vectype that
4207 indicates the actual level of parallelism (V8HI in the example), so
4208 that the right vectorization factor is derived. This vectype
4209 corresponds to the type of arguments to the reduction stmt, and should *NOT*
4210 be used to create the vectorized stmt. The right vectype for the vectorized
4211 stmt is obtained from the type of the result X:
4212 get_vectype_for_scalar_type (TREE_TYPE (X))
4214 This means that, contrary to "regular" reductions (or "regular" stmts in
4215 general), the following equation:
4216 STMT_VINFO_VECTYPE == get_vectype_for_scalar_type (TREE_TYPE (X))
4217 does *NOT* necessarily hold for reduction patterns. */
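/* For example (an illustrative sketch of the shorts-into-int case):

     short a[N];  int sum = 0;
     for (i = 0; i < N; i++)
       sum += a[i];

   may be recognized as a widening summation.  Then STMT_VINFO_VECTYPE is
   V8HI (eight shorts determine the vectorization factor), while the
   vectorized stmt produces a V4SI value, obtained from
   get_vectype_for_scalar_type (TREE_TYPE (X)) with X of type int.  */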
4219 bool
4220 vectorizable_reduction (gimple stmt, gimple_stmt_iterator *gsi,
4221 gimple *vec_stmt, slp_tree slp_node)
4223 tree vec_dest;
4224 tree scalar_dest;
4225 tree loop_vec_def0 = NULL_TREE, loop_vec_def1 = NULL_TREE;
4226 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4227 tree vectype_out = STMT_VINFO_VECTYPE (stmt_info);
4228 tree vectype_in = NULL_TREE;
4229 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4230 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
4231 enum tree_code code, orig_code, epilog_reduc_code;
4232 enum machine_mode vec_mode;
4233 int op_type;
4234 optab optab, reduc_optab;
4235 tree new_temp = NULL_TREE;
4236 tree def;
4237 gimple def_stmt;
4238 enum vect_def_type dt;
4239 gimple new_phi = NULL;
4240 tree scalar_type;
4241 bool is_simple_use;
4242 gimple orig_stmt;
4243 stmt_vec_info orig_stmt_info;
4244 tree expr = NULL_TREE;
4245 int i;
4246 int ncopies;
4247 int epilog_copies;
4248 stmt_vec_info prev_stmt_info, prev_phi_info;
4249 bool single_defuse_cycle = false;
4250 tree reduc_def = NULL_TREE;
4251 gimple new_stmt = NULL;
4252 int j;
4253 tree ops[3];
4254 bool nested_cycle = false, found_nested_cycle_def = false;
4255 gimple reduc_def_stmt = NULL;
4256 /* The default is that the reduction variable is the last operand in the statement. */
4257 int reduc_index = 2;
4258 bool double_reduc = false, dummy;
4259 basic_block def_bb;
4260 struct loop * def_stmt_loop, *outer_loop = NULL;
4261 tree def_arg;
4262 gimple def_arg_stmt;
4263 VEC (tree, heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL, *vect_defs = NULL;
4264 VEC (gimple, heap) *phis = NULL;
4265 int vec_num;
4266 tree def0, def1, tem;
4268 /* In case of reduction chain we switch to the first stmt in the chain, but
4269 we don't update STMT_INFO, since only the last stmt is marked as reduction
4270 and has reduction properties. */
4271 if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
4272 stmt = GROUP_FIRST_ELEMENT (stmt_info);
4274 if (nested_in_vect_loop_p (loop, stmt))
4276 outer_loop = loop;
4277 loop = loop->inner;
4278 nested_cycle = true;
4281 /* 1. Is vectorizable reduction? */
4282 /* Not supportable if the reduction variable is used in the loop, unless
4283 it's a reduction chain. */
4284 if (STMT_VINFO_RELEVANT (stmt_info) > vect_used_in_outer
4285 && !GROUP_FIRST_ELEMENT (stmt_info))
4286 return false;
4288 /* Reductions that are not used even in an enclosing outer-loop
4289 are expected to be "live" (used out of the loop). */
4290 if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_scope
4291 && !STMT_VINFO_LIVE_P (stmt_info))
4292 return false;
4294 /* Make sure it was already recognized as a reduction computation. */
4295 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_reduction_def
4296 && STMT_VINFO_DEF_TYPE (stmt_info) != vect_nested_cycle)
4297 return false;
4299 /* 2. Has this been recognized as a reduction pattern?
4301 Check if STMT represents a pattern that has been recognized
4302 in earlier analysis stages. For stmts that represent a pattern,
4303 the STMT_VINFO_RELATED_STMT field records the last stmt in
4304 the original sequence that constitutes the pattern. */
4306 orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
4307 if (orig_stmt)
4309 orig_stmt_info = vinfo_for_stmt (orig_stmt);
4310 gcc_assert (STMT_VINFO_RELATED_STMT (orig_stmt_info) == stmt);
4311 gcc_assert (STMT_VINFO_IN_PATTERN_P (orig_stmt_info));
4312 gcc_assert (!STMT_VINFO_IN_PATTERN_P (stmt_info));
4315 /* 3. Check the operands of the operation. The first operands are defined
4316 inside the loop body. The last operand is the reduction variable,
4317 which is defined by the loop-header-phi. */
4319 gcc_assert (is_gimple_assign (stmt));
4321 /* Flatten RHS. */
4322 switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
4324 case GIMPLE_SINGLE_RHS:
4325 op_type = TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt));
4326 if (op_type == ternary_op)
4328 tree rhs = gimple_assign_rhs1 (stmt);
4329 ops[0] = TREE_OPERAND (rhs, 0);
4330 ops[1] = TREE_OPERAND (rhs, 1);
4331 ops[2] = TREE_OPERAND (rhs, 2);
4332 code = TREE_CODE (rhs);
4334 else
4335 return false;
4336 break;
4338 case GIMPLE_BINARY_RHS:
4339 code = gimple_assign_rhs_code (stmt);
4340 op_type = TREE_CODE_LENGTH (code);
4341 gcc_assert (op_type == binary_op);
4342 ops[0] = gimple_assign_rhs1 (stmt);
4343 ops[1] = gimple_assign_rhs2 (stmt);
4344 break;
4346 case GIMPLE_TERNARY_RHS:
4347 code = gimple_assign_rhs_code (stmt);
4348 op_type = TREE_CODE_LENGTH (code);
4349 gcc_assert (op_type == ternary_op);
4350 ops[0] = gimple_assign_rhs1 (stmt);
4351 ops[1] = gimple_assign_rhs2 (stmt);
4352 ops[2] = gimple_assign_rhs3 (stmt);
4353 break;
4355 case GIMPLE_UNARY_RHS:
4356 return false;
4358 default:
4359 gcc_unreachable ();
4362 scalar_dest = gimple_assign_lhs (stmt);
4363 scalar_type = TREE_TYPE (scalar_dest);
4364 if (!POINTER_TYPE_P (scalar_type) && !INTEGRAL_TYPE_P (scalar_type)
4365 && !SCALAR_FLOAT_TYPE_P (scalar_type))
4366 return false;
4368 /* All uses but the last are expected to be defined in the loop.
4369 The last use is the reduction variable. In case of nested cycle this
4370 assumption is not true: we use reduc_index to record the index of the
4371 reduction variable. */
4372 for (i = 0; i < op_type-1; i++)
4374 /* The condition of COND_EXPR is checked in vectorizable_condition(). */
4375 if (i == 0 && code == COND_EXPR)
4376 continue;
4378 is_simple_use = vect_is_simple_use_1 (ops[i], loop_vinfo, NULL,
4379 &def_stmt, &def, &dt, &tem);
4380 if (!vectype_in)
4381 vectype_in = tem;
4382 gcc_assert (is_simple_use);
4384 if (dt != vect_internal_def
4385 && dt != vect_external_def
4386 && dt != vect_constant_def
4387 && dt != vect_induction_def
4388 && !(dt == vect_nested_cycle && nested_cycle))
4389 return false;
4391 if (dt == vect_nested_cycle)
4393 found_nested_cycle_def = true;
4394 reduc_def_stmt = def_stmt;
4395 reduc_index = i;
4399 is_simple_use = vect_is_simple_use_1 (ops[i], loop_vinfo, NULL, &def_stmt,
4400 &def, &dt, &tem);
4401 if (!vectype_in)
4402 vectype_in = tem;
4403 gcc_assert (is_simple_use);
4404 gcc_assert (dt == vect_reduction_def
4405 || dt == vect_nested_cycle
4406 || ((dt == vect_internal_def || dt == vect_external_def
4407 || dt == vect_constant_def || dt == vect_induction_def)
4408 && nested_cycle && found_nested_cycle_def));
4409 if (!found_nested_cycle_def)
4410 reduc_def_stmt = def_stmt;
4412 gcc_assert (gimple_code (reduc_def_stmt) == GIMPLE_PHI);
4413 if (orig_stmt)
4414 gcc_assert (orig_stmt == vect_is_simple_reduction (loop_vinfo,
4415 reduc_def_stmt,
4416 !nested_cycle,
4417 &dummy));
4418 else
4420 gimple tmp = vect_is_simple_reduction (loop_vinfo, reduc_def_stmt,
4421 !nested_cycle, &dummy);
4422 /* We changed STMT to be the first stmt in reduction chain, hence we
4423 check that in this case the first element in the chain is STMT. */
4424 gcc_assert (stmt == tmp
4425 || GROUP_FIRST_ELEMENT (vinfo_for_stmt (tmp)) == stmt);
4428 if (STMT_VINFO_LIVE_P (vinfo_for_stmt (reduc_def_stmt)))
4429 return false;
4431 if (slp_node || PURE_SLP_STMT (stmt_info))
4432 ncopies = 1;
4433 else
4434 ncopies = (LOOP_VINFO_VECT_FACTOR (loop_vinfo)
4435 / TYPE_VECTOR_SUBPARTS (vectype_in));
4437 gcc_assert (ncopies >= 1);
4439 vec_mode = TYPE_MODE (vectype_in);
4441 if (code == COND_EXPR)
4443 if (!vectorizable_condition (stmt, gsi, NULL, ops[reduc_index], 0))
4445 if (vect_print_dump_info (REPORT_DETAILS))
4446 fprintf (vect_dump, "unsupported condition in reduction");
4448 return false;
4451 else
4453 /* 4. Supportable by target? */
4455 /* 4.1. check support for the operation in the loop */
4456 optab = optab_for_tree_code (code, vectype_in, optab_default);
4457 if (!optab)
4459 if (vect_print_dump_info (REPORT_DETAILS))
4460 fprintf (vect_dump, "no optab.");
4462 return false;
4465 if (optab_handler (optab, vec_mode) == CODE_FOR_nothing)
4467 if (vect_print_dump_info (REPORT_DETAILS))
4468 fprintf (vect_dump, "op not supported by target.");
4470 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
4471 || LOOP_VINFO_VECT_FACTOR (loop_vinfo)
4472 < vect_min_worthwhile_factor (code))
4473 return false;
4475 if (vect_print_dump_info (REPORT_DETAILS))
4476 fprintf (vect_dump, "proceeding using word mode.");
4479 /* Worthwhile without SIMD support? */
4480 if (!VECTOR_MODE_P (TYPE_MODE (vectype_in))
4481 && LOOP_VINFO_VECT_FACTOR (loop_vinfo)
4482 < vect_min_worthwhile_factor (code))
4484 if (vect_print_dump_info (REPORT_DETAILS))
4485 fprintf (vect_dump, "not worthwhile without SIMD support.");
4487 return false;
4491 /* 4.2. Check support for the epilog operation.
4493 If STMT represents a reduction pattern, then the type of the
4494 reduction variable may be different than the type of the rest
4495 of the arguments. For example, consider the case of accumulation
4496 of shorts into an int accumulator; the original code:
4497 S1: int_a = (int) short_a;
4498 orig_stmt-> S2: int_acc = plus <int_a ,int_acc>;
4500 was replaced with:
4501 STMT: int_acc = widen_sum <short_a, int_acc>
4503 This means that:
4504 1. The tree-code that is used to create the vector operation in the
4505 epilog code (that reduces the partial results) is not the
4506 tree-code of STMT, but is rather the tree-code of the original
4507 stmt from the pattern that STMT is replacing. I.e, in the example
4508 above we want to use 'widen_sum' in the loop, but 'plus' in the
4509 epilog.
4510 2. The type (mode) we use to check available target support
4511 for the vector operation to be created in the *epilog*, is
4512 determined by the type of the reduction variable (in the example
4513 above we'd check this: optab_handler (plus_optab, vect_int_mode)).
4514 However the type (mode) we use to check available target support
4515 for the vector operation to be created *inside the loop*, is
4516 determined by the type of the other arguments to STMT (in the
4517 example we'd check this: optab_handler (widen_sum_optab,
4518 vect_short_mode)).
4520 This is contrary to "regular" reductions, in which the types of all
4521 the arguments are the same as the type of the reduction variable.
4522 For "regular" reductions we can therefore use the same vector type
4523 (and also the same tree-code) when generating the epilog code and
4524 when generating the code inside the loop. */
4526 if (orig_stmt)
4528 /* This is a reduction pattern: get the vectype from the type of the
4529 reduction variable, and get the tree-code from orig_stmt. */
4530 orig_code = gimple_assign_rhs_code (orig_stmt);
4531 gcc_assert (vectype_out);
4532 vec_mode = TYPE_MODE (vectype_out);
4534 else
4536 /* Regular reduction: the same vectype and tree-code as used for
4537 the vector code inside the loop can be used for the epilog code. */
4538 orig_code = code;
4541 if (nested_cycle)
4543 def_bb = gimple_bb (reduc_def_stmt);
4544 def_stmt_loop = def_bb->loop_father;
4545 def_arg = PHI_ARG_DEF_FROM_EDGE (reduc_def_stmt,
4546 loop_preheader_edge (def_stmt_loop));
4547 if (TREE_CODE (def_arg) == SSA_NAME
4548 && (def_arg_stmt = SSA_NAME_DEF_STMT (def_arg))
4549 && gimple_code (def_arg_stmt) == GIMPLE_PHI
4550 && flow_bb_inside_loop_p (outer_loop, gimple_bb (def_arg_stmt))
4551 && vinfo_for_stmt (def_arg_stmt)
4552 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_arg_stmt))
4553 == vect_double_reduction_def)
4554 double_reduc = true;
4557 epilog_reduc_code = ERROR_MARK;
4558 if (reduction_code_for_scalar_code (orig_code, &epilog_reduc_code))
4560 reduc_optab = optab_for_tree_code (epilog_reduc_code, vectype_out,
4561 optab_default);
4562 if (!reduc_optab)
4564 if (vect_print_dump_info (REPORT_DETAILS))
4565 fprintf (vect_dump, "no optab for reduction.");
4567 epilog_reduc_code = ERROR_MARK;
4570 if (reduc_optab
4571 && optab_handler (reduc_optab, vec_mode) == CODE_FOR_nothing)
4573 if (vect_print_dump_info (REPORT_DETAILS))
4574 fprintf (vect_dump, "reduc op not supported by target.");
4576 epilog_reduc_code = ERROR_MARK;
4579 else
4581 if (!nested_cycle || double_reduc)
4583 if (vect_print_dump_info (REPORT_DETAILS))
4584 fprintf (vect_dump, "no reduc code for scalar code.");
4586 return false;
4590 if (double_reduc && ncopies > 1)
4592 if (vect_print_dump_info (REPORT_DETAILS))
4593 fprintf (vect_dump, "multiple types in double reduction");
4595 return false;
4598 if (!vec_stmt) /* transformation not required. */
4600 if (!vect_model_reduction_cost (stmt_info, epilog_reduc_code, ncopies))
4601 return false;
4602 STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
4603 return true;
4606 /** Transform. **/
4608 if (vect_print_dump_info (REPORT_DETAILS))
4609 fprintf (vect_dump, "transform reduction.");
4611 /* FORNOW: Multiple types are not supported for condition. */
4612 if (code == COND_EXPR)
4613 gcc_assert (ncopies == 1);
4615 /* Create the destination vector */
4616 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
4618 /* In case the vectorization factor (VF) is bigger than the number
4619 of elements that we can fit in a vectype (nunits), we have to generate
4620 more than one vector stmt - i.e., we need to "unroll" the
4621 vector stmt by a factor VF/nunits. For more details see documentation
4622 in vectorizable_operation. */
4624 /* If the reduction is used in an outer loop we need to generate
4625 VF intermediate results, like so (e.g. for ncopies=2):
4626 r0 = phi (init, r0)
4627 r1 = phi (init, r1)
4628 r0 = x0 + r0;
4629 r1 = x1 + r1;
4630 (i.e. we generate VF results in 2 registers).
4631 In this case we have a separate def-use cycle for each copy, and therefore
4632 for each copy we get the vector def for the reduction variable from the
4633 respective phi node created for this copy.
4635 Otherwise (the reduction is unused in the loop nest), we can combine
4636 together intermediate results, like so (e.g. for ncopies=2):
4637 r = phi (init, r)
4638 r = x0 + r;
4639 r = x1 + r;
4640 (i.e. we generate VF/2 results in a single register).
4641 In this case for each copy we get the vector def for the reduction variable
4642 from the vectorized reduction operation generated in the previous iteration.
4645 if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_scope)
4647 single_defuse_cycle = true;
4648 epilog_copies = 1;
4650 else
4651 epilog_copies = ncopies;
4653 prev_stmt_info = NULL;
4654 prev_phi_info = NULL;
4655 if (slp_node)
4657 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
4658 gcc_assert (TYPE_VECTOR_SUBPARTS (vectype_out)
4659 == TYPE_VECTOR_SUBPARTS (vectype_in));
4661 else
4663 vec_num = 1;
4664 vec_oprnds0 = VEC_alloc (tree, heap, 1);
4665 if (op_type == ternary_op)
4666 vec_oprnds1 = VEC_alloc (tree, heap, 1);
4669 phis = VEC_alloc (gimple, heap, vec_num);
4670 vect_defs = VEC_alloc (tree, heap, vec_num);
4671 if (!slp_node)
4672 VEC_quick_push (tree, vect_defs, NULL_TREE);
4674 for (j = 0; j < ncopies; j++)
4676 if (j == 0 || !single_defuse_cycle)
4678 for (i = 0; i < vec_num; i++)
4680 /* Create the reduction-phi that defines the reduction
4681 operand. */
4682 new_phi = create_phi_node (vec_dest, loop->header);
4683 set_vinfo_for_stmt (new_phi,
4684 new_stmt_vec_info (new_phi, loop_vinfo,
4685 NULL));
4686 if (j == 0 || slp_node)
4687 VEC_quick_push (gimple, phis, new_phi);
4691 if (code == COND_EXPR)
4693 gcc_assert (!slp_node);
4694 vectorizable_condition (stmt, gsi, vec_stmt,
4695 PHI_RESULT (VEC_index (gimple, phis, 0)),
4696 reduc_index);
4697 /* Multiple types are not supported for condition. */
4698 break;
4701 /* Handle uses. */
4702 if (j == 0)
4704 tree op0, op1 = NULL_TREE;
4706 op0 = ops[!reduc_index];
4707 if (op_type == ternary_op)
4709 if (reduc_index == 0)
4710 op1 = ops[2];
4711 else
4712 op1 = ops[1];
4715 if (slp_node)
4716 vect_get_slp_defs (op0, op1, slp_node, &vec_oprnds0, &vec_oprnds1,
4717 -1);
4718 else
4720 loop_vec_def0 = vect_get_vec_def_for_operand (ops[!reduc_index],
4721 stmt, NULL);
4722 VEC_quick_push (tree, vec_oprnds0, loop_vec_def0);
4723 if (op_type == ternary_op)
4725 loop_vec_def1 = vect_get_vec_def_for_operand (op1, stmt,
4726 NULL);
4727 VEC_quick_push (tree, vec_oprnds1, loop_vec_def1);
4731 else
4733 if (!slp_node)
4735 enum vect_def_type dt = vect_unknown_def_type; /* Dummy */
4736 loop_vec_def0 = vect_get_vec_def_for_stmt_copy (dt, loop_vec_def0);
4737 VEC_replace (tree, vec_oprnds0, 0, loop_vec_def0);
4738 if (op_type == ternary_op)
4740 loop_vec_def1 = vect_get_vec_def_for_stmt_copy (dt,
4741 loop_vec_def1);
4742 VEC_replace (tree, vec_oprnds1, 0, loop_vec_def1);
4746 if (single_defuse_cycle)
4747 reduc_def = gimple_assign_lhs (new_stmt);
4749 STMT_VINFO_RELATED_STMT (prev_phi_info) = new_phi;
4752 FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, def0)
4754 if (slp_node)
4755 reduc_def = PHI_RESULT (VEC_index (gimple, phis, i));
4756 else
4758 if (!single_defuse_cycle || j == 0)
4759 reduc_def = PHI_RESULT (new_phi);
4762 def1 = ((op_type == ternary_op)
4763 ? VEC_index (tree, vec_oprnds1, i) : NULL);
4764 if (op_type == binary_op)
4766 if (reduc_index == 0)
4767 expr = build2 (code, vectype_out, reduc_def, def0);
4768 else
4769 expr = build2 (code, vectype_out, def0, reduc_def);
4771 else
4773 if (reduc_index == 0)
4774 expr = build3 (code, vectype_out, reduc_def, def0, def1);
4775 else
4777 if (reduc_index == 1)
4778 expr = build3 (code, vectype_out, def0, reduc_def, def1);
4779 else
4780 expr = build3 (code, vectype_out, def0, def1, reduc_def);
4784 new_stmt = gimple_build_assign (vec_dest, expr);
4785 new_temp = make_ssa_name (vec_dest, new_stmt);
4786 gimple_assign_set_lhs (new_stmt, new_temp);
4787 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4789 if (slp_node)
4791 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
4792 VEC_quick_push (tree, vect_defs, new_temp);
4794 else
4795 VEC_replace (tree, vect_defs, 0, new_temp);
4798 if (slp_node)
4799 continue;
4801 if (j == 0)
4802 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4803 else
4804 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4806 prev_stmt_info = vinfo_for_stmt (new_stmt);
4807 prev_phi_info = vinfo_for_stmt (new_phi);
4810 /* Finalize the reduction-phi (set its arguments) and create the
4811 epilog reduction code. */
4812 if ((!single_defuse_cycle || code == COND_EXPR) && !slp_node)
4814 new_temp = gimple_assign_lhs (*vec_stmt);
4815 VEC_replace (tree, vect_defs, 0, new_temp);
4818 vect_create_epilog_for_reduction (vect_defs, stmt, epilog_copies,
4819 epilog_reduc_code, phis, reduc_index,
4820 double_reduc, slp_node);
4822 VEC_free (gimple, heap, phis);
4823 VEC_free (tree, heap, vec_oprnds0);
4824 if (vec_oprnds1)
4825 VEC_free (tree, heap, vec_oprnds1);
4827 return true;
4830 /* Function vect_min_worthwhile_factor.
4832 For a loop where we could vectorize the operation indicated by CODE,
4833 return the minimum vectorization factor that makes it worthwhile
4834 to use generic vectors. */
4836 vect_min_worthwhile_factor (enum tree_code code)
4838 switch (code)
4840 case PLUS_EXPR:
4841 case MINUS_EXPR:
4842 case NEGATE_EXPR:
4843 return 4;
4845 case BIT_AND_EXPR:
4846 case BIT_IOR_EXPR:
4847 case BIT_XOR_EXPR:
4848 case BIT_NOT_EXPR:
4849 return 2;
4851 default:
4852 return INT_MAX;
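/* Usage sketch (it mirrors the check done in vectorizable_reduction above):

     if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
	 || LOOP_VINFO_VECT_FACTOR (loop_vinfo)
	    < vect_min_worthwhile_factor (code))
       return false;

   i.e., emulated (word-mode) vectorization of e.g. a PLUS_EXPR is only
   attempted when the vectorization factor is at least 4.  */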
4857 /* Function vectorizable_induction
4859 Check if PHI performs an induction computation that can be vectorized.
4860 If VEC_STMT is also passed, vectorize the induction PHI: create a vectorized
4861 phi to replace it, put it in VEC_STMT, and add it to the same basic block.
4862 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
4864 bool
4865 vectorizable_induction (gimple phi, gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
4866 gimple *vec_stmt)
4868 stmt_vec_info stmt_info = vinfo_for_stmt (phi);
4869 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4870 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4871 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
4872 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
4873 int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
4874 tree vec_def;
4876 gcc_assert (ncopies >= 1);
4877 /* FORNOW. This restriction should be relaxed. */
4878 if (nested_in_vect_loop_p (loop, phi) && ncopies > 1)
4880 if (vect_print_dump_info (REPORT_DETAILS))
4881 fprintf (vect_dump, "multiple types in nested loop.");
4882 return false;
4885 if (!STMT_VINFO_RELEVANT_P (stmt_info))
4886 return false;
4888 /* FORNOW: SLP not supported. */
4889 if (STMT_SLP_TYPE (stmt_info))
4890 return false;
4892 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def);
4894 if (gimple_code (phi) != GIMPLE_PHI)
4895 return false;
4897 if (!vec_stmt) /* transformation not required. */
4899 STMT_VINFO_TYPE (stmt_info) = induc_vec_info_type;
4900 if (vect_print_dump_info (REPORT_DETAILS))
4901 fprintf (vect_dump, "=== vectorizable_induction ===");
4902 vect_model_induction_cost (stmt_info, ncopies);
4903 return true;
4906 /** Transform. **/
4908 if (vect_print_dump_info (REPORT_DETAILS))
4909 fprintf (vect_dump, "transform induction phi.");
4911 vec_def = get_initial_def_for_induction (phi);
4912 *vec_stmt = SSA_NAME_DEF_STMT (vec_def);
4913 return true;
4916 /* Function vectorizable_live_operation.
4918 STMT computes a value that is used outside the loop. Check if
4919 it can be supported. */
4921 bool
4922 vectorizable_live_operation (gimple stmt,
4923 gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
4924 gimple *vec_stmt ATTRIBUTE_UNUSED)
4926 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4927 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4928 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
4929 int i;
4930 int op_type;
4931 tree op;
4932 tree def;
4933 gimple def_stmt;
4934 enum vect_def_type dt;
4935 enum tree_code code;
4936 enum gimple_rhs_class rhs_class;
4938 gcc_assert (STMT_VINFO_LIVE_P (stmt_info));
4940 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def)
4941 return false;
4943 if (!is_gimple_assign (stmt))
4944 return false;
4946 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
4947 return false;
4949 /* FORNOW. CHECKME. */
4950 if (nested_in_vect_loop_p (loop, stmt))
4951 return false;
4953 code = gimple_assign_rhs_code (stmt);
4954 op_type = TREE_CODE_LENGTH (code);
4955 rhs_class = get_gimple_rhs_class (code);
4956 gcc_assert (rhs_class != GIMPLE_UNARY_RHS || op_type == unary_op);
4957 gcc_assert (rhs_class != GIMPLE_BINARY_RHS || op_type == binary_op);
4959 /* FORNOW: support only if all uses are invariant. This means
4960 that the scalar operations can remain in place, unvectorized.
4961 The original last scalar value that they compute will be used. */
4963 for (i = 0; i < op_type; i++)
4965 if (rhs_class == GIMPLE_SINGLE_RHS)
4966 op = TREE_OPERAND (gimple_op (stmt, 1), i);
4967 else
4968 op = gimple_op (stmt, i + 1);
4969 if (op
4970 && !vect_is_simple_use (op, loop_vinfo, NULL, &def_stmt, &def, &dt))
4972 if (vect_print_dump_info (REPORT_DETAILS))
4973 fprintf (vect_dump, "use not simple.");
4974 return false;
4977 if (dt != vect_external_def && dt != vect_constant_def)
4978 return false;
4981 /* No transformation is required for the cases we currently support. */
4982 return true;
4985 /* Kill any debug uses outside LOOP of SSA names defined in STMT. */
4987 static void
4988 vect_loop_kill_debug_uses (struct loop *loop, gimple stmt)
4990 ssa_op_iter op_iter;
4991 imm_use_iterator imm_iter;
4992 def_operand_p def_p;
4993 gimple ustmt;
4995 FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
4997 FOR_EACH_IMM_USE_STMT (ustmt, imm_iter, DEF_FROM_PTR (def_p))
4999 basic_block bb;
5001 if (!is_gimple_debug (ustmt))
5002 continue;
5004 bb = gimple_bb (ustmt);
5006 if (!flow_bb_inside_loop_p (loop, bb))
5008 if (gimple_debug_bind_p (ustmt))
5010 if (vect_print_dump_info (REPORT_DETAILS))
5011 fprintf (vect_dump, "killing debug use");
5013 gimple_debug_bind_reset_value (ustmt);
5014 update_stmt (ustmt);
5016 else
5017 gcc_unreachable ();
5023 /* Function vect_transform_loop.
5025 The analysis phase has determined that the loop is vectorizable.
5026 Vectorize the loop - create vectorized stmts to replace the scalar
5027 stmts in the loop, and update the loop exit condition. */
5029 void
5030 vect_transform_loop (loop_vec_info loop_vinfo)
5032 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
5033 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
5034 int nbbs = loop->num_nodes;
5035 gimple_stmt_iterator si;
5036 int i;
5037 tree ratio = NULL;
5038 int vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
5039 bool strided_store;
5040 bool slp_scheduled = false;
5041 unsigned int nunits;
5042 tree cond_expr = NULL_TREE;
5043 gimple_seq cond_expr_stmt_list = NULL;
5044 bool do_peeling_for_loop_bound;
5046 if (vect_print_dump_info (REPORT_DETAILS))
5047 fprintf (vect_dump, "=== vec_transform_loop ===");
5049 /* Peel the loop if there are data refs with unknown alignment.
5050 Only one data ref with unknown alignment is allowed.
5052 if (LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo))
5053 vect_do_peeling_for_alignment (loop_vinfo);
5055 do_peeling_for_loop_bound
5056 = (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
5057 || (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
5058 && LOOP_VINFO_INT_NITERS (loop_vinfo) % vectorization_factor != 0)
5059 || LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo));
5061 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
5062 || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
5063 vect_loop_versioning (loop_vinfo,
5064 !do_peeling_for_loop_bound,
5065 &cond_expr, &cond_expr_stmt_list);
5067 /* If the loop has a symbolic number of iterations 'n' (i.e. it's not a
5068 compile time constant), or it is a constant that is not divisible by the
5069 vectorization factor, then an epilog loop needs to be created.
5070 We therefore duplicate the loop: the original loop will be vectorized,
5071 and will compute the first (n/VF) iterations. The second copy of the loop
5072 will remain scalar and will compute the remaining (n%VF) iterations.
5073 (VF is the vectorization factor). */
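  /* E.g. (sketch), for VF == 4 and n == 10:

       for (i = 0; i < 8; i += 4)
	 ... vectorized body, n/VF = 2 iterations ...
       for (; i < 10; i++)
	 ... scalar body, n%VF = 2 iterations ...
  */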
5075 if (do_peeling_for_loop_bound)
5076 vect_do_peeling_for_loop_bound (loop_vinfo, &ratio,
5077 cond_expr, cond_expr_stmt_list);
5078 else
5079 ratio = build_int_cst (TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo)),
5080 LOOP_VINFO_INT_NITERS (loop_vinfo) / vectorization_factor);
5082 /* 1) Make sure the loop header has exactly two entries
5083 2) Make sure we have a preheader basic block. */
5085 gcc_assert (EDGE_COUNT (loop->header->preds) == 2);
5087 split_edge (loop_preheader_edge (loop));
5089 /* FORNOW: the vectorizer supports only loops whose body consists
5090 of one basic block (header + empty latch). When the vectorizer
5091 supports more involved loop forms, the order in which the BBs are
5092 traversed needs to be reconsidered. */
5094 for (i = 0; i < nbbs; i++)
5096 basic_block bb = bbs[i];
5097 stmt_vec_info stmt_info;
5098 gimple phi;
5100 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
5102 phi = gsi_stmt (si);
5103 if (vect_print_dump_info (REPORT_DETAILS))
5105 fprintf (vect_dump, "------>vectorizing phi: ");
5106 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
5108 stmt_info = vinfo_for_stmt (phi);
5109 if (!stmt_info)
5110 continue;
5112 if (MAY_HAVE_DEBUG_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
5113 vect_loop_kill_debug_uses (loop, phi);
5115 if (!STMT_VINFO_RELEVANT_P (stmt_info)
5116 && !STMT_VINFO_LIVE_P (stmt_info))
5117 continue;
5119 if ((TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info))
5120 != (unsigned HOST_WIDE_INT) vectorization_factor)
5121 && vect_print_dump_info (REPORT_DETAILS))
5122 fprintf (vect_dump, "multiple-types.");
5124 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def)
5126 if (vect_print_dump_info (REPORT_DETAILS))
5127 fprintf (vect_dump, "transform phi.");
5128 vect_transform_stmt (phi, NULL, NULL, NULL, NULL);
5132 for (si = gsi_start_bb (bb); !gsi_end_p (si);)
5134 gimple stmt = gsi_stmt (si);
5135 bool is_store;
5137 if (vect_print_dump_info (REPORT_DETAILS))
5139 fprintf (vect_dump, "------>vectorizing statement: ");
5140 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
5143 stmt_info = vinfo_for_stmt (stmt);
5145 /* vector stmts created in the outer-loop during vectorization of
5146 stmts in an inner-loop may not have a stmt_info, and do not
5147 need to be vectorized. */
5148 if (!stmt_info)
5150 gsi_next (&si);
5151 continue;
5154 if (MAY_HAVE_DEBUG_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
5155 vect_loop_kill_debug_uses (loop, stmt);
5157 if (!STMT_VINFO_RELEVANT_P (stmt_info)
5158 && !STMT_VINFO_LIVE_P (stmt_info))
5160 gsi_next (&si);
5161 continue;
5164 gcc_assert (STMT_VINFO_VECTYPE (stmt_info));
5165 nunits =
5166 (unsigned int) TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
5167 if (!STMT_SLP_TYPE (stmt_info)
5168 && nunits != (unsigned int) vectorization_factor
5169 && vect_print_dump_info (REPORT_DETAILS))
5170 /* For SLP, VF is set according to the unrolling factor, not the
5171 vector size, hence this print is not valid for SLP. */
5172 fprintf (vect_dump, "multiple-types.");
5174 /* SLP. Schedule all the SLP instances when the first SLP stmt is
5175 reached. */
5176 if (STMT_SLP_TYPE (stmt_info))
5178 if (!slp_scheduled)
5180 slp_scheduled = true;
5182 if (vect_print_dump_info (REPORT_DETAILS))
5183 fprintf (vect_dump, "=== scheduling SLP instances ===");
5185 vect_schedule_slp (loop_vinfo, NULL);
5188 /* Hybrid SLP stmts must be vectorized in addition to SLP. */
5189 if (!vinfo_for_stmt (stmt) || PURE_SLP_STMT (stmt_info))
5191 gsi_next (&si);
5192 continue;
5196 /* -------- vectorize statement ------------ */
5197 if (vect_print_dump_info (REPORT_DETAILS))
5198 fprintf (vect_dump, "transform statement.");
5200 strided_store = false;
5201 is_store = vect_transform_stmt (stmt, &si, &strided_store, NULL, NULL);
5202 if (is_store)
5204 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
5206 /* Interleaving. If IS_STORE is TRUE, the vectorization of the
5207 interleaving chain was completed - free all the stores in
5208 the chain. */
5209 vect_remove_stores (GROUP_FIRST_ELEMENT (stmt_info));
5210 gsi_remove (&si, true);
5211 continue;
5213 else
5215 /* Free the attached stmt_vec_info and remove the stmt. */
5216 free_stmt_vec_info (stmt);
5217 gsi_remove (&si, true);
5218 continue;
5221 gsi_next (&si);
5222 } /* stmts in BB */
5223 } /* BBs in loop */
5225 slpeel_make_loop_iterate_ntimes (loop, ratio);
5227 /* The memory tags and pointers in vectorized statements need to
5228 have their SSA forms updated. FIXME, why can't this be delayed
5229 until all the loops have been transformed? */
5230 update_ssa (TODO_update_ssa);
5232 if (vect_print_dump_info (REPORT_VECTORIZED_LOCATIONS))
5233 fprintf (vect_dump, "LOOP VECTORIZED.");
5234 if (loop->inner && vect_print_dump_info (REPORT_VECTORIZED_LOCATIONS))
5235 fprintf (vect_dump, "OUTER LOOP VECTORIZED.");