/* Loop Vectorization
   Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com> and
   Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "ggc.h"
#include "tree.h"
#include "basic-block.h"
#include "tree-pretty-print.h"
#include "gimple-pretty-print.h"
#include "tree-flow.h"
#include "tree-dump.h"
#include "cfgloop.h"
#include "cfglayout.h"
#include "expr.h"
#include "recog.h"
#include "optabs.h"
#include "params.h"
#include "diagnostic-core.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
#include "tree-vectorizer.h"
#include "target.h"
/* Loop Vectorization Pass.

   This pass tries to vectorize loops.

   For example, the vectorizer transforms the following simple loop:

        short a[N]; short b[N]; short c[N]; int i;

        for (i=0; i<N; i++){
          a[i] = b[i] + c[i];
        }

   as if it were manually vectorized by rewriting the source code into:

        typedef int __attribute__((mode(V8HI))) v8hi;
        short a[N]; short b[N]; short c[N]; int i;
        v8hi *pa = (v8hi*)a, *pb = (v8hi*)b, *pc = (v8hi*)c;
        v8hi va, vb, vc;

        for (i=0; i<N/8; i++){
          vb = pb[i];
          vc = pc[i];
          va = vb + vc;
          pa[i] = va;
        }

   The main entry to this pass is vectorize_loops(), in which
   the vectorizer applies a set of analyses on a given set of loops,
   followed by the actual vectorization transformation for the loops that
   had successfully passed the analysis phase.

   Throughout this pass we make a distinction between two types of
   data: scalars (which are represented by SSA_NAMEs), and memory references
   ("data-refs").  These two types of data require different handling both
   during analysis and transformation.  The types of data-refs that the
   vectorizer currently supports are ARRAY_REFs whose base is an array DECL
   (not a pointer), and INDIRECT_REFs through pointers; both array and pointer
   accesses are required to have a simple (consecutive) access pattern.

   Analysis phase:
   ===============
   The driver for the analysis phase is vect_analyze_loop().
   It applies a set of analyses, some of which rely on the scalar evolution
   analyzer (scev) developed by Sebastian Pop.

   During the analysis phase the vectorizer records some information
   per stmt in a "stmt_vec_info" struct which is attached to each stmt in the
   loop, as well as general information about the loop as a whole, which is
   recorded in a "loop_vec_info" struct attached to each loop.

   Transformation phase:
   =====================
   The loop transformation phase scans all the stmts in the loop, and
   creates a vector stmt (or a sequence of stmts) for each scalar stmt S in
   the loop that needs to be vectorized.  It inserts the vector code sequence
   just before the scalar stmt S, and records a pointer to the vector code
   in STMT_VINFO_VEC_STMT (stmt_info) (stmt_info is the stmt_vec_info struct
   attached to S).  This pointer will be used for the vectorization of
   following stmts which use the def of stmt S.  Stmt S is removed if it
   writes to memory; otherwise, we rely on dead code elimination for
   removing it.

   For example, say stmt S1 was vectorized into stmt VS1:

   VS1: vb = px[i];
   S1:  b = x[i];    STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
   S2:  a = b;

   To vectorize stmt S2, the vectorizer first finds the stmt that defines
   the operand 'b' (S1), and gets the relevant vector def 'vb' from the
   vector stmt VS1 pointed to by STMT_VINFO_VEC_STMT (stmt_info (S1)).  The
   resulting sequence would be:

   VS1: vb = px[i];
   S1:  b = x[i];    STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
   VS2: va = vb;
   S2:  a = b;       STMT_VINFO_VEC_STMT (stmt_info (S2)) = VS2

   Operands that are not SSA_NAMEs are data-refs that appear in
   load/store operations (like 'x[i]' in S1), and are handled differently.

   Target modeling:
   =================
   Currently the only target-specific information that is used is the
   size of the vector (in bytes) - "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD".
   Targets that can support different sizes of vectors, for now will need
   to specify one value for "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD".  More
   flexibility will be added in the future.

   Since we only vectorize operations whose vector form can be
   expressed using existing tree codes, to verify that an operation is
   supported, the vectorizer checks the relevant optab at the relevant
   machine_mode (e.g., optab_handler (add_optab, V8HImode)).  If
   the value found is CODE_FOR_nothing, then there's no target support, and
   we can't vectorize the stmt.

   For additional information on this project see:
   http://gcc.gnu.org/projects/tree-ssa/vectorization.html
*/
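/* Illustrative sketch (not part of this pass): the optab query described
   above amounts to

     if (optab_handler (add_optab, V8HImode) == CODE_FOR_nothing)
       give_up_on_this_stmt ();

   where V8HImode stands for whatever vector mode the stmt requires, and
   give_up_on_this_stmt is a placeholder for the failure path, not a real
   function.  */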
/* Function vect_determine_vectorization_factor

   Determine the vectorization factor (VF).  VF is the number of data
   elements that are operated upon in parallel in a single iteration of the
   vectorized loop.  For example, when vectorizing a loop that operates on
   4-byte elements, on a target with a vector size (VS) of 16 bytes, the VF
   is set to 4, since 4 elements can fit in a single vector register.

   We currently support vectorization of loops in which all types operated
   upon are of the same size.  Therefore this function currently sets VF
   according to the size of the types operated upon, and fails if there are
   multiple sizes in the loop.

   VF is also the factor by which the loop iterations are strip-mined, e.g.:
   original loop:
        for (i=0; i<N; i++){
          a[i] = b[i] + c[i];
        }

   vectorized loop:
        for (i=0; i<N; i+=VF){
          a[i:VF] = b[i:VF] + c[i:VF];
        }
*/
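/* Worked example (illustrative): with 16-byte vector registers and a loop
   whose stmts all operate on 2-byte "short" elements, each vector holds
   16/2 = 8 elements, so VF = 8 and the strip-mined loop above advances by
   8 elements per iteration.  */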
static bool
vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  unsigned int vectorization_factor = 0;
  tree scalar_type;
  gimple phi;
  tree vectype;
  unsigned int nunits;
  stmt_vec_info stmt_info;
  int i;
  HOST_WIDE_INT dummy;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_determine_vectorization_factor ===");

  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];

      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        {
          phi = gsi_stmt (si);
          stmt_info = vinfo_for_stmt (phi);
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "==> examining phi: ");
              print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
            }

          gcc_assert (stmt_info);

          if (STMT_VINFO_RELEVANT_P (stmt_info))
            {
              gcc_assert (!STMT_VINFO_VECTYPE (stmt_info));
              scalar_type = TREE_TYPE (PHI_RESULT (phi));

              if (vect_print_dump_info (REPORT_DETAILS))
                {
                  fprintf (vect_dump, "get vectype for scalar type: ");
                  print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
                }

              vectype = get_vectype_for_scalar_type (scalar_type);
              if (!vectype)
                {
                  if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                    {
                      fprintf (vect_dump,
                               "not vectorized: unsupported data-type ");
                      print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
                    }
                  return false;
                }
              STMT_VINFO_VECTYPE (stmt_info) = vectype;

              if (vect_print_dump_info (REPORT_DETAILS))
                {
                  fprintf (vect_dump, "vectype: ");
                  print_generic_expr (vect_dump, vectype, TDF_SLIM);
                }

              nunits = TYPE_VECTOR_SUBPARTS (vectype);
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "nunits = %d", nunits);

              if (!vectorization_factor
                  || (nunits > vectorization_factor))
                vectorization_factor = nunits;
            }
        }

      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
        {
          tree vf_vectype;
          gimple stmt = gsi_stmt (si);
          stmt_info = vinfo_for_stmt (stmt);

          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "==> examining statement: ");
              print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
            }

          gcc_assert (stmt_info);

          /* Skip stmts which do not need to be vectorized.  */
          if (!STMT_VINFO_RELEVANT_P (stmt_info)
              && !STMT_VINFO_LIVE_P (stmt_info))
            {
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "skip.");
              continue;
            }

          if (gimple_get_lhs (stmt) == NULL_TREE)
            {
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                {
                  fprintf (vect_dump, "not vectorized: irregular stmt.");
                  print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
                }
              return false;
            }

          if (VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))))
            {
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                {
                  fprintf (vect_dump, "not vectorized: vector stmt in loop:");
                  print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
                }
              return false;
            }

          if (STMT_VINFO_VECTYPE (stmt_info))
            {
              /* The only case when a vectype had been already set is for
                 stmts that contain a dataref, or for "pattern-stmts" (stmts
                 generated by the vectorizer to represent/replace a certain
                 idiom).  */
              gcc_assert (STMT_VINFO_DATA_REF (stmt_info)
                          || is_pattern_stmt_p (stmt_info));
              vectype = STMT_VINFO_VECTYPE (stmt_info);
            }
          else
            {
              gcc_assert (!STMT_VINFO_DATA_REF (stmt_info)
                          && !is_pattern_stmt_p (stmt_info));

              scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
              if (vect_print_dump_info (REPORT_DETAILS))
                {
                  fprintf (vect_dump, "get vectype for scalar type: ");
                  print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
                }
              vectype = get_vectype_for_scalar_type (scalar_type);
              if (!vectype)
                {
                  if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                    {
                      fprintf (vect_dump,
                               "not vectorized: unsupported data-type ");
                      print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
                    }
                  return false;
                }

              STMT_VINFO_VECTYPE (stmt_info) = vectype;
            }

          /* The vectorization factor is according to the smallest
             scalar type (or the largest vector size, but we only
             support one vector size per loop).  */
          scalar_type = vect_get_smallest_scalar_type (stmt, &dummy,
                                                       &dummy);
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "get vectype for scalar type: ");
              print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
            }
          vf_vectype = get_vectype_for_scalar_type (scalar_type);
          if (!vf_vectype)
            {
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                {
                  fprintf (vect_dump,
                           "not vectorized: unsupported data-type ");
                  print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
                }
              return false;
            }

          if ((GET_MODE_SIZE (TYPE_MODE (vectype))
               != GET_MODE_SIZE (TYPE_MODE (vf_vectype))))
            {
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                {
                  fprintf (vect_dump,
                           "not vectorized: different sized vector "
                           "types in statement, ");
                  print_generic_expr (vect_dump, vectype, TDF_SLIM);
                  fprintf (vect_dump, " and ");
                  print_generic_expr (vect_dump, vf_vectype, TDF_SLIM);
                }
              return false;
            }

          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "vectype: ");
              print_generic_expr (vect_dump, vf_vectype, TDF_SLIM);
            }

          nunits = TYPE_VECTOR_SUBPARTS (vf_vectype);
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "nunits = %d", nunits);

          if (!vectorization_factor
              || (nunits > vectorization_factor))
            vectorization_factor = nunits;
        }
    }

  /* TODO: Analyze cost.  Decide if worth while to vectorize.  */
  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "vectorization factor = %d", vectorization_factor);
  if (vectorization_factor <= 1)
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: unsupported data-type");
      return false;
    }
  LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;

  return true;
}
/* Function vect_is_simple_iv_evolution.

   FORNOW: A simple evolution of an induction variable in the loop is
   considered a polynomial evolution with constant step.  */

static bool
vect_is_simple_iv_evolution (unsigned loop_nb, tree access_fn, tree *init,
                             tree *step)
{
  tree init_expr;
  tree step_expr;
  tree evolution_part = evolution_part_in_loop_num (access_fn, loop_nb);

  /* When there is no evolution in this loop, the evolution function
     is not "simple".  */
  if (evolution_part == NULL_TREE)
    return false;

  /* When the evolution is a polynomial of degree >= 2
     the evolution function is not "simple".  */
  if (tree_is_chrec (evolution_part))
    return false;

  step_expr = evolution_part;
  init_expr = unshare_expr (initial_condition_in_loop_num (access_fn,
                                                           loop_nb));

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "step: ");
      print_generic_expr (vect_dump, step_expr, TDF_SLIM);
      fprintf (vect_dump, ",  init: ");
      print_generic_expr (vect_dump, init_expr, TDF_SLIM);
    }

  *init = init_expr;
  *step = step_expr;

  if (TREE_CODE (step_expr) != INTEGER_CST)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "step unknown.");
      return false;
    }

  return true;
}
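/* Illustrative note: for an induction variable written in source as
   "for (i = 3; i < N; i += 2)", scev represents i by the chrec
   {3, +, 2}, so *INIT is 3 and *STEP is 2.  A step that is not an
   INTEGER_CST is reported as "step unknown." and rejected above.  */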
/* Function vect_analyze_scalar_cycles_1.

   Examine the cross iteration def-use cycles of scalar variables
   in LOOP.  LOOP_VINFO represents the loop that is now being
   considered for vectorization (can be LOOP, or an outer-loop
   enclosing LOOP).  */

static void
vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
{
  basic_block bb = loop->header;
  tree dummy;
  VEC(gimple,heap) *worklist = VEC_alloc (gimple, heap, 64);
  gimple_stmt_iterator gsi;
  bool double_reduc;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_analyze_scalar_cycles ===");

  /* First - identify all inductions.  Reduction detection assumes that all
     the inductions have been identified, therefore, this order must not be
     changed.  */
  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);
      tree access_fn = NULL;
      tree def = PHI_RESULT (phi);
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);

      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "Analyze phi: ");
          print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
        }

      /* Skip virtual phi's.  The data dependences that are associated with
         virtual defs/uses (i.e., memory accesses) are analyzed elsewhere.  */
      if (!is_gimple_reg (SSA_NAME_VAR (def)))
        continue;

      STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_unknown_def_type;

      /* Analyze the evolution function.  */
      access_fn = analyze_scalar_evolution (loop, def);
      if (access_fn)
        STRIP_NOPS (access_fn);
      if (access_fn && vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "Access function of PHI: ");
          print_generic_expr (vect_dump, access_fn, TDF_SLIM);
        }

      if (!access_fn
          || !vect_is_simple_iv_evolution (loop->num, access_fn,
                                           &dummy, &dummy))
        {
          VEC_safe_push (gimple, heap, worklist, phi);
          continue;
        }

      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "Detected induction.");
      STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_induction_def;
    }


  /* Second - identify all reductions and nested cycles.  */
  while (VEC_length (gimple, worklist) > 0)
    {
      gimple phi = VEC_pop (gimple, worklist);
      tree def = PHI_RESULT (phi);
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);
      gimple reduc_stmt;
      bool nested_cycle;

      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "Analyze phi: ");
          print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
        }

      gcc_assert (is_gimple_reg (SSA_NAME_VAR (def)));
      gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_unknown_def_type);

      nested_cycle = (loop != LOOP_VINFO_LOOP (loop_vinfo));
      reduc_stmt = vect_force_simple_reduction (loop_vinfo, phi,
                                                !nested_cycle,
                                                &double_reduc);
      if (reduc_stmt)
        {
          if (double_reduc)
            {
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "Detected double reduction.");

              STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_double_reduction_def;
              STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
                                                    vect_double_reduction_def;
            }
          else
            {
              if (nested_cycle)
                {
                  if (vect_print_dump_info (REPORT_DETAILS))
                    fprintf (vect_dump, "Detected vectorizable nested cycle.");

                  STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_nested_cycle;
                  STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
                                                             vect_nested_cycle;
                }
              else
                {
                  if (vect_print_dump_info (REPORT_DETAILS))
                    fprintf (vect_dump, "Detected reduction.");

                  STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_reduction_def;
                  STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
                                                           vect_reduction_def;
                  /* Store the reduction cycles for possible vectorization
                     in loop-aware SLP.  */
                  VEC_safe_push (gimple, heap,
                                 LOOP_VINFO_REDUCTIONS (loop_vinfo),
                                 reduc_stmt);
                }
            }
        }
      else
        if (vect_print_dump_info (REPORT_DETAILS))
          fprintf (vect_dump, "Unknown def-use cycle pattern.");
    }

  VEC_free (gimple, heap, worklist);
}
/* Function vect_analyze_scalar_cycles.

   Examine the cross iteration def-use cycles of scalar variables, by
   analyzing the loop-header PHIs of scalar variables.  Classify each
   cycle as one of the following: invariant, induction, reduction, unknown.
   We do that for the loop represented by LOOP_VINFO, and also for its
   inner-loop, if it exists.

   Examples for scalar cycles:

   Example1: reduction:

              loop1:
              for (i=0; i<N; i++)
                 sum += a[i];

   Example2: induction:

              loop2:
              for (i=0; i<N; i++)
                 a[i] = i;  */

static void
vect_analyze_scalar_cycles (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);

  vect_analyze_scalar_cycles_1 (loop_vinfo, loop);

  /* When vectorizing an outer-loop, the inner-loop is executed sequentially.
     Reductions in such inner-loop therefore have different properties than
     the reductions in the nest that gets vectorized:
     1. When vectorized, they are executed in the same order as in the
        original scalar loop, so we can't change the order of computation
        when vectorizing them.
     2. FIXME: Inner-loop reductions can be used in the inner-loop, so the
        current checks are too strict.  */

  if (loop->inner)
    vect_analyze_scalar_cycles_1 (loop_vinfo, loop->inner);
}
/* Function vect_get_loop_niters.

   Determine how many times the loop is executed.
   If an expression that represents the number of iterations
   can be constructed, place it in NUMBER_OF_ITERATIONS.
   Return the loop exit condition.  */

static gimple
vect_get_loop_niters (struct loop *loop, tree *number_of_iterations)
{
  tree niters;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== get_loop_niters ===");

  niters = number_of_exit_cond_executions (loop);

  if (niters != NULL_TREE
      && niters != chrec_dont_know)
    {
      *number_of_iterations = niters;

      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "==> get_loop_niters:");
          print_generic_expr (vect_dump, *number_of_iterations, TDF_SLIM);
        }
    }

  return get_loop_exit_condition (loop);
}
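/* Illustrative note (under the usual countable-loop assumptions): for a
   simple counted loop such as "for (i = 0; i < n; i++)" the expression
   placed in NUMBER_OF_ITERATIONS is equivalent to n, possibly symbolic;
   chrec_dont_know means scev could not compute it.  */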
/* Function bb_in_loop_p

   Used as predicate for dfs order traversal of the loop bbs.  */

static bool
bb_in_loop_p (const_basic_block bb, const void *data)
{
  const struct loop *const loop = (const struct loop *)data;
  if (flow_bb_inside_loop_p (loop, bb))
    return true;
  return false;
}
/* Function new_loop_vec_info.

   Create and initialize a new loop_vec_info struct for LOOP, as well as
   stmt_vec_info structs for all the stmts in LOOP.  */

static loop_vec_info
new_loop_vec_info (struct loop *loop)
{
  loop_vec_info res;
  basic_block *bbs;
  gimple_stmt_iterator si;
  unsigned int i, nbbs;

  res = (loop_vec_info) xcalloc (1, sizeof (struct _loop_vec_info));
  LOOP_VINFO_LOOP (res) = loop;

  bbs = get_loop_body (loop);

  /* Create/Update stmt_info for all stmts in the loop.  */
  for (i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = bbs[i];

      /* BBs in a nested inner-loop will have been already processed (because
         we will have called vect_analyze_loop_form for any nested inner-loop).
         Therefore, for stmts in an inner-loop we just want to update the
         STMT_VINFO_LOOP_VINFO field of their stmt_info to point to the new
         loop_info of the outer-loop we are currently considering to vectorize
         (instead of the loop_info of the inner-loop).
         For stmts in other BBs we need to create a stmt_info from scratch.  */
      if (bb->loop_father != loop)
        {
          /* Inner-loop bb.  */
          gcc_assert (loop->inner && bb->loop_father == loop->inner);
          for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple phi = gsi_stmt (si);
              stmt_vec_info stmt_info = vinfo_for_stmt (phi);
              loop_vec_info inner_loop_vinfo =
                STMT_VINFO_LOOP_VINFO (stmt_info);
              gcc_assert (loop->inner == LOOP_VINFO_LOOP (inner_loop_vinfo));
              STMT_VINFO_LOOP_VINFO (stmt_info) = res;
            }
          for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple stmt = gsi_stmt (si);
              stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
              loop_vec_info inner_loop_vinfo =
                STMT_VINFO_LOOP_VINFO (stmt_info);
              gcc_assert (loop->inner == LOOP_VINFO_LOOP (inner_loop_vinfo));
              STMT_VINFO_LOOP_VINFO (stmt_info) = res;
            }
        }
      else
        {
          /* bb in current nest.  */
          for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple phi = gsi_stmt (si);
              gimple_set_uid (phi, 0);
              set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, res, NULL));
            }

          for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple stmt = gsi_stmt (si);
              gimple_set_uid (stmt, 0);
              set_vinfo_for_stmt (stmt, new_stmt_vec_info (stmt, res, NULL));
            }
        }
    }

  /* CHECKME: We want to visit all BBs before their successors (except for
     latch blocks, for which this assertion wouldn't hold).  In the simple
     case of the loop forms we allow, a dfs order of the BBs would be the
     same as reversed postorder traversal, so we are safe.  */

  free (bbs);
  bbs = XCNEWVEC (basic_block, loop->num_nodes);
  nbbs = dfs_enumerate_from (loop->header, 0, bb_in_loop_p,
                             bbs, loop->num_nodes, loop);
  gcc_assert (nbbs == loop->num_nodes);

  LOOP_VINFO_BBS (res) = bbs;
  LOOP_VINFO_NITERS (res) = NULL;
  LOOP_VINFO_NITERS_UNCHANGED (res) = NULL;
  LOOP_VINFO_COST_MODEL_MIN_ITERS (res) = 0;
  LOOP_VINFO_VECTORIZABLE_P (res) = 0;
  LOOP_PEELING_FOR_ALIGNMENT (res) = 0;
  LOOP_VINFO_VECT_FACTOR (res) = 0;
  LOOP_VINFO_DATAREFS (res) = VEC_alloc (data_reference_p, heap, 10);
  LOOP_VINFO_DDRS (res) = VEC_alloc (ddr_p, heap, 10 * 10);
  LOOP_VINFO_UNALIGNED_DR (res) = NULL;
  LOOP_VINFO_MAY_MISALIGN_STMTS (res) =
    VEC_alloc (gimple, heap,
               PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIGNMENT_CHECKS));
  LOOP_VINFO_MAY_ALIAS_DDRS (res) =
    VEC_alloc (ddr_p, heap,
               PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS));
  LOOP_VINFO_STRIDED_STORES (res) = VEC_alloc (gimple, heap, 10);
  LOOP_VINFO_REDUCTIONS (res) = VEC_alloc (gimple, heap, 10);
  LOOP_VINFO_SLP_INSTANCES (res) = VEC_alloc (slp_instance, heap, 10);
  LOOP_VINFO_SLP_UNROLLING_FACTOR (res) = 1;
  LOOP_VINFO_PEELING_HTAB (res) = NULL;

  return res;
}
/* Function destroy_loop_vec_info.

   Free LOOP_VINFO struct, as well as all the stmt_vec_info structs of all
   the stmts in the loop.  */

void
destroy_loop_vec_info (loop_vec_info loop_vinfo, bool clean_stmts)
{
  struct loop *loop;
  basic_block *bbs;
  int nbbs;
  gimple_stmt_iterator si;
  int j;
  VEC (slp_instance, heap) *slp_instances;
  slp_instance instance;

  if (!loop_vinfo)
    return;

  loop = LOOP_VINFO_LOOP (loop_vinfo);

  bbs = LOOP_VINFO_BBS (loop_vinfo);
  nbbs = loop->num_nodes;

  if (!clean_stmts)
    {
      free (LOOP_VINFO_BBS (loop_vinfo));
      free_data_refs (LOOP_VINFO_DATAREFS (loop_vinfo));
      free_dependence_relations (LOOP_VINFO_DDRS (loop_vinfo));
      VEC_free (gimple, heap, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo));

      free (loop_vinfo);
      loop->aux = NULL;
      return;
    }

  for (j = 0; j < nbbs; j++)
    {
      basic_block bb = bbs[j];
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        free_stmt_vec_info (gsi_stmt (si));

      for (si = gsi_start_bb (bb); !gsi_end_p (si); )
        {
          gimple stmt = gsi_stmt (si);
          stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

          if (stmt_info)
            {
              /* Check if this is a "pattern stmt" (introduced by the
                 vectorizer during the pattern recognition pass).  */
              bool remove_stmt_p = false;
              gimple orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
              if (orig_stmt)
                {
                  stmt_vec_info orig_stmt_info = vinfo_for_stmt (orig_stmt);
                  if (orig_stmt_info
                      && STMT_VINFO_IN_PATTERN_P (orig_stmt_info))
                    remove_stmt_p = true;
                }

              /* Free stmt_vec_info.  */
              free_stmt_vec_info (stmt);

              /* Remove dead "pattern stmts".  */
              if (remove_stmt_p)
                gsi_remove (&si, true);
            }
          gsi_next (&si);
        }
    }

  free (LOOP_VINFO_BBS (loop_vinfo));
  free_data_refs (LOOP_VINFO_DATAREFS (loop_vinfo));
  free_dependence_relations (LOOP_VINFO_DDRS (loop_vinfo));
  VEC_free (gimple, heap, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo));
  VEC_free (ddr_p, heap, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo));
  slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
  FOR_EACH_VEC_ELT (slp_instance, slp_instances, j, instance)
    vect_free_slp_instance (instance);

  VEC_free (slp_instance, heap, LOOP_VINFO_SLP_INSTANCES (loop_vinfo));
  VEC_free (gimple, heap, LOOP_VINFO_STRIDED_STORES (loop_vinfo));
  VEC_free (gimple, heap, LOOP_VINFO_REDUCTIONS (loop_vinfo));

  if (LOOP_VINFO_PEELING_HTAB (loop_vinfo))
    htab_delete (LOOP_VINFO_PEELING_HTAB (loop_vinfo));

  free (loop_vinfo);
  loop->aux = NULL;
}
/* Function vect_analyze_loop_1.

   Apply a set of analyses on LOOP, and create a loop_vec_info struct
   for it.  The different analyses will record information in the
   loop_vec_info struct.  This is a subset of the analyses applied in
   vect_analyze_loop, to be applied on an inner-loop nested in the loop
   that is now considered for (outer-loop) vectorization.  */

static loop_vec_info
vect_analyze_loop_1 (struct loop *loop)
{
  loop_vec_info loop_vinfo;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "===== analyze_loop_nest_1 =====");

  /* Check the CFG characteristics of the loop (nesting, entry/exit,
     etc.).  */

  loop_vinfo = vect_analyze_loop_form (loop);
  if (!loop_vinfo)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad inner-loop form.");
      return NULL;
    }

  return loop_vinfo;
}
/* Function vect_analyze_loop_form.

   Verify that certain CFG restrictions hold, including:
   - the loop has a pre-header
   - the loop has a single entry and exit
   - the loop exit condition is simple enough, and the number of iterations
     can be analyzed (a countable loop).  */

loop_vec_info
vect_analyze_loop_form (struct loop *loop)
{
  loop_vec_info loop_vinfo;
  gimple loop_cond;
  tree number_of_iterations = NULL;
  loop_vec_info inner_loop_vinfo = NULL;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_analyze_loop_form ===");

  /* Different restrictions apply when we are considering an inner-most loop,
     vs. an outer (nested) loop.
     (FORNOW. May want to relax some of these restrictions in the future).  */

  if (!loop->inner)
    {
      /* Inner-most loop.  We currently require that the number of BBs is
         exactly 2 (the header and latch).  Vectorizable inner-most loops
         look like this:

                        (pre-header)
                           |
                          header <--------+
                           | |            |
                           | +--> latch --+
                           |
                        (exit-bb)  */

      if (loop->num_nodes != 2)
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: control flow in loop.");
          return NULL;
        }

      if (empty_block_p (loop->header))
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: empty loop.");
          return NULL;
        }
    }
  else
    {
      struct loop *innerloop = loop->inner;
      edge entryedge;

      /* Nested loop.  We currently require that the loop is doubly-nested,
         contains a single inner loop, and the number of BBs is exactly 5.
         Vectorizable outer-loops look like this:

                        (pre-header)
                           |
                          header <---+
                           |         |
                          inner-loop |
                           |         |
                          tail ------+
                           |
                        (exit-bb)

         The inner-loop has the properties expected of inner-most loops
         as described above.  */

      if ((loop->inner)->inner || (loop->inner)->next)
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: multiple nested loops.");
          return NULL;
        }

      /* Analyze the inner-loop.  */
      inner_loop_vinfo = vect_analyze_loop_1 (loop->inner);
      if (!inner_loop_vinfo)
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: Bad inner loop.");
          return NULL;
        }

      if (!expr_invariant_in_loop_p (loop,
                                     LOOP_VINFO_NITERS (inner_loop_vinfo)))
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump,
                     "not vectorized: inner-loop count not invariant.");
          destroy_loop_vec_info (inner_loop_vinfo, true);
          return NULL;
        }

      if (loop->num_nodes != 5)
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: control flow in loop.");
          destroy_loop_vec_info (inner_loop_vinfo, true);
          return NULL;
        }

      gcc_assert (EDGE_COUNT (innerloop->header->preds) == 2);
      entryedge = EDGE_PRED (innerloop->header, 0);
      if (EDGE_PRED (innerloop->header, 0)->src == innerloop->latch)
        entryedge = EDGE_PRED (innerloop->header, 1);

      if (entryedge->src != loop->header
          || !single_exit (innerloop)
          || single_exit (innerloop)->dest != EDGE_PRED (loop->latch, 0)->src)
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump,
                     "not vectorized: unsupported outerloop form.");
          destroy_loop_vec_info (inner_loop_vinfo, true);
          return NULL;
        }

      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "Considering outer-loop vectorization.");
    }

  if (!single_exit (loop)
      || EDGE_COUNT (loop->header->preds) != 2)
    {
      if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
        {
          if (!single_exit (loop))
            fprintf (vect_dump, "not vectorized: multiple exits.");
          else if (EDGE_COUNT (loop->header->preds) != 2)
            fprintf (vect_dump, "not vectorized: too many incoming edges.");
        }
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  /* We assume that the loop exit condition is at the end of the loop, i.e.,
     that the loop is represented as a do-while (with a proper if-guard
     before the loop if needed), where the loop header contains all the
     executable statements, and the latch is empty.  */
  if (!empty_block_p (loop->latch)
      || !gimple_seq_empty_p (phi_nodes (loop->latch)))
    {
      if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
        fprintf (vect_dump, "not vectorized: unexpected loop form.");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  /* Make sure there exists a single-predecessor exit bb:  */
  if (!single_pred_p (single_exit (loop)->dest))
    {
      edge e = single_exit (loop);
      if (!(e->flags & EDGE_ABNORMAL))
        {
          split_loop_exit_edge (e);
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "split exit edge.");
        }
      else
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: abnormal loop exit edge.");
          if (inner_loop_vinfo)
            destroy_loop_vec_info (inner_loop_vinfo, true);
          return NULL;
        }
    }

  loop_cond = vect_get_loop_niters (loop, &number_of_iterations);
  if (!loop_cond)
    {
      if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
        fprintf (vect_dump, "not vectorized: complicated exit condition.");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  if (!number_of_iterations)
    {
      if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
        fprintf (vect_dump,
                 "not vectorized: number of iterations cannot be computed.");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  if (chrec_contains_undetermined (number_of_iterations))
    {
      if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
        fprintf (vect_dump, "Infinite number of iterations.");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  if (!NITERS_KNOWN_P (number_of_iterations))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "Symbolic number of iterations is ");
          print_generic_expr (vect_dump, number_of_iterations, TDF_DETAILS);
        }
    }
  else if (TREE_INT_CST_LOW (number_of_iterations) == 0)
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: number of iterations = 0.");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, false);
      return NULL;
    }

  loop_vinfo = new_loop_vec_info (loop);
  LOOP_VINFO_NITERS (loop_vinfo) = number_of_iterations;
  LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = number_of_iterations;

  STMT_VINFO_TYPE (vinfo_for_stmt (loop_cond)) = loop_exit_ctrl_vec_info_type;

  /* CHECKME: May want to keep it around in the future.  */
  if (inner_loop_vinfo)
    destroy_loop_vec_info (inner_loop_vinfo, false);

  gcc_assert (!loop->aux);
  loop->aux = loop_vinfo;
  return loop_vinfo;
}
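/* Illustrative note: the "countable do-while" shape required above
   corresponds to source such as

     if (n > 0)
       {
         i = 0;
         do { a[i] = b[i]; i++; } while (i < n);
       }

   where the loop header holds all executable stmts and the latch stays
   empty.  */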
/* Get cost by calling cost target builtin.  */

static inline int
vect_get_cost (enum vect_cost_for_stmt type_of_cost)
{
  tree dummy_type = NULL;
  int dummy = 0;

  return targetm.vectorize.builtin_vectorization_cost (type_of_cost,
                                                       dummy_type, dummy);
}
/* Function vect_analyze_loop_operations.

   Scan the loop stmts and make sure they are all vectorizable.  */

static bool
vect_analyze_loop_operations (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  unsigned int vectorization_factor = 0;
  int i;
  gimple phi;
  stmt_vec_info stmt_info;
  bool need_to_vectorize = false;
  int min_profitable_iters;
  int min_scalar_loop_bound;
  unsigned int th;
  bool only_slp_in_loop = true, ok;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_analyze_loop_operations ===");

  gcc_assert (LOOP_VINFO_VECT_FACTOR (loop_vinfo));
  vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);

  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];

      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        {
          phi = gsi_stmt (si);
          ok = true;

          stmt_info = vinfo_for_stmt (phi);
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "examining phi: ");
              print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
            }

          if (! is_loop_header_bb_p (bb))
            {
              /* Inner-loop loop-closed exit phi in outer-loop vectorization
                 (i.e., a phi in the tail of the outer-loop).
                 FORNOW: we currently don't support the case that these phis
                 are not used in the outerloop (unless it is double reduction,
                 i.e., this phi is vect_reduction_def), because this case
                 requires to actually do something here.  */
              if ((!STMT_VINFO_RELEVANT_P (stmt_info)
                   || STMT_VINFO_LIVE_P (stmt_info))
                  && STMT_VINFO_DEF_TYPE (stmt_info)
                     != vect_double_reduction_def)
                {
                  if (vect_print_dump_info (REPORT_DETAILS))
                    fprintf (vect_dump,
                             "Unsupported loop-closed phi in outer-loop.");
                  return false;
                }
              continue;
            }

          gcc_assert (stmt_info);

          if (STMT_VINFO_LIVE_P (stmt_info))
            {
              /* FORNOW: not yet supported.  */
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                fprintf (vect_dump, "not vectorized: value used after loop.");
              return false;
            }

          if (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope
              && STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def)
            {
              /* A scalar-dependence cycle that we don't support.  */
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                fprintf (vect_dump,
                         "not vectorized: scalar dependence cycle.");
              return false;
            }

          if (STMT_VINFO_RELEVANT_P (stmt_info))
            {
              need_to_vectorize = true;
              if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def)
                ok = vectorizable_induction (phi, NULL, NULL);
            }

          if (!ok)
            {
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                {
                  fprintf (vect_dump,
                           "not vectorized: relevant phi not supported: ");
                  print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
                }
              return false;
            }
        }

      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
        {
          gimple stmt = gsi_stmt (si);
          stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

          gcc_assert (stmt_info);

          if (!vect_analyze_stmt (stmt, &need_to_vectorize, NULL))
            return false;

          if ((STMT_VINFO_RELEVANT_P (stmt_info)
               || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
              && !PURE_SLP_STMT (stmt_info))
            /* STMT needs both SLP and loop-based vectorization.  */
            only_slp_in_loop = false;
        }
    } /* bbs */

  /* All operations in the loop are either irrelevant (deal with loop
     control, or dead), or only used outside the loop and can be moved
     out of the loop (e.g. invariants, inductions).  The loop can be
     optimized away by scalar optimizations.  We're better off not
     touching this loop.  */
  if (!need_to_vectorize)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump,
                 "All the computation can be taken out of the loop.");
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump,
                 "not vectorized: redundant loop. no profit to vectorize.");
      return false;
    }

  /* If all the stmts in the loop can be SLPed, we perform only SLP, and the
     vectorization factor of the loop is the unrolling factor required by
     the SLP instances.  If that unrolling factor is 1, we say that we
     perform pure SLP on the loop - cross iteration parallelism is not
     exploited.  */
  if (only_slp_in_loop)
    vectorization_factor = LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo);
  else
    vectorization_factor = least_common_multiple (vectorization_factor,
                              LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo));

  LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;

  if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      && vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump,
             "vectorization_factor = %d, niters = " HOST_WIDE_INT_PRINT_DEC,
             vectorization_factor, LOOP_VINFO_INT_NITERS (loop_vinfo));

  if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      && (LOOP_VINFO_INT_NITERS (loop_vinfo) < vectorization_factor))
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: iteration count too small.");
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "not vectorized: iteration count smaller than "
                 "vectorization factor.");
      return false;
    }

  /* Analyze cost.  Decide if worthwhile to vectorize.  */

  /* Once VF is set, SLP costs should be updated since the number of created
     vector stmts depends on VF.  */
  vect_update_slp_costs_according_to_vf (loop_vinfo);

  min_profitable_iters = vect_estimate_min_profitable_iters (loop_vinfo);
  LOOP_VINFO_COST_MODEL_MIN_ITERS (loop_vinfo) = min_profitable_iters;

  if (min_profitable_iters < 0)
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: vectorization not profitable.");
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "not vectorized: vector version will never be "
                 "profitable.");
      return false;
    }

  min_scalar_loop_bound = ((PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND)
                            * vectorization_factor) - 1);

  /* Use the cost model only if it is more conservative than user specified
     threshold.  */

  th = (unsigned) min_scalar_loop_bound;
  if (min_profitable_iters
      && (!min_scalar_loop_bound
          || min_profitable_iters > min_scalar_loop_bound))
    th = (unsigned) min_profitable_iters;

  if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      && LOOP_VINFO_INT_NITERS (loop_vinfo) <= th)
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: vectorization not "
                 "profitable.");
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "not vectorized: iteration count smaller than "
                 "user specified loop bound parameter or minimum "
                 "profitable iterations (whichever is more conservative).");
      return false;
    }

  if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      || LOOP_VINFO_INT_NITERS (loop_vinfo) % vectorization_factor != 0
      || LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "epilog loop required.");
      if (!vect_can_advance_ivs_p (loop_vinfo))
        {
          if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
            fprintf (vect_dump,
                     "not vectorized: can't create epilog loop 1.");
          return false;
        }
      if (!slpeel_can_duplicate_loop_p (loop, single_exit (loop)))
        {
          if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
            fprintf (vect_dump,
                     "not vectorized: can't create epilog loop 2.");
          return false;
        }
    }

  return true;
}
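/* Worked example (illustrative): with VF == 4 and a known iteration count
   of 10, 10 % 4 != 0, so the checks above require an epilogue loop to
   execute the 2 remaining scalar iterations (10 = 2 * 4 + 2).  */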
/* Function vect_analyze_loop_2.

   Apply a set of analyses on LOOP, and create a loop_vec_info struct
   for it.  The different analyses will record information in the
   loop_vec_info struct.  */
static bool
vect_analyze_loop_2 (loop_vec_info loop_vinfo)
{
  bool ok, dummy;
  int max_vf = MAX_VECTORIZATION_FACTOR;
  int min_vf = 2;

  /* Find all data references in the loop (which correspond to vdefs/vuses)
     and analyze their evolution in the loop.  Also adjust the minimal
     vectorization factor according to the loads and stores.

     FORNOW: Handle only simple, array references, whose
     alignment can be forced, and aligned pointer-references.  */

  ok = vect_analyze_data_refs (loop_vinfo, NULL, &min_vf);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data references.");
      return false;
    }

  /* Classify all cross-iteration scalar data-flow cycles.
     Cross-iteration cycles caused by virtual phis are analyzed
     separately.  */

  vect_analyze_scalar_cycles (loop_vinfo);

  vect_pattern_recog (loop_vinfo);

  /* Data-flow analysis to detect stmts that do not need to be
     vectorized.  */

  ok = vect_mark_stmts_to_be_vectorized (loop_vinfo);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "unexpected pattern.");
      return false;
    }

  /* Analyze data dependences between the data-refs in the loop
     and adjust the maximum vectorization factor according to
     the dependences.
     FORNOW: fail at the first data dependence that we encounter.  */

  ok = vect_analyze_data_ref_dependences (loop_vinfo, NULL, &max_vf, &dummy);
  if (!ok
      || max_vf < min_vf)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data dependence.");
      return false;
    }

  ok = vect_determine_vectorization_factor (loop_vinfo);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "can't determine vectorization factor.");
      return false;
    }
  if (max_vf < LOOP_VINFO_VECT_FACTOR (loop_vinfo))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data dependence.");
      return false;
    }

  /* Analyze the alignment of the data-refs in the loop.
     Fail if a data reference is found that cannot be vectorized.  */

  ok = vect_analyze_data_refs_alignment (loop_vinfo, NULL);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data alignment.");
      return false;
    }

  /* Analyze the access patterns of the data-refs in the loop (consecutive,
     complex, etc.).  FORNOW: Only handle consecutive access pattern.  */

  ok = vect_analyze_data_ref_accesses (loop_vinfo, NULL);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data access.");
      return false;
    }

  /* Prune the list of ddrs to be tested at run-time by versioning for alias.
     It is important to call pruning after vect_analyze_data_ref_accesses,
     since we use grouping information gathered by interleaving analysis.  */
  ok = vect_prune_runtime_alias_test_list (loop_vinfo);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "too long list of versioning for alias "
                            "run-time tests.");
      return false;
    }

  /* This pass will decide on using loop versioning and/or loop peeling in
     order to enhance the alignment of data references in the loop.  */

  ok = vect_enhance_data_refs_alignment (loop_vinfo);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data alignment.");
      return false;
    }

  /* Check the SLP opportunities in the loop, analyze and build SLP trees.  */
  ok = vect_analyze_slp (loop_vinfo, NULL);
  if (ok)
    {
      /* Decide which possible SLP instances to SLP.  */
      vect_make_slp_decision (loop_vinfo);

      /* Find stmts that need to be both vectorized and SLPed.  */
      vect_detect_hybrid_slp (loop_vinfo);
    }

  /* Scan all the operations in the loop and make sure they are
     vectorizable.  */

  ok = vect_analyze_loop_operations (loop_vinfo);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad operation or unsupported loop bound.");
      return false;
    }

  return true;
}
/* Function vect_analyze_loop.

   Apply a set of analyses on LOOP, and create a loop_vec_info struct
   for it.  The different analyses will record information in the
   loop_vec_info struct.  */
loop_vec_info
vect_analyze_loop (struct loop *loop)
{
  loop_vec_info loop_vinfo;
  unsigned int vector_sizes;

  /* Autodetect first vector size we try.  */
  current_vector_size = 0;
  vector_sizes = targetm.vectorize.autovectorize_vector_sizes ();

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "===== analyze_loop_nest =====");

  if (loop_outer (loop)
      && loop_vec_info_for_loop (loop_outer (loop))
      && LOOP_VINFO_VECTORIZABLE_P (loop_vec_info_for_loop
                                      (loop_outer (loop))))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "outer-loop already vectorized.");
      return NULL;
    }

  while (1)
    {
      /* Check the CFG characteristics of the loop (nesting, entry/exit).  */
      loop_vinfo = vect_analyze_loop_form (loop);
      if (!loop_vinfo)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "bad loop form.");
          return NULL;
        }

      if (vect_analyze_loop_2 (loop_vinfo))
        {
          LOOP_VINFO_VECTORIZABLE_P (loop_vinfo) = 1;

          return loop_vinfo;
        }

      destroy_loop_vec_info (loop_vinfo, true);

      vector_sizes &= ~current_vector_size;
      if (vector_sizes == 0
          || current_vector_size == 0)
        return NULL;

      /* Try the next biggest vector size.  */
      current_vector_size = 1 << floor_log2 (vector_sizes);
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "***** Re-trying analysis with "
                 "vector size %d\n", current_vector_size);
    }
}
/* Function reduction_code_for_scalar_code

   Input:
   CODE - tree_code of a reduction operation.

   Output:
   REDUC_CODE - the corresponding tree-code to be used to reduce the
      vector of partial results into a single scalar result (which
      will also reside in a vector), or ERROR_MARK if the operation is
      a supported reduction operation, but does not have such a tree-code.

   Return FALSE if CODE currently cannot be vectorized as a reduction.  */

static bool
reduction_code_for_scalar_code (enum tree_code code,
                                enum tree_code *reduc_code)
{
  switch (code)
    {
    case MAX_EXPR:
      *reduc_code = REDUC_MAX_EXPR;
      return true;

    case MIN_EXPR:
      *reduc_code = REDUC_MIN_EXPR;
      return true;

    case PLUS_EXPR:
      *reduc_code = REDUC_PLUS_EXPR;
      return true;

    case MULT_EXPR:
    case MINUS_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
    case BIT_AND_EXPR:
      *reduc_code = ERROR_MARK;
      return true;

    default:
      return false;
    }
}
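/* Illustrative note: for the sum reduction "sum += a[i]" the scalar code
   is PLUS_EXPR and *REDUC_CODE becomes REDUC_PLUS_EXPR, which folds the
   vector of partial sums into a single result.  MULT_EXPR and the bitwise
   ops are vectorizable as reductions too, but have no dedicated reduction
   tree-code, hence ERROR_MARK.  */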
/* Error reporting helper for vect_is_simple_reduction below.  GIMPLE
   statement STMT is printed with a message MSG.  */

static void
report_vect_op (gimple stmt, const char *msg)
{
  fprintf (vect_dump, "%s", msg);
  print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
}
1634 /* Function vect_is_simple_reduction_1
1636 (1) Detect a cross-iteration def-use cycle that represents a simple
1637 reduction computation. We look for the following pattern:
1639 loop_header:
1640 a1 = phi < a0, a2 >
1641 a3 = ...
1642 a2 = operation (a3, a1)
1644 such that:
1645 1. operation is commutative and associative and it is safe to
1646 change the order of the computation (if CHECK_REDUCTION is true)
1647 2. no uses for a2 in the loop (a2 is used out of the loop)
1648 3. no uses of a1 in the loop besides the reduction operation.
1650 Condition 1 is tested here.
1651 Conditions 2,3 are tested in vect_mark_stmts_to_be_vectorized.
1653 (2) Detect a cross-iteration def-use cycle in nested loops, i.e.,
1654 nested cycles, if CHECK_REDUCTION is false.
1656 (3) Detect cycles of phi nodes in outer-loop vectorization, i.e., double
1657 reductions:
1659 a1 = phi < a0, a2 >
1660 inner loop (def of a3)
1661 a2 = phi < a3 >
1663 If MODIFY is true it tries also to rework the code in-place to enable
1664 detection of more reduction patterns. For the time being we rewrite
1665 "res -= RHS" into "rhs += -RHS" when it seems worthwhile.
1668 static gimple
1669 vect_is_simple_reduction_1 (loop_vec_info loop_info, gimple phi,
1670 bool check_reduction, bool *double_reduc,
1671 bool modify)
1673 struct loop *loop = (gimple_bb (phi))->loop_father;
1674 struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
1675 edge latch_e = loop_latch_edge (loop);
1676 tree loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
1677 gimple def_stmt, def1 = NULL, def2 = NULL;
1678 enum tree_code orig_code, code;
1679 tree op1, op2, op3 = NULL_TREE, op4 = NULL_TREE;
1680 tree type;
1681 int nloop_uses;
1682 tree name;
1683 imm_use_iterator imm_iter;
1684 use_operand_p use_p;
1685 bool phi_def;
1687 *double_reduc = false;
1689 /* If CHECK_REDUCTION is true, we assume inner-most loop vectorization,
1690 otherwise, we assume outer loop vectorization. */
1691 gcc_assert ((check_reduction && loop == vect_loop)
1692 || (!check_reduction && flow_loop_nested_p (vect_loop, loop)));
1694 name = PHI_RESULT (phi);
1695 nloop_uses = 0;
1696 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
1698 gimple use_stmt = USE_STMT (use_p);
1699 if (is_gimple_debug (use_stmt))
1700 continue;
1701 if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))
1702 && vinfo_for_stmt (use_stmt)
1703 && !is_pattern_stmt_p (vinfo_for_stmt (use_stmt)))
1704 nloop_uses++;
1705 if (nloop_uses > 1)
1707 if (vect_print_dump_info (REPORT_DETAILS))
1708 fprintf (vect_dump, "reduction used in loop.");
1709 return NULL;
1713 if (TREE_CODE (loop_arg) != SSA_NAME)
1715 if (vect_print_dump_info (REPORT_DETAILS))
1717 fprintf (vect_dump, "reduction: not ssa_name: ");
1718 print_generic_expr (vect_dump, loop_arg, TDF_SLIM);
1720 return NULL;
1723 def_stmt = SSA_NAME_DEF_STMT (loop_arg);
1724 if (!def_stmt)
1726 if (vect_print_dump_info (REPORT_DETAILS))
1727 fprintf (vect_dump, "reduction: no def_stmt.");
1728 return NULL;
1731 if (!is_gimple_assign (def_stmt) && gimple_code (def_stmt) != GIMPLE_PHI)
1733 if (vect_print_dump_info (REPORT_DETAILS))
1734 print_gimple_stmt (vect_dump, def_stmt, 0, TDF_SLIM);
1735 return NULL;
1738 if (is_gimple_assign (def_stmt))
1740 name = gimple_assign_lhs (def_stmt);
1741 phi_def = false;
1743 else
1745 name = PHI_RESULT (def_stmt);
1746 phi_def = true;
1749 nloop_uses = 0;
1750 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
1752 gimple use_stmt = USE_STMT (use_p);
1753 if (is_gimple_debug (use_stmt))
1754 continue;
1755 if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))
1756 && vinfo_for_stmt (use_stmt)
1757 && !is_pattern_stmt_p (vinfo_for_stmt (use_stmt)))
1758 nloop_uses++;
1759 if (nloop_uses > 1)
1761 if (vect_print_dump_info (REPORT_DETAILS))
1762 fprintf (vect_dump, "reduction used in loop.");
1763 return NULL;
1767 /* If DEF_STMT is a phi node itself, we expect it to have a single argument
1768 defined in the inner loop. */
1769 if (phi_def)
1771 op1 = PHI_ARG_DEF (def_stmt, 0);
1773 if (gimple_phi_num_args (def_stmt) != 1
1774 || TREE_CODE (op1) != SSA_NAME)
1776 if (vect_print_dump_info (REPORT_DETAILS))
1777 fprintf (vect_dump, "unsupported phi node definition.");
1779 return NULL;
1782 def1 = SSA_NAME_DEF_STMT (op1);
1783 if (flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
1784 && loop->inner
1785 && flow_bb_inside_loop_p (loop->inner, gimple_bb (def1))
1786 && is_gimple_assign (def1))
1788 if (vect_print_dump_info (REPORT_DETAILS))
1789 report_vect_op (def_stmt, "detected double reduction: ");
1791 *double_reduc = true;
1792 return def_stmt;
1795 return NULL;
1798 code = orig_code = gimple_assign_rhs_code (def_stmt);
1800 /* We can handle "res -= x[i]", which is non-associative by
1801 simply rewriting this into "res += -x[i]". Avoid changing
1802 gimple instruction for the first simple tests and only do this
1803 if we're allowed to change code at all. */
1804 if (code == MINUS_EXPR
1805 && modify
1806 && (op1 = gimple_assign_rhs1 (def_stmt))
1807 && TREE_CODE (op1) == SSA_NAME
1808 && SSA_NAME_DEF_STMT (op1) == phi)
1809 code = PLUS_EXPR;
1811 if (check_reduction
1812 && (!commutative_tree_code (code) || !associative_tree_code (code)))
1814 if (vect_print_dump_info (REPORT_DETAILS))
1815 report_vect_op (def_stmt, "reduction: not commutative/associative: ");
1816 return NULL;
1819 if (get_gimple_rhs_class (code) != GIMPLE_BINARY_RHS)
1821 if (code != COND_EXPR)
1823 if (vect_print_dump_info (REPORT_DETAILS))
1824 report_vect_op (def_stmt, "reduction: not binary operation: ");
1826 return NULL;
1829 op3 = TREE_OPERAND (gimple_assign_rhs1 (def_stmt), 0);
1830 if (COMPARISON_CLASS_P (op3))
1832 op4 = TREE_OPERAND (op3, 1);
1833 op3 = TREE_OPERAND (op3, 0);
1836 op1 = TREE_OPERAND (gimple_assign_rhs1 (def_stmt), 1);
1837 op2 = TREE_OPERAND (gimple_assign_rhs1 (def_stmt), 2);
1839 if (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op2) != SSA_NAME)
1841 if (vect_print_dump_info (REPORT_DETAILS))
1842 report_vect_op (def_stmt, "reduction: uses not ssa_names: ");
1844 return NULL;
1847 else
1849 op1 = gimple_assign_rhs1 (def_stmt);
1850 op2 = gimple_assign_rhs2 (def_stmt);
1852 if (TREE_CODE (op1) != SSA_NAME || TREE_CODE (op2) != SSA_NAME)
1854 if (vect_print_dump_info (REPORT_DETAILS))
1855 report_vect_op (def_stmt, "reduction: uses not ssa_names: ");
1857 return NULL;
1861 type = TREE_TYPE (gimple_assign_lhs (def_stmt));
1862 if ((TREE_CODE (op1) == SSA_NAME
1863 && !types_compatible_p (type,TREE_TYPE (op1)))
1864 || (TREE_CODE (op2) == SSA_NAME
1865 && !types_compatible_p (type, TREE_TYPE (op2)))
1866 || (op3 && TREE_CODE (op3) == SSA_NAME
1867 && !types_compatible_p (type, TREE_TYPE (op3)))
1868 || (op4 && TREE_CODE (op4) == SSA_NAME
1869 && !types_compatible_p (type, TREE_TYPE (op4))))
1871 if (vect_print_dump_info (REPORT_DETAILS))
1873 fprintf (vect_dump, "reduction: multiple types: operation type: ");
1874 print_generic_expr (vect_dump, type, TDF_SLIM);
1875 fprintf (vect_dump, ", operands types: ");
1876 print_generic_expr (vect_dump, TREE_TYPE (op1), TDF_SLIM);
1877 fprintf (vect_dump, ",");
1878 print_generic_expr (vect_dump, TREE_TYPE (op2), TDF_SLIM);
1879 if (op3)
1881 fprintf (vect_dump, ",");
1882 print_generic_expr (vect_dump, TREE_TYPE (op3), TDF_SLIM);
1885 if (op4)
1887 fprintf (vect_dump, ",");
1888 print_generic_expr (vect_dump, TREE_TYPE (op4), TDF_SLIM);
1892 return NULL;
1895 /* Check that it's ok to change the order of the computation.
1896 Generally, when vectorizing a reduction we change the order of the
1897 computation. This may change the behavior of the program in some
1898 cases, so we need to check that this is ok. One exception is when
1899 vectorizing an outer-loop: the inner-loop is executed sequentially,
1900 and therefore vectorizing reductions in the inner-loop during
1901 outer-loop vectorization is safe. */
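/* For example, floating-point addition is not associative: with doubles,
   (1e20 + -1e20) + 1.0 evaluates to 1.0 while 1e20 + (-1e20 + 1.0)
   evaluates to 0.0, so reordering the additions of a reduction can
   change the computed result.  */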
1903 /* CHECKME: check for !flag_finite_math_only too? */
1904 if (SCALAR_FLOAT_TYPE_P (type) && !flag_associative_math
1905 && check_reduction)
1907 /* Changing the order of operations changes the semantics. */
1908 if (vect_print_dump_info (REPORT_DETAILS))
1909 report_vect_op (def_stmt, "reduction: unsafe fp math optimization: ");
1910 return NULL;
1912 else if (INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type)
1913 && check_reduction)
1915 /* Changing the order of operations changes the semantics. */
1916 if (vect_print_dump_info (REPORT_DETAILS))
1917 report_vect_op (def_stmt, "reduction: unsafe int math optimization: ");
1918 return NULL;
1920 else if (SAT_FIXED_POINT_TYPE_P (type) && check_reduction)
1922 /* Changing the order of operations changes the semantics. */
1923 if (vect_print_dump_info (REPORT_DETAILS))
1924 report_vect_op (def_stmt,
1925 "reduction: unsafe fixed-point math optimization: ");
1926 return NULL;
1929 /* If we detected "res -= x[i]" earlier, rewrite it into
1930 "res += -x[i]" now. If this turns out to be useless reassoc
1931 will clean it up again. */
1932 if (orig_code == MINUS_EXPR)
1934 tree rhs = gimple_assign_rhs2 (def_stmt);
1935 tree negrhs = make_ssa_name (SSA_NAME_VAR (rhs), NULL);
1936 gimple negate_stmt = gimple_build_assign_with_ops (NEGATE_EXPR, negrhs,
1937 rhs, NULL);
1938 gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
1939 set_vinfo_for_stmt (negate_stmt, new_stmt_vec_info (negate_stmt,
1940 loop_info, NULL));
1941 gsi_insert_before (&gsi, negate_stmt, GSI_NEW_STMT);
1942 gimple_assign_set_rhs2 (def_stmt, negrhs);
1943 gimple_assign_set_rhs_code (def_stmt, PLUS_EXPR);
1944 update_stmt (def_stmt);
1947 /* Reduction is safe. We're dealing with one of the following:
1948 1) integer arithmetic and no trapv
1949 2) floating point arithmetic, and special flags permit this optimization
1950 3) nested cycle (i.e., outer loop vectorization). */
1951 if (TREE_CODE (op1) == SSA_NAME)
1952 def1 = SSA_NAME_DEF_STMT (op1);
1954 if (TREE_CODE (op2) == SSA_NAME)
1955 def2 = SSA_NAME_DEF_STMT (op2);
1957 if (code != COND_EXPR
1958 && (!def1 || !def2 || gimple_nop_p (def1) || gimple_nop_p (def2)))
1960 if (vect_print_dump_info (REPORT_DETAILS))
1961 report_vect_op (def_stmt, "reduction: no defs for operands: ");
1962 return NULL;
1965 /* Check that one def is the reduction def, defined by PHI,
1966 the other def is either defined in the loop ("vect_internal_def"),
1967 or it's an induction (defined by a loop-header phi-node). */
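/* For illustration, a typical cycle that matches the first arm below
   (schematic GIMPLE, names hypothetical):
     s_1 = PHI <s_init (preheader), s_2 (latch)>    <-- PHI
     t_3 = a[i];                                    <-- def1, internal def
     s_2 = t_3 + s_1;                               <-- DEF_STMT, op2 == PHI
   Here def2 == PHI and def1 is a gimple assignment inside the loop.  */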
1969 if (def2 && def2 == phi
1970 && (code == COND_EXPR
1971 || (def1 && flow_bb_inside_loop_p (loop, gimple_bb (def1))
1972 && (is_gimple_assign (def1)
1973 || is_gimple_call (def1)
1974 || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1))
1975 == vect_induction_def
1976 || (gimple_code (def1) == GIMPLE_PHI
1977 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1))
1978 == vect_internal_def
1979 && !is_loop_header_bb_p (gimple_bb (def1)))))))
1981 if (vect_print_dump_info (REPORT_DETAILS))
1982 report_vect_op (def_stmt, "detected reduction: ");
1983 return def_stmt;
1985 else if (def1 && def1 == phi
1986 && (code == COND_EXPR
1987 || (def2 && flow_bb_inside_loop_p (loop, gimple_bb (def2))
1988 && (is_gimple_assign (def2)
1989 || is_gimple_call (def2)
1990 || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2))
1991 == vect_induction_def
1992 || (gimple_code (def2) == GIMPLE_PHI
1993 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2))
1994 == vect_internal_def
1995 && !is_loop_header_bb_p (gimple_bb (def2)))))))
1997 if (check_reduction)
1999 /* Swap operands (just for simplicity - so that the rest of the code
2000 can assume that the reduction variable is always the last (second)
2001 argument). */
2002 if (vect_print_dump_info (REPORT_DETAILS))
2003 report_vect_op (def_stmt,
2004 "detected reduction: need to swap operands: ");
2006 swap_tree_operands (def_stmt, gimple_assign_rhs1_ptr (def_stmt),
2007 gimple_assign_rhs2_ptr (def_stmt));
2009 else
2011 if (vect_print_dump_info (REPORT_DETAILS))
2012 report_vect_op (def_stmt, "detected reduction: ");
2015 return def_stmt;
2017 else
2019 if (vect_print_dump_info (REPORT_DETAILS))
2020 report_vect_op (def_stmt, "reduction: unknown pattern: ");
2022 return NULL;
2026 /* Wrapper around vect_is_simple_reduction_1 that won't modify code
2027 in-place. Takes the same arguments. */
2029 static gimple
2030 vect_is_simple_reduction (loop_vec_info loop_info, gimple phi,
2031 bool check_reduction, bool *double_reduc)
2033 return vect_is_simple_reduction_1 (loop_info, phi, check_reduction,
2034 double_reduc, false);
2037 /* Wrapper around vect_is_simple_reduction_1 that will modify code
2038 in-place if it enables detection of more reductions. Takes the
2039 same arguments. */
2041 gimple
2042 vect_force_simple_reduction (loop_vec_info loop_info, gimple phi,
2043 bool check_reduction, bool *double_reduc)
2045 return vect_is_simple_reduction_1 (loop_info, phi, check_reduction,
2046 double_reduc, true);
2049 /* Calculate the cost of one scalar iteration of the loop. */
2050 int
2051 vect_get_single_scalar_iteration_cost (loop_vec_info loop_vinfo)
2053 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2054 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
2055 int nbbs = loop->num_nodes, factor, scalar_single_iter_cost = 0;
2056 int innerloop_iters, i, stmt_cost;
2058 /* Count statements in scalar loop. Using this as scalar cost for a single
2059 iteration for now.
2061 TODO: Add outer loop support.
2063 TODO: Consider assigning different costs to different scalar
2064 statements. */
2066 /* FORNOW. */
2067 innerloop_iters = 1;
2068 if (loop->inner)
2069 innerloop_iters = 50; /* FIXME */
2071 for (i = 0; i < nbbs; i++)
2073 gimple_stmt_iterator si;
2074 basic_block bb = bbs[i];
2076 if (bb->loop_father == loop->inner)
2077 factor = innerloop_iters;
2078 else
2079 factor = 1;
2081 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
2083 gimple stmt = gsi_stmt (si);
2084 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2086 if (!is_gimple_assign (stmt) && !is_gimple_call (stmt))
2087 continue;
2089 /* Skip stmts that are not vectorized inside the loop. */
2090 if (stmt_info
2091 && !STMT_VINFO_RELEVANT_P (stmt_info)
2092 && (!STMT_VINFO_LIVE_P (stmt_info)
2093 || STMT_VINFO_DEF_TYPE (stmt_info) != vect_reduction_def))
2094 continue;
2096 if (STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt)))
2098 if (DR_IS_READ (STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt))))
2099 stmt_cost = vect_get_cost (scalar_load);
2100 else
2101 stmt_cost = vect_get_cost (scalar_store);
2103 else
2104 stmt_cost = vect_get_cost (scalar_stmt);
2106 scalar_single_iter_cost += stmt_cost * factor;
2109 return scalar_single_iter_cost;
2112 /* Calculate cost of peeling the loop PEEL_ITERS_PROLOGUE times. */
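/* For illustration (hypothetical numbers): with NITERS = 103 known,
   VF = 4, PEEL_ITERS_PROLOGUE = 3 and SCALAR_SINGLE_ITER_COST = 2, the
   epilogue gets (103 - 3) % 4 = 0 iterations, no guard branches are
   counted, and the function returns (3 + 0) * 2 = 6.  */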
2113 int
2114 vect_get_known_peeling_cost (loop_vec_info loop_vinfo, int peel_iters_prologue,
2115 int *peel_iters_epilogue,
2116 int scalar_single_iter_cost)
2118 int peel_guard_costs = 0;
2119 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2121 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
2123 *peel_iters_epilogue = vf/2;
2124 if (vect_print_dump_info (REPORT_COST))
2125 fprintf (vect_dump, "cost model: "
2126 "epilogue peel iters set to vf/2 because "
2127 "loop iterations are unknown .");
2129 /* If peeled iterations are known but the number of scalar loop
2130 iterations is unknown, count a taken branch per peeled loop. */
2131 peel_guard_costs = 2 * vect_get_cost (cond_branch_taken);
2133 else
2135 int niters = LOOP_VINFO_INT_NITERS (loop_vinfo);
2136 peel_iters_prologue = niters < peel_iters_prologue ?
2137 niters : peel_iters_prologue;
2138 *peel_iters_epilogue = (niters - peel_iters_prologue) % vf;
2141 return (peel_iters_prologue * scalar_single_iter_cost)
2142 + (*peel_iters_epilogue * scalar_single_iter_cost)
2143 + peel_guard_costs;
2146 /* Function vect_estimate_min_profitable_iters
2148 Return the number of iterations required for the vector version of the
2149 loop to be profitable relative to the cost of the scalar version of the
2150 loop.
2152 TODO: Take profile info into account before making vectorization
2153 decisions, if available. */
2155 int
2156 vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo)
2158 int i;
2159 int min_profitable_iters;
2160 int peel_iters_prologue;
2161 int peel_iters_epilogue;
2162 int vec_inside_cost = 0;
2163 int vec_outside_cost = 0;
2164 int scalar_single_iter_cost = 0;
2165 int scalar_outside_cost = 0;
2166 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2167 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2168 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
2169 int nbbs = loop->num_nodes;
2170 int npeel = LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo);
2171 int peel_guard_costs = 0;
2172 int innerloop_iters = 0, factor;
2173 VEC (slp_instance, heap) *slp_instances;
2174 slp_instance instance;
2176 /* Cost model disabled. */
2177 if (!flag_vect_cost_model)
2179 if (vect_print_dump_info (REPORT_COST))
2180 fprintf (vect_dump, "cost model disabled.");
2181 return 0;
2184 /* Requires loop versioning tests to handle misalignment. */
2185 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
2187 /* FIXME: Make cost depend on complexity of individual check. */
2188 vec_outside_cost +=
2189 VEC_length (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo));
2190 if (vect_print_dump_info (REPORT_COST))
2191 fprintf (vect_dump, "cost model: Adding cost of checks for loop "
2192 "versioning to treat misalignment.\n");
2195 /* Requires loop versioning with alias checks. */
2196 if (LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
2198 /* FIXME: Make cost depend on complexity of individual check. */
2199 vec_outside_cost +=
2200 VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo));
2201 if (vect_print_dump_info (REPORT_COST))
2202 fprintf (vect_dump, "cost model: Adding cost of checks for loop "
2203 "versioning aliasing.\n");
2206 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
2207 || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
2208 vec_outside_cost += vect_get_cost (cond_branch_taken);
2210 /* Accumulate the inside-of-loop and outside-of-loop costs of the
2211 stmts that will be vectorized.
2213 TODO: Add outer loop support.
2215 TODO: Consider assigning different costs to different scalar
2216 statements. */
2218 /* FORNOW. */
2219 if (loop->inner)
2220 innerloop_iters = 50; /* FIXME */
2222 for (i = 0; i < nbbs; i++)
2224 gimple_stmt_iterator si;
2225 basic_block bb = bbs[i];
2227 if (bb->loop_father == loop->inner)
2228 factor = innerloop_iters;
2229 else
2230 factor = 1;
2232 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
2234 gimple stmt = gsi_stmt (si);
2235 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2236 /* Skip stmts that are not vectorized inside the loop. */
2237 if (!STMT_VINFO_RELEVANT_P (stmt_info)
2238 && (!STMT_VINFO_LIVE_P (stmt_info)
2239 || STMT_VINFO_DEF_TYPE (stmt_info) != vect_reduction_def))
2240 continue;
2241 vec_inside_cost += STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info) * factor;
2242 /* FIXME: for stmts in the inner-loop in outer-loop vectorization,
2243 some of the "outside" costs are generated inside the outer-loop. */
2244 vec_outside_cost += STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info);
2248 scalar_single_iter_cost = vect_get_single_scalar_iteration_cost (loop_vinfo);
2250 /* Add additional cost for the peeled instructions in prologue and epilogue
2251 loop.
2253 FORNOW: If we don't know the value of peel_iters for prologue or epilogue
2254 at compile-time - we assume it's vf/2 (the worst would be vf-1).
2256 TODO: Build an expression that represents peel_iters for prologue and
2257 epilogue to be used in a run-time test. */
2259 if (npeel < 0)
2261 peel_iters_prologue = vf/2;
2262 if (vect_print_dump_info (REPORT_COST))
2263 fprintf (vect_dump, "cost model: "
2264 "prologue peel iters set to vf/2.");
2266 /* If peeling for alignment is unknown, the loop bound of the main loop
2267 becomes unknown. */
2268 peel_iters_epilogue = vf/2;
2269 if (vect_print_dump_info (REPORT_COST))
2270 fprintf (vect_dump, "cost model: "
2271 "epilogue peel iters set to vf/2 because "
2272 "peeling for alignment is unknown .");
2274 /* If peeled iterations are unknown, count a taken branch and a not taken
2275 branch per peeled loop. Even if scalar loop iterations are known,
2276 vector iterations are not known since peeled prologue iterations are
2277 not known. Hence guards remain the same. */
2278 peel_guard_costs += 2 * (vect_get_cost (cond_branch_taken)
2279 + vect_get_cost (cond_branch_not_taken));
2280 vec_outside_cost += (peel_iters_prologue * scalar_single_iter_cost)
2281 + (peel_iters_epilogue * scalar_single_iter_cost)
2282 + peel_guard_costs;
2284 else
2286 peel_iters_prologue = npeel;
2287 vec_outside_cost += vect_get_known_peeling_cost (loop_vinfo,
2288 peel_iters_prologue, &peel_iters_epilogue,
2289 scalar_single_iter_cost);
2292 /* FORNOW: The scalar outside cost is incremented in one of the
2293 following ways:
2295 1. The vectorizer checks for alignment and aliasing and generates
2296 a condition that allows dynamic vectorization. A cost model
2297 check is ANDed with the versioning condition. Hence the scalar code
2298 path now has the added cost of the versioning check.
2300 if (cost > th & versioning_check)
2301 jmp to vector code
2303 Hence run-time scalar is incremented by not-taken branch cost.
2305 2. The vectorizer then checks if a prologue is required. If the
2306 cost model check was not done before during versioning, it has to
2307 be done before the prologue check.
2309 if (cost <= th)
2310 prologue = scalar_iters
2311 if (prologue == 0)
2312 jmp to vector code
2313 else
2314 execute prologue
2315 if (prologue == num_iters)
2316 go to exit
2318 Hence the run-time scalar cost is incremented by a taken branch,
2319 plus a not-taken branch, plus a taken branch cost.
2321 3. The vectorizer then checks if an epilogue is required. If the
2322 cost model check was not done before during prologue check, it
2323 has to be done with the epilogue check.
2325 if (prologue == 0)
2326 jmp to vector code
2327 else
2328 execute prologue
2329 if (prologue == num_iters)
2330 go to exit
2331 vector code:
2332 if ((cost <= th) | (scalar_iters-prologue-epilogue == 0))
2333 jmp to epilogue
2335 Hence the run-time scalar cost should be incremented by 2 taken
2336 branches.
2338 TODO: The back end may reorder the BBS's differently and reverse
2339 conditions/branch directions. Change the estimates below to
2340 something more reasonable. */
2342 /* If the number of iterations is known and we do not do versioning, we can
2343 decide whether to vectorize at compile time. Hence the scalar version
2344 does not carry cost model guard costs. */
2345 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
2346 || LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
2347 || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
2349 /* Cost model check occurs at versioning. */
2350 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
2351 || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
2352 scalar_outside_cost += vect_get_cost (cond_branch_not_taken);
2353 else
2355 /* Cost model check occurs at prologue generation. */
2356 if (LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0)
2357 scalar_outside_cost += 2 * vect_get_cost (cond_branch_taken)
2358 + vect_get_cost (cond_branch_not_taken);
2359 /* Cost model check occurs at epilogue generation. */
2360 else
2361 scalar_outside_cost += 2 * vect_get_cost (cond_branch_taken);
2365 /* Add SLP costs. */
2366 slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
2367 FOR_EACH_VEC_ELT (slp_instance, slp_instances, i, instance)
2369 vec_outside_cost += SLP_INSTANCE_OUTSIDE_OF_LOOP_COST (instance);
2370 vec_inside_cost += SLP_INSTANCE_INSIDE_OF_LOOP_COST (instance);
2373 /* Calculate number of iterations required to make the vector version
2374 profitable, relative to the loop bodies only. The following condition
2375 must hold true:
2376 SIC * niters + SOC > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC
2377 where
2378 SIC = scalar iteration cost, VIC = vector iteration cost,
2379 VOC = vector outside cost, VF = vectorization factor,
2380 PL_ITERS = prologue iterations, EP_ITERS= epilogue iterations
2381 SOC = scalar outside cost for run time cost model check. */
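/* For illustration (hypothetical costs): with SIC = 4, VIC = 6, VF = 4,
   VOC = 20, SOC = 6 and no peeling, the condition reads
   4 * niters + 6 > 6 * (niters / 4) + 20, which first holds at
   niters = 6; the computation below yields 56 / 10 = 5 and the
   correction step that follows bumps it to 6.  */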
2383 if ((scalar_single_iter_cost * vf) > vec_inside_cost)
2385 if (vec_outside_cost <= 0)
2386 min_profitable_iters = 1;
2387 else
2389 min_profitable_iters = ((vec_outside_cost - scalar_outside_cost) * vf
2390 - vec_inside_cost * peel_iters_prologue
2391 - vec_inside_cost * peel_iters_epilogue)
2392 / ((scalar_single_iter_cost * vf)
2393 - vec_inside_cost);
2395 if ((scalar_single_iter_cost * vf * min_profitable_iters)
2396 <= ((vec_inside_cost * min_profitable_iters)
2397 + ((vec_outside_cost - scalar_outside_cost) * vf)))
2398 min_profitable_iters++;
2401 /* The vector version will never be profitable. */
2402 else
2404 if (vect_print_dump_info (REPORT_COST))
2405 fprintf (vect_dump, "cost model: the vector iteration cost = %d "
2406 "divided by the scalar iteration cost = %d "
2407 "is greater or equal to the vectorization factor = %d.",
2408 vec_inside_cost, scalar_single_iter_cost, vf);
2409 return -1;
2412 if (vect_print_dump_info (REPORT_COST))
2414 fprintf (vect_dump, "Cost model analysis: \n");
2415 fprintf (vect_dump, " Vector inside of loop cost: %d\n",
2416 vec_inside_cost);
2417 fprintf (vect_dump, " Vector outside of loop cost: %d\n",
2418 vec_outside_cost);
2419 fprintf (vect_dump, " Scalar iteration cost: %d\n",
2420 scalar_single_iter_cost);
2421 fprintf (vect_dump, " Scalar outside cost: %d\n", scalar_outside_cost);
2422 fprintf (vect_dump, " prologue iterations: %d\n",
2423 peel_iters_prologue);
2424 fprintf (vect_dump, " epilogue iterations: %d\n",
2425 peel_iters_epilogue);
2426 fprintf (vect_dump, " Calculated minimum iters for profitability: %d\n",
2427 min_profitable_iters);
2430 min_profitable_iters =
2431 min_profitable_iters < vf ? vf : min_profitable_iters;
2433 /* Because the condition we create is:
2434 if (niters <= min_profitable_iters)
2435 then skip the vectorized loop. */
2436 min_profitable_iters--;
2438 if (vect_print_dump_info (REPORT_COST))
2439 fprintf (vect_dump, " Profitability threshold = %d\n",
2440 min_profitable_iters);
2442 return min_profitable_iters;
2446 /* TODO: Close dependency between vect_model_*_cost and vectorizable_*
2447 functions. Design better to avoid maintenance issues. */
2449 /* Function vect_model_reduction_cost.
2451 Models cost for a reduction operation, including the vector ops
2452 generated within the strip-mine loop, the initial definition before
2453 the loop, and the epilogue code that must be generated. */
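/* For illustration (hypothetical target costs): a PLUS_EXPR reduction
   with ncopies = 1 on a target that provides a reduc op costs one
   vector_stmt inside the loop, plus scalar_to_vec (initial definition),
   vector_stmt (the reduc op) and vec_to_scalar (scalar extract)
   outside of it.  */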
2455 static bool
2456 vect_model_reduction_cost (stmt_vec_info stmt_info, enum tree_code reduc_code,
2457 int ncopies)
2459 int outer_cost = 0;
2460 enum tree_code code;
2461 optab optab;
2462 tree vectype;
2463 gimple stmt, orig_stmt;
2464 tree reduction_op;
2465 enum machine_mode mode;
2466 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2467 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2470 /* Cost of reduction op inside loop. */
2471 STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info)
2472 += ncopies * vect_get_cost (vector_stmt);
2474 stmt = STMT_VINFO_STMT (stmt_info);
2476 switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
2478 case GIMPLE_SINGLE_RHS:
2479 gcc_assert (TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt)) == ternary_op);
2480 reduction_op = TREE_OPERAND (gimple_assign_rhs1 (stmt), 2);
2481 break;
2482 case GIMPLE_UNARY_RHS:
2483 reduction_op = gimple_assign_rhs1 (stmt);
2484 break;
2485 case GIMPLE_BINARY_RHS:
2486 reduction_op = gimple_assign_rhs2 (stmt);
2487 break;
2488 default:
2489 gcc_unreachable ();
2492 vectype = get_vectype_for_scalar_type (TREE_TYPE (reduction_op));
2493 if (!vectype)
2495 if (vect_print_dump_info (REPORT_COST))
2497 fprintf (vect_dump, "unsupported data-type ");
2498 print_generic_expr (vect_dump, TREE_TYPE (reduction_op), TDF_SLIM);
2500 return false;
2503 mode = TYPE_MODE (vectype);
2504 orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
2506 if (!orig_stmt)
2507 orig_stmt = STMT_VINFO_STMT (stmt_info);
2509 code = gimple_assign_rhs_code (orig_stmt);
2511 /* Add in cost for initial definition. */
2512 outer_cost += vect_get_cost (scalar_to_vec);
2514 /* Determine cost of epilogue code.
2516 We have a reduction operator that will reduce the vector in one statement.
2517 Also requires scalar extract. */
2519 if (!nested_in_vect_loop_p (loop, orig_stmt))
2521 if (reduc_code != ERROR_MARK)
2522 outer_cost += vect_get_cost (vector_stmt)
2523 + vect_get_cost (vec_to_scalar);
2524 else
2526 int vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1);
2527 tree bitsize =
2528 TYPE_SIZE (TREE_TYPE (gimple_assign_lhs (orig_stmt)));
2529 int element_bitsize = tree_low_cst (bitsize, 1);
2530 int nelements = vec_size_in_bits / element_bitsize;
2532 optab = optab_for_tree_code (code, vectype, optab_default);
2534 /* We have a whole vector shift available. */
2535 if (VECTOR_MODE_P (mode)
2536 && optab_handler (optab, mode) != CODE_FOR_nothing
2537 && optab_handler (vec_shr_optab, mode) != CODE_FOR_nothing)
2538 /* Final reduction via vector shifts and the reduction operator. Also
2539 requires scalar extract. */
2540 outer_cost += ((exact_log2 (nelements) * 2)
2541 * vect_get_cost (vector_stmt)
2542 + vect_get_cost (vec_to_scalar));
2543 else
2544 /* Use extracts and reduction op for final reduction. For N elements,
2545 we have N extracts and N-1 reduction ops. */
2546 outer_cost += ((nelements + nelements - 1)
2547 * vect_get_cost (vector_stmt));
2551 STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info) = outer_cost;
2553 if (vect_print_dump_info (REPORT_COST))
2554 fprintf (vect_dump, "vect_model_reduction_cost: inside_cost = %d, "
2555 "outside_cost = %d .", STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info),
2556 STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info));
2558 return true;
2562 /* Function vect_model_induction_cost.
2564 Models cost for induction operations. */
2566 static void
2567 vect_model_induction_cost (stmt_vec_info stmt_info, int ncopies)
2569 /* loop cost for vec_loop. */
2570 STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info)
2571 = ncopies * vect_get_cost (vector_stmt);
2572 /* prologue cost for vec_init and vec_step. */
2573 STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info)
2574 = 2 * vect_get_cost (scalar_to_vec);
2576 if (vect_print_dump_info (REPORT_COST))
2577 fprintf (vect_dump, "vect_model_induction_cost: inside_cost = %d, "
2578 "outside_cost = %d .", STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info),
2579 STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info));
2583 /* Function get_initial_def_for_induction
2585 Input:
2586 STMT - a stmt that performs an induction operation in the loop.
2587 IV_PHI - the initial value of the induction variable
2589 Output:
2590 Return a vector variable, initialized with the first VF values of
2591 the induction variable. E.g., for an iv with IV_PHI='X' and
2592 evolution S, for a vector of 4 units, we want to return:
2593 [X, X + S, X + 2*S, X + 3*S]. */
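/* For illustration (hypothetical IV): with initial value X = 5, step
   S = 3 and a 4-unit vector type, the returned vector is
   {5, 8, 11, 14}, and the step vector built further below is
   {12, 12, 12, 12} (VF * S in every lane).  */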
2595 static tree
2596 get_initial_def_for_induction (gimple iv_phi)
2598 stmt_vec_info stmt_vinfo = vinfo_for_stmt (iv_phi);
2599 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
2600 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2601 tree scalar_type;
2602 tree vectype;
2603 int nunits;
2604 edge pe = loop_preheader_edge (loop);
2605 struct loop *iv_loop;
2606 basic_block new_bb;
2607 tree vec, vec_init, vec_step, t;
2608 tree access_fn;
2609 tree new_var;
2610 tree new_name;
2611 gimple init_stmt, induction_phi, new_stmt;
2612 tree induc_def, vec_def, vec_dest;
2613 tree init_expr, step_expr;
2614 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2615 int i;
2616 bool ok;
2617 int ncopies;
2618 tree expr;
2619 stmt_vec_info phi_info = vinfo_for_stmt (iv_phi);
2620 bool nested_in_vect_loop = false;
2621 gimple_seq stmts = NULL;
2622 imm_use_iterator imm_iter;
2623 use_operand_p use_p;
2624 gimple exit_phi;
2625 edge latch_e;
2626 tree loop_arg;
2627 gimple_stmt_iterator si;
2628 basic_block bb = gimple_bb (iv_phi);
2629 tree stepvectype;
2630 tree resvectype;
2632 /* Is phi in an inner-loop, while vectorizing an enclosing outer-loop? */
2633 if (nested_in_vect_loop_p (loop, iv_phi))
2635 nested_in_vect_loop = true;
2636 iv_loop = loop->inner;
2638 else
2639 iv_loop = loop;
2640 gcc_assert (iv_loop == (gimple_bb (iv_phi))->loop_father);
2642 latch_e = loop_latch_edge (iv_loop);
2643 loop_arg = PHI_ARG_DEF_FROM_EDGE (iv_phi, latch_e);
2645 access_fn = analyze_scalar_evolution (iv_loop, PHI_RESULT (iv_phi));
2646 gcc_assert (access_fn);
2647 STRIP_NOPS (access_fn);
2648 ok = vect_is_simple_iv_evolution (iv_loop->num, access_fn,
2649 &init_expr, &step_expr);
2650 gcc_assert (ok);
2651 pe = loop_preheader_edge (iv_loop);
2653 scalar_type = TREE_TYPE (init_expr);
2654 vectype = get_vectype_for_scalar_type (scalar_type);
2655 resvectype = get_vectype_for_scalar_type (TREE_TYPE (PHI_RESULT (iv_phi)));
2656 gcc_assert (vectype);
2657 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2658 ncopies = vf / nunits;
2660 gcc_assert (phi_info);
2661 gcc_assert (ncopies >= 1);
2663 /* Find the first insertion point in the BB. */
2664 si = gsi_after_labels (bb);
2666 /* Create the vector that holds the initial_value of the induction. */
2667 if (nested_in_vect_loop)
2669 /* iv_loop is nested in the loop to be vectorized. init_expr had already
2670 been created during vectorization of previous stmts. We obtain it
2671 from the STMT_VINFO_VEC_STMT of the defining stmt. */
2672 tree iv_def = PHI_ARG_DEF_FROM_EDGE (iv_phi,
2673 loop_preheader_edge (iv_loop));
2674 vec_init = vect_get_vec_def_for_operand (iv_def, iv_phi, NULL);
2676 else
2678 /* iv_loop is the loop to be vectorized. Create:
2679 vec_init = [X, X+S, X+2*S, X+3*S] (S = step_expr, X = init_expr) */
2680 new_var = vect_get_new_vect_var (scalar_type, vect_scalar_var, "var_");
2681 add_referenced_var (new_var);
2683 new_name = force_gimple_operand (init_expr, &stmts, false, new_var);
2684 if (stmts)
2686 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
2687 gcc_assert (!new_bb);
2690 t = NULL_TREE;
2691 t = tree_cons (NULL_TREE, new_name, t);
2692 for (i = 1; i < nunits; i++)
2694 /* Create: new_name_i = new_name + step_expr */
2695 enum tree_code code = POINTER_TYPE_P (scalar_type)
2696 ? POINTER_PLUS_EXPR : PLUS_EXPR;
2697 init_stmt = gimple_build_assign_with_ops (code, new_var,
2698 new_name, step_expr);
2699 new_name = make_ssa_name (new_var, init_stmt);
2700 gimple_assign_set_lhs (init_stmt, new_name);
2702 new_bb = gsi_insert_on_edge_immediate (pe, init_stmt);
2703 gcc_assert (!new_bb);
2705 if (vect_print_dump_info (REPORT_DETAILS))
2707 fprintf (vect_dump, "created new init_stmt: ");
2708 print_gimple_stmt (vect_dump, init_stmt, 0, TDF_SLIM);
2710 t = tree_cons (NULL_TREE, new_name, t);
2712 /* Create a vector from [new_name_0, new_name_1, ..., new_name_nunits-1] */
2713 vec = build_constructor_from_list (vectype, nreverse (t));
2714 vec_init = vect_init_vector (iv_phi, vec, vectype, NULL);
2718 /* Create the vector that holds the step of the induction. */
2719 if (nested_in_vect_loop)
2720 /* iv_loop is nested in the loop to be vectorized. Generate:
2721 vec_step = [S, S, S, S] */
2722 new_name = step_expr;
2723 else
2725 /* iv_loop is the loop to be vectorized. Generate:
2726 vec_step = [VF*S, VF*S, VF*S, VF*S] */
2727 expr = build_int_cst (TREE_TYPE (step_expr), vf);
2728 new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
2729 expr, step_expr);
2732 t = unshare_expr (new_name);
2733 gcc_assert (CONSTANT_CLASS_P (new_name));
2734 stepvectype = get_vectype_for_scalar_type (TREE_TYPE (new_name));
2735 gcc_assert (stepvectype);
2736 vec = build_vector_from_val (stepvectype, t);
2737 vec_step = vect_init_vector (iv_phi, vec, stepvectype, NULL);
2740 /* Create the following def-use cycle:
2741 loop prolog:
2742 vec_init = ...
2743 vec_step = ...
2744 loop:
2745 vec_iv = PHI <vec_init, vec_loop>
2747 STMT
2749 vec_loop = vec_iv + vec_step; */
2751 /* Create the induction-phi that defines the induction-operand. */
2752 vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_");
2753 add_referenced_var (vec_dest);
2754 induction_phi = create_phi_node (vec_dest, iv_loop->header);
2755 set_vinfo_for_stmt (induction_phi,
2756 new_stmt_vec_info (induction_phi, loop_vinfo, NULL));
2757 induc_def = PHI_RESULT (induction_phi);
2759 /* Create the iv update inside the loop */
2760 new_stmt = gimple_build_assign_with_ops (PLUS_EXPR, vec_dest,
2761 induc_def, vec_step);
2762 vec_def = make_ssa_name (vec_dest, new_stmt);
2763 gimple_assign_set_lhs (new_stmt, vec_def);
2764 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
2765 set_vinfo_for_stmt (new_stmt, new_stmt_vec_info (new_stmt, loop_vinfo,
2766 NULL));
2768 /* Set the arguments of the phi node: */
2769 add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION);
2770 add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
2771 UNKNOWN_LOCATION);
2774 /* If the vectorization factor (VF) is bigger than the number
2775 of elements that we can fit in a vectype (nunits), we have to generate
2776 more than one vector stmt - i.e., we need to "unroll" the
2777 vector stmt by a factor VF/nunits. For more details see documentation
2778 in vectorizable_operation. */
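/* For illustration: with VF = 8 and nunits = 4 we need ncopies = 2
   vector IVs, {X, X+S, X+2*S, X+3*S} and {X+4*S, ..., X+7*S}; the
   second is obtained by adding the step vector {4*S, 4*S, 4*S, 4*S}
   built below (nunits * S in every lane).  */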
2780 if (ncopies > 1)
2782 stmt_vec_info prev_stmt_vinfo;
2783 /* FORNOW. This restriction should be relaxed. */
2784 gcc_assert (!nested_in_vect_loop);
2786 /* Create the vector that holds the step of the induction. */
2787 expr = build_int_cst (TREE_TYPE (step_expr), nunits);
2788 new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
2789 expr, step_expr);
2790 t = unshare_expr (new_name);
2791 gcc_assert (CONSTANT_CLASS_P (new_name));
2792 vec = build_vector_from_val (stepvectype, t);
2793 vec_step = vect_init_vector (iv_phi, vec, stepvectype, NULL);
2795 vec_def = induc_def;
2796 prev_stmt_vinfo = vinfo_for_stmt (induction_phi);
2797 for (i = 1; i < ncopies; i++)
2799 /* vec_i = vec_prev + vec_step */
2800 new_stmt = gimple_build_assign_with_ops (PLUS_EXPR, vec_dest,
2801 vec_def, vec_step);
2802 vec_def = make_ssa_name (vec_dest, new_stmt);
2803 gimple_assign_set_lhs (new_stmt, vec_def);
2805 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
2806 if (!useless_type_conversion_p (resvectype, vectype))
2808 new_stmt = gimple_build_assign_with_ops
2809 (VIEW_CONVERT_EXPR,
2810 vect_get_new_vect_var (resvectype, vect_simple_var,
2811 "vec_iv_"),
2812 build1 (VIEW_CONVERT_EXPR, resvectype,
2813 gimple_assign_lhs (new_stmt)), NULL_TREE);
2814 gimple_assign_set_lhs (new_stmt,
2815 make_ssa_name
2816 (gimple_assign_lhs (new_stmt), new_stmt));
2817 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
2819 set_vinfo_for_stmt (new_stmt,
2820 new_stmt_vec_info (new_stmt, loop_vinfo, NULL));
2821 STMT_VINFO_RELATED_STMT (prev_stmt_vinfo) = new_stmt;
2822 prev_stmt_vinfo = vinfo_for_stmt (new_stmt);
2826 if (nested_in_vect_loop)
2828 /* Find the loop-closed exit-phi of the induction, and record
2829 the final vector of induction results: */
2830 exit_phi = NULL;
2831 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
2833 if (!flow_bb_inside_loop_p (iv_loop, gimple_bb (USE_STMT (use_p))))
2835 exit_phi = USE_STMT (use_p);
2836 break;
2839 if (exit_phi)
2841 stmt_vec_info stmt_vinfo = vinfo_for_stmt (exit_phi);
2842 /* FORNOW. Currently not supporting the case that an inner-loop induction
2843 is not used in the outer-loop (i.e. only outside the outer-loop). */
2844 gcc_assert (STMT_VINFO_RELEVANT_P (stmt_vinfo)
2845 && !STMT_VINFO_LIVE_P (stmt_vinfo));
2847 STMT_VINFO_VEC_STMT (stmt_vinfo) = new_stmt;
2848 if (vect_print_dump_info (REPORT_DETAILS))
2850 fprintf (vect_dump, "vector of inductions after inner-loop:");
2851 print_gimple_stmt (vect_dump, new_stmt, 0, TDF_SLIM);
2857 if (vect_print_dump_info (REPORT_DETAILS))
2859 fprintf (vect_dump, "transform induction: created def-use cycle: ");
2860 print_gimple_stmt (vect_dump, induction_phi, 0, TDF_SLIM);
2861 fprintf (vect_dump, "\n");
2862 print_gimple_stmt (vect_dump, SSA_NAME_DEF_STMT (vec_def), 0, TDF_SLIM);
2865 STMT_VINFO_VEC_STMT (phi_info) = induction_phi;
2866 if (!useless_type_conversion_p (resvectype, vectype))
2868 new_stmt = gimple_build_assign_with_ops
2869 (VIEW_CONVERT_EXPR,
2870 vect_get_new_vect_var (resvectype, vect_simple_var, "vec_iv_"),
2871 build1 (VIEW_CONVERT_EXPR, resvectype, induc_def), NULL_TREE);
2872 induc_def = make_ssa_name (gimple_assign_lhs (new_stmt), new_stmt);
2873 gimple_assign_set_lhs (new_stmt, induc_def);
2874 si = gsi_start_bb (bb);
2875 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
2878 return induc_def;
2882 /* Function get_initial_def_for_reduction
2884 Input:
2885 STMT - a stmt that performs a reduction operation in the loop.
2886 INIT_VAL - the initial value of the reduction variable
2888 Output:
2889 ADJUSTMENT_DEF - a tree that holds a value to be added to the final result
2890 of the reduction (used for adjusting the epilog - see below).
2891 Return a vector variable, initialized according to the operation that STMT
2892 performs. This vector will be used as the initial value of the
2893 vector of partial results.
2895 Option1 (adjust in epilog): Initialize the vector as follows:
2896 add/bit or/xor: [0,0,...,0,0]
2897 mult/bit and: [1,1,...,1,1]
2898 min/max/cond_expr: [init_val,init_val,..,init_val,init_val]
2899 and when necessary (e.g. add/mult case) let the caller know
2900 that it needs to adjust the result by init_val.
2902 Option2: Initialize the vector as follows:
2903 add/bit or/xor: [init_val,0,0,...,0]
2904 mult/bit and: [init_val,1,1,...,1]
2905 min/max/cond_expr: [init_val,init_val,...,init_val]
2906 and no adjustments are needed.
2908 For example, for the following code:
2910 s = init_val;
2911 for (i=0;i<n;i++)
2912 s = s + a[i];
2914 STMT is 's = s + a[i]', and the reduction variable is 's'.
2915 For a vector of 4 units, we want to return either [0,0,0,init_val],
2916 or [0,0,0,0] and let the caller know that it needs to adjust
2917 the result at the end by 'init_val'.
2919 FORNOW, we use the 'adjust in epilog' scheme (Option1) when
2920 ADJUSTMENT_DEF is not NULL, because its initialization vector is
2921 simpler (the same element in all entries), and Option2 otherwise.
2923 A cost model should help decide between these two schemes. */
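/* For illustration (hypothetical loop): for s = 10; s *= a[i]; with a
   4-unit vector type, Option1 returns {1, 1, 1, 1} and sets
   ADJUSTMENT_DEF to 10 (the epilog multiplies the reduced result by
   10), while Option2 would return {10, 1, 1, 1} with no adjustment.  */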
2925 tree
2926 get_initial_def_for_reduction (gimple stmt, tree init_val,
2927 tree *adjustment_def)
2929 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
2930 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
2931 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2932 tree scalar_type = TREE_TYPE (init_val);
2933 tree vectype = get_vectype_for_scalar_type (scalar_type);
2934 int nunits;
2935 enum tree_code code = gimple_assign_rhs_code (stmt);
2936 tree def_for_init;
2937 tree init_def;
2938 tree t = NULL_TREE;
2939 int i;
2940 bool nested_in_vect_loop = false;
2941 tree init_value;
2942 REAL_VALUE_TYPE real_init_val = dconst0;
2943 int int_init_val = 0;
2944 gimple def_stmt = NULL;
2946 gcc_assert (vectype);
2947 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2949 gcc_assert (POINTER_TYPE_P (scalar_type) || INTEGRAL_TYPE_P (scalar_type)
2950 || SCALAR_FLOAT_TYPE_P (scalar_type));
2952 if (nested_in_vect_loop_p (loop, stmt))
2953 nested_in_vect_loop = true;
2954 else
2955 gcc_assert (loop == (gimple_bb (stmt))->loop_father);
2957 /* In case of double reduction we only create a vector variable to be put
2958 in the reduction phi node. The actual statement creation is done in
2959 vect_create_epilog_for_reduction. */
2960 if (adjustment_def && nested_in_vect_loop
2961 && TREE_CODE (init_val) == SSA_NAME
2962 && (def_stmt = SSA_NAME_DEF_STMT (init_val))
2963 && gimple_code (def_stmt) == GIMPLE_PHI
2964 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
2965 && vinfo_for_stmt (def_stmt)
2966 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
2967 == vect_double_reduction_def)
2969 *adjustment_def = NULL;
2970 return vect_create_destination_var (init_val, vectype);
2973 if (TREE_CONSTANT (init_val))
2975 if (SCALAR_FLOAT_TYPE_P (scalar_type))
2976 init_value = build_real (scalar_type, TREE_REAL_CST (init_val));
2977 else
2978 init_value = build_int_cst (scalar_type, TREE_INT_CST_LOW (init_val));
2980 else
2981 init_value = init_val;
2983 switch (code)
2985 case WIDEN_SUM_EXPR:
2986 case DOT_PROD_EXPR:
2987 case PLUS_EXPR:
2988 case MINUS_EXPR:
2989 case BIT_IOR_EXPR:
2990 case BIT_XOR_EXPR:
2991 case MULT_EXPR:
2992 case BIT_AND_EXPR:
2993 /* ADJUSTMENT_DEF is NULL when called from
2994 vect_create_epilog_for_reduction to vectorize a double reduction. */
2995 if (adjustment_def)
2997 if (nested_in_vect_loop)
2998 *adjustment_def = vect_get_vec_def_for_operand (init_val, stmt,
2999 NULL);
3000 else
3001 *adjustment_def = init_val;
3004 if (code == MULT_EXPR)
3006 real_init_val = dconst1;
3007 int_init_val = 1;
3010 if (code == BIT_AND_EXPR)
3011 int_init_val = -1;
3013 if (SCALAR_FLOAT_TYPE_P (scalar_type))
3014 def_for_init = build_real (scalar_type, real_init_val);
3015 else
3016 def_for_init = build_int_cst (scalar_type, int_init_val);
3018 /* Create a vector of '0' or '1' except the first element. */
3019 for (i = nunits - 2; i >= 0; --i)
3020 t = tree_cons (NULL_TREE, def_for_init, t);
3022 /* Option1: the first element is '0' or '1' as well. */
3023 if (adjustment_def)
3025 t = tree_cons (NULL_TREE, def_for_init, t);
3026 init_def = build_vector (vectype, t);
3027 break;
3030 /* Option2: the first element is INIT_VAL. */
3031 t = tree_cons (NULL_TREE, init_value, t);
3032 if (TREE_CONSTANT (init_val))
3033 init_def = build_vector (vectype, t);
3034 else
3035 init_def = build_constructor_from_list (vectype, t);
3037 break;
3039 case MIN_EXPR:
3040 case MAX_EXPR:
3041 case COND_EXPR:
3042 if (adjustment_def)
3044 *adjustment_def = NULL_TREE;
3045 init_def = vect_get_vec_def_for_operand (init_val, stmt, NULL);
3046 break;
3049 init_def = build_vector_from_val (vectype, init_value);
3050 break;
3052 default:
3053 gcc_unreachable ();
3056 return init_def;
3060 /* Function vect_create_epilog_for_reduction
3062 Create code at the loop-epilog to finalize the result of a reduction
3063 computation.
3065 VECT_DEFS is list of vector of partial results, i.e., the lhs's of vector
3066 reduction statements.
3067 STMT is the scalar reduction stmt that is being vectorized.
3068 NCOPIES is > 1 in case the vectorization factor (VF) is bigger than the
3069 number of elements that we can fit in a vectype (nunits). In this case
3070 we have to generate more than one vector stmt - i.e - we need to "unroll"
3071 the vector stmt by a factor VF/nunits. For more details see documentation
3072 in vectorizable_operation.
3073 REDUC_CODE is the tree-code for the epilog reduction.
3074 REDUCTION_PHIS is a list of the phi-nodes that carry the reduction
3075 computation.
3076 REDUC_INDEX is the index of the operand in the right hand side of the
3077 statement that is defined by REDUCTION_PHI.
3078 DOUBLE_REDUC is TRUE if double reduction phi nodes should be handled.
3079 SLP_NODE is an SLP node containing a group of reduction statements. The
3080 first one in this group is STMT.
3082 This function:
3083 1. Creates the reduction def-use cycles: sets the arguments for
3084 REDUCTION_PHIS:
3085 The loop-entry argument is the vectorized initial-value of the reduction.
3086 The loop-latch argument is taken from VECT_DEFS - the vector of partial
3087 sums.
3088 2. "Reduces" each vector of partial results VECT_DEFS into a single result,
3089 by applying the operation specified by REDUC_CODE if available, or by
3090 other means (whole-vector shifts or a scalar loop).
3091 The function also creates a new phi node at the loop exit to preserve
3092 loop-closed form, as illustrated below.
3094 The flow at the entry to this function:
3096 loop:
3097 vec_def = phi <null, null> # REDUCTION_PHI
3098 VECT_DEF = vector_stmt # vectorized form of STMT
3099 s_loop = scalar_stmt # (scalar) STMT
3100 loop_exit:
3101 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
3102 use <s_out0>
3103 use <s_out0>
3105 The above is transformed by this function into:
3107 loop:
3108 vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI
3109 VECT_DEF = vector_stmt # vectorized form of STMT
3110 s_loop = scalar_stmt # (scalar) STMT
3111 loop_exit:
3112 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
3113 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
3114 v_out2 = reduce <v_out1>
3115 s_out3 = extract_field <v_out2, 0>
3116 s_out4 = adjust_result <s_out3>
3117 use <s_out4>
3118 use <s_out4>  */
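/* For illustration (hypothetical values): if VECT_DEF holds the partial
   sums {3, 1, 4, 2} of a PLUS reduction, step 1 reduces them to 10
   (directly via reduc_code, or e.g. by shift-and-add rounds:
   {3,1,4,2} + {4,2,_,_} -> {7,3,_,_}, then + {3,_,_,_} -> {10,_,_,_}),
   step 2 extracts the scalar 10, and step 3 adds ADJUSTMENT_DEF when
   Option1 was used for the initial definition.  */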
3121 static void
3122 vect_create_epilog_for_reduction (VEC (tree, heap) *vect_defs, gimple stmt,
3123 int ncopies, enum tree_code reduc_code,
3124 VEC (gimple, heap) *reduction_phis,
3125 int reduc_index, bool double_reduc,
3126 slp_tree slp_node)
3128 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3129 stmt_vec_info prev_phi_info;
3130 tree vectype;
3131 enum machine_mode mode;
3132 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3133 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo), *outer_loop = NULL;
3134 basic_block exit_bb;
3135 tree scalar_dest;
3136 tree scalar_type;
3137 gimple new_phi = NULL, phi;
3138 gimple_stmt_iterator exit_gsi;
3139 tree vec_dest;
3140 tree new_temp = NULL_TREE, new_dest, new_name, new_scalar_dest;
3141 gimple epilog_stmt = NULL;
3142 enum tree_code code = gimple_assign_rhs_code (stmt);
3143 gimple exit_phi;
3144 tree bitsize, bitpos;
3145 tree adjustment_def = NULL;
3146 tree vec_initial_def = NULL;
3147 tree reduction_op, expr, def;
3148 tree orig_name, scalar_result;
3149 imm_use_iterator imm_iter, phi_imm_iter;
3150 use_operand_p use_p, phi_use_p;
3151 bool extract_scalar_result = false;
3152 gimple use_stmt, orig_stmt, reduction_phi = NULL;
3153 bool nested_in_vect_loop = false;
3154 VEC (gimple, heap) *new_phis = NULL;
3155 enum vect_def_type dt = vect_unknown_def_type;
3156 int j, i;
3157 VEC (tree, heap) *scalar_results = NULL;
3158 unsigned int group_size = 1, k, ratio;
3159 VEC (tree, heap) *vec_initial_defs = NULL;
3160 VEC (gimple, heap) *phis;
3162 if (slp_node)
3163 group_size = VEC_length (gimple, SLP_TREE_SCALAR_STMTS (slp_node));
3165 if (nested_in_vect_loop_p (loop, stmt))
3167 outer_loop = loop;
3168 loop = loop->inner;
3169 nested_in_vect_loop = true;
3170 gcc_assert (!slp_node);
3173 switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
3175 case GIMPLE_SINGLE_RHS:
3176 gcc_assert (TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt))
3177 == ternary_op);
3178 reduction_op = TREE_OPERAND (gimple_assign_rhs1 (stmt), reduc_index);
3179 break;
3180 case GIMPLE_UNARY_RHS:
3181 reduction_op = gimple_assign_rhs1 (stmt);
3182 break;
3183 case GIMPLE_BINARY_RHS:
3184 reduction_op = reduc_index ?
3185 gimple_assign_rhs2 (stmt) : gimple_assign_rhs1 (stmt);
3186 break;
3187 default:
3188 gcc_unreachable ();
3191 vectype = get_vectype_for_scalar_type (TREE_TYPE (reduction_op));
3192 gcc_assert (vectype);
3193 mode = TYPE_MODE (vectype);
3195 /* 1. Create the reduction def-use cycle:
3196 Set the arguments of REDUCTION_PHIS, i.e., transform
3198 loop:
3199 vec_def = phi <null, null> # REDUCTION_PHI
3200 VECT_DEF = vector_stmt # vectorized form of STMT
3203 into:
3205 loop:
3206 vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI
3207 VECT_DEF = vector_stmt # vectorized form of STMT
3210 (in case of SLP, do it for all the phis). */
3212 /* Get the loop-entry arguments. */
3213 if (slp_node)
3214 vect_get_slp_defs (reduction_op, NULL_TREE, slp_node, &vec_initial_defs,
3215 NULL, reduc_index);
3216 else
3218 vec_initial_defs = VEC_alloc (tree, heap, 1);
3219 /* For the case of reduction, vect_get_vec_def_for_operand returns
3220 the scalar def before the loop, that defines the initial value
3221 of the reduction variable. */
3222 vec_initial_def = vect_get_vec_def_for_operand (reduction_op, stmt,
3223 &adjustment_def);
3224 VEC_quick_push (tree, vec_initial_defs, vec_initial_def);
3227 /* Set phi nodes arguments. */
3228 FOR_EACH_VEC_ELT (gimple, reduction_phis, i, phi)
3230 tree vec_init_def = VEC_index (tree, vec_initial_defs, i);
3231 tree def = VEC_index (tree, vect_defs, i);
3232 for (j = 0; j < ncopies; j++)
3234 /* Set the loop-entry arg of the reduction-phi. */
3235 add_phi_arg (phi, vec_init_def, loop_preheader_edge (loop),
3236 UNKNOWN_LOCATION);
3238 /* Set the loop-latch arg for the reduction-phi. */
3239 if (j > 0)
3240 def = vect_get_vec_def_for_stmt_copy (vect_unknown_def_type, def);
3242 add_phi_arg (phi, def, loop_latch_edge (loop), UNKNOWN_LOCATION);
3244 if (vect_print_dump_info (REPORT_DETAILS))
3246 fprintf (vect_dump, "transform reduction: created def-use"
3247 " cycle: ");
3248 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
3249 fprintf (vect_dump, "\n");
3250 print_gimple_stmt (vect_dump, SSA_NAME_DEF_STMT (def), 0,
3251 TDF_SLIM);
3254 phi = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi));
3258 VEC_free (tree, heap, vec_initial_defs);
3260 /* 2. Create epilog code.
3261 The reduction epilog code operates across the elements of the vector
3262 of partial results computed by the vectorized loop.
3263 The reduction epilog code consists of:
3265 step 1: compute the scalar result in a vector (v_out2)
3266 step 2: extract the scalar result (s_out3) from the vector (v_out2)
3267 step 3: adjust the scalar result (s_out3) if needed.
3269 Step 1 can be accomplished using one of the following three schemes:
3270 (scheme 1) using reduc_code, if available.
3271 (scheme 2) using whole-vector shifts, if available.
3272 (scheme 3) using a scalar loop. In this case steps 1+2 above are
3273 combined.
3275 The overall epilog code looks like this:
3277 s_out0 = phi <s_loop> # original EXIT_PHI
3278 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
3279 v_out2 = reduce <v_out1> # step 1
3280 s_out3 = extract_field <v_out2, 0> # step 2
3281 s_out4 = adjust_result <s_out3> # step 3
3283 (step 3 is optional, and steps 1 and 2 may be combined).
3284 Lastly, the uses of s_out0 are replaced by s_out4. */
3287 /* 2.1 Create new loop-exit-phis to preserve loop-closed form:
3288 v_out1 = phi <VECT_DEF>
3289 Store them in NEW_PHIS. */
3291 exit_bb = single_exit (loop)->dest;
3292 prev_phi_info = NULL;
3293 new_phis = VEC_alloc (gimple, heap, VEC_length (tree, vect_defs));
3294 FOR_EACH_VEC_ELT (tree, vect_defs, i, def)
3296 for (j = 0; j < ncopies; j++)
3298 phi = create_phi_node (SSA_NAME_VAR (def), exit_bb);
3299 set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, loop_vinfo, NULL));
3300 if (j == 0)
3301 VEC_quick_push (gimple, new_phis, phi);
3302 else
3304 def = vect_get_vec_def_for_stmt_copy (dt, def);
3305 STMT_VINFO_RELATED_STMT (prev_phi_info) = phi;
3308 SET_PHI_ARG_DEF (phi, single_exit (loop)->dest_idx, def);
3309 prev_phi_info = vinfo_for_stmt (phi);
3313 /* The epilogue is created for the outer-loop, i.e., for the loop being
3314 vectorized. */
3315 if (double_reduc)
3317 loop = outer_loop;
3318 exit_bb = single_exit (loop)->dest;
3321 exit_gsi = gsi_after_labels (exit_bb);
3323 /* 2.2 Get the relevant tree-code to use in the epilog for schemes 2,3
3324 (i.e. when reduc_code is not available) and in the final adjustment
3325 code (if needed). Also get the original scalar reduction variable as
3326 defined in the loop. In case STMT is a "pattern-stmt" (i.e. - it
3327 represents a reduction pattern), the tree-code and scalar-def are
3328 taken from the original stmt that the pattern-stmt (STMT) replaces.
3329 Otherwise (it is a regular reduction) - the tree-code and scalar-def
3330 are taken from STMT. */
3332 orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
3333 if (!orig_stmt)
3335 /* Regular reduction */
3336 orig_stmt = stmt;
3338 else
3340 /* Reduction pattern */
3341 stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt);
3342 gcc_assert (STMT_VINFO_IN_PATTERN_P (stmt_vinfo));
3343 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo) == stmt);
3346 code = gimple_assign_rhs_code (orig_stmt);
3347 /* For MINUS_EXPR the initial vector is [init_val,0,...,0], therefore,
3348 partial results are added and not subtracted. */
3349 if (code == MINUS_EXPR)
3350 code = PLUS_EXPR;
3352 scalar_dest = gimple_assign_lhs (orig_stmt);
3353 scalar_type = TREE_TYPE (scalar_dest);
3354 scalar_results = VEC_alloc (tree, heap, group_size);
3355 new_scalar_dest = vect_create_destination_var (scalar_dest, NULL);
3356 bitsize = TYPE_SIZE (scalar_type);
3358 /* In case this is a reduction in an inner-loop while vectorizing an outer
3359 loop - we don't need to extract a single scalar result at the end of the
3360 inner-loop (unless it is double reduction, i.e., the use of reduction is
3361 outside the outer-loop). The final vector of partial results will be used
3362 in the vectorized outer-loop, or reduced to a scalar result at the end of
3363 the outer-loop. */
3364 if (nested_in_vect_loop && !double_reduc)
3365 goto vect_finalize_reduction;
3367 /* 2.3 Create the reduction code, using one of the three schemes described
3368 above. In SLP we simply need to extract all the elements from the
3369 vector (without reducing them), so we use scalar shifts. */
3370 if (reduc_code != ERROR_MARK && !slp_node)
3372 tree tmp;
3374 /*** Case 1: Create:
3375 v_out2 = reduc_expr <v_out1> */
3377 if (vect_print_dump_info (REPORT_DETAILS))
3378 fprintf (vect_dump, "Reduce using direct vector reduction.");
3380 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3381 new_phi = VEC_index (gimple, new_phis, 0);
3382 tmp = build1 (reduc_code, vectype, PHI_RESULT (new_phi));
3383 epilog_stmt = gimple_build_assign (vec_dest, tmp);
3384 new_temp = make_ssa_name (vec_dest, epilog_stmt);
3385 gimple_assign_set_lhs (epilog_stmt, new_temp);
3386 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3388 extract_scalar_result = true;
3390 else
3392 enum tree_code shift_code = ERROR_MARK;
3393 bool have_whole_vector_shift = true;
3394 int bit_offset;
3395 int element_bitsize = tree_low_cst (bitsize, 1);
3396 int vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1);
3397 tree vec_temp;
3399 if (optab_handler (vec_shr_optab, mode) != CODE_FOR_nothing)
3400 shift_code = VEC_RSHIFT_EXPR;
3401 else
3402 have_whole_vector_shift = false;
3404 /* Regardless of whether we have a whole vector shift, if we're
3405 emulating the operation via tree-vect-generic, we don't want
3406 to use it. Only the first round of the reduction is likely
3407 to still be profitable via emulation. */
3408 /* ??? It might be better to emit a reduction tree code here, so that
3409 tree-vect-generic can expand the first round via bit tricks. */
3410 if (!VECTOR_MODE_P (mode))
3411 have_whole_vector_shift = false;
3412 else
3414 optab optab = optab_for_tree_code (code, vectype, optab_default);
3415 if (optab_handler (optab, mode) == CODE_FOR_nothing)
3416 have_whole_vector_shift = false;
3419 if (have_whole_vector_shift && !slp_node)
3421 /*** Case 2: Create:
3422 for (offset = VS/2; offset >= element_size; offset/=2)
3424 Create: va' = vec_shift <va, offset>
3425 Create: va = vop <va, va'>
3426 } */
3428 if (vect_print_dump_info (REPORT_DETAILS))
3429 fprintf (vect_dump, "Reduce using vector shifts");
3431 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3432 new_phi = VEC_index (gimple, new_phis, 0);
3433 new_temp = PHI_RESULT (new_phi);
3434 for (bit_offset = vec_size_in_bits/2;
3435 bit_offset >= element_bitsize;
3436 bit_offset /= 2)
3438 tree bitpos = size_int (bit_offset);
3440 epilog_stmt = gimple_build_assign_with_ops (shift_code,
3441 vec_dest, new_temp, bitpos);
3442 new_name = make_ssa_name (vec_dest, epilog_stmt);
3443 gimple_assign_set_lhs (epilog_stmt, new_name);
3444 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3446 epilog_stmt = gimple_build_assign_with_ops (code, vec_dest,
3447 new_name, new_temp);
3448 new_temp = make_ssa_name (vec_dest, epilog_stmt);
3449 gimple_assign_set_lhs (epilog_stmt, new_temp);
3450 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3453 extract_scalar_result = true;
3455 else
3457 tree rhs;
3459 /*** Case 3: Create:
3460 s = extract_field <v_out2, 0>
3461 for (offset = element_size;
3462 offset < vector_size;
3463 offset += element_size;)
3465 Create: s' = extract_field <v_out2, offset>
3466 Create: s = op <s, s'> // For non SLP cases
3467 } */
3469 if (vect_print_dump_info (REPORT_DETAILS))
3470 fprintf (vect_dump, "Reduce using scalar code.");
3472 vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1);
3473 FOR_EACH_VEC_ELT (gimple, new_phis, i, new_phi)
3475 vec_temp = PHI_RESULT (new_phi);
3476 rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp, bitsize,
3477 bitsize_zero_node);
3478 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
3479 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
3480 gimple_assign_set_lhs (epilog_stmt, new_temp);
3481 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3483 /* In SLP we don't need to apply the reduction operation, so we just
3484 collect s' values in SCALAR_RESULTS. */
3485 if (slp_node)
3486 VEC_safe_push (tree, heap, scalar_results, new_temp);
3488 for (bit_offset = element_bitsize;
3489 bit_offset < vec_size_in_bits;
3490 bit_offset += element_bitsize)
3492 tree bitpos = bitsize_int (bit_offset);
3493 tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp,
3494 bitsize, bitpos);
3496 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
3497 new_name = make_ssa_name (new_scalar_dest, epilog_stmt);
3498 gimple_assign_set_lhs (epilog_stmt, new_name);
3499 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3501 if (slp_node)
3503 /* In SLP we don't need to apply the reduction operation, so
3504 we just collect s' values in SCALAR_RESULTS. */
3505 new_temp = new_name;
3506 VEC_safe_push (tree, heap, scalar_results, new_name);
3508 else
3510 epilog_stmt = gimple_build_assign_with_ops (code,
3511 new_scalar_dest, new_name, new_temp);
3512 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
3513 gimple_assign_set_lhs (epilog_stmt, new_temp);
3514 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3519 /* The only case where we need to reduce scalar results in SLP is
3520 unrolling. If the size of SCALAR_RESULTS is greater than
3521 GROUP_SIZE, we reduce them combining elements modulo
3522 GROUP_SIZE. */
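/* For illustration (hypothetical SLP case): with GROUP_SIZE = 2 and
   collected results {r0, r1, r2, r3} (two unrolled copies), the loop
   below folds r2 into r0 and r3 into r1, leaving one result per group
   member.  */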
3523 if (slp_node)
3525 tree res, first_res, new_res;
3526 gimple new_stmt;
3528 /* Reduce multiple scalar results in case of SLP unrolling. */
3529 for (j = group_size; VEC_iterate (tree, scalar_results, j, res);
3530 j++)
3532 first_res = VEC_index (tree, scalar_results, j % group_size);
3533 new_stmt = gimple_build_assign_with_ops (code,
3534 new_scalar_dest, first_res, res);
3535 new_res = make_ssa_name (new_scalar_dest, new_stmt);
3536 gimple_assign_set_lhs (new_stmt, new_res);
3537 gsi_insert_before (&exit_gsi, new_stmt, GSI_SAME_STMT);
3538 VEC_replace (tree, scalar_results, j % group_size, new_res);
3541 else
3542 /* Not SLP - we have one scalar to keep in SCALAR_RESULTS. */
3543 VEC_safe_push (tree, heap, scalar_results, new_temp);
3545 extract_scalar_result = false;
3549 /* 2.4 Extract the final scalar result. Create:
3550 s_out3 = extract_field <v_out2, bitpos> */
3552 if (extract_scalar_result)
3554 tree rhs;
3556 if (vect_print_dump_info (REPORT_DETAILS))
3557 fprintf (vect_dump, "extract scalar result");
3559 if (BYTES_BIG_ENDIAN)
3560 bitpos = size_binop (MULT_EXPR,
3561 bitsize_int (TYPE_VECTOR_SUBPARTS (vectype) - 1),
3562 TYPE_SIZE (scalar_type));
3563 else
3564 bitpos = bitsize_zero_node;
3566 rhs = build3 (BIT_FIELD_REF, scalar_type, new_temp, bitsize, bitpos);
3567 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
3568 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
3569 gimple_assign_set_lhs (epilog_stmt, new_temp);
3570 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3571 VEC_safe_push (tree, heap, scalar_results, new_temp);
3574 vect_finalize_reduction:
3576 if (double_reduc)
3577 loop = loop->inner;
3579 /* 2.5 Adjust the final result by the initial value of the reduction
3580 variable. (When no such adjustment is needed, 'adjustment_def'
3581 is zero). For example, if code is PLUS we create:
3582 new_temp = loop_exit_def + adjustment_def */
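/* Source-level sketch of why the adjustment exists (illustrative):

       int sum = 5;
       for (i = 0; i < N; i++)
         sum += a[i];

   The vector loop may accumulate starting from the neutral vector
   {0,0,0,0}, in which case the initial value is added back here:
       s_out4 = s_out3 + 5  */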
3584 if (adjustment_def)
3586 gcc_assert (!slp_node);
3587 if (nested_in_vect_loop)
3589 new_phi = VEC_index (gimple, new_phis, 0);
3590 gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) == VECTOR_TYPE);
3591 expr = build2 (code, vectype, PHI_RESULT (new_phi), adjustment_def);
3592 new_dest = vect_create_destination_var (scalar_dest, vectype);
3594 else
3596 new_temp = VEC_index (tree, scalar_results, 0);
3597 gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) != VECTOR_TYPE);
3598 expr = build2 (code, scalar_type, new_temp, adjustment_def);
3599 new_dest = vect_create_destination_var (scalar_dest, scalar_type);
3602 epilog_stmt = gimple_build_assign (new_dest, expr);
3603 new_temp = make_ssa_name (new_dest, epilog_stmt);
3604 gimple_assign_set_lhs (epilog_stmt, new_temp);
3605 SSA_NAME_DEF_STMT (new_temp) = epilog_stmt;
3606 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3607 if (nested_in_vect_loop)
3609 set_vinfo_for_stmt (epilog_stmt,
3610 new_stmt_vec_info (epilog_stmt, loop_vinfo,
3611 NULL));
3612 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (epilog_stmt)) =
3613 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (new_phi));
3615 if (!double_reduc)
3616 VEC_quick_push (tree, scalar_results, new_temp);
3617 else
3618 VEC_replace (tree, scalar_results, 0, new_temp);
3620 else
3621 VEC_replace (tree, scalar_results, 0, new_temp);
3623 VEC_replace (gimple, new_phis, 0, epilog_stmt);
3626 /* 2.6 Handle the loop-exit phis. Replace the uses of scalar loop-exit
3627 phis with new adjusted scalar results, i.e., replace use <s_out0>
3628 with use <s_out4>.
3630 Transform:
3631 loop_exit:
3632 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
3633 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
3634 v_out2 = reduce <v_out1>
3635 s_out3 = extract_field <v_out2, 0>
3636 s_out4 = adjust_result <s_out3>
3637 use <s_out0>
3638 use <s_out0>
3640 into:
3642 loop_exit:
3643 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
3644 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
3645 v_out2 = reduce <v_out1>
3646 s_out3 = extract_field <v_out2, 0>
3647 s_out4 = adjust_result <s_out3>
3648 use <s_out4>
3649 use <s_out4> */
3651 /* In SLP we may have several statements in NEW_PHIS and REDUCTION_PHIS (in
3652 case that GROUP_SIZE is greater than the vectorization factor). Therefore, we
3653 need to match SCALAR_RESULTS with corresponding statements. The first
3654 (GROUP_SIZE / number of new vector stmts) scalar results correspond to
3655 the first vector stmt, etc.
3656 (RATIO is equal to (GROUP_SIZE / number of new vector stmts)). */
3657 if (group_size > VEC_length (gimple, new_phis))
3659 ratio = group_size / VEC_length (gimple, new_phis);
3660 gcc_assert (!(group_size % VEC_length (gimple, new_phis)));
3662 else
3663 ratio = 1;
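/* E.g. (illustrative numbers): GROUP_SIZE == 4 with 2 new vector stmts
   gives RATIO == 2, so scalar_results[0..1] match the first vector
   stmt (new_phis[0]) and scalar_results[2..3] match the second.  */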
3665 for (k = 0; k < group_size; k++)
3667 if (k % ratio == 0)
3669 epilog_stmt = VEC_index (gimple, new_phis, k / ratio);
3670 reduction_phi = VEC_index (gimple, reduction_phis, k / ratio);
3673 if (slp_node)
3675 gimple current_stmt = VEC_index (gimple,
3676 SLP_TREE_SCALAR_STMTS (slp_node), k);
3678 orig_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (current_stmt));
3679 /* SLP statements can't participate in patterns. */
3680 gcc_assert (!orig_stmt);
3681 scalar_dest = gimple_assign_lhs (current_stmt);
3684 phis = VEC_alloc (gimple, heap, 3);
3685 /* Find the loop-closed-use at the loop exit of the original scalar
3686 result. (The reduction result is expected to have two immediate uses -
3687 one at the latch block, and one at the loop exit). */
3688 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
3689 if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p))))
3690 VEC_safe_push (gimple, heap, phis, USE_STMT (use_p));
3692 /* We expect to have found an exit_phi because of loop-closed-ssa
3693 form. */
3694 gcc_assert (!VEC_empty (gimple, phis));
3696 FOR_EACH_VEC_ELT (gimple, phis, i, exit_phi)
3698 if (outer_loop)
3700 stmt_vec_info exit_phi_vinfo = vinfo_for_stmt (exit_phi);
3701 gimple vect_phi;
3703 /* FORNOW. Currently not supporting the case that an inner-loop
3704 reduction is not used in the outer-loop (but only outside the
3705 outer-loop), unless it is a double reduction. */
3706 gcc_assert ((STMT_VINFO_RELEVANT_P (exit_phi_vinfo)
3707 && !STMT_VINFO_LIVE_P (exit_phi_vinfo))
3708 || double_reduc);
3710 STMT_VINFO_VEC_STMT (exit_phi_vinfo) = epilog_stmt;
3711 if (!double_reduc
3712 || STMT_VINFO_DEF_TYPE (exit_phi_vinfo)
3713 != vect_double_reduction_def)
3714 continue;
3716 /* Handle double reduction:
3718 stmt1: s1 = phi <s0, s2> - double reduction phi (outer loop)
3719 stmt2: s3 = phi <s1, s4> - (regular) reduc phi (inner loop)
3720 stmt3: s4 = use (s3) - (regular) reduc stmt (inner loop)
3721 stmt4: s2 = phi <s4> - double reduction stmt (outer loop)
3723 At that point the regular reduction (stmt2 and stmt3) is
3724 already vectorized, as well as the exit phi node, stmt4.
3725 Here we vectorize the phi node of double reduction, stmt1, and
3726 update all relevant statements. */
3728 /* Go through all the uses of s2 to find the double reduction phi
3729 node, i.e., stmt1 above. */
3730 orig_name = PHI_RESULT (exit_phi);
3731 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
3733 stmt_vec_info use_stmt_vinfo = vinfo_for_stmt (use_stmt);
3734 stmt_vec_info new_phi_vinfo;
3735 tree vect_phi_init, preheader_arg, vect_phi_res, init_def;
3736 basic_block bb = gimple_bb (use_stmt);
3737 gimple use;
3739 /* Check that USE_STMT is really a double reduction phi
3740 node. */
3741 if (gimple_code (use_stmt) != GIMPLE_PHI
3742 || gimple_phi_num_args (use_stmt) != 2
3743 || !use_stmt_vinfo
3744 || STMT_VINFO_DEF_TYPE (use_stmt_vinfo)
3745 != vect_double_reduction_def
3746 || bb->loop_father != outer_loop)
3747 continue;
3749 /* Create vector phi node for double reduction:
3750 vs1 = phi <vs0, vs2>
3751 vs1 was created previously in this function by a call to
3752 vect_get_vec_def_for_operand and is stored in
3753 vec_initial_def;
3754 vs2 is defined by EPILOG_STMT, the vectorized EXIT_PHI;
3755 vs0 is created here. */
3757 /* Create vector phi node. */
3758 vect_phi = create_phi_node (vec_initial_def, bb);
3759 new_phi_vinfo = new_stmt_vec_info (vect_phi,
3760 loop_vec_info_for_loop (outer_loop), NULL);
3761 set_vinfo_for_stmt (vect_phi, new_phi_vinfo);
3763 /* Create vs0 - initial def of the double reduction phi. */
3764 preheader_arg = PHI_ARG_DEF_FROM_EDGE (use_stmt,
3765 loop_preheader_edge (outer_loop));
3766 init_def = get_initial_def_for_reduction (stmt,
3767 preheader_arg, NULL);
3768 vect_phi_init = vect_init_vector (use_stmt, init_def,
3769 vectype, NULL);
3771 /* Update phi node arguments with vs0 and vs2. */
3772 add_phi_arg (vect_phi, vect_phi_init,
3773 loop_preheader_edge (outer_loop),
3774 UNKNOWN_LOCATION);
3775 add_phi_arg (vect_phi, PHI_RESULT (epilog_stmt),
3776 loop_latch_edge (outer_loop), UNKNOWN_LOCATION);
3777 if (vect_print_dump_info (REPORT_DETAILS))
3779 fprintf (vect_dump, "created double reduction phi "
3780 "node: ");
3781 print_gimple_stmt (vect_dump, vect_phi, 0, TDF_SLIM);
3784 vect_phi_res = PHI_RESULT (vect_phi);
3786 /* Replace the use, i.e., set the correct vs1 in the regular
3787 reduction phi node. FORNOW, NCOPIES is always 1, so the
3788 loop is redundant. */
3789 use = reduction_phi;
3790 for (j = 0; j < ncopies; j++)
3792 edge pr_edge = loop_preheader_edge (loop);
3793 SET_PHI_ARG_DEF (use, pr_edge->dest_idx, vect_phi_res);
3794 use = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (use));
3800 VEC_free (gimple, heap, phis);
3801 if (nested_in_vect_loop)
3803 if (double_reduc)
3804 loop = outer_loop;
3805 else
3806 continue;
3809 phis = VEC_alloc (gimple, heap, 3);
3810 /* Find the loop-closed-use at the loop exit of the original scalar
3811 result. (The reduction result is expected to have two immediate uses,
3812 one at the latch block, and one at the loop exit). For double
3813 reductions we are looking for exit phis of the outer loop. */
3814 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
3816 if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p))))
3817 VEC_safe_push (gimple, heap, phis, USE_STMT (use_p));
3818 else
3820 if (double_reduc && gimple_code (USE_STMT (use_p)) == GIMPLE_PHI)
3822 tree phi_res = PHI_RESULT (USE_STMT (use_p));
3824 FOR_EACH_IMM_USE_FAST (phi_use_p, phi_imm_iter, phi_res)
3826 if (!flow_bb_inside_loop_p (loop,
3827 gimple_bb (USE_STMT (phi_use_p))))
3828 VEC_safe_push (gimple, heap, phis,
3829 USE_STMT (phi_use_p));
3835 FOR_EACH_VEC_ELT (gimple, phis, i, exit_phi)
3837 /* Replace the uses: */
3838 orig_name = PHI_RESULT (exit_phi);
3839 scalar_result = VEC_index (tree, scalar_results, k);
3840 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
3841 FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
3842 SET_USE (use_p, scalar_result);
3845 VEC_free (gimple, heap, phis);
3848 VEC_free (tree, heap, scalar_results);
3849 VEC_free (gimple, heap, new_phis);
3853 /* Function vectorizable_reduction.
3855 Check if STMT performs a reduction operation that can be vectorized.
3856 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3857 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
3858 Return FALSE if not a vectorizable STMT, TRUE otherwise.
3860 This function also handles reduction idioms (patterns) that have been
3861 recognized in advance during vect_pattern_recog. In this case, STMT may be
3862 of this form:
3863 X = pattern_expr (arg0, arg1, ..., X)
3864 and its STMT_VINFO_RELATED_STMT points to the last stmt in the original
3865 sequence that had been detected and replaced by the pattern-stmt (STMT).
3867 In some cases of reduction patterns, the type of the reduction variable X is
3868 different from the type of the other arguments of STMT.
3869 In such cases, the vectype that is used when transforming STMT into a vector
3870 stmt is different from the vectype that is used to determine the
3871 vectorization factor, because it consists of a different number of elements
3872 than the actual number of elements that are being operated upon in parallel.
3874 For example, consider an accumulation of shorts into an int accumulator.
3875 On some targets it's possible to vectorize this pattern operating on 8
3876 shorts at a time (hence, the vectype for purposes of determining the
3877 vectorization factor should be V8HI); on the other hand, the vectype that
3878 is used to create the vector form is actually V4SI (the type of the result).
3880 Upon entry to this function, STMT_VINFO_VECTYPE records the vectype that
3881 indicates the actual level of parallelism (V8HI in the example), so
3882 that the right vectorization factor is derived. This vectype
3883 corresponds to the type of arguments to the reduction stmt, and should *NOT*
3884 be used to create the vectorized stmt. The right vectype for the vectorized
3885 stmt is obtained from the type of the result X:
3886 get_vectype_for_scalar_type (TREE_TYPE (X))
3888 This means that, contrary to "regular" reductions (or "regular" stmts in
3889 general), the following equation:
3890 STMT_VINFO_VECTYPE == get_vectype_for_scalar_type (TREE_TYPE (X))
3891 does *NOT* necessarily hold for reduction patterns. */
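/* A concrete instance of the above (illustrative; the actual modes
   depend on the target):

       short a[N];
       int i, sum = 0;
       for (i = 0; i < N; i++)
         sum += a[i];        // pattern stmt: sum = widen_sum <a[i], sum>

   Here vectype_in = V8HI (from the type of a[i]) yields VF == 8, while
   the vectorized stmt itself is created with
   vectype_out = get_vectype_for_scalar_type (int) == V4SI.  */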
3893 bool
3894 vectorizable_reduction (gimple stmt, gimple_stmt_iterator *gsi,
3895 gimple *vec_stmt, slp_tree slp_node)
3897 tree vec_dest;
3898 tree scalar_dest;
3899 tree loop_vec_def0 = NULL_TREE, loop_vec_def1 = NULL_TREE;
3900 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3901 tree vectype_out = STMT_VINFO_VECTYPE (stmt_info);
3902 tree vectype_in = NULL_TREE;
3903 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3904 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
3905 enum tree_code code, orig_code, epilog_reduc_code;
3906 enum machine_mode vec_mode;
3907 int op_type;
3908 optab optab, reduc_optab;
3909 tree new_temp = NULL_TREE;
3910 tree def;
3911 gimple def_stmt;
3912 enum vect_def_type dt;
3913 gimple new_phi = NULL;
3914 tree scalar_type;
3915 bool is_simple_use;
3916 gimple orig_stmt;
3917 stmt_vec_info orig_stmt_info;
3918 tree expr = NULL_TREE;
3919 int i;
3920 int ncopies;
3921 int epilog_copies;
3922 stmt_vec_info prev_stmt_info, prev_phi_info;
3923 bool single_defuse_cycle = false;
3924 tree reduc_def = NULL_TREE;
3925 gimple new_stmt = NULL;
3926 int j;
3927 tree ops[3];
3928 bool nested_cycle = false, found_nested_cycle_def = false;
3929 gimple reduc_def_stmt = NULL;
3930 /* The default is that the reduction variable is the last in the statement. */
3931 int reduc_index = 2;
3932 bool double_reduc = false, dummy;
3933 basic_block def_bb;
3934 struct loop * def_stmt_loop, *outer_loop = NULL;
3935 tree def_arg;
3936 gimple def_arg_stmt;
3937 VEC (tree, heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL, *vect_defs = NULL;
3938 VEC (gimple, heap) *phis = NULL;
3939 int vec_num;
3940 tree def0, def1, tem;
3942 if (nested_in_vect_loop_p (loop, stmt))
3944 outer_loop = loop;
3945 loop = loop->inner;
3946 nested_cycle = true;
3949 /* 1. Is vectorizable reduction? */
3950 /* Not supportable if the reduction variable is used in the loop. */
3951 if (STMT_VINFO_RELEVANT (stmt_info) > vect_used_in_outer)
3952 return false;
3954 /* Reductions that are not used even in an enclosing outer-loop
3955 are expected to be "live" (used out of the loop). */
3956 if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_scope
3957 && !STMT_VINFO_LIVE_P (stmt_info))
3958 return false;
3960 /* Make sure it was already recognized as a reduction computation. */
3961 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_reduction_def
3962 && STMT_VINFO_DEF_TYPE (stmt_info) != vect_nested_cycle)
3963 return false;
3965 /* 2. Has this been recognized as a reduction pattern?
3967 Check if STMT represents a pattern that has been recognized
3968 in earlier analysis stages. For stmts that represent a pattern,
3969 the STMT_VINFO_RELATED_STMT field records the last stmt in
3970 the original sequence that constitutes the pattern. */
3972 orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
3973 if (orig_stmt)
3975 orig_stmt_info = vinfo_for_stmt (orig_stmt);
3976 gcc_assert (STMT_VINFO_RELATED_STMT (orig_stmt_info) == stmt);
3977 gcc_assert (STMT_VINFO_IN_PATTERN_P (orig_stmt_info));
3978 gcc_assert (!STMT_VINFO_IN_PATTERN_P (stmt_info));
3981 /* 3. Check the operands of the operation. The first operands are defined
3982 inside the loop body. The last operand is the reduction variable,
3983 which is defined by the loop-header-phi. */
3985 gcc_assert (is_gimple_assign (stmt));
3987 /* Flatten RHS. */
3988 switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
3990 case GIMPLE_SINGLE_RHS:
3991 op_type = TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt));
3992 if (op_type == ternary_op)
3994 tree rhs = gimple_assign_rhs1 (stmt);
3995 ops[0] = TREE_OPERAND (rhs, 0);
3996 ops[1] = TREE_OPERAND (rhs, 1);
3997 ops[2] = TREE_OPERAND (rhs, 2);
3998 code = TREE_CODE (rhs);
4000 else
4001 return false;
4002 break;
4004 case GIMPLE_BINARY_RHS:
4005 code = gimple_assign_rhs_code (stmt);
4006 op_type = TREE_CODE_LENGTH (code);
4007 gcc_assert (op_type == binary_op);
4008 ops[0] = gimple_assign_rhs1 (stmt);
4009 ops[1] = gimple_assign_rhs2 (stmt);
4010 break;
4012 case GIMPLE_UNARY_RHS:
4013 return false;
4015 default:
4016 gcc_unreachable ();
4019 scalar_dest = gimple_assign_lhs (stmt);
4020 scalar_type = TREE_TYPE (scalar_dest);
4021 if (!POINTER_TYPE_P (scalar_type) && !INTEGRAL_TYPE_P (scalar_type)
4022 && !SCALAR_FLOAT_TYPE_P (scalar_type))
4023 return false;
4025 /* All uses but the last are expected to be defined in the loop.
4026 The last use is the reduction variable. In case of nested cycle this
4027 assumption is not true: we use reduc_index to record the index of the
4028 reduction variable. */
4029 for (i = 0; i < op_type-1; i++)
4031 /* The condition of COND_EXPR is checked in vectorizable_condition(). */
4032 if (i == 0 && code == COND_EXPR)
4033 continue;
4035 is_simple_use = vect_is_simple_use_1 (ops[i], loop_vinfo, NULL,
4036 &def_stmt, &def, &dt, &tem);
4037 if (!vectype_in)
4038 vectype_in = tem;
4039 gcc_assert (is_simple_use);
4040 if (dt != vect_internal_def
4041 && dt != vect_external_def
4042 && dt != vect_constant_def
4043 && dt != vect_induction_def
4044 && !(dt == vect_nested_cycle && nested_cycle))
4045 return false;
4047 if (dt == vect_nested_cycle)
4049 found_nested_cycle_def = true;
4050 reduc_def_stmt = def_stmt;
4051 reduc_index = i;
4055 is_simple_use = vect_is_simple_use_1 (ops[i], loop_vinfo, NULL, &def_stmt,
4056 &def, &dt, &tem);
4057 if (!vectype_in)
4058 vectype_in = tem;
4059 gcc_assert (is_simple_use);
4060 gcc_assert (dt == vect_reduction_def
4061 || dt == vect_nested_cycle
4062 || ((dt == vect_internal_def || dt == vect_external_def
4063 || dt == vect_constant_def || dt == vect_induction_def)
4064 && nested_cycle && found_nested_cycle_def));
4065 if (!found_nested_cycle_def)
4066 reduc_def_stmt = def_stmt;
4068 gcc_assert (gimple_code (reduc_def_stmt) == GIMPLE_PHI);
4069 if (orig_stmt)
4070 gcc_assert (orig_stmt == vect_is_simple_reduction (loop_vinfo,
4071 reduc_def_stmt,
4072 !nested_cycle,
4073 &dummy));
4074 else
4075 gcc_assert (stmt == vect_is_simple_reduction (loop_vinfo, reduc_def_stmt,
4076 !nested_cycle, &dummy));
4078 if (STMT_VINFO_LIVE_P (vinfo_for_stmt (reduc_def_stmt)))
4079 return false;
4081 if (slp_node)
4082 ncopies = 1;
4083 else
4084 ncopies = (LOOP_VINFO_VECT_FACTOR (loop_vinfo)
4085 / TYPE_VECTOR_SUBPARTS (vectype_in));
4087 gcc_assert (ncopies >= 1);
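/* For instance (illustrative): with VF == 8 and vectype_in == V8HI,
   ncopies = 8/8 = 1; with vectype_in == V4SI instead, ncopies = 8/4
   = 2, i.e. two vector stmts are generated per scalar stmt.  */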
4089 vec_mode = TYPE_MODE (vectype_in);
4091 if (code == COND_EXPR)
4093 if (!vectorizable_condition (stmt, gsi, NULL, ops[reduc_index], 0))
4095 if (vect_print_dump_info (REPORT_DETAILS))
4096 fprintf (vect_dump, "unsupported condition in reduction");
4098 return false;
4101 else
4103 /* 4. Supportable by target? */
4105 /* 4.1. check support for the operation in the loop */
4106 optab = optab_for_tree_code (code, vectype_in, optab_default);
4107 if (!optab)
4109 if (vect_print_dump_info (REPORT_DETAILS))
4110 fprintf (vect_dump, "no optab.");
4112 return false;
4115 if (optab_handler (optab, vec_mode) == CODE_FOR_nothing)
4117 if (vect_print_dump_info (REPORT_DETAILS))
4118 fprintf (vect_dump, "op not supported by target.");
4120 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
4121 || LOOP_VINFO_VECT_FACTOR (loop_vinfo)
4122 < vect_min_worthwhile_factor (code))
4123 return false;
4125 if (vect_print_dump_info (REPORT_DETAILS))
4126 fprintf (vect_dump, "proceeding using word mode.");
4129 /* Worthwhile without SIMD support? */
4130 if (!VECTOR_MODE_P (TYPE_MODE (vectype_in))
4131 && LOOP_VINFO_VECT_FACTOR (loop_vinfo)
4132 < vect_min_worthwhile_factor (code))
4134 if (vect_print_dump_info (REPORT_DETAILS))
4135 fprintf (vect_dump, "not worthwhile without SIMD support.");
4137 return false;
4141 /* 4.2. Check support for the epilog operation.
4143 If STMT represents a reduction pattern, then the type of the
4144 reduction variable may be different from the type of the rest
4145 of the arguments. For example, consider the case of accumulation
4146 of shorts into an int accumulator. The original code:
4147 S1: int_a = (int) short_a;
4148 orig_stmt-> S2: int_acc = plus <int_a ,int_acc>;
4150 was replaced with:
4151 STMT: int_acc = widen_sum <short_a, int_acc>
4153 This means that:
4154 1. The tree-code that is used to create the vector operation in the
4155 epilog code (that reduces the partial results) is not the
4156 tree-code of STMT, but is rather the tree-code of the original
4157 stmt from the pattern that STMT is replacing. I.e., in the example
4158 above we want to use 'widen_sum' in the loop, but 'plus' in the
4159 epilog.
4160 2. The type (mode) we use to check available target support
4161 for the vector operation to be created in the *epilog*, is
4162 determined by the type of the reduction variable (in the example
4163 above we'd check this: optab_handler (plus_optab, vect_int_mode)).
4164 However the type (mode) we use to check available target support
4165 for the vector operation to be created *inside the loop*, is
4166 determined by the type of the other arguments to STMT (in the
4167 example we'd check this: optab_handler (widen_sum_optab,
4168 vect_short_mode)).
4170 This is contrary to "regular" reductions, in which the types of all
4171 the arguments are the same as the type of the reduction variable.
4172 For "regular" reductions we can therefore use the same vector type
4173 (and also the same tree-code) when generating the epilog code and
4174 when generating the code inside the loop. */
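/* For the widen_sum example above this amounts to two separate checks
   (optab and mode names are illustrative, assuming the signed case):
       inside the loop: optab_handler (ssum_widen_optab, V8HImode)
       for the epilog:  optab_handler (reduc_splus_optab, V4SImode)  */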
4176 if (orig_stmt)
4178 /* This is a reduction pattern: get the vectype from the type of the
4179 reduction variable, and get the tree-code from orig_stmt. */
4180 orig_code = gimple_assign_rhs_code (orig_stmt);
4181 gcc_assert (vectype_out);
4182 vec_mode = TYPE_MODE (vectype_out);
4184 else
4186 /* Regular reduction: the same vectype and tree-code used for the vector
4187 code inside the loop can also be used for the epilog code. */
4188 orig_code = code;
4191 if (nested_cycle)
4193 def_bb = gimple_bb (reduc_def_stmt);
4194 def_stmt_loop = def_bb->loop_father;
4195 def_arg = PHI_ARG_DEF_FROM_EDGE (reduc_def_stmt,
4196 loop_preheader_edge (def_stmt_loop));
4197 if (TREE_CODE (def_arg) == SSA_NAME
4198 && (def_arg_stmt = SSA_NAME_DEF_STMT (def_arg))
4199 && gimple_code (def_arg_stmt) == GIMPLE_PHI
4200 && flow_bb_inside_loop_p (outer_loop, gimple_bb (def_arg_stmt))
4201 && vinfo_for_stmt (def_arg_stmt)
4202 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_arg_stmt))
4203 == vect_double_reduction_def)
4204 double_reduc = true;
4207 epilog_reduc_code = ERROR_MARK;
4208 if (reduction_code_for_scalar_code (orig_code, &epilog_reduc_code))
4210 reduc_optab = optab_for_tree_code (epilog_reduc_code, vectype_out,
4211 optab_default);
4212 if (!reduc_optab)
4214 if (vect_print_dump_info (REPORT_DETAILS))
4215 fprintf (vect_dump, "no optab for reduction.");
4217 epilog_reduc_code = ERROR_MARK;
4220 if (reduc_optab
4221 && optab_handler (reduc_optab, vec_mode) == CODE_FOR_nothing)
4223 if (vect_print_dump_info (REPORT_DETAILS))
4224 fprintf (vect_dump, "reduc op not supported by target.");
4226 epilog_reduc_code = ERROR_MARK;
4229 else
4231 if (!nested_cycle || double_reduc)
4233 if (vect_print_dump_info (REPORT_DETAILS))
4234 fprintf (vect_dump, "no reduc code for scalar code.");
4236 return false;
4240 if (double_reduc && ncopies > 1)
4242 if (vect_print_dump_info (REPORT_DETAILS))
4243 fprintf (vect_dump, "multiple types in double reduction");
4245 return false;
4248 if (!vec_stmt) /* transformation not required. */
4250 STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
4251 if (!vect_model_reduction_cost (stmt_info, epilog_reduc_code, ncopies))
4252 return false;
4253 return true;
4256 /** Transform. **/
4258 if (vect_print_dump_info (REPORT_DETAILS))
4259 fprintf (vect_dump, "transform reduction.");
4261 /* FORNOW: Multiple types are not supported for condition. */
4262 if (code == COND_EXPR)
4263 gcc_assert (ncopies == 1);
4265 /* Create the destination vector */
4266 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
4268 /* In case the vectorization factor (VF) is bigger than the number
4269 of elements that we can fit in a vectype (nunits), we have to generate
4270 more than one vector stmt - i.e., we need to "unroll" the
4271 vector stmt by a factor VF/nunits. For more details see documentation
4272 in vectorizable_operation. */
4274 /* If the reduction is used in an outer loop we need to generate
4275 VF intermediate results, like so (e.g. for ncopies=2):
4276 r0 = phi (init, r0)
4277 r1 = phi (init, r1)
4278 r0 = x0 + r0;
4279 r1 = x1 + r1;
4280 (i.e. we generate VF results in 2 registers).
4281 In this case we have a separate def-use cycle for each copy, and therefore
4282 for each copy we get the vector def for the reduction variable from the
4283 respective phi node created for this copy.
4285 Otherwise (the reduction is unused in the loop nest), we can combine
4286 together intermediate results, like so (e.g. for ncopies=2):
4287 r = phi (init, r)
4288 r = x0 + r;
4289 r = x1 + r;
4290 (i.e. we generate VF/2 results in a single register).
4291 In this case for each copy we get the vector def for the reduction variable
4292 from the vectorized reduction operation generated in the previous iteration.
4293 */
4295 if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_scope)
4297 single_defuse_cycle = true;
4298 epilog_copies = 1;
4300 else
4301 epilog_copies = ncopies;
4303 prev_stmt_info = NULL;
4304 prev_phi_info = NULL;
4305 if (slp_node)
4307 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
4308 gcc_assert (TYPE_VECTOR_SUBPARTS (vectype_out)
4309 == TYPE_VECTOR_SUBPARTS (vectype_in));
4311 else
4313 vec_num = 1;
4314 vec_oprnds0 = VEC_alloc (tree, heap, 1);
4315 if (op_type == ternary_op)
4316 vec_oprnds1 = VEC_alloc (tree, heap, 1);
4319 phis = VEC_alloc (gimple, heap, vec_num);
4320 vect_defs = VEC_alloc (tree, heap, vec_num);
4321 if (!slp_node)
4322 VEC_quick_push (tree, vect_defs, NULL_TREE);
4324 for (j = 0; j < ncopies; j++)
4326 if (j == 0 || !single_defuse_cycle)
4328 for (i = 0; i < vec_num; i++)
4330 /* Create the reduction-phi that defines the reduction
4331 operand. */
4332 new_phi = create_phi_node (vec_dest, loop->header);
4333 set_vinfo_for_stmt (new_phi,
4334 new_stmt_vec_info (new_phi, loop_vinfo,
4335 NULL));
4336 if (j == 0 || slp_node)
4337 VEC_quick_push (gimple, phis, new_phi);
4341 if (code == COND_EXPR)
4343 gcc_assert (!slp_node);
4344 vectorizable_condition (stmt, gsi, vec_stmt,
4345 PHI_RESULT (VEC_index (gimple, phis, 0)),
4346 reduc_index);
4347 /* Multiple types are not supported for condition. */
4348 break;
4351 /* Handle uses. */
4352 if (j == 0)
4354 tree op0, op1 = NULL_TREE;
4356 op0 = ops[!reduc_index];
4357 if (op_type == ternary_op)
4359 if (reduc_index == 0)
4360 op1 = ops[2];
4361 else
4362 op1 = ops[1];
4365 if (slp_node)
4366 vect_get_slp_defs (op0, op1, slp_node, &vec_oprnds0, &vec_oprnds1,
4367 -1);
4368 else
4370 loop_vec_def0 = vect_get_vec_def_for_operand (ops[!reduc_index],
4371 stmt, NULL);
4372 VEC_quick_push (tree, vec_oprnds0, loop_vec_def0);
4373 if (op_type == ternary_op)
4375 loop_vec_def1 = vect_get_vec_def_for_operand (op1, stmt,
4376 NULL);
4377 VEC_quick_push (tree, vec_oprnds1, loop_vec_def1);
4381 else
4383 if (!slp_node)
4385 enum vect_def_type dt = vect_unknown_def_type; /* Dummy */
4386 loop_vec_def0 = vect_get_vec_def_for_stmt_copy (dt, loop_vec_def0);
4387 VEC_replace (tree, vec_oprnds0, 0, loop_vec_def0);
4388 if (op_type == ternary_op)
4390 loop_vec_def1 = vect_get_vec_def_for_stmt_copy (dt,
4391 loop_vec_def1);
4392 VEC_replace (tree, vec_oprnds1, 0, loop_vec_def1);
4396 if (single_defuse_cycle)
4397 reduc_def = gimple_assign_lhs (new_stmt);
4399 STMT_VINFO_RELATED_STMT (prev_phi_info) = new_phi;
4402 FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, def0)
4404 if (slp_node)
4405 reduc_def = PHI_RESULT (VEC_index (gimple, phis, i));
4406 else
4408 if (!single_defuse_cycle || j == 0)
4409 reduc_def = PHI_RESULT (new_phi);
4412 def1 = ((op_type == ternary_op)
4413 ? VEC_index (tree, vec_oprnds1, i) : NULL);
4414 if (op_type == binary_op)
4416 if (reduc_index == 0)
4417 expr = build2 (code, vectype_out, reduc_def, def0);
4418 else
4419 expr = build2 (code, vectype_out, def0, reduc_def);
4421 else
4423 if (reduc_index == 0)
4424 expr = build3 (code, vectype_out, reduc_def, def0, def1);
4425 else
4427 if (reduc_index == 1)
4428 expr = build3 (code, vectype_out, def0, reduc_def, def1);
4429 else
4430 expr = build3 (code, vectype_out, def0, def1, reduc_def);
4434 new_stmt = gimple_build_assign (vec_dest, expr);
4435 new_temp = make_ssa_name (vec_dest, new_stmt);
4436 gimple_assign_set_lhs (new_stmt, new_temp);
4437 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4438 if (slp_node)
4440 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
4441 VEC_quick_push (tree, vect_defs, new_temp);
4443 else
4444 VEC_replace (tree, vect_defs, 0, new_temp);
4447 if (slp_node)
4448 continue;
4450 if (j == 0)
4451 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4452 else
4453 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4455 prev_stmt_info = vinfo_for_stmt (new_stmt);
4456 prev_phi_info = vinfo_for_stmt (new_phi);
4459 /* Finalize the reduction-phi (set its arguments) and create the
4460 epilog reduction code. */
4461 if ((!single_defuse_cycle || code == COND_EXPR) && !slp_node)
4463 new_temp = gimple_assign_lhs (*vec_stmt);
4464 VEC_replace (tree, vect_defs, 0, new_temp);
4467 vect_create_epilog_for_reduction (vect_defs, stmt, epilog_copies,
4468 epilog_reduc_code, phis, reduc_index,
4469 double_reduc, slp_node);
4471 VEC_free (gimple, heap, phis);
4472 VEC_free (tree, heap, vec_oprnds0);
4473 if (vec_oprnds1)
4474 VEC_free (tree, heap, vec_oprnds1);
4476 return true;
4479 /* Function vect_min_worthwhile_factor.
4481 For a loop where we could vectorize the operation indicated by CODE,
4482 return the minimum vectorization factor that makes it worthwhile
4483 to use generic vectors. */
4484 int
4485 vect_min_worthwhile_factor (enum tree_code code)
4487 switch (code)
4489 case PLUS_EXPR:
4490 case MINUS_EXPR:
4491 case NEGATE_EXPR:
4492 return 4;
4494 case BIT_AND_EXPR:
4495 case BIT_IOR_EXPR:
4496 case BIT_XOR_EXPR:
4497 case BIT_NOT_EXPR:
4498 return 2;
4500 default:
4501 return INT_MAX;
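/* What "generic vectors" means here (an illustrative emulation scheme,
   not the exact lowering): without SIMD support, element-wise ops can
   still be carried out in one scalar word per vector.  Bitwise ops
   need no per-lane fixup (hence the lower threshold above), whereas
   addition must keep carries from crossing lane boundaries, e.g.:

     typedef unsigned long long word;     // assume a 64-bit word

     static word
     add4x16 (word x, word y)             // four 16-bit lanes at once
     {
       word low = 0x7fff7fff7fff7fffULL;  // low 15 bits of each lane
       // Add the low bits, then fold the top bit of each lane back in
       // separately, so no carry escapes into the neighboring lane.
       return ((x & low) + (y & low)) ^ ((x ^ y) & ~low);
     }
*/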
4506 /* Function vectorizable_induction
4508 Check if PHI performs an induction computation that can be vectorized.
4509 If VEC_STMT is also passed, vectorize the induction PHI: create a vectorized
4510 phi to replace it, put it in VEC_STMT, and add it to the same basic block.
4511 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
4513 bool
4514 vectorizable_induction (gimple phi, gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
4515 gimple *vec_stmt)
4517 stmt_vec_info stmt_info = vinfo_for_stmt (phi);
4518 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4519 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4520 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
4521 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
4522 int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
4523 tree vec_def;
4525 gcc_assert (ncopies >= 1);
4526 /* FORNOW. This restriction should be relaxed. */
4527 if (nested_in_vect_loop_p (loop, phi) && ncopies > 1)
4529 if (vect_print_dump_info (REPORT_DETAILS))
4530 fprintf (vect_dump, "multiple types in nested loop.");
4531 return false;
4534 if (!STMT_VINFO_RELEVANT_P (stmt_info))
4535 return false;
4537 /* FORNOW: SLP not supported. */
4538 if (STMT_SLP_TYPE (stmt_info))
4539 return false;
4541 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def);
4543 if (gimple_code (phi) != GIMPLE_PHI)
4544 return false;
4546 if (!vec_stmt) /* transformation not required. */
4548 STMT_VINFO_TYPE (stmt_info) = induc_vec_info_type;
4549 if (vect_print_dump_info (REPORT_DETAILS))
4550 fprintf (vect_dump, "=== vectorizable_induction ===");
4551 vect_model_induction_cost (stmt_info, ncopies);
4552 return true;
4555 /** Transform. **/
4557 if (vect_print_dump_info (REPORT_DETAILS))
4558 fprintf (vect_dump, "transform induction phi.");
4560 vec_def = get_initial_def_for_induction (phi);
4561 *vec_stmt = SSA_NAME_DEF_STMT (vec_def);
4562 return true;
4565 /* Function vectorizable_live_operation.
4567 STMT computes a value that is used outside the loop. Check if
4568 it can be supported. */
4570 bool
4571 vectorizable_live_operation (gimple stmt,
4572 gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
4573 gimple *vec_stmt ATTRIBUTE_UNUSED)
4575 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4576 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4577 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
4578 int i;
4579 int op_type;
4580 tree op;
4581 tree def;
4582 gimple def_stmt;
4583 enum vect_def_type dt;
4584 enum tree_code code;
4585 enum gimple_rhs_class rhs_class;
4587 gcc_assert (STMT_VINFO_LIVE_P (stmt_info));
4589 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def)
4590 return false;
4592 if (!is_gimple_assign (stmt))
4593 return false;
4595 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
4596 return false;
4598 /* FORNOW. CHECKME. */
4599 if (nested_in_vect_loop_p (loop, stmt))
4600 return false;
4602 code = gimple_assign_rhs_code (stmt);
4603 op_type = TREE_CODE_LENGTH (code);
4604 rhs_class = get_gimple_rhs_class (code);
4605 gcc_assert (rhs_class != GIMPLE_UNARY_RHS || op_type == unary_op);
4606 gcc_assert (rhs_class != GIMPLE_BINARY_RHS || op_type == binary_op);
4608 /* FORNOW: support only if all uses are invariant. This means
4609 that the scalar operations can remain in place, unvectorized.
4610 The original last scalar value that they compute will be used. */
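/* E.g. (illustrative): with x and y loop-invariant in

       for (i = 0; i < n; i++)
         { t = x + y;  b[i] = c[i] + 1; }
       last = t;   // use of t outside the loop

   't = x + y' can simply stay scalar: every iteration computes the
   same value, and the last one is what the outside use needs.  */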
4612 for (i = 0; i < op_type; i++)
4614 if (rhs_class == GIMPLE_SINGLE_RHS)
4615 op = TREE_OPERAND (gimple_op (stmt, 1), i);
4616 else
4617 op = gimple_op (stmt, i + 1);
4618 if (op
4619 && !vect_is_simple_use (op, loop_vinfo, NULL, &def_stmt, &def, &dt))
4621 if (vect_print_dump_info (REPORT_DETAILS))
4622 fprintf (vect_dump, "use not simple.");
4623 return false;
4626 if (dt != vect_external_def && dt != vect_constant_def)
4627 return false;
4630 /* No transformation is required for the cases we currently support. */
4631 return true;
4634 /* Kill any debug uses outside LOOP of SSA names defined in STMT. */
4636 static void
4637 vect_loop_kill_debug_uses (struct loop *loop, gimple stmt)
4639 ssa_op_iter op_iter;
4640 imm_use_iterator imm_iter;
4641 def_operand_p def_p;
4642 gimple ustmt;
4644 FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
4646 FOR_EACH_IMM_USE_STMT (ustmt, imm_iter, DEF_FROM_PTR (def_p))
4648 basic_block bb;
4650 if (!is_gimple_debug (ustmt))
4651 continue;
4653 bb = gimple_bb (ustmt);
4655 if (!flow_bb_inside_loop_p (loop, bb))
4657 if (gimple_debug_bind_p (ustmt))
4659 if (vect_print_dump_info (REPORT_DETAILS))
4660 fprintf (vect_dump, "killing debug use");
4662 gimple_debug_bind_reset_value (ustmt);
4663 update_stmt (ustmt);
4665 else
4666 gcc_unreachable ();
4672 /* Function vect_transform_loop.
4674 The analysis phase has determined that the loop is vectorizable.
4675 Vectorize the loop - create vectorized stmts to replace the scalar
4676 stmts in the loop, and update the loop exit condition. */
4678 void
4679 vect_transform_loop (loop_vec_info loop_vinfo)
4681 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
4682 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
4683 int nbbs = loop->num_nodes;
4684 gimple_stmt_iterator si;
4685 int i;
4686 tree ratio = NULL;
4687 int vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
4688 bool strided_store;
4689 bool slp_scheduled = false;
4690 unsigned int nunits;
4691 tree cond_expr = NULL_TREE;
4692 gimple_seq cond_expr_stmt_list = NULL;
4693 bool do_peeling_for_loop_bound;
4695 if (vect_print_dump_info (REPORT_DETAILS))
4696 fprintf (vect_dump, "=== vect_transform_loop ===");
4698 /* Peel the loop if there are data refs with unknown alignment.
4699 Only one data ref with unknown alignment is allowed. */
4701 if (LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo))
4702 vect_do_peeling_for_alignment (loop_vinfo);
4704 do_peeling_for_loop_bound
4705 = (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
4706 || (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
4707 && LOOP_VINFO_INT_NITERS (loop_vinfo) % vectorization_factor != 0));
4709 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
4710 || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
4711 vect_loop_versioning (loop_vinfo,
4712 !do_peeling_for_loop_bound,
4713 &cond_expr, &cond_expr_stmt_list);
4715 /* If the loop has a symbolic number of iterations 'n' (i.e. it's not a
4716 compile time constant), or it is a constant that is not divisible by the
4717 vectorization factor, then an epilog loop needs to be created.
4718 We therefore duplicate the loop: the original loop will be vectorized,
4719 and will compute the first (n/VF) iterations. The second copy of the loop
4720 will remain scalar and will compute the remaining (n%VF) iterations.
4721 (VF is the vectorization factor). */
4723 if (do_peeling_for_loop_bound)
4724 vect_do_peeling_for_loop_bound (loop_vinfo, &ratio,
4725 cond_expr, cond_expr_stmt_list);
4726 else
4727 ratio = build_int_cst (TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo)),
4728 LOOP_VINFO_INT_NITERS (loop_vinfo) / vectorization_factor);
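/* Source-level shape of the peeling decision above (illustrative,
   n = 103, VF = 8):

       for (i = 0; i < 96; i += 8)   // vectorized: (n/VF)*VF iterations
         ...vector code...
       for (; i < 103; i++)          // scalar epilog: n%VF iterations
         ...original scalar code...

   When n is a known multiple of VF no epilog is needed, and RATIO is
   simply n / VF.  */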
4730 /* 1) Make sure the loop header has exactly two entries
4731 2) Make sure we have a preheader basic block. */
4733 gcc_assert (EDGE_COUNT (loop->header->preds) == 2);
4735 split_edge (loop_preheader_edge (loop));
4737 /* FORNOW: the vectorizer supports only loops whose body consists
4738 of one basic block (header + empty latch). When the vectorizer
4739 supports more involved loop forms, the order in which the BBs are
4740 traversed needs to be reconsidered. */
4742 for (i = 0; i < nbbs; i++)
4744 basic_block bb = bbs[i];
4745 stmt_vec_info stmt_info;
4746 gimple phi;
4748 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
4750 phi = gsi_stmt (si);
4751 if (vect_print_dump_info (REPORT_DETAILS))
4753 fprintf (vect_dump, "------>vectorizing phi: ");
4754 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
4756 stmt_info = vinfo_for_stmt (phi);
4757 if (!stmt_info)
4758 continue;
4760 if (MAY_HAVE_DEBUG_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
4761 vect_loop_kill_debug_uses (loop, phi);
4763 if (!STMT_VINFO_RELEVANT_P (stmt_info)
4764 && !STMT_VINFO_LIVE_P (stmt_info))
4765 continue;
4767 if ((TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info))
4768 != (unsigned HOST_WIDE_INT) vectorization_factor)
4769 && vect_print_dump_info (REPORT_DETAILS))
4770 fprintf (vect_dump, "multiple-types.");
4772 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def)
4774 if (vect_print_dump_info (REPORT_DETAILS))
4775 fprintf (vect_dump, "transform phi.");
4776 vect_transform_stmt (phi, NULL, NULL, NULL, NULL);
4780 for (si = gsi_start_bb (bb); !gsi_end_p (si);)
4782 gimple stmt = gsi_stmt (si);
4783 bool is_store;
4785 if (vect_print_dump_info (REPORT_DETAILS))
4787 fprintf (vect_dump, "------>vectorizing statement: ");
4788 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4791 stmt_info = vinfo_for_stmt (stmt);
4793 /* vector stmts created in the outer-loop during vectorization of
4794 stmts in an inner-loop may not have a stmt_info, and do not
4795 need to be vectorized. */
4796 if (!stmt_info)
4798 gsi_next (&si);
4799 continue;
4802 if (MAY_HAVE_DEBUG_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
4803 vect_loop_kill_debug_uses (loop, stmt);
4805 if (!STMT_VINFO_RELEVANT_P (stmt_info)
4806 && !STMT_VINFO_LIVE_P (stmt_info))
4808 gsi_next (&si);
4809 continue;
4812 gcc_assert (STMT_VINFO_VECTYPE (stmt_info));
4813 nunits =
4814 (unsigned int) TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
4815 if (!STMT_SLP_TYPE (stmt_info)
4816 && nunits != (unsigned int) vectorization_factor
4817 && vect_print_dump_info (REPORT_DETAILS))
4818 /* For SLP, VF is set according to the unrolling factor, not the
4819 vector size, hence for SLP this print is not valid. */
4820 fprintf (vect_dump, "multiple-types.");
4822 /* SLP. Schedule all the SLP instances when the first SLP stmt is
4823 reached. */
4824 if (STMT_SLP_TYPE (stmt_info))
4826 if (!slp_scheduled)
4828 slp_scheduled = true;
4830 if (vect_print_dump_info (REPORT_DETAILS))
4831 fprintf (vect_dump, "=== scheduling SLP instances ===");
4833 vect_schedule_slp (loop_vinfo, NULL);
4836 /* Hybrid SLP stmts must be vectorized in addition to SLP. */
4837 if (!vinfo_for_stmt (stmt) || PURE_SLP_STMT (stmt_info))
4839 gsi_next (&si);
4840 continue;
4844 /* -------- vectorize statement ------------ */
4845 if (vect_print_dump_info (REPORT_DETAILS))
4846 fprintf (vect_dump, "transform statement.");
4848 strided_store = false;
4849 is_store = vect_transform_stmt (stmt, &si, &strided_store, NULL, NULL);
4850 if (is_store)
4852 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
4854 /* Interleaving. If IS_STORE is TRUE, the vectorization of the
4855 interleaving chain was completed - free all the stores in
4856 the chain. */
4857 vect_remove_stores (DR_GROUP_FIRST_DR (stmt_info));
4858 gsi_remove (&si, true);
4859 continue;
4861 else
4863 /* Free the attached stmt_vec_info and remove the stmt. */
4864 free_stmt_vec_info (stmt);
4865 gsi_remove (&si, true);
4866 continue;
4869 gsi_next (&si);
4870 } /* stmts in BB */
4871 } /* BBs in loop */
4873 slpeel_make_loop_iterate_ntimes (loop, ratio);
4875 /* The memory tags and pointers in vectorized statements need to
4876 have their SSA forms updated. FIXME, why can't this be delayed
4877 until all the loops have been transformed? */
4878 update_ssa (TODO_update_ssa);
4880 if (vect_print_dump_info (REPORT_VECTORIZED_LOCATIONS))
4881 fprintf (vect_dump, "LOOP VECTORIZED.");
4882 if (loop->inner && vect_print_dump_info (REPORT_VECTORIZED_LOCATIONS))
4883 fprintf (vect_dump, "OUTER LOOP VECTORIZED.");