/* Loop Vectorization
   Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
   Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com> and
   Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "ggc.h"
#include "tree.h"
#include "basic-block.h"
#include "tree-pretty-print.h"
#include "gimple-pretty-print.h"
#include "tree-flow.h"
#include "tree-dump.h"
#include "cfgloop.h"
#include "cfglayout.h"
#include "expr.h"
#include "recog.h"
#include "optabs.h"
#include "params.h"
#include "diagnostic-core.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
#include "tree-vectorizer.h"
#include "target.h"
/* Loop Vectorization Pass.

   This pass tries to vectorize loops.

   For example, the vectorizer transforms the following simple loop:

        short a[N]; short b[N]; short c[N]; int i;

        for (i=0; i<N; i++){
          a[i] = b[i] + c[i];
        }

   as if it were manually vectorized by rewriting the source code into:

        typedef int __attribute__((mode(V8HI))) v8hi;
        short a[N]; short b[N]; short c[N]; int i;
        v8hi *pa = (v8hi*)a, *pb = (v8hi*)b, *pc = (v8hi*)c;
        v8hi va, vb, vc;

        for (i=0; i<N/8; i++){
          vb = pb[i];
          vc = pc[i];
          va = vb + vc;
          pa[i] = va;
        }

   The main entry to this pass is vectorize_loops(), in which
   the vectorizer applies a set of analyses on a given set of loops,
   followed by the actual vectorization transformation for the loops that
   had successfully passed the analysis phase.
   Throughout this pass we make a distinction between two types of
   data: scalars (which are represented by SSA_NAMES), and memory references
   ("data-refs").  These two types of data require different handling both
   during analysis and transformation.  The types of data-refs that the
   vectorizer currently supports are ARRAY_REFS whose base is an array DECL
   (not a pointer), and INDIRECT_REFS through pointers; both array and pointer
   accesses are required to have a simple (consecutive) access pattern.

   Analysis phase:
   ===============
   The driver for the analysis phase is vect_analyze_loop().
   It applies a set of analyses, some of which rely on the scalar evolution
   analyzer (scev) developed by Sebastian Pop.

   During the analysis phase the vectorizer records some information
   per stmt in a "stmt_vec_info" struct which is attached to each stmt in the
   loop, as well as general information about the loop as a whole, which is
   recorded in a "loop_vec_info" struct attached to each loop.

   Transformation phase:
   =====================
   The loop transformation phase scans all the stmts in the loop, and
   creates a vector stmt (or a sequence of stmts) for each scalar stmt S in
   the loop that needs to be vectorized.  It inserts the vector code sequence
   just before the scalar stmt S, and records a pointer to the vector code
   in STMT_VINFO_VEC_STMT (stmt_info) (stmt_info is the stmt_vec_info struct
   attached to S).  This pointer will be used for the vectorization of
   following stmts which use the def of stmt S.  Stmt S is removed if it
   writes to memory; otherwise, we rely on dead code elimination for
   removing it.

   For example, say stmt S1 was vectorized into stmt VS1:

   VS1: vb = px[i];
   S1:  b = x[i];       STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
   S2:  a = b;

   To vectorize stmt S2, the vectorizer first finds the stmt that defines
   the operand 'b' (S1), and gets the relevant vector def 'vb' from the
   vector stmt VS1 pointed to by STMT_VINFO_VEC_STMT (stmt_info (S1)).  The
   resulting sequence would be:

   VS1: vb = px[i];
   S1:  b = x[i];       STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
   VS2: va = vb;
   S2:  a = b;          STMT_VINFO_VEC_STMT (stmt_info (S2)) = VS2

   Operands that are not SSA_NAMEs, are data-refs that appear in
   load/store operations (like 'x[i]' in S1), and are handled differently.

   Target modeling:
   =================
   Currently the only target specific information that is used is the
   size of the vector (in bytes) - "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD".
   Targets that can support different sizes of vectors will, for now, need
   to specify a single value for "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD".
   More flexibility will be added in the future.

   Since we only vectorize operations whose vector form can be
   expressed using existing tree codes, to verify that an operation is
   supported, the vectorizer checks the relevant optab at the relevant
   machine_mode (e.g, optab_handler (add_optab, V8HImode)).  If
   the value found is CODE_FOR_nothing, then there's no target support, and
   we can't vectorize the stmt.

   For additional information on this project see:
   http://gcc.gnu.org/projects/tree-ssa/vectorization.html
*/
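
/* A minimal illustration (not part of the pass) of the optab check described
   above: support for adding two vectors of 8 shorts would be queried
   roughly as

       if (optab_handler (add_optab, V8HImode) == CODE_FOR_nothing)
         ... no target support, the stmt cannot be vectorized ...

   using the standard add_optab optab and the V8HImode machine mode.  */
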
/* Function vect_determine_vectorization_factor

   Determine the vectorization factor (VF).  VF is the number of data
   elements that are operated upon in parallel in a single iteration of the
   vectorized loop.  For example, when vectorizing a loop that operates on
   4-byte elements, on a target with a vector size (VS) of 16 bytes, the VF
   is set to 4, since 4 elements can fit in a single vector register.

   We currently support vectorization of loops in which all types operated
   upon are of the same size.  Therefore this function currently sets VF
   according to the size of the types operated upon, and fails if there are
   multiple sizes in the loop.

   VF is also the factor by which the loop iterations are strip-mined, e.g.:
   original loop:
        for (i=0; i<N; i++){
          a[i] = b[i] + c[i];
        }

   vectorized loop:
        for (i=0; i<N; i+=VF){
          a[i:VF] = b[i:VF] + c[i:VF];
        }
*/
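
/* A worked example of the max-nunits rule implemented below (illustrative):
   on a target with 16-byte vectors, a stmt operating on 4-byte ints gets
   vectype V4SI (nunits = 4), so VF starts at 4; if another stmt in the same
   loop operates on 2-byte shorts (vectype V8HI, nunits = 8), VF is raised
   to the maximum, 8.  */
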
static bool
vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  unsigned int vectorization_factor = 0;
  tree scalar_type;
  gimple phi;
  tree vectype;
  unsigned int nunits;
  stmt_vec_info stmt_info;
  int i;
  HOST_WIDE_INT dummy;
  gimple stmt, pattern_stmt = NULL;
  gimple_seq pattern_def_seq = NULL;
  gimple_stmt_iterator pattern_def_si = gsi_start (NULL);
  bool analyze_pattern_stmt = false;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_determine_vectorization_factor ===");

  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];

      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        {
          phi = gsi_stmt (si);
          stmt_info = vinfo_for_stmt (phi);
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "==> examining phi: ");
              print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
            }

          gcc_assert (stmt_info);

          if (STMT_VINFO_RELEVANT_P (stmt_info))
            {
              gcc_assert (!STMT_VINFO_VECTYPE (stmt_info));
              scalar_type = TREE_TYPE (PHI_RESULT (phi));

              if (vect_print_dump_info (REPORT_DETAILS))
                {
                  fprintf (vect_dump, "get vectype for scalar type: ");
                  print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
                }

              vectype = get_vectype_for_scalar_type (scalar_type);
              if (!vectype)
                {
                  if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                    {
                      fprintf (vect_dump,
                               "not vectorized: unsupported data-type ");
                      print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
                    }
                  return false;
                }
              STMT_VINFO_VECTYPE (stmt_info) = vectype;

              if (vect_print_dump_info (REPORT_DETAILS))
                {
                  fprintf (vect_dump, "vectype: ");
                  print_generic_expr (vect_dump, vectype, TDF_SLIM);
                }

              nunits = TYPE_VECTOR_SUBPARTS (vectype);
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "nunits = %d", nunits);

              if (!vectorization_factor
                  || (nunits > vectorization_factor))
                vectorization_factor = nunits;
            }
        }

      for (si = gsi_start_bb (bb); !gsi_end_p (si) || analyze_pattern_stmt;)
        {
          tree vf_vectype;

          if (analyze_pattern_stmt)
            stmt = pattern_stmt;
          else
            stmt = gsi_stmt (si);

          stmt_info = vinfo_for_stmt (stmt);

          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "==> examining statement: ");
              print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
            }

          gcc_assert (stmt_info);

          /* Skip stmts which do not need to be vectorized.  */
          if (!STMT_VINFO_RELEVANT_P (stmt_info)
              && !STMT_VINFO_LIVE_P (stmt_info))
            {
              if (STMT_VINFO_IN_PATTERN_P (stmt_info)
                  && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info))
                  && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
                      || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
                {
                  stmt = pattern_stmt;
                  stmt_info = vinfo_for_stmt (pattern_stmt);
                  if (vect_print_dump_info (REPORT_DETAILS))
                    {
                      fprintf (vect_dump, "==> examining pattern statement: ");
                      print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
                    }
                }
              else
                {
                  if (vect_print_dump_info (REPORT_DETAILS))
                    fprintf (vect_dump, "skip.");
                  gsi_next (&si);
                  continue;
                }
            }
          else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
                   && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info))
                   && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
                       || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
            analyze_pattern_stmt = true;

          /* If a pattern statement has def stmts, analyze them too.  */
          if (is_pattern_stmt_p (stmt_info))
            {
              if (pattern_def_seq == NULL)
                {
                  pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
                  pattern_def_si = gsi_start (pattern_def_seq);
                }
              else if (!gsi_end_p (pattern_def_si))
                gsi_next (&pattern_def_si);
              if (pattern_def_seq != NULL)
                {
                  gimple pattern_def_stmt = NULL;
                  stmt_vec_info pattern_def_stmt_info = NULL;

                  while (!gsi_end_p (pattern_def_si))
                    {
                      pattern_def_stmt = gsi_stmt (pattern_def_si);
                      pattern_def_stmt_info
                        = vinfo_for_stmt (pattern_def_stmt);
                      if (STMT_VINFO_RELEVANT_P (pattern_def_stmt_info)
                          || STMT_VINFO_LIVE_P (pattern_def_stmt_info))
                        break;
                      gsi_next (&pattern_def_si);
                    }

                  if (!gsi_end_p (pattern_def_si))
                    {
                      if (vect_print_dump_info (REPORT_DETAILS))
                        {
                          fprintf (vect_dump,
                                   "==> examining pattern def stmt: ");
                          print_gimple_stmt (vect_dump, pattern_def_stmt, 0,
                                             TDF_SLIM);
                        }

                      stmt = pattern_def_stmt;
                      stmt_info = pattern_def_stmt_info;
                    }
                  else
                    {
                      pattern_def_si = gsi_start (NULL);
                      analyze_pattern_stmt = false;
                    }
                }
              else
                analyze_pattern_stmt = false;
            }

          if (gimple_get_lhs (stmt) == NULL_TREE)
            {
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                {
                  fprintf (vect_dump, "not vectorized: irregular stmt.");
                  print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
                }
              return false;
            }

          if (VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))))
            {
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                {
                  fprintf (vect_dump, "not vectorized: vector stmt in loop:");
                  print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
                }
              return false;
            }

          if (STMT_VINFO_VECTYPE (stmt_info))
            {
              /* The only case when a vectype had been already set is for
                 stmts that contain a dataref, or for "pattern-stmts" (stmts
                 generated by the vectorizer to represent/replace a certain
                 idiom).  */
              gcc_assert (STMT_VINFO_DATA_REF (stmt_info)
                          || is_pattern_stmt_p (stmt_info)
                          || !gsi_end_p (pattern_def_si));
              vectype = STMT_VINFO_VECTYPE (stmt_info);
            }
          else
            {
              gcc_assert (!STMT_VINFO_DATA_REF (stmt_info));
              scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
              if (vect_print_dump_info (REPORT_DETAILS))
                {
                  fprintf (vect_dump, "get vectype for scalar type: ");
                  print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
                }
              vectype = get_vectype_for_scalar_type (scalar_type);
              if (!vectype)
                {
                  if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                    {
                      fprintf (vect_dump,
                               "not vectorized: unsupported data-type ");
                      print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
                    }
                  return false;
                }

              STMT_VINFO_VECTYPE (stmt_info) = vectype;
            }

          /* The vectorization factor is according to the smallest
             scalar type (or the largest vector size, but we only
             support one vector size per loop).  */
          scalar_type = vect_get_smallest_scalar_type (stmt, &dummy,
                                                       &dummy);
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "get vectype for scalar type: ");
              print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
            }
          vf_vectype = get_vectype_for_scalar_type (scalar_type);
          if (!vf_vectype)
            {
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                {
                  fprintf (vect_dump,
                           "not vectorized: unsupported data-type ");
                  print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
                }
              return false;
            }

          if ((GET_MODE_SIZE (TYPE_MODE (vectype))
               != GET_MODE_SIZE (TYPE_MODE (vf_vectype))))
            {
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                {
                  fprintf (vect_dump,
                           "not vectorized: different sized vector "
                           "types in statement, ");
                  print_generic_expr (vect_dump, vectype, TDF_SLIM);
                  fprintf (vect_dump, " and ");
                  print_generic_expr (vect_dump, vf_vectype, TDF_SLIM);
                }
              return false;
            }

          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "vectype: ");
              print_generic_expr (vect_dump, vf_vectype, TDF_SLIM);
            }

          nunits = TYPE_VECTOR_SUBPARTS (vf_vectype);
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "nunits = %d", nunits);

          if (!vectorization_factor
              || (nunits > vectorization_factor))
            vectorization_factor = nunits;

          if (!analyze_pattern_stmt && gsi_end_p (pattern_def_si))
            {
              pattern_def_seq = NULL;
              gsi_next (&si);
            }
        }
    }

  /* TODO: Analyze cost.  Decide if worthwhile to vectorize.  */
  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "vectorization factor = %d", vectorization_factor);
  if (vectorization_factor <= 1)
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: unsupported data-type");
      return false;
    }
  LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;

  return true;
}

/* Function vect_is_simple_iv_evolution.

   FORNOW: A simple evolution of an induction variable in the loop is
   considered a polynomial evolution with constant step.  */
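
/* For instance (illustrative): for the variable i in
   for (i = 0; i < n; i++), the access function computed by scev is the
   chrec {0, +, 1}_1, i.e. initial value 0 and step 1 in loop number 1.
   Its evolution part in loop 1 is the INTEGER_CST 1, so the evolution
   is "simple".  */
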
static bool
vect_is_simple_iv_evolution (unsigned loop_nb, tree access_fn, tree *init,
                             tree *step)
{
  tree init_expr;
  tree step_expr;
  tree evolution_part = evolution_part_in_loop_num (access_fn, loop_nb);

  /* When there is no evolution in this loop, the evolution function
     is not "simple".  */
  if (evolution_part == NULL_TREE)
    return false;

  /* When the evolution is a polynomial of degree >= 2
     the evolution function is not "simple".  */
  if (tree_is_chrec (evolution_part))
    return false;

  step_expr = evolution_part;
  init_expr = unshare_expr (initial_condition_in_loop_num (access_fn, loop_nb));

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "step: ");
      print_generic_expr (vect_dump, step_expr, TDF_SLIM);
      fprintf (vect_dump, ",  init: ");
      print_generic_expr (vect_dump, init_expr, TDF_SLIM);
    }

  *init = init_expr;
  *step = step_expr;

  if (TREE_CODE (step_expr) != INTEGER_CST)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "step unknown.");
      return false;
    }

  return true;
}

/* Function vect_analyze_scalar_cycles_1.

   Examine the cross iteration def-use cycles of scalar variables
   in LOOP.  LOOP_VINFO represents the loop that is now being
   considered for vectorization (can be LOOP, or an outer-loop
   enclosing LOOP).  */

static void
vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
{
  basic_block bb = loop->header;
  tree dummy;
  VEC(gimple,heap) *worklist = VEC_alloc (gimple, heap, 64);
  gimple_stmt_iterator gsi;
  bool double_reduc;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_analyze_scalar_cycles ===");

  /* First - identify all inductions.  Reduction detection assumes that all
     the inductions have been identified, therefore, this order must not be
     changed.  */
  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);
      tree access_fn = NULL;
      tree def = PHI_RESULT (phi);
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);

      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "Analyze phi: ");
          print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
        }

      /* Skip virtual phi's.  The data dependences that are associated with
         virtual defs/uses (i.e., memory accesses) are analyzed elsewhere.  */
      if (!is_gimple_reg (SSA_NAME_VAR (def)))
        continue;

      STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_unknown_def_type;

      /* Analyze the evolution function.  */
      access_fn = analyze_scalar_evolution (loop, def);
      if (access_fn)
        STRIP_NOPS (access_fn);
      if (access_fn && vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "Access function of PHI: ");
          print_generic_expr (vect_dump, access_fn, TDF_SLIM);
        }

      if (!access_fn
          || !vect_is_simple_iv_evolution (loop->num, access_fn,
                                           &dummy, &dummy))
        {
          VEC_safe_push (gimple, heap, worklist, phi);
          continue;
        }

      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "Detected induction.");
      STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_induction_def;
    }


  /* Second - identify all reductions and nested cycles.  */
  while (VEC_length (gimple, worklist) > 0)
    {
      gimple phi = VEC_pop (gimple, worklist);
      tree def = PHI_RESULT (phi);
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);
      gimple reduc_stmt;
      bool nested_cycle;

      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "Analyze phi: ");
          print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
        }

      gcc_assert (is_gimple_reg (SSA_NAME_VAR (def)));
      gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_unknown_def_type);

      nested_cycle = (loop != LOOP_VINFO_LOOP (loop_vinfo));
      reduc_stmt = vect_force_simple_reduction (loop_vinfo, phi, !nested_cycle,
                                                &double_reduc);
      if (reduc_stmt)
        {
          if (double_reduc)
            {
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "Detected double reduction.");

              STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_double_reduction_def;
              STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
                                                    vect_double_reduction_def;
            }
          else
            {
              if (nested_cycle)
                {
                  if (vect_print_dump_info (REPORT_DETAILS))
                    fprintf (vect_dump, "Detected vectorizable nested cycle.");

                  STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_nested_cycle;
                  STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
                                                             vect_nested_cycle;
                }
              else
                {
                  if (vect_print_dump_info (REPORT_DETAILS))
                    fprintf (vect_dump, "Detected reduction.");

                  STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_reduction_def;
                  STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
                                                           vect_reduction_def;
                  /* Store the reduction cycles for possible vectorization
                     in loop-aware SLP.  */
                  VEC_safe_push (gimple, heap,
                                 LOOP_VINFO_REDUCTIONS (loop_vinfo),
                                 reduc_stmt);
                }
            }
        }
      else
        if (vect_print_dump_info (REPORT_DETAILS))
          fprintf (vect_dump, "Unknown def-use cycle pattern.");
    }

  VEC_free (gimple, heap, worklist);
}

/* Function vect_analyze_scalar_cycles.

   Examine the cross iteration def-use cycles of scalar variables, by
   analyzing the loop-header PHIs of scalar variables.  Classify each
   cycle as one of the following: invariant, induction, reduction, unknown.
   We do that for the loop represented by LOOP_VINFO, and also for its
   inner-loop, if it exists.
   Examples for scalar cycles:

   Example1: reduction:

              loop1:
              for (i=0; i<N; i++)
                 sum += a[i];

   Example2: induction:

              loop2:
              for (i=0; i<N; i++)
                 a[i] = i;  */
static void
vect_analyze_scalar_cycles (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);

  vect_analyze_scalar_cycles_1 (loop_vinfo, loop);

  /* When vectorizing an outer-loop, the inner-loop is executed sequentially.
     Reductions in such inner-loop therefore have different properties than
     the reductions in the nest that gets vectorized:
     1. When vectorized, they are executed in the same order as in the
        original scalar loop, so we can't change the order of computation
        when vectorizing them.
     2. FIXME: Inner-loop reductions can be used in the inner-loop, so the
        current checks are too strict.  */

  if (loop->inner)
    vect_analyze_scalar_cycles_1 (loop_vinfo, loop->inner);
}

/* Function vect_get_loop_niters.

   Determine how many iterations the loop is executed.
   If an expression that represents the number of iterations
   can be constructed, place it in NUMBER_OF_ITERATIONS.
   Return the loop exit condition.  */
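
/* For instance (illustrative): for a loop for (i = 0; i < n; i++), the
   expression placed in NUMBER_OF_ITERATIONS is n (possibly a symbolic
   expression), and the stmt returned is the GIMPLE_COND that controls
   the loop exit.  */
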
static gimple
vect_get_loop_niters (struct loop *loop, tree *number_of_iterations)
{
  tree niters;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== get_loop_niters ===");

  niters = number_of_exit_cond_executions (loop);

  if (niters != NULL_TREE
      && niters != chrec_dont_know)
    {
      *number_of_iterations = niters;

      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "==> get_loop_niters:");
          print_generic_expr (vect_dump, *number_of_iterations, TDF_SLIM);
        }
    }

  return get_loop_exit_condition (loop);
}

/* Function bb_in_loop_p

   Used as predicate for dfs order traversal of the loop bbs.  */

static bool
bb_in_loop_p (const_basic_block bb, const void *data)
{
  const struct loop *const loop = (const struct loop *)data;
  if (flow_bb_inside_loop_p (loop, bb))
    return true;
  return false;
}

/* Function new_loop_vec_info.

   Create and initialize a new loop_vec_info struct for LOOP, as well as
   stmt_vec_info structs for all the stmts in LOOP.  */

static loop_vec_info
new_loop_vec_info (struct loop *loop)
{
  loop_vec_info res;
  basic_block *bbs;
  gimple_stmt_iterator si;
  unsigned int i, nbbs;

  res = (loop_vec_info) xcalloc (1, sizeof (struct _loop_vec_info));
  LOOP_VINFO_LOOP (res) = loop;

  bbs = get_loop_body (loop);

  /* Create/Update stmt_info for all stmts in the loop.  */
  for (i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = bbs[i];

      /* BBs in a nested inner-loop will have been already processed (because
         we will have called vect_analyze_loop_form for any nested inner-loop).
         Therefore, for stmts in an inner-loop we just want to update the
         STMT_VINFO_LOOP_VINFO field of their stmt_info to point to the new
         loop_info of the outer-loop we are currently considering to vectorize
         (instead of the loop_info of the inner-loop).
         For stmts in other BBs we need to create a stmt_info from scratch.  */
      if (bb->loop_father != loop)
        {
          /* Inner-loop bb.  */
          gcc_assert (loop->inner && bb->loop_father == loop->inner);
          for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple phi = gsi_stmt (si);
              stmt_vec_info stmt_info = vinfo_for_stmt (phi);
              loop_vec_info inner_loop_vinfo =
                STMT_VINFO_LOOP_VINFO (stmt_info);
              gcc_assert (loop->inner == LOOP_VINFO_LOOP (inner_loop_vinfo));
              STMT_VINFO_LOOP_VINFO (stmt_info) = res;
            }
          for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple stmt = gsi_stmt (si);
              stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
              loop_vec_info inner_loop_vinfo =
                STMT_VINFO_LOOP_VINFO (stmt_info);
              gcc_assert (loop->inner == LOOP_VINFO_LOOP (inner_loop_vinfo));
              STMT_VINFO_LOOP_VINFO (stmt_info) = res;
            }
        }
      else
        {
          /* bb in current nest.  */
          for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple phi = gsi_stmt (si);
              gimple_set_uid (phi, 0);
              set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, res, NULL));
            }

          for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple stmt = gsi_stmt (si);
              gimple_set_uid (stmt, 0);
              set_vinfo_for_stmt (stmt, new_stmt_vec_info (stmt, res, NULL));
            }
        }
    }

  /* CHECKME: We want to visit all BBs before their successors (except for
     latch blocks, for which this assertion wouldn't hold).  In the simple
     case of the loop forms we allow, a dfs order of the BBs would be the
     same as reversed postorder traversal, so we are safe.  */

  free (bbs);
  bbs = XCNEWVEC (basic_block, loop->num_nodes);
  nbbs = dfs_enumerate_from (loop->header, 0, bb_in_loop_p,
                             bbs, loop->num_nodes, loop);
  gcc_assert (nbbs == loop->num_nodes);

  LOOP_VINFO_BBS (res) = bbs;
  LOOP_VINFO_NITERS (res) = NULL;
  LOOP_VINFO_NITERS_UNCHANGED (res) = NULL;
  LOOP_VINFO_COST_MODEL_MIN_ITERS (res) = 0;
  LOOP_VINFO_VECTORIZABLE_P (res) = 0;
  LOOP_PEELING_FOR_ALIGNMENT (res) = 0;
  LOOP_VINFO_VECT_FACTOR (res) = 0;
  LOOP_VINFO_LOOP_NEST (res) = VEC_alloc (loop_p, heap, 3);
  LOOP_VINFO_DATAREFS (res) = VEC_alloc (data_reference_p, heap, 10);
  LOOP_VINFO_DDRS (res) = VEC_alloc (ddr_p, heap, 10 * 10);
  LOOP_VINFO_UNALIGNED_DR (res) = NULL;
  LOOP_VINFO_MAY_MISALIGN_STMTS (res) =
    VEC_alloc (gimple, heap,
               PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIGNMENT_CHECKS));
  LOOP_VINFO_MAY_ALIAS_DDRS (res) =
    VEC_alloc (ddr_p, heap,
               PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS));
  LOOP_VINFO_STRIDED_STORES (res) = VEC_alloc (gimple, heap, 10);
  LOOP_VINFO_REDUCTIONS (res) = VEC_alloc (gimple, heap, 10);
  LOOP_VINFO_REDUCTION_CHAINS (res) = VEC_alloc (gimple, heap, 10);
  LOOP_VINFO_SLP_INSTANCES (res) = VEC_alloc (slp_instance, heap, 10);
  LOOP_VINFO_SLP_UNROLLING_FACTOR (res) = 1;
  LOOP_VINFO_PEELING_HTAB (res) = NULL;
  LOOP_VINFO_PEELING_FOR_GAPS (res) = false;

  return res;
}

/* Function destroy_loop_vec_info.

   Free LOOP_VINFO struct, as well as all the stmt_vec_info structs of all
   the stmts in the loop.  */

void
destroy_loop_vec_info (loop_vec_info loop_vinfo, bool clean_stmts)
{
  struct loop *loop;
  basic_block *bbs;
  int nbbs;
  gimple_stmt_iterator si;
  int j;
  VEC (slp_instance, heap) *slp_instances;
  slp_instance instance;

  if (!loop_vinfo)
    return;

  loop = LOOP_VINFO_LOOP (loop_vinfo);

  bbs = LOOP_VINFO_BBS (loop_vinfo);
  nbbs = loop->num_nodes;

  if (!clean_stmts)
    {
      free (LOOP_VINFO_BBS (loop_vinfo));
      free_data_refs (LOOP_VINFO_DATAREFS (loop_vinfo));
      free_dependence_relations (LOOP_VINFO_DDRS (loop_vinfo));
      VEC_free (loop_p, heap, LOOP_VINFO_LOOP_NEST (loop_vinfo));
      VEC_free (gimple, heap, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo));
      VEC_free (ddr_p, heap, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo));

      free (loop_vinfo);
      loop->aux = NULL;
      return;
    }

  for (j = 0; j < nbbs; j++)
    {
      basic_block bb = bbs[j];
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        free_stmt_vec_info (gsi_stmt (si));

      for (si = gsi_start_bb (bb); !gsi_end_p (si); )
        {
          gimple stmt = gsi_stmt (si);
          /* Free stmt_vec_info.  */
          free_stmt_vec_info (stmt);
          gsi_next (&si);
        }
    }

  free (LOOP_VINFO_BBS (loop_vinfo));
  free_data_refs (LOOP_VINFO_DATAREFS (loop_vinfo));
  free_dependence_relations (LOOP_VINFO_DDRS (loop_vinfo));
  VEC_free (loop_p, heap, LOOP_VINFO_LOOP_NEST (loop_vinfo));
  VEC_free (gimple, heap, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo));
  VEC_free (ddr_p, heap, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo));
  slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
  FOR_EACH_VEC_ELT (slp_instance, slp_instances, j, instance)
    vect_free_slp_instance (instance);

  VEC_free (slp_instance, heap, LOOP_VINFO_SLP_INSTANCES (loop_vinfo));
  VEC_free (gimple, heap, LOOP_VINFO_STRIDED_STORES (loop_vinfo));
  VEC_free (gimple, heap, LOOP_VINFO_REDUCTIONS (loop_vinfo));
  VEC_free (gimple, heap, LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo));

  if (LOOP_VINFO_PEELING_HTAB (loop_vinfo))
    htab_delete (LOOP_VINFO_PEELING_HTAB (loop_vinfo));

  free (loop_vinfo);
  loop->aux = NULL;
}

/* Function vect_analyze_loop_1.

   Apply a set of analyses on LOOP, and create a loop_vec_info struct
   for it.  The different analyses will record information in the
   loop_vec_info struct.  This is a subset of the analyses applied in
   vect_analyze_loop, to be applied on an inner-loop nested in the loop
   that is now considered for (outer-loop) vectorization.  */

static loop_vec_info
vect_analyze_loop_1 (struct loop *loop)
{
  loop_vec_info loop_vinfo;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "===== analyze_loop_nest_1 =====");

  /* Check the CFG characteristics of the loop (nesting, entry/exit, etc.).  */

  loop_vinfo = vect_analyze_loop_form (loop);
  if (!loop_vinfo)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad inner-loop form.");
      return NULL;
    }

  return loop_vinfo;
}

/* Function vect_analyze_loop_form.

   Verify that certain CFG restrictions hold, including:
   - the loop has a pre-header
   - the loop has a single entry and exit
   - the loop exit condition is simple enough, and the number of iterations
     can be analyzed (a countable loop).  */

loop_vec_info
vect_analyze_loop_form (struct loop *loop)
{
  loop_vec_info loop_vinfo;
  gimple loop_cond;
  tree number_of_iterations = NULL;
  loop_vec_info inner_loop_vinfo = NULL;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_analyze_loop_form ===");

  /* Different restrictions apply when we are considering an inner-most loop,
     vs. an outer (nested) loop.
     (FORNOW. May want to relax some of these restrictions in the future).  */

  if (!loop->inner)
    {
      /* Inner-most loop.  We currently require that the number of BBs is
         exactly 2 (the header and latch).  Vectorizable inner-most loops
         look like this:

                        (pre-header)
                           |
                          header <--------+
                           | |            |
                           | +--> latch --+
                           |
                        (exit-bb)  */

      if (loop->num_nodes != 2)
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: control flow in loop.");
          return NULL;
        }

      if (empty_block_p (loop->header))
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: empty loop.");
          return NULL;
        }
    }
  else
    {
      struct loop *innerloop = loop->inner;
      edge entryedge;

      /* Nested loop.  We currently require that the loop is doubly-nested,
         contains a single inner loop, and the number of BBs is exactly 5.
         Vectorizable outer-loops look like this:

                        (pre-header)
                           |
                          header <---+
                           |         |
                          inner-loop |
                           |         |
                          tail ------+
                           |
                        (exit-bb)

         The inner-loop has the properties expected of inner-most loops
         as described above.  */
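
      /* An illustrative source-level example (not from the sources) of a
         loop nest with this shape:

             for (i = 0; i < N; i++)        <-- outer-loop (header ... tail)
               {
                 s = 0;
                 for (j = 0; j < M; j++)    <-- inner-loop (header + latch)
                   s += a[i][j];
                 b[i] = s;                  <-- outer-loop tail
               }  */
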
      if ((loop->inner)->inner || (loop->inner)->next)
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: multiple nested loops.");
          return NULL;
        }

      /* Analyze the inner-loop.  */
      inner_loop_vinfo = vect_analyze_loop_1 (loop->inner);
      if (!inner_loop_vinfo)
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: Bad inner loop.");
          return NULL;
        }

      if (!expr_invariant_in_loop_p (loop,
                                     LOOP_VINFO_NITERS (inner_loop_vinfo)))
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump,
                     "not vectorized: inner-loop count not invariant.");
          destroy_loop_vec_info (inner_loop_vinfo, true);
          return NULL;
        }

      if (loop->num_nodes != 5)
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: control flow in loop.");
          destroy_loop_vec_info (inner_loop_vinfo, true);
          return NULL;
        }

      gcc_assert (EDGE_COUNT (innerloop->header->preds) == 2);
      entryedge = EDGE_PRED (innerloop->header, 0);
      if (EDGE_PRED (innerloop->header, 0)->src == innerloop->latch)
        entryedge = EDGE_PRED (innerloop->header, 1);

      if (entryedge->src != loop->header
          || !single_exit (innerloop)
          || single_exit (innerloop)->dest != EDGE_PRED (loop->latch, 0)->src)
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: unsupported outerloop form.");
          destroy_loop_vec_info (inner_loop_vinfo, true);
          return NULL;
        }

      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "Considering outer-loop vectorization.");
    }

  if (!single_exit (loop)
      || EDGE_COUNT (loop->header->preds) != 2)
    {
      if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
        {
          if (!single_exit (loop))
            fprintf (vect_dump, "not vectorized: multiple exits.");
          else if (EDGE_COUNT (loop->header->preds) != 2)
            fprintf (vect_dump, "not vectorized: too many incoming edges.");
        }
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  /* We assume that the loop exit condition is at the end of the loop, i.e.,
     that the loop is represented as a do-while (with a proper if-guard
     before the loop if needed), where the loop header contains all the
     executable statements, and the latch is empty.  */
  if (!empty_block_p (loop->latch)
      || !gimple_seq_empty_p (phi_nodes (loop->latch)))
    {
      if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
        fprintf (vect_dump, "not vectorized: unexpected loop form.");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  /* Make sure there exists a single-predecessor exit bb:  */
  if (!single_pred_p (single_exit (loop)->dest))
    {
      edge e = single_exit (loop);
      if (!(e->flags & EDGE_ABNORMAL))
        {
          split_loop_exit_edge (e);
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "split exit edge.");
        }
      else
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: abnormal loop exit edge.");
          if (inner_loop_vinfo)
            destroy_loop_vec_info (inner_loop_vinfo, true);
          return NULL;
        }
    }

  loop_cond = vect_get_loop_niters (loop, &number_of_iterations);
  if (!loop_cond)
    {
      if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
        fprintf (vect_dump, "not vectorized: complicated exit condition.");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  if (!number_of_iterations)
    {
      if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
        fprintf (vect_dump,
                 "not vectorized: number of iterations cannot be computed.");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  if (chrec_contains_undetermined (number_of_iterations))
    {
      if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
        fprintf (vect_dump, "Infinite number of iterations.");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  if (!NITERS_KNOWN_P (number_of_iterations))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "Symbolic number of iterations is ");
          print_generic_expr (vect_dump, number_of_iterations, TDF_DETAILS);
        }
    }
  else if (TREE_INT_CST_LOW (number_of_iterations) == 0)
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: number of iterations = 0.");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, false);
      return NULL;
    }

  loop_vinfo = new_loop_vec_info (loop);
  LOOP_VINFO_NITERS (loop_vinfo) = number_of_iterations;
  LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = number_of_iterations;

  STMT_VINFO_TYPE (vinfo_for_stmt (loop_cond)) = loop_exit_ctrl_vec_info_type;

  /* CHECKME: May want to keep it around in the future.  */
  if (inner_loop_vinfo)
    destroy_loop_vec_info (inner_loop_vinfo, false);

  gcc_assert (!loop->aux);
  loop->aux = loop_vinfo;
  return loop_vinfo;
}

/* Get cost by calling cost target builtin.  */

static inline int
vect_get_cost (enum vect_cost_for_stmt type_of_cost)
{
  tree dummy_type = NULL;
  int dummy = 0;

  return targetm.vectorize.builtin_vectorization_cost (type_of_cost,
                                                       dummy_type, dummy);
}

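
/* For example (illustrative): vect_get_cost (scalar_stmt) returns the
   target's estimate for the cost of one scalar statement; targets that do
   not override targetm.vectorize.builtin_vectorization_cost fall back to
   default_builtin_vectorization_cost from targhooks.c.  */
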
/* Function vect_analyze_loop_operations.

   Scan the loop stmts and make sure they are all vectorizable.  */

static bool
vect_analyze_loop_operations (loop_vec_info loop_vinfo, bool slp)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  unsigned int vectorization_factor = 0;
  int i;
  gimple phi;
  stmt_vec_info stmt_info;
  bool need_to_vectorize = false;
  int min_profitable_iters;
  int min_scalar_loop_bound;
  unsigned int th;
  bool only_slp_in_loop = true, ok;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_analyze_loop_operations ===");

  gcc_assert (LOOP_VINFO_VECT_FACTOR (loop_vinfo));
  vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  if (slp)
    {
      /* If all the stmts in the loop can be SLPed, we perform only SLP, and
         the vectorization factor of the loop is the unrolling factor
         required by the SLP instances.  If that unrolling factor is 1, we
         say that we perform pure SLP on the loop - cross iteration
         parallelism is not exploited.  */
      for (i = 0; i < nbbs; i++)
        {
          basic_block bb = bbs[i];
          for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple stmt = gsi_stmt (si);
              stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
              gcc_assert (stmt_info);
              if ((STMT_VINFO_RELEVANT_P (stmt_info)
                   || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
                  && !PURE_SLP_STMT (stmt_info))
                /* STMT needs both SLP and loop-based vectorization.  */
                only_slp_in_loop = false;
            }
        }

      if (only_slp_in_loop)
        vectorization_factor = LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo);
      else
        vectorization_factor = least_common_multiple (vectorization_factor,
                                LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo));

      LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "Updating vectorization factor to %d ",
                 vectorization_factor);
    }

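  /* A numeric illustration (not from the sources): if the loop VF computed
     by vect_determine_vectorization_factor is 4 and an SLP instance requires
     an unrolling factor of 8, the combined factor computed above is
     LCM (4, 8) = 8.  */
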
  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];

      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        {
          phi = gsi_stmt (si);
          ok = true;

          stmt_info = vinfo_for_stmt (phi);
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "examining phi: ");
              print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
            }

          /* Inner-loop loop-closed exit phi in outer-loop vectorization
             (i.e., a phi in the tail of the outer-loop).  */
          if (! is_loop_header_bb_p (bb))
            {
              /* FORNOW: we currently don't support the case that these phis
                 are not used in the outerloop (unless it is double reduction,
                 i.e., this phi is vect_reduction_def), because this case
                 requires us to actually do something here.  */
              if ((!STMT_VINFO_RELEVANT_P (stmt_info)
                   || STMT_VINFO_LIVE_P (stmt_info))
                  && STMT_VINFO_DEF_TYPE (stmt_info)
                     != vect_double_reduction_def)
                {
                  if (vect_print_dump_info (REPORT_DETAILS))
                    fprintf (vect_dump,
                             "Unsupported loop-closed phi in outer-loop.");
                  return false;
                }

              /* If PHI is used in the outer loop, we check that its operand
                 is defined in the inner loop.  */
              if (STMT_VINFO_RELEVANT_P (stmt_info))
                {
                  tree phi_op;
                  gimple op_def_stmt;

                  if (gimple_phi_num_args (phi) != 1)
                    return false;

                  phi_op = PHI_ARG_DEF (phi, 0);
                  if (TREE_CODE (phi_op) != SSA_NAME)
                    return false;

                  op_def_stmt = SSA_NAME_DEF_STMT (phi_op);
                  if (!op_def_stmt || !vinfo_for_stmt (op_def_stmt))
                    return false;

                  if (STMT_VINFO_RELEVANT (vinfo_for_stmt (op_def_stmt))
                        != vect_used_in_outer
                      && STMT_VINFO_RELEVANT (vinfo_for_stmt (op_def_stmt))
                           != vect_used_in_outer_by_reduction)
                    return false;
                }

              continue;
            }

          gcc_assert (stmt_info);

          if (STMT_VINFO_LIVE_P (stmt_info))
            {
              /* FORNOW: not yet supported.  */
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                fprintf (vect_dump, "not vectorized: value used after loop.");
              return false;
            }

          if (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope
              && STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def)
            {
              /* A scalar-dependence cycle that we don't support.  */
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                fprintf (vect_dump,
                         "not vectorized: scalar dependence cycle.");
              return false;
            }

          if (STMT_VINFO_RELEVANT_P (stmt_info))
            {
              need_to_vectorize = true;
              if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def)
                ok = vectorizable_induction (phi, NULL, NULL);
            }

          if (!ok)
            {
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                {
                  fprintf (vect_dump,
                           "not vectorized: relevant phi not supported: ");
                  print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
                }
              return false;
            }
        }

      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
        {
          gimple stmt = gsi_stmt (si);
          if (!vect_analyze_stmt (stmt, &need_to_vectorize, NULL))
            return false;
        }
    } /* bbs */

  /* All operations in the loop are either irrelevant (deal with loop
     control, or dead), or only used outside the loop and can be moved
     out of the loop (e.g. invariants, inductions).  The loop can be
     optimized away by scalar optimizations.  We're better off not
     touching this loop.  */
  if (!need_to_vectorize)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump,
                 "All the computation can be taken out of the loop.");
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump,
                 "not vectorized: redundant loop. no profit to vectorize.");
      return false;
    }

  if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      && vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump,
             "vectorization_factor = %d, niters = " HOST_WIDE_INT_PRINT_DEC,
             vectorization_factor, LOOP_VINFO_INT_NITERS (loop_vinfo));

  if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      && (LOOP_VINFO_INT_NITERS (loop_vinfo) < vectorization_factor))
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: iteration count too small.");
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "not vectorized: iteration count smaller than "
                 "vectorization factor.");
      return false;
    }

  /* Analyze cost.  Decide if worthwhile to vectorize.  */

  /* Once VF is set, SLP costs should be updated since the number of created
     vector stmts depends on VF.  */
  vect_update_slp_costs_according_to_vf (loop_vinfo);

  min_profitable_iters = vect_estimate_min_profitable_iters (loop_vinfo);
  LOOP_VINFO_COST_MODEL_MIN_ITERS (loop_vinfo) = min_profitable_iters;

  if (min_profitable_iters < 0)
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: vectorization not profitable.");
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "not vectorized: vector version will never be "
                 "profitable.");
      return false;
    }

  min_scalar_loop_bound = ((PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND)
                            * vectorization_factor) - 1);

  /* Use the cost model only if it is more conservative than the user
     specified threshold.  */

  th = (unsigned) min_scalar_loop_bound;
  if (min_profitable_iters
      && (!min_scalar_loop_bound
          || min_profitable_iters > min_scalar_loop_bound))
    th = (unsigned) min_profitable_iters;

  if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      && LOOP_VINFO_INT_NITERS (loop_vinfo) <= th)
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: vectorization not "
                 "profitable.");
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "not vectorized: iteration count smaller than "
                 "user specified loop bound parameter or minimum "
                 "profitable iterations (whichever is more conservative).");
      return false;
    }

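  /* A numeric illustration (not from the sources): with
     --param min-vect-loop-bound=4 and VF = 4, min_scalar_loop_bound is
     4 * 4 - 1 = 15; if the cost model computed min_profitable_iters = 20,
     then TH = 20, and a loop known to run at most 20 iterations is
     rejected by the check above.  */
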
  if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      || LOOP_VINFO_INT_NITERS (loop_vinfo) % vectorization_factor != 0
      || LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "epilog loop required.");
      if (!vect_can_advance_ivs_p (loop_vinfo))
        {
          if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
            fprintf (vect_dump,
                     "not vectorized: can't create epilog loop 1.");
          return false;
        }
      if (!slpeel_can_duplicate_loop_p (loop, single_exit (loop)))
        {
          if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
            fprintf (vect_dump,
                     "not vectorized: can't create epilog loop 2.");
          return false;
        }
    }

  return true;
}

/* Function vect_analyze_loop_2.

   Apply a set of analyses on LOOP, and create a loop_vec_info struct
   for it.  The different analyses will record information in the
   loop_vec_info struct.  */
static bool
vect_analyze_loop_2 (loop_vec_info loop_vinfo)
{
  bool ok, slp = false;
  int max_vf = MAX_VECTORIZATION_FACTOR;
  int min_vf = 2;

  /* Find all data references in the loop (which correspond to vdefs/vuses)
     and analyze their evolution in the loop.  Also adjust the minimal
     vectorization factor according to the loads and stores.

     FORNOW: Handle only simple array references whose
     alignment can be forced, and aligned pointer-references.  */

  ok = vect_analyze_data_refs (loop_vinfo, NULL, &min_vf);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data references.");
      return false;
    }

  /* Classify all cross-iteration scalar data-flow cycles.
     Cross-iteration cycles caused by virtual phis are analyzed
     separately.  */

  vect_analyze_scalar_cycles (loop_vinfo);

  vect_pattern_recog (loop_vinfo);

  /* Data-flow analysis to detect stmts that do not need to be
     vectorized.  */

  ok = vect_mark_stmts_to_be_vectorized (loop_vinfo);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "unexpected pattern.");
      return false;
    }

  /* Analyze data dependences between the data-refs in the loop
     and adjust the maximum vectorization factor according to
     the dependences.
     FORNOW: fail at the first data dependence that we encounter.  */

  ok = vect_analyze_data_ref_dependences (loop_vinfo, NULL, &max_vf);
  if (!ok
      || max_vf < min_vf)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data dependence.");
      return false;
    }

  ok = vect_determine_vectorization_factor (loop_vinfo);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "can't determine vectorization factor.");
      return false;
    }
  if (max_vf < LOOP_VINFO_VECT_FACTOR (loop_vinfo))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data dependence.");
      return false;
    }

  /* Analyze the alignment of the data-refs in the loop.
     Fail if a data reference is found that cannot be vectorized.  */

  ok = vect_analyze_data_refs_alignment (loop_vinfo, NULL);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data alignment.");
      return false;
    }

  /* Analyze the access patterns of the data-refs in the loop (consecutive,
     complex, etc.).  FORNOW: Only handle consecutive access pattern.  */

  ok = vect_analyze_data_ref_accesses (loop_vinfo, NULL);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data access.");
      return false;
    }

  /* Prune the list of ddrs to be tested at run-time by versioning for alias.
     It is important to call pruning after vect_analyze_data_ref_accesses,
     since we use grouping information gathered by interleaving analysis.  */
  ok = vect_prune_runtime_alias_test_list (loop_vinfo);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "too long list of versioning for alias "
                 "run-time tests.");
      return false;
    }

  /* This pass will decide on using loop versioning and/or loop peeling in
     order to enhance the alignment of data references in the loop.  */

  ok = vect_enhance_data_refs_alignment (loop_vinfo);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data alignment.");
      return false;
    }

  /* Check the SLP opportunities in the loop, analyze and build SLP trees.  */
  ok = vect_analyze_slp (loop_vinfo, NULL);
  if (ok)
    {
      /* Decide which possible SLP instances to SLP.  */
      slp = vect_make_slp_decision (loop_vinfo);

      /* Find stmts that need to be both vectorized and SLPed.  */
      vect_detect_hybrid_slp (loop_vinfo);
    }
  else
    return false;

  /* Scan all the operations in the loop and make sure they are
     vectorizable.  */

  ok = vect_analyze_loop_operations (loop_vinfo, slp);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad operation or unsupported loop bound.");
      return false;
    }

  return true;
}

/* Function vect_analyze_loop.

   Apply a set of analyses on LOOP, and create a loop_vec_info struct
   for it.  The different analyses will record information in the
   loop_vec_info struct.  */
loop_vec_info
vect_analyze_loop (struct loop *loop)
{
  loop_vec_info loop_vinfo;
  unsigned int vector_sizes;

  /* Autodetect first vector size we try.  */
  current_vector_size = 0;
  vector_sizes = targetm.vectorize.autovectorize_vector_sizes ();

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "===== analyze_loop_nest =====");

  if (loop_outer (loop)
      && loop_vec_info_for_loop (loop_outer (loop))
      && LOOP_VINFO_VECTORIZABLE_P (loop_vec_info_for_loop (loop_outer (loop))))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "outer-loop already vectorized.");
      return NULL;
    }

  while (1)
    {
      /* Check the CFG characteristics of the loop (nesting, entry/exit).  */
      loop_vinfo = vect_analyze_loop_form (loop);
      if (!loop_vinfo)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "bad loop form.");
          return NULL;
        }

      if (vect_analyze_loop_2 (loop_vinfo))
        {
          LOOP_VINFO_VECTORIZABLE_P (loop_vinfo) = 1;

          return loop_vinfo;
        }

      destroy_loop_vec_info (loop_vinfo, true);

      vector_sizes &= ~current_vector_size;
      if (vector_sizes == 0
          || current_vector_size == 0)
        return NULL;

      /* Try the next biggest vector size.  */
      current_vector_size = 1 << floor_log2 (vector_sizes);
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "***** Re-trying analysis with "
                 "vector size %d\n", current_vector_size);
    }
}

/* Function reduction_code_for_scalar_code

   Input:
   CODE - tree_code of a reduction operation.

   Output:
   REDUC_CODE - the corresponding tree-code to be used to reduce the
      vector of partial results into a single scalar result (which
      will also reside in a vector) or ERROR_MARK if the operation is
      a supported reduction operation, but does not have such a tree-code.

   Return FALSE if CODE currently cannot be vectorized as a reduction.  */

static bool
reduction_code_for_scalar_code (enum tree_code code,
                                enum tree_code *reduc_code)
{
  switch (code)
    {
      case MAX_EXPR:
        *reduc_code = REDUC_MAX_EXPR;
        return true;

      case MIN_EXPR:
        *reduc_code = REDUC_MIN_EXPR;
        return true;

      case PLUS_EXPR:
        *reduc_code = REDUC_PLUS_EXPR;
        return true;

      case MULT_EXPR:
      case MINUS_EXPR:
      case BIT_IOR_EXPR:
      case BIT_XOR_EXPR:
      case BIT_AND_EXPR:
        *reduc_code = ERROR_MARK;
        return true;

      default:
        return false;
    }
}

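/* For example (illustrative): for a sum reduction (CODE == PLUS_EXPR) the
   function above returns true with *REDUC_CODE = REDUC_PLUS_EXPR, so the
   final vector of partial sums can be reduced with a single reduction
   operation; for MINUS_EXPR it returns true with ERROR_MARK, and the
   reduction epilogue must instead combine the vector elements with a
   sequence of element extracts or shifts and adds.  */
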
/* Error reporting helper for vect_is_simple_reduction below.  GIMPLE
   statement STMT is printed with a message MSG.  */

static void
report_vect_op (gimple stmt, const char *msg)
{
  fprintf (vect_dump, "%s", msg);
  print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
}

/* Detect SLP reduction of the form:

   #a1 = phi <a5, a0>
   a2 = operation (a1)
   a3 = operation (a2)
   a4 = operation (a3)
   a5 = operation (a4)

   #a = phi <a5>

   PHI is the reduction phi node (#a1 = phi <a5, a0> above)
   FIRST_STMT is the first reduction stmt in the chain
   (a2 = operation (a1)).

   Return TRUE if a reduction chain was detected.  */

static bool
vect_is_slp_reduction (loop_vec_info loop_info, gimple phi, gimple first_stmt)
{
  struct loop *loop = (gimple_bb (phi))->loop_father;
  struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
  enum tree_code code;
  gimple current_stmt = NULL, loop_use_stmt = NULL, first, next_stmt;
  stmt_vec_info use_stmt_info, current_stmt_info;
  tree lhs;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  int nloop_uses, size = 0, n_out_of_loop_uses;
  bool found = false;

  if (loop != vect_loop)
    return false;

  lhs = PHI_RESULT (phi);
  code = gimple_assign_rhs_code (first_stmt);
  while (1)
    {
      nloop_uses = 0;
      n_out_of_loop_uses = 0;
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
        {
          gimple use_stmt = USE_STMT (use_p);
          if (is_gimple_debug (use_stmt))
            continue;

          /* Check if we got back to the reduction phi.  */
          if (use_stmt == phi)
            {
              loop_use_stmt = use_stmt;
              found = true;
              break;
            }

          if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
            {
              if (vinfo_for_stmt (use_stmt)
                  && !STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (use_stmt)))
                {
                  loop_use_stmt = use_stmt;
                  nloop_uses++;
                }
            }
          else
            n_out_of_loop_uses++;

          /* There can be either a single use in the loop or two uses in
             phi nodes.  */
          if (nloop_uses > 1 || (n_out_of_loop_uses && nloop_uses))
            return false;
        }

      if (found)
        break;

      /* We reached a statement with no loop uses.  */
      if (nloop_uses == 0)
        return false;

      /* This is a loop exit phi, and we haven't reached the reduction
         phi.  */
      if (gimple_code (loop_use_stmt) == GIMPLE_PHI)
        return false;

      if (!is_gimple_assign (loop_use_stmt)
          || code != gimple_assign_rhs_code (loop_use_stmt)
          || !flow_bb_inside_loop_p (loop, gimple_bb (loop_use_stmt)))
        return false;

      /* Insert USE_STMT into reduction chain.  */
      use_stmt_info = vinfo_for_stmt (loop_use_stmt);
      if (current_stmt)
        {
          current_stmt_info = vinfo_for_stmt (current_stmt);
          GROUP_NEXT_ELEMENT (current_stmt_info) = loop_use_stmt;
          GROUP_FIRST_ELEMENT (use_stmt_info)
            = GROUP_FIRST_ELEMENT (current_stmt_info);
        }
      else
        GROUP_FIRST_ELEMENT (use_stmt_info) = loop_use_stmt;

      lhs = gimple_assign_lhs (loop_use_stmt);
      current_stmt = loop_use_stmt;
      size++;
    }

  if (!found || loop_use_stmt != phi || size < 2)
    return false;

  /* Swap the operands, if needed, to make the reduction operand be the
     second operand.  */
  lhs = PHI_RESULT (phi);
  next_stmt = GROUP_FIRST_ELEMENT (vinfo_for_stmt (current_stmt));
  while (next_stmt)
    {
      if (gimple_assign_rhs2 (next_stmt) == lhs)
        {
          tree op = gimple_assign_rhs1 (next_stmt);
          gimple def_stmt = NULL;

          if (TREE_CODE (op) == SSA_NAME)
            def_stmt = SSA_NAME_DEF_STMT (op);

          /* Check that the other def is either defined in the loop
             ("vect_internal_def"), or it's an induction (defined by a
             loop-header phi-node).  */
          if (def_stmt
              && gimple_bb (def_stmt)
              && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
              && (is_gimple_assign (def_stmt)
                  || is_gimple_call (def_stmt)
                  || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
                       == vect_induction_def
                  || (gimple_code (def_stmt) == GIMPLE_PHI
                      && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
                           == vect_internal_def
                      && !is_loop_header_bb_p (gimple_bb (def_stmt)))))
            {
              lhs = gimple_assign_lhs (next_stmt);
              next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
              continue;
            }

          return false;
        }
      else
        {
          tree op = gimple_assign_rhs2 (next_stmt);
          gimple def_stmt = NULL;

          if (TREE_CODE (op) == SSA_NAME)
            def_stmt = SSA_NAME_DEF_STMT (op);

          /* Check that the other def is either defined in the loop
             ("vect_internal_def"), or it's an induction (defined by a
             loop-header phi-node).  */
          if (def_stmt
              && gimple_bb (def_stmt)
              && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
              && (is_gimple_assign (def_stmt)
                  || is_gimple_call (def_stmt)
                  || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
                       == vect_induction_def
                  || (gimple_code (def_stmt) == GIMPLE_PHI
                      && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
                           == vect_internal_def
                      && !is_loop_header_bb_p (gimple_bb (def_stmt)))))
            {
              if (vect_print_dump_info (REPORT_DETAILS))
                {
                  fprintf (vect_dump, "swapping oprnds: ");
                  print_gimple_stmt (vect_dump, next_stmt, 0, TDF_SLIM);
                }

              swap_tree_operands (next_stmt,
                                  gimple_assign_rhs1_ptr (next_stmt),
                                  gimple_assign_rhs2_ptr (next_stmt));
              mark_symbols_for_renaming (next_stmt);
            }
          else
            return false;
        }

      lhs = gimple_assign_lhs (next_stmt);
      next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
    }

  /* Save the chain for further analysis in SLP detection.  */
  first = GROUP_FIRST_ELEMENT (vinfo_for_stmt (current_stmt));
  VEC_safe_push (gimple, heap, LOOP_VINFO_REDUCTION_CHAINS (loop_info), first);
  GROUP_SIZE (vinfo_for_stmt (first)) = size;

  return true;
}

1942 /* Function vect_is_simple_reduction_1
1944 (1) Detect a cross-iteration def-use cycle that represents a simple
1945 reduction computation. We look for the following pattern:
1947 loop_header:
1948 a1 = phi < a0, a2 >
1949 a3 = ...
1950 a2 = operation (a3, a1)
1952 such that:
1953 1. operation is commutative and associative and it is safe to
1954 change the order of the computation (if CHECK_REDUCTION is true)
1955 2. no uses for a2 in the loop (a2 is used out of the loop)
1956 3. no uses of a1 in the loop besides the reduction operation
1957 4. no uses of a1 outside the loop.
1959 Conditions 1,4 are tested here.
1960 Conditions 2,3 are tested in vect_mark_stmts_to_be_vectorized.
1962 (2) Detect a cross-iteration def-use cycle in nested loops, i.e.,
1963 nested cycles, if CHECK_REDUCTION is false.
1965 (3) Detect cycles of phi nodes in outer-loop vectorization, i.e., double
1966 reductions:
1968 a1 = phi < a0, a2 >
1969 inner loop (def of a3)
1970 a2 = phi < a3 >
1972 If MODIFY is true it also tries to rework the code in-place to enable
1973 detection of more reduction patterns. For the time being we rewrite
1974 "res -= RHS" into "res += -RHS" when it seems worthwhile. */
1977 static gimple
1978 vect_is_simple_reduction_1 (loop_vec_info loop_info, gimple phi,
1979 bool check_reduction, bool *double_reduc,
1980 bool modify)
1982 struct loop *loop = (gimple_bb (phi))->loop_father;
1983 struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
1984 edge latch_e = loop_latch_edge (loop);
1985 tree loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
1986 gimple def_stmt, def1 = NULL, def2 = NULL;
1987 enum tree_code orig_code, code;
1988 tree op1, op2, op3 = NULL_TREE, op4 = NULL_TREE;
1989 tree type;
1990 int nloop_uses;
1991 tree name;
1992 imm_use_iterator imm_iter;
1993 use_operand_p use_p;
1994 bool phi_def;
1996 *double_reduc = false;
1998 /* If CHECK_REDUCTION is true, we assume inner-most loop vectorization,
1999 otherwise, we assume outer loop vectorization. */
2000 gcc_assert ((check_reduction && loop == vect_loop)
2001 || (!check_reduction && flow_loop_nested_p (vect_loop, loop)));
2003 name = PHI_RESULT (phi);
2004 nloop_uses = 0;
2005 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
2007 gimple use_stmt = USE_STMT (use_p);
2008 if (is_gimple_debug (use_stmt))
2009 continue;
2011 if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
2013 if (vect_print_dump_info (REPORT_DETAILS))
2014 fprintf (vect_dump, "intermediate value used outside loop.");
2016 return NULL;
2019 if (vinfo_for_stmt (use_stmt)
2020 && !is_pattern_stmt_p (vinfo_for_stmt (use_stmt)))
2021 nloop_uses++;
2022 if (nloop_uses > 1)
2024 if (vect_print_dump_info (REPORT_DETAILS))
2025 fprintf (vect_dump, "reduction used in loop.");
2026 return NULL;
2030 if (TREE_CODE (loop_arg) != SSA_NAME)
2032 if (vect_print_dump_info (REPORT_DETAILS))
2034 fprintf (vect_dump, "reduction: not ssa_name: ");
2035 print_generic_expr (vect_dump, loop_arg, TDF_SLIM);
2037 return NULL;
2040 def_stmt = SSA_NAME_DEF_STMT (loop_arg);
2041 if (!def_stmt)
2043 if (vect_print_dump_info (REPORT_DETAILS))
2044 fprintf (vect_dump, "reduction: no def_stmt.");
2045 return NULL;
2048 if (!is_gimple_assign (def_stmt) && gimple_code (def_stmt) != GIMPLE_PHI)
2050 if (vect_print_dump_info (REPORT_DETAILS))
2051 print_gimple_stmt (vect_dump, def_stmt, 0, TDF_SLIM);
2052 return NULL;
2055 if (is_gimple_assign (def_stmt))
2057 name = gimple_assign_lhs (def_stmt);
2058 phi_def = false;
2060 else
2062 name = PHI_RESULT (def_stmt);
2063 phi_def = true;
2066 nloop_uses = 0;
2067 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
2069 gimple use_stmt = USE_STMT (use_p);
2070 if (is_gimple_debug (use_stmt))
2071 continue;
2072 if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))
2073 && vinfo_for_stmt (use_stmt)
2074 && !is_pattern_stmt_p (vinfo_for_stmt (use_stmt)))
2075 nloop_uses++;
2076 if (nloop_uses > 1)
2078 if (vect_print_dump_info (REPORT_DETAILS))
2079 fprintf (vect_dump, "reduction used in loop.");
2080 return NULL;
2084 /* If DEF_STMT is a phi node itself, we expect it to have a single argument
2085 defined in the inner loop. */
2086 if (phi_def)
2088 op1 = PHI_ARG_DEF (def_stmt, 0);
2090 if (gimple_phi_num_args (def_stmt) != 1
2091 || TREE_CODE (op1) != SSA_NAME)
2093 if (vect_print_dump_info (REPORT_DETAILS))
2094 fprintf (vect_dump, "unsupported phi node definition.");
2096 return NULL;
2099 def1 = SSA_NAME_DEF_STMT (op1);
2100 if (flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
2101 && loop->inner
2102 && flow_bb_inside_loop_p (loop->inner, gimple_bb (def1))
2103 && is_gimple_assign (def1))
2105 if (vect_print_dump_info (REPORT_DETAILS))
2106 report_vect_op (def_stmt, "detected double reduction: ");
2108 *double_reduc = true;
2109 return def_stmt;
2112 return NULL;
2115 code = orig_code = gimple_assign_rhs_code (def_stmt);
2117 /* We can handle "res -= x[i]", which is non-associative by
2118 simply rewriting this into "res += -x[i]". Avoid changing
2119 gimple instruction for the first simple tests and only do this
2120 if we're allowed to change code at all. */
2121 if (code == MINUS_EXPR
2122 && modify
2123 && (op1 = gimple_assign_rhs1 (def_stmt))
2124 && TREE_CODE (op1) == SSA_NAME
2125 && SSA_NAME_DEF_STMT (op1) == phi)
2126 code = PLUS_EXPR;
2128 if (check_reduction
2129 && (!commutative_tree_code (code) || !associative_tree_code (code)))
2131 if (vect_print_dump_info (REPORT_DETAILS))
2132 report_vect_op (def_stmt, "reduction: not commutative/associative: ");
2133 return NULL;
2136 if (get_gimple_rhs_class (code) != GIMPLE_BINARY_RHS)
2138 if (code != COND_EXPR)
2140 if (vect_print_dump_info (REPORT_DETAILS))
2141 report_vect_op (def_stmt, "reduction: not binary operation: ");
2143 return NULL;
2146 op3 = gimple_assign_rhs1 (def_stmt);
2147 if (COMPARISON_CLASS_P (op3))
2149 op4 = TREE_OPERAND (op3, 1);
2150 op3 = TREE_OPERAND (op3, 0);
2153 op1 = gimple_assign_rhs2 (def_stmt);
2154 op2 = gimple_assign_rhs3 (def_stmt);
2156 if (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op2) != SSA_NAME)
2158 if (vect_print_dump_info (REPORT_DETAILS))
2159 report_vect_op (def_stmt, "reduction: uses not ssa_names: ");
2161 return NULL;
2164 else
2166 op1 = gimple_assign_rhs1 (def_stmt);
2167 op2 = gimple_assign_rhs2 (def_stmt);
2169 if (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op2) != SSA_NAME)
2171 if (vect_print_dump_info (REPORT_DETAILS))
2172 report_vect_op (def_stmt, "reduction: uses not ssa_names: ");
2174 return NULL;
2178 type = TREE_TYPE (gimple_assign_lhs (def_stmt));
2179 if ((TREE_CODE (op1) == SSA_NAME
2180 && !types_compatible_p (type,TREE_TYPE (op1)))
2181 || (TREE_CODE (op2) == SSA_NAME
2182 && !types_compatible_p (type, TREE_TYPE (op2)))
2183 || (op3 && TREE_CODE (op3) == SSA_NAME
2184 && !types_compatible_p (type, TREE_TYPE (op3)))
2185 || (op4 && TREE_CODE (op4) == SSA_NAME
2186 && !types_compatible_p (type, TREE_TYPE (op4))))
2188 if (vect_print_dump_info (REPORT_DETAILS))
2190 fprintf (vect_dump, "reduction: multiple types: operation type: ");
2191 print_generic_expr (vect_dump, type, TDF_SLIM);
2192 fprintf (vect_dump, ", operands types: ");
2193 print_generic_expr (vect_dump, TREE_TYPE (op1), TDF_SLIM);
2194 fprintf (vect_dump, ",");
2195 print_generic_expr (vect_dump, TREE_TYPE (op2), TDF_SLIM);
2196 if (op3)
2198 fprintf (vect_dump, ",");
2199 print_generic_expr (vect_dump, TREE_TYPE (op3), TDF_SLIM);
2202 if (op4)
2204 fprintf (vect_dump, ",");
2205 print_generic_expr (vect_dump, TREE_TYPE (op4), TDF_SLIM);
2209 return NULL;
2212 /* Check that it's ok to change the order of the computation.
2213 Generally, when vectorizing a reduction we change the order of the
2214 computation. This may change the behavior of the program in some
2215 cases, so we need to check that this is ok. One exception is when
2216 vectorizing an outer-loop: the inner-loop is executed sequentially,
2217 and therefore vectorizing reductions in the inner-loop during
2218 outer-loop vectorization is safe. */
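/* A small numeric illustration of why this matters for floats: in IEEE
   double arithmetic (1e16 + 1.0) + 1.0 == 1e16, while
   1e16 + (1.0 + 1.0) == 1e16 + 2.0, so reassociating a floating-point
   reduction can change the computed result. This is what the
   flag_associative_math test below guards against. */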
2220 /* CHECKME: check for !flag_finite_math_only too? */
2221 if (SCALAR_FLOAT_TYPE_P (type) && !flag_associative_math
2222 && check_reduction)
2224 /* Changing the order of operations changes the semantics. */
2225 if (vect_print_dump_info (REPORT_DETAILS))
2226 report_vect_op (def_stmt, "reduction: unsafe fp math optimization: ");
2227 return NULL;
2229 else if (INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type)
2230 && check_reduction)
2232 /* Changing the order of operations changes the semantics. */
2233 if (vect_print_dump_info (REPORT_DETAILS))
2234 report_vect_op (def_stmt, "reduction: unsafe int math optimization: ");
2235 return NULL;
2237 else if (SAT_FIXED_POINT_TYPE_P (type) && check_reduction)
2239 /* Changing the order of operations changes the semantics. */
2240 if (vect_print_dump_info (REPORT_DETAILS))
2241 report_vect_op (def_stmt,
2242 "reduction: unsafe fixed-point math optimization: ");
2243 return NULL;
2246 /* If we detected "res -= x[i]" earlier, rewrite it into
2247 "res += -x[i]" now. If this turns out to be useless reassoc
2248 will clean it up again. */
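/* E.g. (illustrative GIMPLE), the code below rewrites

     res_3 = res_1 - x_2;

   into

     neg_4 = -x_2;
     res_3 = res_1 + neg_4;

   where neg_4 is a fresh SSA name holding the negated rhs. */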
2249 if (orig_code == MINUS_EXPR)
2251 tree rhs = gimple_assign_rhs2 (def_stmt);
2252 tree negrhs = make_ssa_name (SSA_NAME_VAR (rhs), NULL);
2253 gimple negate_stmt = gimple_build_assign_with_ops (NEGATE_EXPR, negrhs,
2254 rhs, NULL);
2255 gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
2256 set_vinfo_for_stmt (negate_stmt, new_stmt_vec_info (negate_stmt,
2257 loop_info, NULL));
2258 gsi_insert_before (&gsi, negate_stmt, GSI_NEW_STMT);
2259 gimple_assign_set_rhs2 (def_stmt, negrhs);
2260 gimple_assign_set_rhs_code (def_stmt, PLUS_EXPR);
2261 update_stmt (def_stmt);
2264 /* Reduction is safe. We're dealing with one of the following:
2265 1) integer arithmetic and no trapv
2266 2) floating point arithmetic, and special flags permit this optimization
2267 3) nested cycle (i.e., outer loop vectorization). */
2268 if (TREE_CODE (op1) == SSA_NAME)
2269 def1 = SSA_NAME_DEF_STMT (op1);
2271 if (TREE_CODE (op2) == SSA_NAME)
2272 def2 = SSA_NAME_DEF_STMT (op2);
2274 if (code != COND_EXPR
2275 && ((!def1 || gimple_nop_p (def1)) && (!def2 || gimple_nop_p (def2))))
2277 if (vect_print_dump_info (REPORT_DETAILS))
2278 report_vect_op (def_stmt, "reduction: no defs for operands: ");
2279 return NULL;
2282 /* Check that one def is the reduction def, defined by PHI,
2283 the other def is either defined in the loop ("vect_internal_def"),
2284 or it's an induction (defined by a loop-header phi-node). */
2286 if (def2 && def2 == phi
2287 && (code == COND_EXPR
2288 || !def1 || gimple_nop_p (def1)
2289 || (def1 && flow_bb_inside_loop_p (loop, gimple_bb (def1))
2290 && (is_gimple_assign (def1)
2291 || is_gimple_call (def1)
2292 || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1))
2293 == vect_induction_def
2294 || (gimple_code (def1) == GIMPLE_PHI
2295 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1))
2296 == vect_internal_def
2297 && !is_loop_header_bb_p (gimple_bb (def1)))))))
2299 if (vect_print_dump_info (REPORT_DETAILS))
2300 report_vect_op (def_stmt, "detected reduction: ");
2301 return def_stmt;
2304 if (def1 && def1 == phi
2305 && (code == COND_EXPR
2306 || !def2 || gimple_nop_p (def2)
2307 || (def2 && flow_bb_inside_loop_p (loop, gimple_bb (def2))
2308 && (is_gimple_assign (def2)
2309 || is_gimple_call (def2)
2310 || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2))
2311 == vect_induction_def
2312 || (gimple_code (def2) == GIMPLE_PHI
2313 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2))
2314 == vect_internal_def
2315 && !is_loop_header_bb_p (gimple_bb (def2)))))))
2317 if (check_reduction)
2319 /* Swap operands (just for simplicity - so that the rest of the code
2320 can assume that the reduction variable is always the last (second)
2321 argument). */
2322 if (vect_print_dump_info (REPORT_DETAILS))
2323 report_vect_op (def_stmt,
2324 "detected reduction: need to swap operands: ");
2326 swap_tree_operands (def_stmt, gimple_assign_rhs1_ptr (def_stmt),
2327 gimple_assign_rhs2_ptr (def_stmt));
2329 else
2331 if (vect_print_dump_info (REPORT_DETAILS))
2332 report_vect_op (def_stmt, "detected reduction: ");
2335 return def_stmt;
2338 /* Try to find SLP reduction chain. */
2339 if (check_reduction && vect_is_slp_reduction (loop_info, phi, def_stmt))
2341 if (vect_print_dump_info (REPORT_DETAILS))
2342 report_vect_op (def_stmt, "reduction: detected reduction chain: ");
2344 return def_stmt;
2347 if (vect_print_dump_info (REPORT_DETAILS))
2348 report_vect_op (def_stmt, "reduction: unknown pattern: ");
2350 return NULL;
2353 /* Wrapper around vect_is_simple_reduction_1, that won't modify code
2354 in-place. Arguments as there. */
2356 static gimple
2357 vect_is_simple_reduction (loop_vec_info loop_info, gimple phi,
2358 bool check_reduction, bool *double_reduc)
2360 return vect_is_simple_reduction_1 (loop_info, phi, check_reduction,
2361 double_reduc, false);
2364 /* Wrapper around vect_is_simple_reduction_1, which will modify code
2365 in-place if it enables detection of more reductions. Arguments
2366 as there. */
2368 gimple
2369 vect_force_simple_reduction (loop_vec_info loop_info, gimple phi,
2370 bool check_reduction, bool *double_reduc)
2372 return vect_is_simple_reduction_1 (loop_info, phi, check_reduction,
2373 double_reduc, true);
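/* A minimal caller sketch (variable names here are illustrative): when
   analyzing the scalar cycles of a loop, a phi suspected to head a
   reduction can be classified with

     bool double_reduc;
     gimple reduc_stmt
       = vect_force_simple_reduction (loop_vinfo, phi, !nested_cycle,
                                      &double_reduc);

   where a NULL return value means that no supported reduction (or
   nested cycle) was recognized. */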
2376 /* Calculate the cost of one scalar iteration of the loop. */
2377 int
2378 vect_get_single_scalar_iteraion_cost (loop_vec_info loop_vinfo)
2380 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2381 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
2382 int nbbs = loop->num_nodes, factor, scalar_single_iter_cost = 0;
2383 int innerloop_iters, i, stmt_cost;
2385 /* Count statements in scalar loop. Using this as scalar cost for a single
2386 iteration for now.
2388 TODO: Add outer loop support.
2390 TODO: Consider assigning different costs to different scalar
2391 statements. */
2393 /* FORNOW. */
2394 innerloop_iters = 1;
2395 if (loop->inner)
2396 innerloop_iters = 50; /* FIXME */
2398 for (i = 0; i < nbbs; i++)
2400 gimple_stmt_iterator si;
2401 basic_block bb = bbs[i];
2403 if (bb->loop_father == loop->inner)
2404 factor = innerloop_iters;
2405 else
2406 factor = 1;
2408 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
2410 gimple stmt = gsi_stmt (si);
2411 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2413 if (!is_gimple_assign (stmt) && !is_gimple_call (stmt))
2414 continue;
2416 /* Skip stmts that are not vectorized inside the loop. */
2417 if (stmt_info
2418 && !STMT_VINFO_RELEVANT_P (stmt_info)
2419 && (!STMT_VINFO_LIVE_P (stmt_info)
2420 || STMT_VINFO_DEF_TYPE (stmt_info) != vect_reduction_def))
2421 continue;
2423 if (STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt)))
2425 if (DR_IS_READ (STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt))))
2426 stmt_cost = vect_get_cost (scalar_load);
2427 else
2428 stmt_cost = vect_get_cost (scalar_store);
2430 else
2431 stmt_cost = vect_get_cost (scalar_stmt);
2433 scalar_single_iter_cost += stmt_cost * factor;
2436 return scalar_single_iter_cost;
2439 /* Calculate cost of peeling the loop PEEL_ITERS_PROLOGUE times. */
2440 int
2441 vect_get_known_peeling_cost (loop_vec_info loop_vinfo, int peel_iters_prologue,
2442 int *peel_iters_epilogue,
2443 int scalar_single_iter_cost)
2445 int peel_guard_costs = 0;
2446 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2448 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
2450 *peel_iters_epilogue = vf/2;
2451 if (vect_print_dump_info (REPORT_COST))
2452 fprintf (vect_dump, "cost model: "
2453 "epilogue peel iters set to vf/2 because "
2454 "loop iterations are unknown .");
2456 /* If peeled iterations are known but number of scalar loop
2457 iterations are unknown, count a taken branch per peeled loop. */
2458 peel_guard_costs = 2 * vect_get_cost (cond_branch_taken);
2460 else
2462 int niters = LOOP_VINFO_INT_NITERS (loop_vinfo);
2463 peel_iters_prologue = niters < peel_iters_prologue ?
2464 niters : peel_iters_prologue;
2465 *peel_iters_epilogue = (niters - peel_iters_prologue) % vf;
2466 /* If we need to peel for gaps, but no epilogue peeling would otherwise
2467 be required, we still have to peel VF iterations. */
2468 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) && !*peel_iters_epilogue)
2469 *peel_iters_epilogue = vf;
2472 return (peel_iters_prologue * scalar_single_iter_cost)
2473 + (*peel_iters_epilogue * scalar_single_iter_cost)
2474 + peel_guard_costs;
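/* A worked example with illustrative numbers: for vf = 4, niters = 10,
   peel_iters_prologue = 2 and scalar_single_iter_cost = 1, the epilogue
   gets (10 - 2) % 4 = 0 iterations; if peeling for gaps is required this
   is bumped to vf = 4, for a total peeling cost of 2*1 + 4*1 = 6
   (peel_guard_costs stays zero when the iteration count is known). */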
2477 /* Function vect_estimate_min_profitable_iters
2479 Return the number of iterations required for the vector version of the
2480 loop to be profitable relative to the cost of the scalar version of the
2481 loop.
2483 TODO: Take profile info into account before making vectorization
2484 decisions, if available. */
2486 int
2487 vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo)
2489 int i;
2490 int min_profitable_iters;
2491 int peel_iters_prologue;
2492 int peel_iters_epilogue;
2493 int vec_inside_cost = 0;
2494 int vec_outside_cost = 0;
2495 int scalar_single_iter_cost = 0;
2496 int scalar_outside_cost = 0;
2497 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2498 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2499 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
2500 int nbbs = loop->num_nodes;
2501 int npeel = LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo);
2502 int peel_guard_costs = 0;
2503 int innerloop_iters = 0, factor;
2504 VEC (slp_instance, heap) *slp_instances;
2505 slp_instance instance;
2507 /* Cost model disabled. */
2508 if (!flag_vect_cost_model)
2510 if (vect_print_dump_info (REPORT_COST))
2511 fprintf (vect_dump, "cost model disabled.");
2512 return 0;
2515 /* Requires loop versioning tests to handle misalignment. */
2516 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
2518 /* FIXME: Make cost depend on complexity of individual check. */
2519 vec_outside_cost +=
2520 VEC_length (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo));
2521 if (vect_print_dump_info (REPORT_COST))
2522 fprintf (vect_dump, "cost model: Adding cost of checks for loop "
2523 "versioning to treat misalignment.\n");
2526 /* Requires loop versioning with alias checks. */
2527 if (LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
2529 /* FIXME: Make cost depend on complexity of individual check. */
2530 vec_outside_cost +=
2531 VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo));
2532 if (vect_print_dump_info (REPORT_COST))
2533 fprintf (vect_dump, "cost model: Adding cost of checks for loop "
2534 "versioning aliasing.\n");
2537 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
2538 || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
2539 vec_outside_cost += vect_get_cost (cond_branch_taken);
2541 /* Accumulate the inside-of-loop and outside-of-loop costs of the stmts
2542 that are to be vectorized, as recorded by the vect_model_*_cost
2544 functions.
2546 TODO: Add outer loop support. */
2549 /* FORNOW. */
2550 if (loop->inner)
2551 innerloop_iters = 50; /* FIXME */
2553 for (i = 0; i < nbbs; i++)
2555 gimple_stmt_iterator si;
2556 basic_block bb = bbs[i];
2558 if (bb->loop_father == loop->inner)
2559 factor = innerloop_iters;
2560 else
2561 factor = 1;
2563 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
2565 gimple stmt = gsi_stmt (si);
2566 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2567 /* Skip stmts that are not vectorized inside the loop. */
2568 if (!STMT_VINFO_RELEVANT_P (stmt_info)
2569 && (!STMT_VINFO_LIVE_P (stmt_info)
2570 || STMT_VINFO_DEF_TYPE (stmt_info) != vect_reduction_def))
2571 continue;
2572 vec_inside_cost += STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info) * factor;
2573 /* FIXME: for stmts in the inner-loop in outer-loop vectorization,
2574 some of the "outside" costs are generated inside the outer-loop. */
2575 vec_outside_cost += STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info);
2579 scalar_single_iter_cost = vect_get_single_scalar_iteraion_cost (loop_vinfo);
2581 /* Add additional cost for the peeled instructions in prologue and epilogue
2582 loop.
2584 FORNOW: If we don't know the value of peel_iters for prologue or epilogue
2585 at compile-time - we assume it's vf/2 (the worst would be vf-1).
2587 TODO: Build an expression that represents peel_iters for prologue and
2588 epilogue to be used in a run-time test. */
2590 if (npeel < 0)
2592 peel_iters_prologue = vf/2;
2593 if (vect_print_dump_info (REPORT_COST))
2594 fprintf (vect_dump, "cost model: "
2595 "prologue peel iters set to vf/2.");
2597 /* If peeling for alignment is unknown, loop bound of main loop becomes
2598 unknown. */
2599 peel_iters_epilogue = vf/2;
2600 if (vect_print_dump_info (REPORT_COST))
2601 fprintf (vect_dump, "cost model: "
2602 "epilogue peel iters set to vf/2 because "
2603 "peeling for alignment is unknown .");
2605 /* If peeled iterations are unknown, count a taken branch and a not taken
2606 branch per peeled loop. Even if scalar loop iterations are known,
2607 vector iterations are not known since peeled prologue iterations are
2608 not known. Hence guards remain the same. */
2609 peel_guard_costs += 2 * (vect_get_cost (cond_branch_taken)
2610 + vect_get_cost (cond_branch_not_taken));
2611 vec_outside_cost += (peel_iters_prologue * scalar_single_iter_cost)
2612 + (peel_iters_epilogue * scalar_single_iter_cost)
2613 + peel_guard_costs;
2615 else
2617 peel_iters_prologue = npeel;
2618 vec_outside_cost += vect_get_known_peeling_cost (loop_vinfo,
2619 peel_iters_prologue, &peel_iters_epilogue,
2620 scalar_single_iter_cost);
2623 /* FORNOW: The scalar outside cost is incremented in one of the
2624 following ways:
2626 1. The vectorizer checks for alignment and aliasing and generates
2627 a condition that allows dynamic vectorization. A cost model
2628 check is ANDED with the versioning condition. Hence scalar code
2629 path now has the added cost of the versioning check.
2631 if (cost > th & versioning_check)
2632 jmp to vector code
2634 Hence the run-time scalar cost is incremented by a not-taken branch cost.
2636 2. The vectorizer then checks if a prologue is required. If the
2637 cost model check was not done before during versioning, it has to
2638 be done before the prologue check.
2640 if (cost <= th)
2641 prologue = scalar_iters
2642 if (prologue == 0)
2643 jmp to vector code
2644 else
2645 execute prologue
2646 if (prologue == num_iters)
2647 go to exit
2649 Hence the run-time scalar cost is incremented by a taken branch,
2650 plus a not-taken branch, plus a taken branch cost.
2652 3. The vectorizer then checks if an epilogue is required. If the
2653 cost model check was not done before during prologue check, it
2654 has to be done with the epilogue check.
2656 if (prologue == 0)
2657 jmp to vector code
2658 else
2659 execute prologue
2660 if (prologue == num_iters)
2661 go to exit
2662 vector code:
2663 if ((cost <= th) | (scalar_iters-prologue-epilogue == 0))
2664 jmp to epilogue
2666 Hence the run-time scalar cost should be incremented by 2 taken
2667 branches.
2669 TODO: The back end may reorder the BBs differently and reverse
2670 conditions/branch directions. Change the estimates below to
2671 something more reasonable. */
2673 /* If the number of iterations is known and we do not do versioning, we can
2674 decide whether to vectorize at compile time. Hence the scalar version
2675 does not carry cost model guard costs. */
2676 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
2677 || LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
2678 || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
2680 /* Cost model check occurs at versioning. */
2681 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
2682 || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
2683 scalar_outside_cost += vect_get_cost (cond_branch_not_taken);
2684 else
2686 /* Cost model check occurs at prologue generation. */
2687 if (LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0)
2688 scalar_outside_cost += 2 * vect_get_cost (cond_branch_taken)
2689 + vect_get_cost (cond_branch_not_taken);
2690 /* Cost model check occurs at epilogue generation. */
2691 else
2692 scalar_outside_cost += 2 * vect_get_cost (cond_branch_taken);
2696 /* Add SLP costs. */
2697 slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
2698 FOR_EACH_VEC_ELT (slp_instance, slp_instances, i, instance)
2700 vec_outside_cost += SLP_INSTANCE_OUTSIDE_OF_LOOP_COST (instance);
2701 vec_inside_cost += SLP_INSTANCE_INSIDE_OF_LOOP_COST (instance);
2704 /* Calculate number of iterations required to make the vector version
2705 profitable, relative to the loop bodies only. The following condition
2706 must hold true:
2707 SIC * niters + SOC > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC
2708 where
2709 SIC = scalar iteration cost, VIC = vector iteration cost,
2710 VOC = vector outside cost, VF = vectorization factor,
2711 PL_ITERS = prologue iterations, EP_ITERS= epilogue iterations
2712 SOC = scalar outside cost for run time cost model check. */
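/* Rearranging that condition (with the names as defined above):
     SIC * niters + SOC > VIC * ((niters - PL_ITERS - EP_ITERS) / VF) + VOC
   multiply through by VF and collect the niters terms:
     niters * (SIC * VF - VIC)
       > (VOC - SOC) * VF - VIC * (PL_ITERS + EP_ITERS)
   which is the quotient computed below; the follow-up check bumps the
   result by one when the integer division truncated. */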
2714 if ((scalar_single_iter_cost * vf) > vec_inside_cost)
2716 if (vec_outside_cost <= 0)
2717 min_profitable_iters = 1;
2718 else
2720 min_profitable_iters = ((vec_outside_cost - scalar_outside_cost) * vf
2721 - vec_inside_cost * peel_iters_prologue
2722 - vec_inside_cost * peel_iters_epilogue)
2723 / ((scalar_single_iter_cost * vf)
2724 - vec_inside_cost);
2726 if ((scalar_single_iter_cost * vf * min_profitable_iters)
2727 <= ((vec_inside_cost * min_profitable_iters)
2728 + ((vec_outside_cost - scalar_outside_cost) * vf)))
2729 min_profitable_iters++;
2732 /* Vector version will never be profitable. */
2733 else
2735 if (vect_print_dump_info (REPORT_COST))
2736 fprintf (vect_dump, "cost model: the vector iteration cost = %d "
2737 "divided by the scalar iteration cost = %d "
2738 "is greater or equal to the vectorization factor = %d.",
2739 vec_inside_cost, scalar_single_iter_cost, vf);
2740 return -1;
2743 if (vect_print_dump_info (REPORT_COST))
2745 fprintf (vect_dump, "Cost model analysis: \n");
2746 fprintf (vect_dump, " Vector inside of loop cost: %d\n",
2747 vec_inside_cost);
2748 fprintf (vect_dump, " Vector outside of loop cost: %d\n",
2749 vec_outside_cost);
2750 fprintf (vect_dump, " Scalar iteration cost: %d\n",
2751 scalar_single_iter_cost);
2752 fprintf (vect_dump, " Scalar outside cost: %d\n", scalar_outside_cost);
2753 fprintf (vect_dump, " prologue iterations: %d\n",
2754 peel_iters_prologue);
2755 fprintf (vect_dump, " epilogue iterations: %d\n",
2756 peel_iters_epilogue);
2757 fprintf (vect_dump, " Calculated minimum iters for profitability: %d\n",
2758 min_profitable_iters);
2761 min_profitable_iters =
2762 min_profitable_iters < vf ? vf : min_profitable_iters;
2764 /* Because the condition we create is:
2765 if (niters <= min_profitable_iters)
2766 then skip the vectorized loop. */
2767 min_profitable_iters--;
2769 if (vect_print_dump_info (REPORT_COST))
2770 fprintf (vect_dump, " Profitability threshold = %d\n",
2771 min_profitable_iters);
2773 return min_profitable_iters;
2777 /* TODO: Close dependency between vect_model_*_cost and vectorizable_*
2778 functions. Design better to avoid maintenance issues. */
2780 /* Function vect_model_reduction_cost.
2782 Models cost for a reduction operation, including the vector ops
2783 generated within the strip-mine loop, the initial definition before
2784 the loop, and the epilogue code that must be generated. */
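/* As an illustration with made-up cost units: for a V8HI reduction with
   no reduc_code available, the whole-vector-shift epilogue below costs
   log2(8) * 2 = 6 vector stmts plus one vec_to_scalar extract, whereas
   the extract-based fallback costs 8 + 7 = 15 vector stmts (N extracts
   and N-1 reduction ops for N = 8 elements). */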
2786 static bool
2787 vect_model_reduction_cost (stmt_vec_info stmt_info, enum tree_code reduc_code,
2788 int ncopies)
2790 int outer_cost = 0;
2791 enum tree_code code;
2792 optab optab;
2793 tree vectype;
2794 gimple stmt, orig_stmt;
2795 tree reduction_op;
2796 enum machine_mode mode;
2797 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2798 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2801 /* Cost of reduction op inside loop. */
2802 STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info)
2803 += ncopies * vect_get_cost (vector_stmt);
2805 stmt = STMT_VINFO_STMT (stmt_info);
2807 switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
2809 case GIMPLE_SINGLE_RHS:
2810 gcc_assert (TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt)) == ternary_op);
2811 reduction_op = TREE_OPERAND (gimple_assign_rhs1 (stmt), 2);
2812 break;
2813 case GIMPLE_UNARY_RHS:
2814 reduction_op = gimple_assign_rhs1 (stmt);
2815 break;
2816 case GIMPLE_BINARY_RHS:
2817 reduction_op = gimple_assign_rhs2 (stmt);
2818 break;
2819 case GIMPLE_TERNARY_RHS:
2820 reduction_op = gimple_assign_rhs3 (stmt);
2821 break;
2822 default:
2823 gcc_unreachable ();
2826 vectype = get_vectype_for_scalar_type (TREE_TYPE (reduction_op));
2827 if (!vectype)
2829 if (vect_print_dump_info (REPORT_COST))
2831 fprintf (vect_dump, "unsupported data-type ");
2832 print_generic_expr (vect_dump, TREE_TYPE (reduction_op), TDF_SLIM);
2834 return false;
2837 mode = TYPE_MODE (vectype);
2838 orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
2840 if (!orig_stmt)
2841 orig_stmt = STMT_VINFO_STMT (stmt_info);
2843 code = gimple_assign_rhs_code (orig_stmt);
2845 /* Add in cost for initial definition. */
2846 outer_cost += vect_get_cost (scalar_to_vec);
2848 /* Determine cost of epilogue code.
2850 We have a reduction operator that will reduce the vector in one statement.
2851 Also requires scalar extract. */
2853 if (!nested_in_vect_loop_p (loop, orig_stmt))
2855 if (reduc_code != ERROR_MARK)
2856 outer_cost += vect_get_cost (vector_stmt)
2857 + vect_get_cost (vec_to_scalar);
2858 else
2860 int vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1);
2861 tree bitsize =
2862 TYPE_SIZE (TREE_TYPE (gimple_assign_lhs (orig_stmt)));
2863 int element_bitsize = tree_low_cst (bitsize, 1);
2864 int nelements = vec_size_in_bits / element_bitsize;
2866 optab = optab_for_tree_code (code, vectype, optab_default);
2868 /* We have a whole vector shift available. */
2869 if (VECTOR_MODE_P (mode)
2870 && optab_handler (optab, mode) != CODE_FOR_nothing
2871 && optab_handler (vec_shr_optab, mode) != CODE_FOR_nothing)
2872 /* Final reduction via vector shifts and the reduction operator. Also
2873 requires scalar extract. */
2874 outer_cost += ((exact_log2 (nelements) * 2)
2875 * vect_get_cost (vector_stmt)
2876 + vect_get_cost (vec_to_scalar));
2877 else
2878 /* Use extracts and reduction op for final reduction. For N elements,
2879 we have N extracts and N-1 reduction ops. */
2880 outer_cost += ((nelements + nelements - 1)
2881 * vect_get_cost (vector_stmt));
2885 STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info) = outer_cost;
2887 if (vect_print_dump_info (REPORT_COST))
2888 fprintf (vect_dump, "vect_model_reduction_cost: inside_cost = %d, "
2889 "outside_cost = %d .", STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info),
2890 STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info));
2892 return true;
2896 /* Function vect_model_induction_cost.
2898 Models cost for induction operations. */
2900 static void
2901 vect_model_induction_cost (stmt_vec_info stmt_info, int ncopies)
2903 /* loop cost for vec_loop. */
2904 STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info)
2905 = ncopies * vect_get_cost (vector_stmt);
2906 /* prologue cost for vec_init and vec_step. */
2907 STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info)
2908 = 2 * vect_get_cost (scalar_to_vec);
2910 if (vect_print_dump_info (REPORT_COST))
2911 fprintf (vect_dump, "vect_model_induction_cost: inside_cost = %d, "
2912 "outside_cost = %d .", STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info),
2913 STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info));
2917 /* Function get_initial_def_for_induction
2919 Input:
2920 STMT - a stmt that performs an induction operation in the loop.
2921 IV_PHI - the initial value of the induction variable
2923 Output:
2924 Return a vector variable, initialized with the first VF values of
2925 the induction variable. E.g., for an iv with IV_PHI='X' and
2926 evolution S, for a vector of 4 units, we want to return:
2927 [X, X + S, X + 2*S, X + 3*S]. */
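/* Concretely (illustrative values): for X = 0, S = 3 and a vector of 4
   units this returns [0, 3, 6, 9], and each loop iteration then advances
   the vector IV by vec_step = [12, 12, 12, 12], i.e. VF * S in every
   lane. */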
2929 static tree
2930 get_initial_def_for_induction (gimple iv_phi)
2932 stmt_vec_info stmt_vinfo = vinfo_for_stmt (iv_phi);
2933 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
2934 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2935 tree scalar_type;
2936 tree vectype;
2937 int nunits;
2938 edge pe = loop_preheader_edge (loop);
2939 struct loop *iv_loop;
2940 basic_block new_bb;
2941 tree vec, vec_init, vec_step, t;
2942 tree access_fn;
2943 tree new_var;
2944 tree new_name;
2945 gimple init_stmt, induction_phi, new_stmt;
2946 tree induc_def, vec_def, vec_dest;
2947 tree init_expr, step_expr;
2948 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2949 int i;
2950 bool ok;
2951 int ncopies;
2952 tree expr;
2953 stmt_vec_info phi_info = vinfo_for_stmt (iv_phi);
2954 bool nested_in_vect_loop = false;
2955 gimple_seq stmts = NULL;
2956 imm_use_iterator imm_iter;
2957 use_operand_p use_p;
2958 gimple exit_phi;
2959 edge latch_e;
2960 tree loop_arg;
2961 gimple_stmt_iterator si;
2962 basic_block bb = gimple_bb (iv_phi);
2963 tree stepvectype;
2964 tree resvectype;
2966 /* Is phi in an inner-loop, while vectorizing an enclosing outer-loop? */
2967 if (nested_in_vect_loop_p (loop, iv_phi))
2969 nested_in_vect_loop = true;
2970 iv_loop = loop->inner;
2972 else
2973 iv_loop = loop;
2974 gcc_assert (iv_loop == (gimple_bb (iv_phi))->loop_father);
2976 latch_e = loop_latch_edge (iv_loop);
2977 loop_arg = PHI_ARG_DEF_FROM_EDGE (iv_phi, latch_e);
2979 access_fn = analyze_scalar_evolution (iv_loop, PHI_RESULT (iv_phi));
2980 gcc_assert (access_fn);
2981 STRIP_NOPS (access_fn);
2982 ok = vect_is_simple_iv_evolution (iv_loop->num, access_fn,
2983 &init_expr, &step_expr);
2984 gcc_assert (ok);
2985 pe = loop_preheader_edge (iv_loop);
2987 scalar_type = TREE_TYPE (init_expr);
2988 vectype = get_vectype_for_scalar_type (scalar_type);
2989 resvectype = get_vectype_for_scalar_type (TREE_TYPE (PHI_RESULT (iv_phi)));
2990 gcc_assert (vectype);
2991 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2992 ncopies = vf / nunits;
2994 gcc_assert (phi_info);
2995 gcc_assert (ncopies >= 1);
2997 /* Find the first insertion point in the BB. */
2998 si = gsi_after_labels (bb);
3000 /* Create the vector that holds the initial_value of the induction. */
3001 if (nested_in_vect_loop)
3003 /* iv_loop is nested in the loop to be vectorized. init_expr has already
3004 been created during vectorization of previous stmts. We obtain it
3005 from the STMT_VINFO_VEC_STMT of the defining stmt. */
3006 tree iv_def = PHI_ARG_DEF_FROM_EDGE (iv_phi,
3007 loop_preheader_edge (iv_loop));
3008 vec_init = vect_get_vec_def_for_operand (iv_def, iv_phi, NULL);
3010 else
3012 /* iv_loop is the loop to be vectorized. Create:
3013 vec_init = [X, X+S, X+2*S, X+3*S] (S = step_expr, X = init_expr) */
3014 new_var = vect_get_new_vect_var (scalar_type, vect_scalar_var, "var_");
3015 add_referenced_var (new_var);
3017 new_name = force_gimple_operand (init_expr, &stmts, false, new_var);
3018 if (stmts)
3020 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
3021 gcc_assert (!new_bb);
3024 t = NULL_TREE;
3025 t = tree_cons (NULL_TREE, new_name, t);
3026 for (i = 1; i < nunits; i++)
3028 /* Create: new_name_i = new_name + step_expr */
3029 enum tree_code code = POINTER_TYPE_P (scalar_type)
3030 ? POINTER_PLUS_EXPR : PLUS_EXPR;
3031 init_stmt = gimple_build_assign_with_ops (code, new_var,
3032 new_name, step_expr);
3033 new_name = make_ssa_name (new_var, init_stmt);
3034 gimple_assign_set_lhs (init_stmt, new_name);
3036 new_bb = gsi_insert_on_edge_immediate (pe, init_stmt);
3037 gcc_assert (!new_bb);
3039 if (vect_print_dump_info (REPORT_DETAILS))
3041 fprintf (vect_dump, "created new init_stmt: ");
3042 print_gimple_stmt (vect_dump, init_stmt, 0, TDF_SLIM);
3044 t = tree_cons (NULL_TREE, new_name, t);
3046 /* Create a vector from [new_name_0, new_name_1, ..., new_name_nunits-1] */
3047 vec = build_constructor_from_list (vectype, nreverse (t));
3048 vec_init = vect_init_vector (iv_phi, vec, vectype, NULL);
3052 /* Create the vector that holds the step of the induction. */
3053 if (nested_in_vect_loop)
3054 /* iv_loop is nested in the loop to be vectorized. Generate:
3055 vec_step = [S, S, S, S] */
3056 new_name = step_expr;
3057 else
3059 /* iv_loop is the loop to be vectorized. Generate:
3060 vec_step = [VF*S, VF*S, VF*S, VF*S] */
3061 expr = build_int_cst (TREE_TYPE (step_expr), vf);
3062 new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
3063 expr, step_expr);
3066 t = unshare_expr (new_name);
3067 gcc_assert (CONSTANT_CLASS_P (new_name));
3068 stepvectype = get_vectype_for_scalar_type (TREE_TYPE (new_name));
3069 gcc_assert (stepvectype);
3070 vec = build_vector_from_val (stepvectype, t);
3071 vec_step = vect_init_vector (iv_phi, vec, stepvectype, NULL);
3074 /* Create the following def-use cycle:
3075 loop prolog:
3076 vec_init = ...
3077 vec_step = ...
3078 loop:
3079 vec_iv = PHI <vec_init, vec_loop>
3081 STMT
3083 vec_loop = vec_iv + vec_step; */
3085 /* Create the induction-phi that defines the induction-operand. */
3086 vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_");
3087 add_referenced_var (vec_dest);
3088 induction_phi = create_phi_node (vec_dest, iv_loop->header);
3089 set_vinfo_for_stmt (induction_phi,
3090 new_stmt_vec_info (induction_phi, loop_vinfo, NULL));
3091 induc_def = PHI_RESULT (induction_phi);
3093 /* Create the iv update inside the loop */
3094 new_stmt = gimple_build_assign_with_ops (PLUS_EXPR, vec_dest,
3095 induc_def, vec_step);
3096 vec_def = make_ssa_name (vec_dest, new_stmt);
3097 gimple_assign_set_lhs (new_stmt, vec_def);
3098 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
3099 set_vinfo_for_stmt (new_stmt, new_stmt_vec_info (new_stmt, loop_vinfo,
3100 NULL));
3102 /* Set the arguments of the phi node: */
3103 add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION);
3104 add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
3105 UNKNOWN_LOCATION);
3108 /* In case that vectorization factor (VF) is bigger than the number
3109 of elements that we can fit in a vectype (nunits), we have to generate
3110 more than one vector stmt - i.e - we need to "unroll" the
3111 vector stmt by a factor VF/nunits. For more details see documentation
3112 in vectorizable_operation. */
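/* For example (illustrative values), with VF = 8, nunits = 4 and step S,
   ncopies = 2: the first vector IV starts at [X, X+S, X+2*S, X+3*S] and
   the second copy is obtained by adding [4*S, 4*S, 4*S, 4*S], i.e.
   nunits * S per lane, as built just below. */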
3114 if (ncopies > 1)
3116 stmt_vec_info prev_stmt_vinfo;
3117 /* FORNOW. This restriction should be relaxed. */
3118 gcc_assert (!nested_in_vect_loop);
3120 /* Create the vector that holds the step of the induction. */
3121 expr = build_int_cst (TREE_TYPE (step_expr), nunits);
3122 new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
3123 expr, step_expr);
3124 t = unshare_expr (new_name);
3125 gcc_assert (CONSTANT_CLASS_P (new_name));
3126 vec = build_vector_from_val (stepvectype, t);
3127 vec_step = vect_init_vector (iv_phi, vec, stepvectype, NULL);
3129 vec_def = induc_def;
3130 prev_stmt_vinfo = vinfo_for_stmt (induction_phi);
3131 for (i = 1; i < ncopies; i++)
3133 /* vec_i = vec_prev + vec_step */
3134 new_stmt = gimple_build_assign_with_ops (PLUS_EXPR, vec_dest,
3135 vec_def, vec_step);
3136 vec_def = make_ssa_name (vec_dest, new_stmt);
3137 gimple_assign_set_lhs (new_stmt, vec_def);
3139 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
3140 if (!useless_type_conversion_p (resvectype, vectype))
3142 new_stmt = gimple_build_assign_with_ops
3143 (VIEW_CONVERT_EXPR,
3144 vect_get_new_vect_var (resvectype, vect_simple_var,
3145 "vec_iv_"),
3146 build1 (VIEW_CONVERT_EXPR, resvectype,
3147 gimple_assign_lhs (new_stmt)), NULL_TREE);
3148 gimple_assign_set_lhs (new_stmt,
3149 make_ssa_name
3150 (gimple_assign_lhs (new_stmt), new_stmt));
3151 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
3153 set_vinfo_for_stmt (new_stmt,
3154 new_stmt_vec_info (new_stmt, loop_vinfo, NULL));
3155 STMT_VINFO_RELATED_STMT (prev_stmt_vinfo) = new_stmt;
3156 prev_stmt_vinfo = vinfo_for_stmt (new_stmt);
3160 if (nested_in_vect_loop)
3162 /* Find the loop-closed exit-phi of the induction, and record
3163 the final vector of induction results: */
3164 exit_phi = NULL;
3165 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
3167 if (!flow_bb_inside_loop_p (iv_loop, gimple_bb (USE_STMT (use_p))))
3169 exit_phi = USE_STMT (use_p);
3170 break;
3173 if (exit_phi)
3175 stmt_vec_info stmt_vinfo = vinfo_for_stmt (exit_phi);
3176 /* FORNOW. Currently not supporting the case that an inner-loop induction
3177 is not used in the outer-loop (i.e. only outside the outer-loop). */
3178 gcc_assert (STMT_VINFO_RELEVANT_P (stmt_vinfo)
3179 && !STMT_VINFO_LIVE_P (stmt_vinfo));
3181 STMT_VINFO_VEC_STMT (stmt_vinfo) = new_stmt;
3182 if (vect_print_dump_info (REPORT_DETAILS))
3184 fprintf (vect_dump, "vector of inductions after inner-loop:");
3185 print_gimple_stmt (vect_dump, new_stmt, 0, TDF_SLIM);
3191 if (vect_print_dump_info (REPORT_DETAILS))
3193 fprintf (vect_dump, "transform induction: created def-use cycle: ");
3194 print_gimple_stmt (vect_dump, induction_phi, 0, TDF_SLIM);
3195 fprintf (vect_dump, "\n");
3196 print_gimple_stmt (vect_dump, SSA_NAME_DEF_STMT (vec_def), 0, TDF_SLIM);
3199 STMT_VINFO_VEC_STMT (phi_info) = induction_phi;
3200 if (!useless_type_conversion_p (resvectype, vectype))
3202 new_stmt = gimple_build_assign_with_ops
3203 (VIEW_CONVERT_EXPR,
3204 vect_get_new_vect_var (resvectype, vect_simple_var, "vec_iv_"),
3205 build1 (VIEW_CONVERT_EXPR, resvectype, induc_def), NULL_TREE);
3206 induc_def = make_ssa_name (gimple_assign_lhs (new_stmt), new_stmt);
3207 gimple_assign_set_lhs (new_stmt, induc_def);
3208 si = gsi_start_bb (bb);
3209 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
3210 set_vinfo_for_stmt (new_stmt,
3211 new_stmt_vec_info (new_stmt, loop_vinfo, NULL));
3212 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (new_stmt))
3213 = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (induction_phi));
3216 return induc_def;
3220 /* Function get_initial_def_for_reduction
3222 Input:
3223 STMT - a stmt that performs a reduction operation in the loop.
3224 INIT_VAL - the initial value of the reduction variable
3226 Output:
3227 ADJUSTMENT_DEF - a tree that holds a value to be added to the final result
3228 of the reduction (used for adjusting the epilog - see below).
3229 Return a vector variable, initialized according to the operation that STMT
3230 performs. This vector will be used as the initial value of the
3231 vector of partial results.
3233 Option1 (adjust in epilog): Initialize the vector as follows:
3234 add/bit or/xor: [0,0,...,0,0]
3235 mult/bit and: [1,1,...,1,1]
3236 min/max/cond_expr: [init_val,init_val,..,init_val,init_val]
3237 and when necessary (e.g. add/mult case) let the caller know
3238 that it needs to adjust the result by init_val.
3240 Option2: Initialize the vector as follows:
3241 add/bit or/xor: [init_val,0,0,...,0]
3242 mult/bit and: [init_val,1,1,...,1]
3243 min/max/cond_expr: [init_val,init_val,...,init_val]
3244 and no adjustments are needed.
3246 For example, for the following code:
3248 s = init_val;
3249 for (i=0;i<n;i++)
3250 s = s + a[i];
3252 STMT is 's = s + a[i]', and the reduction variable is 's'.
3253 For a vector of 4 units, we want to return either [0,0,0,init_val],
3254 or [0,0,0,0] and let the caller know that it needs to adjust
3255 the result at the end by 'init_val'.
3257 FORNOW, we use Option1 (the 'adjust in epilog' scheme) when
3258 ADJUSTMENT_DEF is not NULL, because this way the initialization vector
3259 is simpler (same element in all entries), and Option2 otherwise.
3261 A cost model should help decide between these two schemes. */
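/* Concretely (illustrative values), for a product reduction with
   init_val = 5 and a vector of 4 units: Option1 yields [1,1,1,1] and
   reports an adjustment of 5 (the epilog multiplies the final result by
   it), while Option2 yields [5,1,1,1] and needs no adjustment. */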
3263 tree
3264 get_initial_def_for_reduction (gimple stmt, tree init_val,
3265 tree *adjustment_def)
3267 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
3268 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
3269 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
3270 tree scalar_type = TREE_TYPE (init_val);
3271 tree vectype = get_vectype_for_scalar_type (scalar_type);
3272 int nunits;
3273 enum tree_code code = gimple_assign_rhs_code (stmt);
3274 tree def_for_init;
3275 tree init_def;
3276 tree t = NULL_TREE;
3277 int i;
3278 bool nested_in_vect_loop = false;
3279 tree init_value;
3280 REAL_VALUE_TYPE real_init_val = dconst0;
3281 int int_init_val = 0;
3282 gimple def_stmt = NULL;
3284 gcc_assert (vectype);
3285 nunits = TYPE_VECTOR_SUBPARTS (vectype);
3287 gcc_assert (POINTER_TYPE_P (scalar_type) || INTEGRAL_TYPE_P (scalar_type)
3288 || SCALAR_FLOAT_TYPE_P (scalar_type));
3290 if (nested_in_vect_loop_p (loop, stmt))
3291 nested_in_vect_loop = true;
3292 else
3293 gcc_assert (loop == (gimple_bb (stmt))->loop_father);
3295 /* In case of double reduction we only create a vector variable to be put
3296 in the reduction phi node. The actual statement creation is done in
3297 vect_create_epilog_for_reduction. */
3298 if (adjustment_def && nested_in_vect_loop
3299 && TREE_CODE (init_val) == SSA_NAME
3300 && (def_stmt = SSA_NAME_DEF_STMT (init_val))
3301 && gimple_code (def_stmt) == GIMPLE_PHI
3302 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
3303 && vinfo_for_stmt (def_stmt)
3304 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
3305 == vect_double_reduction_def)
3307 *adjustment_def = NULL;
3308 return vect_create_destination_var (init_val, vectype);
3311 if (TREE_CONSTANT (init_val))
3313 if (SCALAR_FLOAT_TYPE_P (scalar_type))
3314 init_value = build_real (scalar_type, TREE_REAL_CST (init_val));
3315 else
3316 init_value = build_int_cst (scalar_type, TREE_INT_CST_LOW (init_val));
3318 else
3319 init_value = init_val;
3321 switch (code)
3323 case WIDEN_SUM_EXPR:
3324 case DOT_PROD_EXPR:
3325 case PLUS_EXPR:
3326 case MINUS_EXPR:
3327 case BIT_IOR_EXPR:
3328 case BIT_XOR_EXPR:
3329 case MULT_EXPR:
3330 case BIT_AND_EXPR:
3331 /* ADJUSTMENT_DEF is NULL when called from
3332 vect_create_epilog_for_reduction to vectorize double reduction. */
3333 if (adjustment_def)
3335 if (nested_in_vect_loop)
3336 *adjustment_def = vect_get_vec_def_for_operand (init_val, stmt,
3337 NULL);
3338 else
3339 *adjustment_def = init_val;
3342 if (code == MULT_EXPR)
3344 real_init_val = dconst1;
3345 int_init_val = 1;
3348 if (code == BIT_AND_EXPR)
3349 int_init_val = -1;
3351 if (SCALAR_FLOAT_TYPE_P (scalar_type))
3352 def_for_init = build_real (scalar_type, real_init_val);
3353 else
3354 def_for_init = build_int_cst (scalar_type, int_init_val);
3356 /* Create a vector of '0' or '1' except the first element. */
3357 for (i = nunits - 2; i >= 0; --i)
3358 t = tree_cons (NULL_TREE, def_for_init, t);
3360 /* Option1: the first element is '0' or '1' as well. */
3361 if (adjustment_def)
3363 t = tree_cons (NULL_TREE, def_for_init, t);
3364 init_def = build_vector (vectype, t);
3365 break;
3368 /* Option2: the first element is INIT_VAL. */
3369 t = tree_cons (NULL_TREE, init_value, t);
3370 if (TREE_CONSTANT (init_val))
3371 init_def = build_vector (vectype, t);
3372 else
3373 init_def = build_constructor_from_list (vectype, t);
3375 break;
3377 case MIN_EXPR:
3378 case MAX_EXPR:
3379 case COND_EXPR:
3380 if (adjustment_def)
3382 *adjustment_def = NULL_TREE;
3383 init_def = vect_get_vec_def_for_operand (init_val, stmt, NULL);
3384 break;
3387 init_def = build_vector_from_val (vectype, init_value);
3388 break;
3390 default:
3391 gcc_unreachable ();
3394 return init_def;
3398 /* Function vect_create_epilog_for_reduction
3400 Create code at the loop-epilog to finalize the result of a reduction
3401 computation.
3403 VECT_DEFS is list of vector of partial results, i.e., the lhs's of vector
3404 reduction statements.
3405 STMT is the scalar reduction stmt that is being vectorized.
3406 NCOPIES is > 1 in case the vectorization factor (VF) is bigger than the
3407 number of elements that we can fit in a vectype (nunits). In this case
3408 we have to generate more than one vector stmt - i.e - we need to "unroll"
3409 the vector stmt by a factor VF/nunits. For more details see documentation
3410 in vectorizable_operation.
3411 REDUC_CODE is the tree-code for the epilog reduction.
3412 REDUCTION_PHIS is a list of the phi-nodes that carry the reduction
3413 computation.
3414 REDUC_INDEX is the index of the operand in the right hand side of the
3415 statement that is defined by REDUCTION_PHI.
3416 DOUBLE_REDUC is TRUE if double reduction phi nodes should be handled.
3417 SLP_NODE is an SLP node containing a group of reduction statements. The
3418 first one in this group is STMT.
3420 This function:
3421 1. Creates the reduction def-use cycles: sets the arguments for
3422 REDUCTION_PHIS:
3423 The loop-entry argument is the vectorized initial-value of the reduction.
3424 The loop-latch argument is taken from VECT_DEFS - the vector of partial
3425 sums.
3426 2. "Reduces" each vector of partial results VECT_DEFS into a single result,
3427 by applying the operation specified by REDUC_CODE if available, or by
3428 other means (whole-vector shifts or a scalar loop).
3429 The function also creates a new phi node at the loop exit to preserve
3430 loop-closed form, as illustrated below.
3432 The flow at the entry to this function:
3434 loop:
3435 vec_def = phi <null, null> # REDUCTION_PHI
3436 VECT_DEF = vector_stmt # vectorized form of STMT
3437 s_loop = scalar_stmt # (scalar) STMT
3438 loop_exit:
3439 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
3440 use <s_out0>
3441 use <s_out0>
3443 The above is transformed by this function into:
3445 loop:
3446 vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI
3447 VECT_DEF = vector_stmt # vectorized form of STMT
3448 s_loop = scalar_stmt # (scalar) STMT
3449 loop_exit:
3450 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
3451 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
3452 v_out2 = reduce <v_out1>
3453 s_out3 = extract_field <v_out2, 0>
3454 s_out4 = adjust_result <s_out3>
3455 use <s_out4>
3456 use <s_out4>
3459 static void
3460 vect_create_epilog_for_reduction (VEC (tree, heap) *vect_defs, gimple stmt,
3461 int ncopies, enum tree_code reduc_code,
3462 VEC (gimple, heap) *reduction_phis,
3463 int reduc_index, bool double_reduc,
3464 slp_tree slp_node)
3466 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3467 stmt_vec_info prev_phi_info;
3468 tree vectype;
3469 enum machine_mode mode;
3470 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3471 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo), *outer_loop = NULL;
3472 basic_block exit_bb;
3473 tree scalar_dest;
3474 tree scalar_type;
3475 gimple new_phi = NULL, phi;
3476 gimple_stmt_iterator exit_gsi;
3477 tree vec_dest;
3478 tree new_temp = NULL_TREE, new_dest, new_name, new_scalar_dest;
3479 gimple epilog_stmt = NULL;
3480 enum tree_code code = gimple_assign_rhs_code (stmt);
3481 gimple exit_phi;
3482 tree bitsize, bitpos;
3483 tree adjustment_def = NULL;
3484 tree vec_initial_def = NULL;
3485 tree reduction_op, expr, def;
3486 tree orig_name, scalar_result;
3487 imm_use_iterator imm_iter, phi_imm_iter;
3488 use_operand_p use_p, phi_use_p;
3489 bool extract_scalar_result = false;
3490 gimple use_stmt, orig_stmt, reduction_phi = NULL;
3491 bool nested_in_vect_loop = false;
3492 VEC (gimple, heap) *new_phis = NULL;
3493 VEC (gimple, heap) *inner_phis = NULL;
3494 enum vect_def_type dt = vect_unknown_def_type;
3495 int j, i;
3496 VEC (tree, heap) *scalar_results = NULL;
3497 unsigned int group_size = 1, k, ratio;
3498 VEC (tree, heap) *vec_initial_defs = NULL;
3499 VEC (gimple, heap) *phis;
3500 bool slp_reduc = false;
3501 tree new_phi_result;
3502 gimple inner_phi = NULL;
3504 if (slp_node)
3505 group_size = VEC_length (gimple, SLP_TREE_SCALAR_STMTS (slp_node));
3507 if (nested_in_vect_loop_p (loop, stmt))
3509 outer_loop = loop;
3510 loop = loop->inner;
3511 nested_in_vect_loop = true;
3512 gcc_assert (!slp_node);
3515 switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
3517 case GIMPLE_SINGLE_RHS:
3518 gcc_assert (TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt))
3519 == ternary_op);
3520 reduction_op = TREE_OPERAND (gimple_assign_rhs1 (stmt), reduc_index);
3521 break;
3522 case GIMPLE_UNARY_RHS:
3523 reduction_op = gimple_assign_rhs1 (stmt);
3524 break;
3525 case GIMPLE_BINARY_RHS:
3526 reduction_op = reduc_index ?
3527 gimple_assign_rhs2 (stmt) : gimple_assign_rhs1 (stmt);
3528 break;
3529 case GIMPLE_TERNARY_RHS:
3530 reduction_op = gimple_op (stmt, reduc_index + 1);
3531 break;
3532 default:
3533 gcc_unreachable ();
3536 vectype = get_vectype_for_scalar_type (TREE_TYPE (reduction_op));
3537 gcc_assert (vectype);
3538 mode = TYPE_MODE (vectype);
3540 /* 1. Create the reduction def-use cycle:
3541 Set the arguments of REDUCTION_PHIS, i.e., transform
3543 loop:
3544 vec_def = phi <null, null> # REDUCTION_PHI
3545 VECT_DEF = vector_stmt # vectorized form of STMT
3548 into:
3550 loop:
3551 vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI
3552 VECT_DEF = vector_stmt # vectorized form of STMT
3555 (in case of SLP, do it for all the phis). */
3557 /* Get the loop-entry arguments. */
3558 if (slp_node)
3559 vect_get_vec_defs (reduction_op, NULL_TREE, stmt, &vec_initial_defs,
3560 NULL, slp_node, reduc_index);
3561 else
3563 vec_initial_defs = VEC_alloc (tree, heap, 1);
3564 /* For the case of reduction, vect_get_vec_def_for_operand returns
3565 the scalar def before the loop, that defines the initial value
3566 of the reduction variable. */
3567 vec_initial_def = vect_get_vec_def_for_operand (reduction_op, stmt,
3568 &adjustment_def);
3569 VEC_quick_push (tree, vec_initial_defs, vec_initial_def);
3572 /* Set phi nodes arguments. */
3573 FOR_EACH_VEC_ELT (gimple, reduction_phis, i, phi)
3575 tree vec_init_def = VEC_index (tree, vec_initial_defs, i);
3576 tree def = VEC_index (tree, vect_defs, i);
3577 for (j = 0; j < ncopies; j++)
3579 /* Set the loop-entry arg of the reduction-phi. */
3580 add_phi_arg (phi, vec_init_def, loop_preheader_edge (loop),
3581 UNKNOWN_LOCATION);
3583 /* Set the loop-latch arg for the reduction-phi. */
3584 if (j > 0)
3585 def = vect_get_vec_def_for_stmt_copy (vect_unknown_def_type, def);
3587 add_phi_arg (phi, def, loop_latch_edge (loop), UNKNOWN_LOCATION);
3589 if (vect_print_dump_info (REPORT_DETAILS))
3591 fprintf (vect_dump, "transform reduction: created def-use"
3592 " cycle: ");
3593 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
3594 fprintf (vect_dump, "\n");
3595 print_gimple_stmt (vect_dump, SSA_NAME_DEF_STMT (def), 0,
3596 TDF_SLIM);
3599 phi = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi));
3603 VEC_free (tree, heap, vec_initial_defs);
3605 /* 2. Create epilog code.
3606 The reduction epilog code operates across the elements of the vector
3607 of partial results computed by the vectorized loop.
3608 The reduction epilog code consists of:
3610 step 1: compute the scalar result in a vector (v_out2)
3611 step 2: extract the scalar result (s_out3) from the vector (v_out2)
3612 step 3: adjust the scalar result (s_out3) if needed.
3614 Step 1 can be accomplished using one of the following three schemes:
3615 (scheme 1) using reduc_code, if available.
3616 (scheme 2) using whole-vector shifts, if available.
3617 (scheme 3) using a scalar loop. In this case steps 1+2 above are
3618 combined.
3620 The overall epilog code looks like this:
3622 s_out0 = phi <s_loop> # original EXIT_PHI
3623 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
3624 v_out2 = reduce <v_out1> # step 1
3625 s_out3 = extract_field <v_out2, 0> # step 2
3626 s_out4 = adjust_result <s_out3> # step 3
3628 (step 3 is optional, and steps 1 and 2 may be combined).
3629 Lastly, the uses of s_out0 are replaced by s_out4. */
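/* To illustrate scheme 2 with made-up lanes: reducing v = [1,2,3,4] by
   PLUS using whole-vector shifts computes
     v' = v + (v shifted by 2 lanes) = [4, 6, _, _]
     v'' = v' + (v' shifted by 1 lane) = [10, _, _, _]
   and step 2 then extracts lane 0, i.e. the scalar sum 10 ("_" marks
   don't-care lanes). */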
3632 /* 2.1 Create new loop-exit-phis to preserve loop-closed form:
3633 v_out1 = phi <VECT_DEF>
3634 Store them in NEW_PHIS. */
3636 exit_bb = single_exit (loop)->dest;
3637 prev_phi_info = NULL;
3638 new_phis = VEC_alloc (gimple, heap, VEC_length (tree, vect_defs));
3639 FOR_EACH_VEC_ELT (tree, vect_defs, i, def)
3641 for (j = 0; j < ncopies; j++)
3643 phi = create_phi_node (SSA_NAME_VAR (def), exit_bb);
3644 set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, loop_vinfo, NULL));
3645 if (j == 0)
3646 VEC_quick_push (gimple, new_phis, phi);
3647 else
3649 def = vect_get_vec_def_for_stmt_copy (dt, def);
3650 STMT_VINFO_RELATED_STMT (prev_phi_info) = phi;
3653 SET_PHI_ARG_DEF (phi, single_exit (loop)->dest_idx, def);
3654 prev_phi_info = vinfo_for_stmt (phi);
3658 /* The epilogue is created for the outer-loop, i.e., for the loop being
3659 vectorized. Create exit phis for the outer loop. */
3660 if (double_reduc)
3662 loop = outer_loop;
3663 exit_bb = single_exit (loop)->dest;
3664 inner_phis = VEC_alloc (gimple, heap, VEC_length (tree, vect_defs));
3665 FOR_EACH_VEC_ELT (gimple, new_phis, i, phi)
3667 gimple outer_phi = create_phi_node (SSA_NAME_VAR (PHI_RESULT (phi)),
3668 exit_bb);
3669 SET_PHI_ARG_DEF (outer_phi, single_exit (loop)->dest_idx,
3670 PHI_RESULT (phi));
3671 set_vinfo_for_stmt (outer_phi, new_stmt_vec_info (outer_phi,
3672 loop_vinfo, NULL));
3673 VEC_quick_push (gimple, inner_phis, phi);
3674 VEC_replace (gimple, new_phis, i, outer_phi);
3675 prev_phi_info = vinfo_for_stmt (outer_phi);
3676 while (STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi)))
3678 phi = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi));
3679 outer_phi = create_phi_node (SSA_NAME_VAR (PHI_RESULT (phi)),
3680 exit_bb);
3681 SET_PHI_ARG_DEF (outer_phi, single_exit (loop)->dest_idx,
3682 PHI_RESULT (phi));
3683 set_vinfo_for_stmt (outer_phi, new_stmt_vec_info (outer_phi,
3684 loop_vinfo, NULL));
3685 STMT_VINFO_RELATED_STMT (prev_phi_info) = outer_phi;
3686 prev_phi_info = vinfo_for_stmt (outer_phi);
3691 exit_gsi = gsi_after_labels (exit_bb);
3693 /* 2.2 Get the relevant tree-code to use in the epilog for schemes 2,3
3694 (i.e. when reduc_code is not available) and in the final adjustment
3695 code (if needed). Also get the original scalar reduction variable as
3696 defined in the loop. In case STMT is a "pattern-stmt" (i.e., it
3697 represents a reduction pattern), the tree-code and scalar-def are
3698 taken from the original stmt that the pattern-stmt (STMT) replaces.
3699 Otherwise (it is a regular reduction) - the tree-code and scalar-def
3700 are taken from STMT. */
3702 orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
3703 if (!orig_stmt)
3705 /* Regular reduction */
3706 orig_stmt = stmt;
3708 else
3710 /* Reduction pattern */
3711 stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt);
3712 gcc_assert (STMT_VINFO_IN_PATTERN_P (stmt_vinfo));
3713 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo) == stmt);
3716 code = gimple_assign_rhs_code (orig_stmt);
3717 /* For MINUS_EXPR the initial vector is [init_val,0,...,0], therefore,
3718 partial results are added and not subtracted. */
3719 if (code == MINUS_EXPR)
3720 code = PLUS_EXPR;
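  /* Illustrative example (hedged): for "s = s - a[i]" with initial value
     INIT, the accumulator starts as {INIT, 0, ..., 0} and lane k ends up
     holding (k == 0 ? INIT : 0) minus the elements that mapped to lane k,
     so *adding* the lanes yields INIT - sum(a), as required.  */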
3722 scalar_dest = gimple_assign_lhs (orig_stmt);
3723 scalar_type = TREE_TYPE (scalar_dest);
3724 scalar_results = VEC_alloc (tree, heap, group_size);
3725 new_scalar_dest = vect_create_destination_var (scalar_dest, NULL);
3726 bitsize = TYPE_SIZE (scalar_type);
3728 /* In case this is a reduction in an inner-loop while vectorizing an outer
3729 loop - we don't need to extract a single scalar result at the end of the
3730 inner-loop (unless it is a double reduction, i.e., the use of the reduction is
3731 outside the outer-loop). The final vector of partial results will be used
3732 in the vectorized outer-loop, or reduced to a scalar result at the end of
3733 the outer-loop. */
3734 if (nested_in_vect_loop && !double_reduc)
3735 goto vect_finalize_reduction;
3737 /* SLP reduction without reduction chain, e.g.,
3738 # a1 = phi <a2, a0>
3739 # b1 = phi <b2, b0>
3740 a2 = operation (a1)
3741 b2 = operation (b1) */
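  /* A source-level shape of such a reduction, for illustration only:

       for (i = 0; i < N; i += 2)
         {
           suma += x[i];       // a1 -> a2
           sumb += x[i + 1];   // b1 -> b2
         }  */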
3742 slp_reduc = (slp_node && !GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)));
3744 /* In case of reduction chain, e.g.,
3745 # a1 = phi <a3, a0>
3746 a2 = operation (a1)
3747 a3 = operation (a2),
3749 we may end up with more than one vector result. Here we reduce them to
3750 one vector. */
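  /* A source-level shape of a reduction chain, for illustration only:

       for (i = 0; i < N; i++)
         s = (s + a[i]) + b[i];   // a1 -> a2 -> a3

     which may leave several vector accumulators to be combined below.  */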
3751 if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
3753 tree first_vect = PHI_RESULT (VEC_index (gimple, new_phis, 0));
3754 tree tmp;
3755 gimple new_vec_stmt = NULL;
3757 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3758 for (k = 1; k < VEC_length (gimple, new_phis); k++)
3760 gimple next_phi = VEC_index (gimple, new_phis, k);
3761 tree second_vect = PHI_RESULT (next_phi);
3763 tmp = build2 (code, vectype, first_vect, second_vect);
3764 new_vec_stmt = gimple_build_assign (vec_dest, tmp);
3765 first_vect = make_ssa_name (vec_dest, new_vec_stmt);
3766 gimple_assign_set_lhs (new_vec_stmt, first_vect);
3767 gsi_insert_before (&exit_gsi, new_vec_stmt, GSI_SAME_STMT);
3770 new_phi_result = first_vect;
3771 if (new_vec_stmt)
3773 VEC_truncate (gimple, new_phis, 0);
3774 VEC_safe_push (gimple, heap, new_phis, new_vec_stmt);
3777 else
3778 new_phi_result = PHI_RESULT (VEC_index (gimple, new_phis, 0));
3780 /* 2.3 Create the reduction code, using one of the three schemes described
3781 above. In SLP we simply need to extract all the elements from the
3782 vector (without reducing them), so we use scalar shifts. */
3783 if (reduc_code != ERROR_MARK && !slp_reduc)
3785 tree tmp;
3787 /*** Case 1: Create:
3788 v_out2 = reduc_expr <v_out1> */
3790 if (vect_print_dump_info (REPORT_DETAILS))
3791 fprintf (vect_dump, "Reduce using direct vector reduction.");
3793 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3794 tmp = build1 (reduc_code, vectype, new_phi_result);
3795 epilog_stmt = gimple_build_assign (vec_dest, tmp);
3796 new_temp = make_ssa_name (vec_dest, epilog_stmt);
3797 gimple_assign_set_lhs (epilog_stmt, new_temp);
3798 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3800 extract_scalar_result = true;
3802 else
3804 enum tree_code shift_code = ERROR_MARK;
3805 bool have_whole_vector_shift = true;
3806 int bit_offset;
3807 int element_bitsize = tree_low_cst (bitsize, 1);
3808 int vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1);
3809 tree vec_temp;
3811 if (optab_handler (vec_shr_optab, mode) != CODE_FOR_nothing)
3812 shift_code = VEC_RSHIFT_EXPR;
3813 else
3814 have_whole_vector_shift = false;
3816 /* Regardless of whether we have a whole vector shift, if we're
3817 emulating the operation via tree-vect-generic, we don't want
3818 to use it. Only the first round of the reduction is likely
3819 to still be profitable via emulation. */
3820 /* ??? It might be better to emit a reduction tree code here, so that
3821 tree-vect-generic can expand the first round via bit tricks. */
3822 if (!VECTOR_MODE_P (mode))
3823 have_whole_vector_shift = false;
3824 else
3826 optab optab = optab_for_tree_code (code, vectype, optab_default);
3827 if (optab_handler (optab, mode) == CODE_FOR_nothing)
3828 have_whole_vector_shift = false;
3831 if (have_whole_vector_shift && !slp_reduc)
3833 /*** Case 2: Create:
3834 for (offset = VS/2; offset >= element_size; offset/=2)
3836 Create: va' = vec_shift <va, offset>
3837 Create: va = vop <va, va'>
3838 } */
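      /* Hedged illustration for a 128-bit V4SI accumulator va holding
         {a0, a1, a2, a3} (shift amounts are in bits):

           va' = vec_shift <va, 64>;  va = va + va';  // {a0+a2, a1+a3, _, _}
           va' = vec_shift <va, 32>;  va = va + va';  // {a0+a1+a2+a3, _, _, _}

         after which the full sum sits in a single element; which element is
         extracted below depends on endianness.  */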
3840 if (vect_print_dump_info (REPORT_DETAILS))
3841 fprintf (vect_dump, "Reduce using vector shifts");
3843 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3844 new_temp = new_phi_result;
3845 for (bit_offset = vec_size_in_bits/2;
3846 bit_offset >= element_bitsize;
3847 bit_offset /= 2)
3849 tree bitpos = size_int (bit_offset);
3851 epilog_stmt = gimple_build_assign_with_ops (shift_code,
3852 vec_dest, new_temp, bitpos);
3853 new_name = make_ssa_name (vec_dest, epilog_stmt);
3854 gimple_assign_set_lhs (epilog_stmt, new_name);
3855 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3857 epilog_stmt = gimple_build_assign_with_ops (code, vec_dest,
3858 new_name, new_temp);
3859 new_temp = make_ssa_name (vec_dest, epilog_stmt);
3860 gimple_assign_set_lhs (epilog_stmt, new_temp);
3861 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3864 extract_scalar_result = true;
3866 else
3868 tree rhs;
3870 /*** Case 3: Create:
3871 s = extract_field <v_out2, 0>
3872 for (offset = element_size;
3873 offset < vector_size;
3874 offset += element_size)
3876 Create: s' = extract_field <v_out2, offset>
3877 Create: s = op <s, s'> // For non SLP cases
3878 } */
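      /* Hedged illustration for a single V4SI vector v: four extractions at
         bit offsets 0, 32, 64 and 96, combined by three scalar ops,

           s = v[0];  s = op (s, v[1]);  s = op (s, v[2]);  s = op (s, v[3]);

         whereas for SLP the four extracted values are kept separate.  */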
3880 if (vect_print_dump_info (REPORT_DETAILS))
3881 fprintf (vect_dump, "Reduce using scalar code. ");
3883 vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1);
3884 FOR_EACH_VEC_ELT (gimple, new_phis, i, new_phi)
3886 if (gimple_code (new_phi) == GIMPLE_PHI)
3887 vec_temp = PHI_RESULT (new_phi);
3888 else
3889 vec_temp = gimple_assign_lhs (new_phi);
3890 rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp, bitsize,
3891 bitsize_zero_node);
3892 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
3893 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
3894 gimple_assign_set_lhs (epilog_stmt, new_temp);
3895 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3897 /* In SLP we don't need to apply reduction operation, so we just
3898 collect s' values in SCALAR_RESULTS. */
3899 if (slp_reduc)
3900 VEC_safe_push (tree, heap, scalar_results, new_temp);
3902 for (bit_offset = element_bitsize;
3903 bit_offset < vec_size_in_bits;
3904 bit_offset += element_bitsize)
3906 tree bitpos = bitsize_int (bit_offset);
3907 tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp,
3908 bitsize, bitpos);
3910 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
3911 new_name = make_ssa_name (new_scalar_dest, epilog_stmt);
3912 gimple_assign_set_lhs (epilog_stmt, new_name);
3913 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3915 if (slp_reduc)
3917 /* In SLP we don't need to apply reduction operation, so
3918 we just collect s' values in SCALAR_RESULTS. */
3919 new_temp = new_name;
3920 VEC_safe_push (tree, heap, scalar_results, new_name);
3922 else
3924 epilog_stmt = gimple_build_assign_with_ops (code,
3925 new_scalar_dest, new_name, new_temp);
3926 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
3927 gimple_assign_set_lhs (epilog_stmt, new_temp);
3928 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3933 /* The only case where we need to reduce scalar results in SLP is
3934 unrolling. If the size of SCALAR_RESULTS is greater than
3935 GROUP_SIZE, we reduce them combining elements modulo
3936 GROUP_SIZE. */
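  /* Hedged numeric example: with GROUP_SIZE == 2 and scalar results
     r0 r1 r2 r3 (unrolling by 2), the loop below folds r2 into r0 and
     r3 into r1, leaving one result per group element.  */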
3937 if (slp_reduc)
3939 tree res, first_res, new_res;
3940 gimple new_stmt;
3942 /* Reduce multiple scalar results in case of SLP unrolling. */
3943 for (j = group_size; VEC_iterate (tree, scalar_results, j, res);
3944 j++)
3946 first_res = VEC_index (tree, scalar_results, j % group_size);
3947 new_stmt = gimple_build_assign_with_ops (code,
3948 new_scalar_dest, first_res, res);
3949 new_res = make_ssa_name (new_scalar_dest, new_stmt);
3950 gimple_assign_set_lhs (new_stmt, new_res);
3951 gsi_insert_before (&exit_gsi, new_stmt, GSI_SAME_STMT);
3952 VEC_replace (tree, scalar_results, j % group_size, new_res);
3955 else
3956 /* Not SLP - we have one scalar to keep in SCALAR_RESULTS. */
3957 VEC_safe_push (tree, heap, scalar_results, new_temp);
3959 extract_scalar_result = false;
3963 /* 2.4 Extract the final scalar result. Create:
3964 s_out3 = extract_field <v_out2, bitpos> */
3966 if (extract_scalar_result)
3968 tree rhs;
3970 if (vect_print_dump_info (REPORT_DETAILS))
3971 fprintf (vect_dump, "extract scalar result");
3973 if (BYTES_BIG_ENDIAN)
3974 bitpos = size_binop (MULT_EXPR,
3975 bitsize_int (TYPE_VECTOR_SUBPARTS (vectype) - 1),
3976 TYPE_SIZE (scalar_type));
3977 else
3978 bitpos = bitsize_zero_node;
3980 rhs = build3 (BIT_FIELD_REF, scalar_type, new_temp, bitsize, bitpos);
3981 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
3982 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
3983 gimple_assign_set_lhs (epilog_stmt, new_temp);
3984 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3985 VEC_safe_push (tree, heap, scalar_results, new_temp);
3988 vect_finalize_reduction:
3990 if (double_reduc)
3991 loop = loop->inner;
3993 /* 2.5 Adjust the final result by the initial value of the reduction
3994 variable. (When such adjustment is not needed, then
3995 'adjustment_def' is zero). For example, if code is PLUS we create:
3996 new_temp = loop_exit_def + adjustment_def */
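  /* Hedged example: if the vector accumulator of a sum was seeded with all
     zeros instead of the initial value, ADJUSTMENT_DEF carries that initial
     value, and the statement built here is effectively

       new_temp = loop_exit_def + INIT;  */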
3998 if (adjustment_def)
4000 gcc_assert (!slp_reduc);
4001 if (nested_in_vect_loop)
4003 new_phi = VEC_index (gimple, new_phis, 0);
4004 gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) == VECTOR_TYPE);
4005 expr = build2 (code, vectype, PHI_RESULT (new_phi), adjustment_def);
4006 new_dest = vect_create_destination_var (scalar_dest, vectype);
4008 else
4010 new_temp = VEC_index (tree, scalar_results, 0);
4011 gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) != VECTOR_TYPE);
4012 expr = build2 (code, scalar_type, new_temp, adjustment_def);
4013 new_dest = vect_create_destination_var (scalar_dest, scalar_type);
4016 epilog_stmt = gimple_build_assign (new_dest, expr);
4017 new_temp = make_ssa_name (new_dest, epilog_stmt);
4018 gimple_assign_set_lhs (epilog_stmt, new_temp);
4019 SSA_NAME_DEF_STMT (new_temp) = epilog_stmt;
4020 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4021 if (nested_in_vect_loop)
4023 set_vinfo_for_stmt (epilog_stmt,
4024 new_stmt_vec_info (epilog_stmt, loop_vinfo,
4025 NULL));
4026 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (epilog_stmt)) =
4027 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (new_phi));
4029 if (!double_reduc)
4030 VEC_quick_push (tree, scalar_results, new_temp);
4031 else
4032 VEC_replace (tree, scalar_results, 0, new_temp);
4034 else
4035 VEC_replace (tree, scalar_results, 0, new_temp);
4037 VEC_replace (gimple, new_phis, 0, epilog_stmt);
4040 /* 2.6 Handle the loop-exit phis. Replace the uses of scalar loop-exit
4041 phis with new adjusted scalar results, i.e., replace use <s_out0>
4042 with use <s_out4>.
4044 Transform:
4045 loop_exit:
4046 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
4047 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
4048 v_out2 = reduce <v_out1>
4049 s_out3 = extract_field <v_out2, 0>
4050 s_out4 = adjust_result <s_out3>
4051 use <s_out0>
4052 use <s_out0>
4054 into:
4056 loop_exit:
4057 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
4058 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
4059 v_out2 = reduce <v_out1>
4060 s_out3 = extract_field <v_out2, 0>
4061 s_out4 = adjust_result <s_out3>
4062 use <s_out4>
4063 use <s_out4> */
4066 /* In an SLP reduction chain we reduce the vector results into one vector if
4067 necessary, hence we set GROUP_SIZE to 1 here. SCALAR_DEST is the LHS of
4068 the last stmt in the reduction chain, since we are looking for the loop
4069 exit phi node. */
4070 if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
4072 scalar_dest = gimple_assign_lhs (VEC_index (gimple,
4073 SLP_TREE_SCALAR_STMTS (slp_node),
4074 group_size - 1));
4075 group_size = 1;
4078 /* In SLP we may have several statements in NEW_PHIS and REDUCTION_PHIS (in
4079 case GROUP_SIZE is greater than the vectorization factor). Therefore, we
4080 need to match SCALAR_RESULTS with corresponding statements. The first
4081 (GROUP_SIZE / number of new vector stmts) scalar results correspond to
4082 the first vector stmt, etc.
4083 (RATIO is equal to (GROUP_SIZE / number of new vector stmts)). */
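  /* Hedged numeric example: GROUP_SIZE == 8 with 2 new vector stmts gives
     RATIO == 4; scalar results 0..3 belong to the first vector stmt and
     scalar results 4..7 to the second.  */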
4084 if (group_size > VEC_length (gimple, new_phis))
4086 ratio = group_size / VEC_length (gimple, new_phis);
4087 gcc_assert (!(group_size % VEC_length (gimple, new_phis)));
4089 else
4090 ratio = 1;
4092 for (k = 0; k < group_size; k++)
4094 if (k % ratio == 0)
4096 epilog_stmt = VEC_index (gimple, new_phis, k / ratio);
4097 reduction_phi = VEC_index (gimple, reduction_phis, k / ratio);
4098 if (double_reduc)
4099 inner_phi = VEC_index (gimple, inner_phis, k / ratio);
4102 if (slp_reduc)
4104 gimple current_stmt = VEC_index (gimple,
4105 SLP_TREE_SCALAR_STMTS (slp_node), k);
4107 orig_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (current_stmt));
4108 /* SLP statements can't participate in patterns. */
4109 gcc_assert (!orig_stmt);
4110 scalar_dest = gimple_assign_lhs (current_stmt);
4113 phis = VEC_alloc (gimple, heap, 3);
4114 /* Find the loop-closed-use at the loop exit of the original scalar
4115 result. (The reduction result is expected to have two immediate uses -
4116 one at the latch block, and one at the loop exit). */
4117 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
4118 if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p))))
4119 VEC_safe_push (gimple, heap, phis, USE_STMT (use_p));
4121 /* We expect to have found an exit_phi because of loop-closed-ssa
4122 form. */
4123 gcc_assert (!VEC_empty (gimple, phis));
4125 FOR_EACH_VEC_ELT (gimple, phis, i, exit_phi)
4127 if (outer_loop)
4129 stmt_vec_info exit_phi_vinfo = vinfo_for_stmt (exit_phi);
4130 gimple vect_phi;
4132 /* FORNOW. Currently not supporting the case that an inner-loop
4133 reduction is not used in the outer-loop (but only outside the
4134 outer-loop), unless it is a double reduction. */
4135 gcc_assert ((STMT_VINFO_RELEVANT_P (exit_phi_vinfo)
4136 && !STMT_VINFO_LIVE_P (exit_phi_vinfo))
4137 || double_reduc);
4139 STMT_VINFO_VEC_STMT (exit_phi_vinfo) = epilog_stmt;
4140 if (!double_reduc
4141 || STMT_VINFO_DEF_TYPE (exit_phi_vinfo)
4142 != vect_double_reduction_def)
4143 continue;
4145 /* Handle double reduction:
4147 stmt1: s1 = phi <s0, s2> - double reduction phi (outer loop)
4148 stmt2: s3 = phi <s1, s4> - (regular) reduc phi (inner loop)
4149 stmt3: s4 = use (s3) - (regular) reduc stmt (inner loop)
4150 stmt4: s2 = phi <s4> - double reduction stmt (outer loop)
4152 At that point the regular reduction (stmt2 and stmt3) is
4153 already vectorized, as well as the exit phi node, stmt4.
4154 Here we vectorize the phi node of double reduction, stmt1, and
4155 update all relevant statements. */
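  /* A source-level shape of a double reduction, for illustration only:

       s = init;
       for (i = 0; i < N; i++)      // outer loop: stmt1, stmt4
         for (j = 0; j < M; j++)    // inner loop: stmt2, stmt3
           s += a[i][j];  */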
4157 /* Go through all the uses of s2 to find double reduction phi
4158 node, i.e., stmt1 above. */
4159 orig_name = PHI_RESULT (exit_phi);
4160 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
4162 stmt_vec_info use_stmt_vinfo = vinfo_for_stmt (use_stmt);
4163 stmt_vec_info new_phi_vinfo;
4164 tree vect_phi_init, preheader_arg, vect_phi_res, init_def;
4165 basic_block bb = gimple_bb (use_stmt);
4166 gimple use;
4168 /* Check that USE_STMT is really a double reduction phi
4169 node. */
4170 if (gimple_code (use_stmt) != GIMPLE_PHI
4171 || gimple_phi_num_args (use_stmt) != 2
4172 || !use_stmt_vinfo
4173 || STMT_VINFO_DEF_TYPE (use_stmt_vinfo)
4174 != vect_double_reduction_def
4175 || bb->loop_father != outer_loop)
4176 continue;
4178 /* Create vector phi node for double reduction:
4179 vs1 = phi <vs0, vs2>
4180 vs1 was created previously in this function by a call to
4181 vect_get_vec_def_for_operand and is stored in
4182 vec_initial_def;
4183 vs2 is defined by INNER_PHI, the vectorized EXIT_PHI;
4184 vs0 is created here. */
4186 /* Create vector phi node. */
4187 vect_phi = create_phi_node (vec_initial_def, bb);
4188 new_phi_vinfo = new_stmt_vec_info (vect_phi,
4189 loop_vec_info_for_loop (outer_loop), NULL);
4190 set_vinfo_for_stmt (vect_phi, new_phi_vinfo);
4192 /* Create vs0 - initial def of the double reduction phi. */
4193 preheader_arg = PHI_ARG_DEF_FROM_EDGE (use_stmt,
4194 loop_preheader_edge (outer_loop));
4195 init_def = get_initial_def_for_reduction (stmt,
4196 preheader_arg, NULL);
4197 vect_phi_init = vect_init_vector (use_stmt, init_def,
4198 vectype, NULL);
4200 /* Update phi node arguments with vs0 and vs2. */
4201 add_phi_arg (vect_phi, vect_phi_init,
4202 loop_preheader_edge (outer_loop),
4203 UNKNOWN_LOCATION);
4204 add_phi_arg (vect_phi, PHI_RESULT (inner_phi),
4205 loop_latch_edge (outer_loop), UNKNOWN_LOCATION);
4206 if (vect_print_dump_info (REPORT_DETAILS))
4208 fprintf (vect_dump, "created double reduction phi "
4209 "node: ");
4210 print_gimple_stmt (vect_dump, vect_phi, 0, TDF_SLIM);
4213 vect_phi_res = PHI_RESULT (vect_phi);
4215 /* Replace the use, i.e., set the correct vs1 in the regular
4216 reduction phi node. FORNOW, NCOPIES is always 1, so the
4217 loop is redundant. */
4218 use = reduction_phi;
4219 for (j = 0; j < ncopies; j++)
4221 edge pr_edge = loop_preheader_edge (loop);
4222 SET_PHI_ARG_DEF (use, pr_edge->dest_idx, vect_phi_res);
4223 use = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (use));
4229 VEC_free (gimple, heap, phis);
4230 if (nested_in_vect_loop)
4232 if (double_reduc)
4233 loop = outer_loop;
4234 else
4235 continue;
4238 phis = VEC_alloc (gimple, heap, 3);
4239 /* Find the loop-closed-use at the loop exit of the original scalar
4240 result. (The reduction result is expected to have two immediate uses,
4241 one at the latch block, and one at the loop exit). For double
4242 reductions we are looking for exit phis of the outer loop. */
4243 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
4245 if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p))))
4246 VEC_safe_push (gimple, heap, phis, USE_STMT (use_p));
4247 else
4249 if (double_reduc && gimple_code (USE_STMT (use_p)) == GIMPLE_PHI)
4251 tree phi_res = PHI_RESULT (USE_STMT (use_p));
4253 FOR_EACH_IMM_USE_FAST (phi_use_p, phi_imm_iter, phi_res)
4255 if (!flow_bb_inside_loop_p (loop,
4256 gimple_bb (USE_STMT (phi_use_p))))
4257 VEC_safe_push (gimple, heap, phis,
4258 USE_STMT (phi_use_p));
4264 FOR_EACH_VEC_ELT (gimple, phis, i, exit_phi)
4266 /* Replace the uses: */
4267 orig_name = PHI_RESULT (exit_phi);
4268 scalar_result = VEC_index (tree, scalar_results, k);
4269 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
4270 FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
4271 SET_USE (use_p, scalar_result);
4274 VEC_free (gimple, heap, phis);
4277 VEC_free (tree, heap, scalar_results);
4278 VEC_free (gimple, heap, new_phis);
4282 /* Function vectorizable_reduction.
4284 Check if STMT performs a reduction operation that can be vectorized.
4285 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4286 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
4287 Return FALSE if not a vectorizable STMT, TRUE otherwise.
4289 This function also handles reduction idioms (patterns) that have been
4290 recognized in advance during vect_pattern_recog. In this case, STMT may be
4291 of this form:
4292 X = pattern_expr (arg0, arg1, ..., X)
4293 and its STMT_VINFO_RELATED_STMT points to the last stmt in the original
4294 sequence that had been detected and replaced by the pattern-stmt (STMT).
4296 In some cases of reduction patterns, the type of the reduction variable X is
4297 different than the type of the other arguments of STMT.
4298 In such cases, the vectype that is used when transforming STMT into a vector
4299 stmt is different than the vectype that is used to determine the
4300 vectorization factor, because it consists of a different number of elements
4301 than the actual number of elements that are being operated upon in parallel.
4303 For example, consider an accumulation of shorts into an int accumulator.
4304 On some targets it's possible to vectorize this pattern operating on 8
4305 shorts at a time (hence, the vectype for purposes of determining the
4306 vectorization factor should be V8HI); on the other hand, the vectype that
4307 is used to create the vector form is actually V4SI (the type of the result).
4309 Upon entry to this function, STMT_VINFO_VECTYPE records the vectype that
4310 indicates the actual level of parallelism (V8HI in the example), so
4311 that the right vectorization factor can be derived. This vectype
4312 corresponds to the type of arguments to the reduction stmt, and should *NOT*
4313 be used to create the vectorized stmt. The right vectype for the vectorized
4314 stmt is obtained from the type of the result X:
4315 get_vectype_for_scalar_type (TREE_TYPE (X))
4317 This means that, contrary to "regular" reductions (or "regular" stmts in
4318 general), the following equation:
4319 STMT_VINFO_VECTYPE == get_vectype_for_scalar_type (TREE_TYPE (X))
4320 does *NOT* necessarily hold for reduction patterns. */
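/* Hedged example of the distinction above: for the pattern stmt

     int_acc = WIDEN_SUM_EXPR <short_x, int_acc>

   STMT_VINFO_VECTYPE is V8HI (eight shorts per vector iteration, which
   fixes the vectorization factor), while the vectorized stmt itself is
   built with get_vectype_for_scalar_type (int), i.e. V4SI.  */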
4322 bool
4323 vectorizable_reduction (gimple stmt, gimple_stmt_iterator *gsi,
4324 gimple *vec_stmt, slp_tree slp_node)
4326 tree vec_dest;
4327 tree scalar_dest;
4328 tree loop_vec_def0 = NULL_TREE, loop_vec_def1 = NULL_TREE;
4329 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4330 tree vectype_out = STMT_VINFO_VECTYPE (stmt_info);
4331 tree vectype_in = NULL_TREE;
4332 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4333 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
4334 enum tree_code code, orig_code, epilog_reduc_code;
4335 enum machine_mode vec_mode;
4336 int op_type;
4337 optab optab, reduc_optab;
4338 tree new_temp = NULL_TREE;
4339 tree def;
4340 gimple def_stmt;
4341 enum vect_def_type dt;
4342 gimple new_phi = NULL;
4343 tree scalar_type;
4344 bool is_simple_use;
4345 gimple orig_stmt;
4346 stmt_vec_info orig_stmt_info;
4347 tree expr = NULL_TREE;
4348 int i;
4349 int ncopies;
4350 int epilog_copies;
4351 stmt_vec_info prev_stmt_info, prev_phi_info;
4352 bool single_defuse_cycle = false;
4353 tree reduc_def = NULL_TREE;
4354 gimple new_stmt = NULL;
4355 int j;
4356 tree ops[3];
4357 bool nested_cycle = false, found_nested_cycle_def = false;
4358 gimple reduc_def_stmt = NULL;
4359 /* The default is that the reduction variable is the last operand in the statement. */
4360 int reduc_index = 2;
4361 bool double_reduc = false, dummy;
4362 basic_block def_bb;
4363 struct loop * def_stmt_loop, *outer_loop = NULL;
4364 tree def_arg;
4365 gimple def_arg_stmt;
4366 VEC (tree, heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL, *vect_defs = NULL;
4367 VEC (gimple, heap) *phis = NULL;
4368 int vec_num;
4369 tree def0, def1, tem, op0, op1 = NULL_TREE;
4371 /* In case of a reduction chain we switch to the first stmt in the chain, but
4372 we don't update STMT_INFO, since only the last stmt is marked as a reduction
4373 and has reduction properties. */
4374 if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
4375 stmt = GROUP_FIRST_ELEMENT (stmt_info);
4377 if (nested_in_vect_loop_p (loop, stmt))
4379 outer_loop = loop;
4380 loop = loop->inner;
4381 nested_cycle = true;
4384 /* 1. Is vectorizable reduction? */
4385 /* Not supportable if the reduction variable is used in the loop, unless
4386 it's a reduction chain. */
4387 if (STMT_VINFO_RELEVANT (stmt_info) > vect_used_in_outer
4388 && !GROUP_FIRST_ELEMENT (stmt_info))
4389 return false;
4391 /* Reductions that are not used even in an enclosing outer-loop
4392 are expected to be "live" (used out of the loop). */
4393 if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_scope
4394 && !STMT_VINFO_LIVE_P (stmt_info))
4395 return false;
4397 /* Make sure it was already recognized as a reduction computation. */
4398 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_reduction_def
4399 && STMT_VINFO_DEF_TYPE (stmt_info) != vect_nested_cycle)
4400 return false;
4402 /* 2. Has this been recognized as a reduction pattern?
4404 Check if STMT represents a pattern that has been recognized
4405 in earlier analysis stages. For stmts that represent a pattern,
4406 the STMT_VINFO_RELATED_STMT field records the last stmt in
4407 the original sequence that constitutes the pattern. */
4409 orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
4410 if (orig_stmt)
4412 orig_stmt_info = vinfo_for_stmt (orig_stmt);
4413 gcc_assert (STMT_VINFO_RELATED_STMT (orig_stmt_info) == stmt);
4414 gcc_assert (STMT_VINFO_IN_PATTERN_P (orig_stmt_info));
4415 gcc_assert (!STMT_VINFO_IN_PATTERN_P (stmt_info));
4418 /* 3. Check the operands of the operation. The first operands are defined
4419 inside the loop body. The last operand is the reduction variable,
4420 which is defined by the loop-header-phi. */
4422 gcc_assert (is_gimple_assign (stmt));
4424 /* Flatten RHS. */
4425 switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
4427 case GIMPLE_SINGLE_RHS:
4428 op_type = TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt));
4429 if (op_type == ternary_op)
4431 tree rhs = gimple_assign_rhs1 (stmt);
4432 ops[0] = TREE_OPERAND (rhs, 0);
4433 ops[1] = TREE_OPERAND (rhs, 1);
4434 ops[2] = TREE_OPERAND (rhs, 2);
4435 code = TREE_CODE (rhs);
4437 else
4438 return false;
4439 break;
4441 case GIMPLE_BINARY_RHS:
4442 code = gimple_assign_rhs_code (stmt);
4443 op_type = TREE_CODE_LENGTH (code);
4444 gcc_assert (op_type == binary_op);
4445 ops[0] = gimple_assign_rhs1 (stmt);
4446 ops[1] = gimple_assign_rhs2 (stmt);
4447 break;
4449 case GIMPLE_TERNARY_RHS:
4450 code = gimple_assign_rhs_code (stmt);
4451 op_type = TREE_CODE_LENGTH (code);
4452 gcc_assert (op_type == ternary_op);
4453 ops[0] = gimple_assign_rhs1 (stmt);
4454 ops[1] = gimple_assign_rhs2 (stmt);
4455 ops[2] = gimple_assign_rhs3 (stmt);
4456 break;
4458 case GIMPLE_UNARY_RHS:
4459 return false;
4461 default:
4462 gcc_unreachable ();
4465 if (code == COND_EXPR && slp_node)
4466 return false;
4468 scalar_dest = gimple_assign_lhs (stmt);
4469 scalar_type = TREE_TYPE (scalar_dest);
4470 if (!POINTER_TYPE_P (scalar_type) && !INTEGRAL_TYPE_P (scalar_type)
4471 && !SCALAR_FLOAT_TYPE_P (scalar_type))
4472 return false;
4474 /* Do not try to vectorize bit-precision reductions. */
4475 if ((TYPE_PRECISION (scalar_type)
4476 != GET_MODE_PRECISION (TYPE_MODE (scalar_type))))
4477 return false;
4479 /* All uses but the last are expected to be defined in the loop.
4480 The last use is the reduction variable. In case of nested cycle this
4481 assumption is not true: we use reduc_index to record the index of the
4482 reduction variable. */
4483 for (i = 0; i < op_type-1; i++)
4485 /* The condition of COND_EXPR is checked in vectorizable_condition(). */
4486 if (i == 0 && code == COND_EXPR)
4487 continue;
4489 is_simple_use = vect_is_simple_use_1 (ops[i], stmt, loop_vinfo, NULL,
4490 &def_stmt, &def, &dt, &tem);
4491 if (!vectype_in)
4492 vectype_in = tem;
4493 gcc_assert (is_simple_use);
4495 if (dt != vect_internal_def
4496 && dt != vect_external_def
4497 && dt != vect_constant_def
4498 && dt != vect_induction_def
4499 && !(dt == vect_nested_cycle && nested_cycle))
4500 return false;
4502 if (dt == vect_nested_cycle)
4504 found_nested_cycle_def = true;
4505 reduc_def_stmt = def_stmt;
4506 reduc_index = i;
4510 is_simple_use = vect_is_simple_use_1 (ops[i], stmt, loop_vinfo, NULL,
4511 &def_stmt, &def, &dt, &tem);
4512 if (!vectype_in)
4513 vectype_in = tem;
4514 gcc_assert (is_simple_use);
4515 gcc_assert (dt == vect_reduction_def
4516 || dt == vect_nested_cycle
4517 || ((dt == vect_internal_def || dt == vect_external_def
4518 || dt == vect_constant_def || dt == vect_induction_def)
4519 && nested_cycle && found_nested_cycle_def));
4520 if (!found_nested_cycle_def)
4521 reduc_def_stmt = def_stmt;
4523 gcc_assert (gimple_code (reduc_def_stmt) == GIMPLE_PHI);
4524 if (orig_stmt)
4525 gcc_assert (orig_stmt == vect_is_simple_reduction (loop_vinfo,
4526 reduc_def_stmt,
4527 !nested_cycle,
4528 &dummy));
4529 else
4531 gimple tmp = vect_is_simple_reduction (loop_vinfo, reduc_def_stmt,
4532 !nested_cycle, &dummy);
4533 /* We changed STMT to be the first stmt in reduction chain, hence we
4534 check that in this case the first element in the chain is STMT. */
4535 gcc_assert (stmt == tmp
4536 || GROUP_FIRST_ELEMENT (vinfo_for_stmt (tmp)) == stmt);
4539 if (STMT_VINFO_LIVE_P (vinfo_for_stmt (reduc_def_stmt)))
4540 return false;
4542 if (slp_node || PURE_SLP_STMT (stmt_info))
4543 ncopies = 1;
4544 else
4545 ncopies = (LOOP_VINFO_VECT_FACTOR (loop_vinfo)
4546 / TYPE_VECTOR_SUBPARTS (vectype_in));
4548 gcc_assert (ncopies >= 1);
4550 vec_mode = TYPE_MODE (vectype_in);
4552 if (code == COND_EXPR)
4554 if (!vectorizable_condition (stmt, gsi, NULL, ops[reduc_index], 0, NULL))
4556 if (vect_print_dump_info (REPORT_DETAILS))
4557 fprintf (vect_dump, "unsupported condition in reduction");
4559 return false;
4562 else
4564 /* 4. Supportable by target? */
4566 /* 4.1. check support for the operation in the loop */
4567 optab = optab_for_tree_code (code, vectype_in, optab_default);
4568 if (!optab)
4570 if (vect_print_dump_info (REPORT_DETAILS))
4571 fprintf (vect_dump, "no optab.");
4573 return false;
4576 if (optab_handler (optab, vec_mode) == CODE_FOR_nothing)
4578 if (vect_print_dump_info (REPORT_DETAILS))
4579 fprintf (vect_dump, "op not supported by target.");
4581 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
4582 || LOOP_VINFO_VECT_FACTOR (loop_vinfo)
4583 < vect_min_worthwhile_factor (code))
4584 return false;
4586 if (vect_print_dump_info (REPORT_DETAILS))
4587 fprintf (vect_dump, "proceeding using word mode.");
4590 /* Worthwhile without SIMD support? */
4591 if (!VECTOR_MODE_P (TYPE_MODE (vectype_in))
4592 && LOOP_VINFO_VECT_FACTOR (loop_vinfo)
4593 < vect_min_worthwhile_factor (code))
4595 if (vect_print_dump_info (REPORT_DETAILS))
4596 fprintf (vect_dump, "not worthwhile without SIMD support.");
4598 return false;
4602 /* 4.2. Check support for the epilog operation.
4604 If STMT represents a reduction pattern, then the type of the
4605 reduction variable may be different than the type of the rest
4606 of the arguments. For example, consider the case of accumulation
4607 of shorts into an int accumulator; the original code:
4608 S1: int_a = (int) short_a;
4609 orig_stmt-> S2: int_acc = plus <int_a ,int_acc>;
4611 was replaced with:
4612 STMT: int_acc = widen_sum <short_a, int_acc>
4614 This means that:
4615 1. The tree-code that is used to create the vector operation in the
4616 epilog code (that reduces the partial results) is not the
4617 tree-code of STMT, but is rather the tree-code of the original
4618 stmt from the pattern that STMT is replacing. I.e., in the example
4619 above we want to use 'widen_sum' in the loop, but 'plus' in the
4620 epilog.
4621 2. The type (mode) we use to check available target support
4622 for the vector operation to be created in the *epilog*, is
4623 determined by the type of the reduction variable (in the example
4624 above we'd check this: optab_handler (plus_optab, vect_int_mode])).
4625 However the type (mode) we use to check available target support
4626 for the vector operation to be created *inside the loop*, is
4627 determined by the type of the other arguments to STMT (in the
4628 example we'd check this: optab_handler (widen_sum_optab,
4629 vect_short_mode)).
4631 This is contrary to "regular" reductions, in which the types of all
4632 the arguments are the same as the type of the reduction variable.
4633 For "regular" reductions we can therefore use the same vector type
4634 (and also the same tree-code) when generating the epilog code and
4635 when generating the code inside the loop. */
4637 if (orig_stmt)
4639 /* This is a reduction pattern: get the vectype from the type of the
4640 reduction variable, and get the tree-code from orig_stmt. */
4641 orig_code = gimple_assign_rhs_code (orig_stmt);
4642 gcc_assert (vectype_out);
4643 vec_mode = TYPE_MODE (vectype_out);
4645 else
4647 /* Regular reduction: the same vectype and tree-code that are used for
4648 the vector code inside the loop can also be used for the epilog code. */
4649 orig_code = code;
4652 if (nested_cycle)
4654 def_bb = gimple_bb (reduc_def_stmt);
4655 def_stmt_loop = def_bb->loop_father;
4656 def_arg = PHI_ARG_DEF_FROM_EDGE (reduc_def_stmt,
4657 loop_preheader_edge (def_stmt_loop));
4658 if (TREE_CODE (def_arg) == SSA_NAME
4659 && (def_arg_stmt = SSA_NAME_DEF_STMT (def_arg))
4660 && gimple_code (def_arg_stmt) == GIMPLE_PHI
4661 && flow_bb_inside_loop_p (outer_loop, gimple_bb (def_arg_stmt))
4662 && vinfo_for_stmt (def_arg_stmt)
4663 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_arg_stmt))
4664 == vect_double_reduction_def)
4665 double_reduc = true;
4668 epilog_reduc_code = ERROR_MARK;
4669 if (reduction_code_for_scalar_code (orig_code, &epilog_reduc_code))
4671 reduc_optab = optab_for_tree_code (epilog_reduc_code, vectype_out,
4672 optab_default);
4673 if (!reduc_optab)
4675 if (vect_print_dump_info (REPORT_DETAILS))
4676 fprintf (vect_dump, "no optab for reduction.");
4678 epilog_reduc_code = ERROR_MARK;
4681 if (reduc_optab
4682 && optab_handler (reduc_optab, vec_mode) == CODE_FOR_nothing)
4684 if (vect_print_dump_info (REPORT_DETAILS))
4685 fprintf (vect_dump, "reduc op not supported by target.");
4687 epilog_reduc_code = ERROR_MARK;
4690 else
4692 if (!nested_cycle || double_reduc)
4694 if (vect_print_dump_info (REPORT_DETAILS))
4695 fprintf (vect_dump, "no reduc code for scalar code.");
4697 return false;
4701 if (double_reduc && ncopies > 1)
4703 if (vect_print_dump_info (REPORT_DETAILS))
4704 fprintf (vect_dump, "multiple types in double reduction");
4706 return false;
4709 /* In case of widening multiplication by a constant, we update the type
4710 of the constant to be the type of the other operand. We check that the
4711 constant fits the type in the pattern recognition pass. */
4712 if (code == DOT_PROD_EXPR
4713 && !types_compatible_p (TREE_TYPE (ops[0]), TREE_TYPE (ops[1])))
4715 if (TREE_CODE (ops[0]) == INTEGER_CST)
4716 ops[0] = fold_convert (TREE_TYPE (ops[1]), ops[0]);
4717 else if (TREE_CODE (ops[1]) == INTEGER_CST)
4718 ops[1] = fold_convert (TREE_TYPE (ops[0]), ops[1]);
4719 else
4721 if (vect_print_dump_info (REPORT_DETAILS))
4722 fprintf (vect_dump, "invalid types in dot-prod");
4724 return false;
4728 if (!vec_stmt) /* transformation not required. */
4730 if (!vect_model_reduction_cost (stmt_info, epilog_reduc_code, ncopies))
4731 return false;
4732 STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
4733 return true;
4736 /** Transform. **/
4738 if (vect_print_dump_info (REPORT_DETAILS))
4739 fprintf (vect_dump, "transform reduction.");
4741 /* FORNOW: Multiple types are not supported for condition. */
4742 if (code == COND_EXPR)
4743 gcc_assert (ncopies == 1);
4745 /* Create the destination vector */
4746 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
4748 /* In case the vectorization factor (VF) is bigger than the number
4749 of elements that we can fit in a vectype (nunits), we have to generate
4750 more than one vector stmt, i.e., we need to "unroll" the
4751 vector stmt by a factor VF/nunits. For more details see documentation
4752 in vectorizable_operation. */
4754 /* If the reduction is used in an outer loop we need to generate
4755 VF intermediate results, like so (e.g. for ncopies=2):
4756 r0 = phi (init, r0)
4757 r1 = phi (init, r1)
4758 r0 = x0 + r0;
4759 r1 = x1 + r1;
4760 (i.e. we generate VF results in 2 registers).
4761 In this case we have a separate def-use cycle for each copy, and therefore
4762 for each copy we get the vector def for the reduction variable from the
4763 respective phi node created for this copy.
4765 Otherwise (the reduction is unused in the loop nest), we can combine
4766 together intermediate results, like so (e.g. for ncopies=2):
4767 r = phi (init, r)
4768 r = x0 + r;
4769 r = x1 + r;
4770 (i.e. we generate VF/2 results in a single register).
4771 In this case for each copy we get the vector def for the reduction variable
4772 from the vectorized reduction operation generated in the previous iteration. */
4775 if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_scope)
4777 single_defuse_cycle = true;
4778 epilog_copies = 1;
4780 else
4781 epilog_copies = ncopies;
4783 prev_stmt_info = NULL;
4784 prev_phi_info = NULL;
4785 if (slp_node)
4787 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
4788 gcc_assert (TYPE_VECTOR_SUBPARTS (vectype_out)
4789 == TYPE_VECTOR_SUBPARTS (vectype_in));
4791 else
4793 vec_num = 1;
4794 vec_oprnds0 = VEC_alloc (tree, heap, 1);
4795 if (op_type == ternary_op)
4796 vec_oprnds1 = VEC_alloc (tree, heap, 1);
4799 phis = VEC_alloc (gimple, heap, vec_num);
4800 vect_defs = VEC_alloc (tree, heap, vec_num);
4801 if (!slp_node)
4802 VEC_quick_push (tree, vect_defs, NULL_TREE);
4804 for (j = 0; j < ncopies; j++)
4806 if (j == 0 || !single_defuse_cycle)
4808 for (i = 0; i < vec_num; i++)
4810 /* Create the reduction-phi that defines the reduction
4811 operand. */
4812 new_phi = create_phi_node (vec_dest, loop->header);
4813 set_vinfo_for_stmt (new_phi,
4814 new_stmt_vec_info (new_phi, loop_vinfo,
4815 NULL));
4816 if (j == 0 || slp_node)
4817 VEC_quick_push (gimple, phis, new_phi);
4821 if (code == COND_EXPR)
4823 gcc_assert (!slp_node);
4824 vectorizable_condition (stmt, gsi, vec_stmt,
4825 PHI_RESULT (VEC_index (gimple, phis, 0)),
4826 reduc_index, NULL);
4827 /* Multiple types are not supported for condition. */
4828 break;
4831 /* Handle uses. */
4832 if (j == 0)
4834 op0 = ops[!reduc_index];
4835 if (op_type == ternary_op)
4837 if (reduc_index == 0)
4838 op1 = ops[2];
4839 else
4840 op1 = ops[1];
4843 if (slp_node)
4844 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
4845 slp_node, -1);
4846 else
4848 loop_vec_def0 = vect_get_vec_def_for_operand (ops[!reduc_index],
4849 stmt, NULL);
4850 VEC_quick_push (tree, vec_oprnds0, loop_vec_def0);
4851 if (op_type == ternary_op)
4853 loop_vec_def1 = vect_get_vec_def_for_operand (op1, stmt,
4854 NULL);
4855 VEC_quick_push (tree, vec_oprnds1, loop_vec_def1);
4859 else
4861 if (!slp_node)
4863 enum vect_def_type dt;
4864 gimple dummy_stmt;
4865 tree dummy;
4867 vect_is_simple_use (ops[!reduc_index], stmt, loop_vinfo, NULL,
4868 &dummy_stmt, &dummy, &dt);
4869 loop_vec_def0 = vect_get_vec_def_for_stmt_copy (dt,
4870 loop_vec_def0);
4871 VEC_replace (tree, vec_oprnds0, 0, loop_vec_def0);
4872 if (op_type == ternary_op)
4874 vect_is_simple_use (op1, stmt, loop_vinfo, NULL, &dummy_stmt,
4875 &dummy, &dt);
4876 loop_vec_def1 = vect_get_vec_def_for_stmt_copy (dt,
4877 loop_vec_def1);
4878 VEC_replace (tree, vec_oprnds1, 0, loop_vec_def1);
4882 if (single_defuse_cycle)
4883 reduc_def = gimple_assign_lhs (new_stmt);
4885 STMT_VINFO_RELATED_STMT (prev_phi_info) = new_phi;
4888 FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, def0)
4890 if (slp_node)
4891 reduc_def = PHI_RESULT (VEC_index (gimple, phis, i));
4892 else
4894 if (!single_defuse_cycle || j == 0)
4895 reduc_def = PHI_RESULT (new_phi);
4898 def1 = ((op_type == ternary_op)
4899 ? VEC_index (tree, vec_oprnds1, i) : NULL);
4900 if (op_type == binary_op)
4902 if (reduc_index == 0)
4903 expr = build2 (code, vectype_out, reduc_def, def0);
4904 else
4905 expr = build2 (code, vectype_out, def0, reduc_def);
4907 else
4909 if (reduc_index == 0)
4910 expr = build3 (code, vectype_out, reduc_def, def0, def1);
4911 else
4913 if (reduc_index == 1)
4914 expr = build3 (code, vectype_out, def0, reduc_def, def1);
4915 else
4916 expr = build3 (code, vectype_out, def0, def1, reduc_def);
4920 new_stmt = gimple_build_assign (vec_dest, expr);
4921 new_temp = make_ssa_name (vec_dest, new_stmt);
4922 gimple_assign_set_lhs (new_stmt, new_temp);
4923 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4925 if (slp_node)
4927 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
4928 VEC_quick_push (tree, vect_defs, new_temp);
4930 else
4931 VEC_replace (tree, vect_defs, 0, new_temp);
4934 if (slp_node)
4935 continue;
4937 if (j == 0)
4938 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4939 else
4940 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4942 prev_stmt_info = vinfo_for_stmt (new_stmt);
4943 prev_phi_info = vinfo_for_stmt (new_phi);
4946 /* Finalize the reduction-phi (set its arguments) and create the
4947 epilog reduction code. */
4948 if ((!single_defuse_cycle || code == COND_EXPR) && !slp_node)
4950 new_temp = gimple_assign_lhs (*vec_stmt);
4951 VEC_replace (tree, vect_defs, 0, new_temp);
4954 vect_create_epilog_for_reduction (vect_defs, stmt, epilog_copies,
4955 epilog_reduc_code, phis, reduc_index,
4956 double_reduc, slp_node);
4958 VEC_free (gimple, heap, phis);
4959 VEC_free (tree, heap, vec_oprnds0);
4960 if (vec_oprnds1)
4961 VEC_free (tree, heap, vec_oprnds1);
4963 return true;
4966 /* Function vect_min_worthwhile_factor.
4968 For a loop where we could vectorize the operation indicated by CODE,
4969 return the minimum vectorization factor that makes it worthwhile
4970 to use generic vectors. */
4971 int
4972 vect_min_worthwhile_factor (enum tree_code code)
4974 switch (code)
4976 case PLUS_EXPR:
4977 case MINUS_EXPR:
4978 case NEGATE_EXPR:
4979 return 4;
4981 case BIT_AND_EXPR:
4982 case BIT_IOR_EXPR:
4983 case BIT_XOR_EXPR:
4984 case BIT_NOT_EXPR:
4985 return 2;
4987 default:
4988 return INT_MAX;
4993 /* Function vectorizable_induction
4995 Check if PHI performs an induction computation that can be vectorized.
4996 If VEC_STMT is also passed, vectorize the induction PHI: create a vectorized
4997 phi to replace it, put it in VEC_STMT, and add it to the same basic block.
4998 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
5000 bool
5001 vectorizable_induction (gimple phi, gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
5002 gimple *vec_stmt)
5004 stmt_vec_info stmt_info = vinfo_for_stmt (phi);
5005 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
5006 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5007 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
5008 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
5009 int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
5010 tree vec_def;
5012 gcc_assert (ncopies >= 1);
5013 /* FORNOW. This restriction should be relaxed. */
5014 if (nested_in_vect_loop_p (loop, phi) && ncopies > 1)
5016 if (vect_print_dump_info (REPORT_DETAILS))
5017 fprintf (vect_dump, "multiple types in nested loop.");
5018 return false;
5021 if (!STMT_VINFO_RELEVANT_P (stmt_info))
5022 return false;
5024 /* FORNOW: SLP not supported. */
5025 if (STMT_SLP_TYPE (stmt_info))
5026 return false;
5028 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def);
5030 if (gimple_code (phi) != GIMPLE_PHI)
5031 return false;
5033 if (!vec_stmt) /* transformation not required. */
5035 STMT_VINFO_TYPE (stmt_info) = induc_vec_info_type;
5036 if (vect_print_dump_info (REPORT_DETAILS))
5037 fprintf (vect_dump, "=== vectorizable_induction ===");
5038 vect_model_induction_cost (stmt_info, ncopies);
5039 return true;
5042 /** Transform. **/
5044 if (vect_print_dump_info (REPORT_DETAILS))
5045 fprintf (vect_dump, "transform induction phi.");
5047 vec_def = get_initial_def_for_induction (phi);
5048 *vec_stmt = SSA_NAME_DEF_STMT (vec_def);
5049 return true;
5052 /* Function vectorizable_live_operation.
5054 STMT computes a value that is used outside the loop. Check if
5055 it can be supported. */
5057 bool
5058 vectorizable_live_operation (gimple stmt,
5059 gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
5060 gimple *vec_stmt ATTRIBUTE_UNUSED)
5062 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5063 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5064 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
5065 int i;
5066 int op_type;
5067 tree op;
5068 tree def;
5069 gimple def_stmt;
5070 enum vect_def_type dt;
5071 enum tree_code code;
5072 enum gimple_rhs_class rhs_class;
5074 gcc_assert (STMT_VINFO_LIVE_P (stmt_info));
5076 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def)
5077 return false;
5079 if (!is_gimple_assign (stmt))
5080 return false;
5082 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
5083 return false;
5085 /* FORNOW. CHECKME. */
5086 if (nested_in_vect_loop_p (loop, stmt))
5087 return false;
5089 code = gimple_assign_rhs_code (stmt);
5090 op_type = TREE_CODE_LENGTH (code);
5091 rhs_class = get_gimple_rhs_class (code);
5092 gcc_assert (rhs_class != GIMPLE_UNARY_RHS || op_type == unary_op);
5093 gcc_assert (rhs_class != GIMPLE_BINARY_RHS || op_type == binary_op);
5095 /* FORNOW: support only if all uses are invariant. This means
5096 that the scalar operations can remain in place, unvectorized.
5097 The original last scalar value that they compute will be used. */
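  /* Hedged example of what this accepts: a loop-invariant computation whose
     last value is used after the loop,

       for (i = 0; i < n; i++)
         t = x + y;   // x and y defined before the loop

     the scalar stmt stays in place, and the use of t after the loop reads
     its final (and only) value.  */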
5099 for (i = 0; i < op_type; i++)
5101 if (rhs_class == GIMPLE_SINGLE_RHS)
5102 op = TREE_OPERAND (gimple_op (stmt, 1), i);
5103 else
5104 op = gimple_op (stmt, i + 1);
5105 if (op
5106 && !vect_is_simple_use (op, stmt, loop_vinfo, NULL, &def_stmt, &def,
5107 &dt))
5109 if (vect_print_dump_info (REPORT_DETAILS))
5110 fprintf (vect_dump, "use not simple.");
5111 return false;
5114 if (dt != vect_external_def && dt != vect_constant_def)
5115 return false;
5118 /* No transformation is required for the cases we currently support. */
5119 return true;
5122 /* Kill any debug uses outside LOOP of SSA names defined in STMT. */
5124 static void
5125 vect_loop_kill_debug_uses (struct loop *loop, gimple stmt)
5127 ssa_op_iter op_iter;
5128 imm_use_iterator imm_iter;
5129 def_operand_p def_p;
5130 gimple ustmt;
5132 FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
5134 FOR_EACH_IMM_USE_STMT (ustmt, imm_iter, DEF_FROM_PTR (def_p))
5136 basic_block bb;
5138 if (!is_gimple_debug (ustmt))
5139 continue;
5141 bb = gimple_bb (ustmt);
5143 if (!flow_bb_inside_loop_p (loop, bb))
5145 if (gimple_debug_bind_p (ustmt))
5147 if (vect_print_dump_info (REPORT_DETAILS))
5148 fprintf (vect_dump, "killing debug use");
5150 gimple_debug_bind_reset_value (ustmt);
5151 update_stmt (ustmt);
5153 else
5154 gcc_unreachable ();
5160 /* Function vect_transform_loop.
5162 The analysis phase has determined that the loop is vectorizable.
5163 Vectorize the loop - create vectorized stmts to replace the scalar
5164 stmts in the loop, and update the loop exit condition. */
5166 void
5167 vect_transform_loop (loop_vec_info loop_vinfo)
5169 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
5170 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
5171 int nbbs = loop->num_nodes;
5172 gimple_stmt_iterator si;
5173 int i;
5174 tree ratio = NULL;
5175 int vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
5176 bool strided_store;
5177 bool slp_scheduled = false;
5178 unsigned int nunits;
5179 tree cond_expr = NULL_TREE;
5180 gimple_seq cond_expr_stmt_list = NULL;
5181 bool do_peeling_for_loop_bound;
5182 gimple stmt, pattern_stmt;
5183 gimple_seq pattern_def_seq = NULL;
5184 gimple_stmt_iterator pattern_def_si = gsi_start (NULL);
5185 bool transform_pattern_stmt = false;
5187 if (vect_print_dump_info (REPORT_DETAILS))
5188 fprintf (vect_dump, "=== vec_transform_loop ===");
5190 /* Peel the loop if there are data refs with unknown alignment.
5191 Peeling can force the alignment of only one such data ref. */
5193 if (LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo))
5194 vect_do_peeling_for_alignment (loop_vinfo);
5196 do_peeling_for_loop_bound
5197 = (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
5198 || (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
5199 && LOOP_VINFO_INT_NITERS (loop_vinfo) % vectorization_factor != 0)
5200 || LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo));
5202 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
5203 || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
5204 vect_loop_versioning (loop_vinfo,
5205 !do_peeling_for_loop_bound,
5206 &cond_expr, &cond_expr_stmt_list);
5208 /* If the loop has a symbolic number of iterations 'n' (i.e. it's not a
5209 compile-time constant), or it is a constant that is not divisible by the
5210 vectorization factor, then an epilog loop needs to be created.
5211 We therefore duplicate the loop: the original loop will be vectorized,
5212 and will compute the first (n/VF) iterations. The second copy of the loop
5213 will remain scalar and will compute the remaining (n%VF) iterations.
5214 (VF is the vectorization factor). */
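  /* Hedged numeric example: for n == 103 and VF == 4, the vectorized loop
     runs 103 / 4 == 25 iterations and the scalar epilog loop runs the
     remaining 103 % 4 == 3 iterations.  */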
5216 if (do_peeling_for_loop_bound)
5217 vect_do_peeling_for_loop_bound (loop_vinfo, &ratio,
5218 cond_expr, cond_expr_stmt_list);
5219 else
5220 ratio = build_int_cst (TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo)),
5221 LOOP_VINFO_INT_NITERS (loop_vinfo) / vectorization_factor);
5223 /* 1) Make sure the loop header has exactly two entries
5224 2) Make sure we have a preheader basic block. */
5226 gcc_assert (EDGE_COUNT (loop->header->preds) == 2);
5228 split_edge (loop_preheader_edge (loop));
5230 /* FORNOW: the vectorizer supports only loops whose body consists
5231 of one basic block (header + empty latch). When the vectorizer
5232 supports more involved loop forms, the order in which the BBs are
5233 traversed will need to be reconsidered. */
5235 for (i = 0; i < nbbs; i++)
5237 basic_block bb = bbs[i];
5238 stmt_vec_info stmt_info;
5239 gimple phi;
5241 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
5243 phi = gsi_stmt (si);
5244 if (vect_print_dump_info (REPORT_DETAILS))
5246 fprintf (vect_dump, "------>vectorizing phi: ");
5247 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
5249 stmt_info = vinfo_for_stmt (phi);
5250 if (!stmt_info)
5251 continue;
5253 if (MAY_HAVE_DEBUG_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
5254 vect_loop_kill_debug_uses (loop, phi);
5256 if (!STMT_VINFO_RELEVANT_P (stmt_info)
5257 && !STMT_VINFO_LIVE_P (stmt_info))
5258 continue;
5260 if ((TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info))
5261 != (unsigned HOST_WIDE_INT) vectorization_factor)
5262 && vect_print_dump_info (REPORT_DETAILS))
5263 fprintf (vect_dump, "multiple-types.");
5265 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def)
5267 if (vect_print_dump_info (REPORT_DETAILS))
5268 fprintf (vect_dump, "transform phi.");
5269 vect_transform_stmt (phi, NULL, NULL, NULL, NULL);
5273 pattern_stmt = NULL;
5274 for (si = gsi_start_bb (bb); !gsi_end_p (si) || transform_pattern_stmt;)
5276 bool is_store;
5278 if (transform_pattern_stmt)
5279 stmt = pattern_stmt;
5280 else
5281 stmt = gsi_stmt (si);
5283 if (vect_print_dump_info (REPORT_DETAILS))
5285 fprintf (vect_dump, "------>vectorizing statement: ");
5286 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
5289 stmt_info = vinfo_for_stmt (stmt);
5291 /* vector stmts created in the outer-loop during vectorization of
5292 stmts in an inner-loop may not have a stmt_info, and do not
5293 need to be vectorized. */
5294 if (!stmt_info)
5296 gsi_next (&si);
5297 continue;
5300 if (MAY_HAVE_DEBUG_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
5301 vect_loop_kill_debug_uses (loop, stmt);
5303 if (!STMT_VINFO_RELEVANT_P (stmt_info)
5304 && !STMT_VINFO_LIVE_P (stmt_info))
5306 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
5307 && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info))
5308 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
5309 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
5311 stmt = pattern_stmt;
5312 stmt_info = vinfo_for_stmt (stmt);
5314 else
5316 gsi_next (&si);
5317 continue;
5320 else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
5321 && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info))
5322 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
5323 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
5324 transform_pattern_stmt = true;
5326 /* If the pattern statement has def stmts, vectorize them too. */
5327 if (is_pattern_stmt_p (stmt_info))
5329 if (pattern_def_seq == NULL)
5331 pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
5332 pattern_def_si = gsi_start (pattern_def_seq);
5334 else if (!gsi_end_p (pattern_def_si))
5335 gsi_next (&pattern_def_si);
5336 if (pattern_def_seq != NULL)
5338 gimple pattern_def_stmt = NULL;
5339 stmt_vec_info pattern_def_stmt_info = NULL;
5341 while (!gsi_end_p (pattern_def_si))
5343 pattern_def_stmt = gsi_stmt (pattern_def_si);
5344 pattern_def_stmt_info
5345 = vinfo_for_stmt (pattern_def_stmt);
5346 if (STMT_VINFO_RELEVANT_P (pattern_def_stmt_info)
5347 || STMT_VINFO_LIVE_P (pattern_def_stmt_info))
5348 break;
5349 gsi_next (&pattern_def_si);
5352 if (!gsi_end_p (pattern_def_si))
5354 if (vect_print_dump_info (REPORT_DETAILS))
5356 fprintf (vect_dump, "==> vectorizing pattern def"
5357 " stmt: ");
5358 print_gimple_stmt (vect_dump, pattern_def_stmt, 0,
5359 TDF_SLIM);
5362 stmt = pattern_def_stmt;
5363 stmt_info = pattern_def_stmt_info;
5365 else
5367 pattern_def_si = gsi_start (NULL);
5368 transform_pattern_stmt = false;
5371 else
5372 transform_pattern_stmt = false;
5375 gcc_assert (STMT_VINFO_VECTYPE (stmt_info));
5376 nunits = (unsigned int) TYPE_VECTOR_SUBPARTS (
5377 STMT_VINFO_VECTYPE (stmt_info));
5378 if (!STMT_SLP_TYPE (stmt_info)
5379 && nunits != (unsigned int) vectorization_factor
5380 && vect_print_dump_info (REPORT_DETAILS))
5381 /* For SLP, VF is set according to the unrolling factor rather than
5382 the vector size, hence this print is not valid for SLP. */
5383 fprintf (vect_dump, "multiple-types.");
5385 /* SLP. Schedule all the SLP instances when the first SLP stmt is
5386 reached. */
5387 if (STMT_SLP_TYPE (stmt_info))
5389 if (!slp_scheduled)
5391 slp_scheduled = true;
5393 if (vect_print_dump_info (REPORT_DETAILS))
5394 fprintf (vect_dump, "=== scheduling SLP instances ===");
5396 vect_schedule_slp (loop_vinfo, NULL);
5399 /* Hybrid SLP stmts must be vectorized in addition to SLP. */
5400 if (!vinfo_for_stmt (stmt) || PURE_SLP_STMT (stmt_info))
5402 if (!transform_pattern_stmt && gsi_end_p (pattern_def_si))
5404 pattern_def_seq = NULL;
5405 gsi_next (&si);
5407 continue;
5411 /* -------- vectorize statement ------------ */
5412 if (vect_print_dump_info (REPORT_DETAILS))
5413 fprintf (vect_dump, "transform statement.");
5415 strided_store = false;
5416 is_store = vect_transform_stmt (stmt, &si, &strided_store, NULL, NULL);
5417 if (is_store)
5419 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
5421 /* Interleaving. If IS_STORE is TRUE, the vectorization of the
5422 interleaving chain was completed - free all the stores in
5423 the chain. */
5424 gsi_next (&si);
5425 vect_remove_stores (GROUP_FIRST_ELEMENT (stmt_info));
5426 continue;
5428 else
5430 /* Free the attached stmt_vec_info and remove the stmt. */
5431 free_stmt_vec_info (gsi_stmt (si));
5432 gsi_remove (&si, true);
5433 continue;
5437 if (!transform_pattern_stmt && gsi_end_p (pattern_def_si))
5439 pattern_def_seq = NULL;
5440 gsi_next (&si);
5442 } /* stmts in BB */
5443 } /* BBs in loop */
5445 slpeel_make_loop_iterate_ntimes (loop, ratio);
5447 /* The memory tags and pointers in vectorized statements need to
5448 have their SSA forms updated. FIXME, why can't this be delayed
5449 until all the loops have been transformed? */
5450 update_ssa (TODO_update_ssa);
5452 if (vect_print_dump_info (REPORT_VECTORIZED_LOCATIONS))
5453 fprintf (vect_dump, "LOOP VECTORIZED.");
5454 if (loop->inner && vect_print_dump_info (REPORT_VECTORIZED_LOCATIONS))
5455 fprintf (vect_dump, "OUTER LOOP VECTORIZED.");