/* Loop Vectorization
   Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com> and
   Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "ggc.h"
#include "tree.h"
#include "basic-block.h"
#include "diagnostic.h"
#include "tree-pretty-print.h"
#include "gimple-pretty-print.h"
#include "tree-flow.h"
#include "tree-dump.h"
#include "cfgloop.h"
#include "cfglayout.h"
#include "expr.h"
#include "recog.h"
#include "optabs.h"
#include "params.h"
#include "toplev.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
#include "tree-vectorizer.h"
/* Loop Vectorization Pass.

   This pass tries to vectorize loops.

   For example, the vectorizer transforms the following simple loop:

        short a[N]; short b[N]; short c[N]; int i;

        for (i=0; i<N; i++){
          a[i] = b[i] + c[i];
        }

   as if it were manually vectorized by rewriting the source code into:

        typedef int __attribute__((mode(V8HI))) v8hi;
        short a[N]; short b[N]; short c[N]; int i;
        v8hi *pa = (v8hi*)a, *pb = (v8hi*)b, *pc = (v8hi*)c;
        v8hi va, vb, vc;

        for (i=0; i<N/8; i++){
          vb = pb[i];
          vc = pc[i];
          va = vb + vc;
          pa[i] = va;
        }

   The main entry to this pass is vectorize_loops(), in which
   the vectorizer applies a set of analyses on a given set of loops,
   followed by the actual vectorization transformation for the loops that
   had successfully passed the analysis phase.
   Throughout this pass we make a distinction between two types of
   data: scalars (which are represented by SSA_NAMES), and memory references
   ("data-refs").  These two types of data require different handling both
   during analysis and transformation.  The types of data-refs that the
   vectorizer currently supports are ARRAY_REFS whose base is an array DECL
   (not a pointer), and INDIRECT_REFS through pointers; both array and pointer
   accesses are required to have a simple (consecutive) access pattern.

   Analysis phase:
   ===============
   The driver for the analysis phase is vect_analyze_loop().
   It applies a set of analyses, some of which rely on the scalar evolution
   analyzer (scev) developed by Sebastian Pop.

   During the analysis phase the vectorizer records some information
   per stmt in a "stmt_vec_info" struct which is attached to each stmt in the
   loop, as well as general information about the loop as a whole, which is
   recorded in a "loop_vec_info" struct attached to each loop.

   Transformation phase:
   =====================
   The loop transformation phase scans all the stmts in the loop, and
   creates a vector stmt (or a sequence of stmts) for each scalar stmt S in
   the loop that needs to be vectorized.  It inserts the vector code sequence
   just before the scalar stmt S, and records a pointer to the vector code
   in STMT_VINFO_VEC_STMT (stmt_info) (stmt_info is the stmt_vec_info struct
   attached to S).  This pointer will be used for the vectorization of
   following stmts which use the def of stmt S.  Stmt S is removed if it
   writes to memory; otherwise, we rely on dead code elimination for
   removing it.

   For example, say stmt S1 was vectorized into stmt VS1:

   VS1: vb = px[i];
   S1:  b = x[i];    STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
   S2:  a = b;

   To vectorize stmt S2, the vectorizer first finds the stmt that defines
   the operand 'b' (S1), and gets the relevant vector def 'vb' from the
   vector stmt VS1 pointed to by STMT_VINFO_VEC_STMT (stmt_info (S1)).  The
   resulting sequence would be:

   VS1: vb = px[i];
   S1:  b = x[i];    STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
   VS2: va = vb;
   S2:  a = b;       STMT_VINFO_VEC_STMT (stmt_info (S2)) = VS2

   Operands that are not SSA_NAMEs are data-refs that appear in
   load/store operations (like 'x[i]' in S1), and are handled differently.

   Target modeling:
   =================
   Currently the only target-specific information that is used is the
   size of the vector (in bytes) - "UNITS_PER_SIMD_WORD".  Targets that can
   support different sizes of vectors, for now will need to specify one value
   for "UNITS_PER_SIMD_WORD".  More flexibility will be added in the future.

   Since we only vectorize operations whose vector form can be
   expressed using existing tree codes, to verify that an operation is
   supported, the vectorizer checks the relevant optab at the relevant
   machine_mode (e.g., optab_handler (add_optab, V8HImode)->insn_code).  If
   the value found is CODE_FOR_nothing, then there's no target support, and
   we can't vectorize the stmt.

   For additional information on this project see:
   http://gcc.gnu.org/projects/tree-ssa/vectorization.html
*/
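
/* Illustrative sketch (kept disabled, not used by the pass): how the target
   support check described above can be spelled out.  This assumes the
   optab_handler API of this GCC version; the helper name is hypothetical.  */
#if 0
static bool
example_target_supports_v8hi_add (void)
{
  /* Look up the insn implementing vector addition on V8HImode;
     CODE_FOR_nothing means there is no target support.  */
  return optab_handler (add_optab, V8HImode)->insn_code != CODE_FOR_nothing;
}
#endif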
/* Function vect_determine_vectorization_factor

   Determine the vectorization factor (VF).  VF is the number of data elements
   that are operated upon in parallel in a single iteration of the vectorized
   loop.  For example, when vectorizing a loop that operates on 4byte elements,
   on a target with vector size (VS) 16byte, the VF is set to 4, since 4
   elements can fit in a single vector register.

   We currently support vectorization of loops in which all types operated upon
   are of the same size.  Therefore this function currently sets VF according to
   the size of the types operated upon, and fails if there are multiple sizes
   in the loop.

   VF is also the factor by which the loop iterations are strip-mined, e.g.:
   original loop:
        for (i=0; i<N; i++){
          a[i] = b[i] + c[i];
        }

   vectorized loop:
        for (i=0; i<N; i+=VF){
          a[i:VF] = b[i:VF] + c[i:VF];
        }
*/
static bool
vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  unsigned int vectorization_factor = 0;
  tree scalar_type;
  gimple phi;
  tree vectype;
  unsigned int nunits;
  stmt_vec_info stmt_info;
  int i;
  HOST_WIDE_INT dummy;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_determine_vectorization_factor ===");

  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];

      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        {
          phi = gsi_stmt (si);
          stmt_info = vinfo_for_stmt (phi);
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "==> examining phi: ");
              print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
            }

          gcc_assert (stmt_info);

          if (STMT_VINFO_RELEVANT_P (stmt_info))
            {
              gcc_assert (!STMT_VINFO_VECTYPE (stmt_info));
              scalar_type = TREE_TYPE (PHI_RESULT (phi));

              if (vect_print_dump_info (REPORT_DETAILS))
                {
                  fprintf (vect_dump, "get vectype for scalar type: ");
                  print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
                }

              vectype = get_vectype_for_scalar_type (scalar_type);
              if (!vectype)
                {
                  if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                    {
                      fprintf (vect_dump,
                               "not vectorized: unsupported data-type ");
                      print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
                    }
                  return false;
                }
              STMT_VINFO_VECTYPE (stmt_info) = vectype;

              if (vect_print_dump_info (REPORT_DETAILS))
                {
                  fprintf (vect_dump, "vectype: ");
                  print_generic_expr (vect_dump, vectype, TDF_SLIM);
                }

              nunits = TYPE_VECTOR_SUBPARTS (vectype);
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "nunits = %d", nunits);

              if (!vectorization_factor
                  || (nunits > vectorization_factor))
                vectorization_factor = nunits;
            }
        }

      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
        {
          tree vf_vectype;
          gimple stmt = gsi_stmt (si);
          stmt_info = vinfo_for_stmt (stmt);

          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "==> examining statement: ");
              print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
            }

          gcc_assert (stmt_info);

          /* Skip stmts which do not need to be vectorized.  */
          if (!STMT_VINFO_RELEVANT_P (stmt_info)
              && !STMT_VINFO_LIVE_P (stmt_info))
            {
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "skip.");
              continue;
            }

          if (gimple_get_lhs (stmt) == NULL_TREE)
            {
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                {
                  fprintf (vect_dump, "not vectorized: irregular stmt.");
                  print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
                }
              return false;
            }

          if (VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))))
            {
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                {
                  fprintf (vect_dump, "not vectorized: vector stmt in loop:");
                  print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
                }
              return false;
            }

          if (STMT_VINFO_VECTYPE (stmt_info))
            {
              /* The only case when a vectype had been already set is for stmts
                 that contain a dataref, or for "pattern-stmts" (stmts generated
                 by the vectorizer to represent/replace a certain idiom).  */
              gcc_assert (STMT_VINFO_DATA_REF (stmt_info)
                          || is_pattern_stmt_p (stmt_info));
              vectype = STMT_VINFO_VECTYPE (stmt_info);
            }
          else
            {
              gcc_assert (!STMT_VINFO_DATA_REF (stmt_info)
                          && !is_pattern_stmt_p (stmt_info));

              scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
              if (vect_print_dump_info (REPORT_DETAILS))
                {
                  fprintf (vect_dump, "get vectype for scalar type: ");
                  print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
                }
              vectype = get_vectype_for_scalar_type (scalar_type);
              if (!vectype)
                {
                  if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                    {
                      fprintf (vect_dump,
                               "not vectorized: unsupported data-type ");
                      print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
                    }
                  return false;
                }
              STMT_VINFO_VECTYPE (stmt_info) = vectype;
            }

          /* The vectorization factor is according to the smallest
             scalar type (or the largest vector size, but we only
             support one vector size per loop).  */
          scalar_type = vect_get_smallest_scalar_type (stmt, &dummy,
                                                       &dummy);
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "get vectype for scalar type: ");
              print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
            }
          vf_vectype = get_vectype_for_scalar_type (scalar_type);
          if (!vf_vectype)
            {
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                {
                  fprintf (vect_dump,
                           "not vectorized: unsupported data-type ");
                  print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
                }
              return false;
            }

          if ((GET_MODE_SIZE (TYPE_MODE (vectype))
               != GET_MODE_SIZE (TYPE_MODE (vf_vectype))))
            {
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                {
                  fprintf (vect_dump,
                           "not vectorized: different sized vector "
                           "types in statement, ");
                  print_generic_expr (vect_dump, vectype, TDF_SLIM);
                  fprintf (vect_dump, " and ");
                  print_generic_expr (vect_dump, vf_vectype, TDF_SLIM);
                }
              return false;
            }

          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "vectype: ");
              print_generic_expr (vect_dump, vf_vectype, TDF_SLIM);
            }

          nunits = TYPE_VECTOR_SUBPARTS (vf_vectype);
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "nunits = %d", nunits);

          if (!vectorization_factor
              || (nunits > vectorization_factor))
            vectorization_factor = nunits;
        }
    }

  /* TODO: Analyze cost.  Decide if worth while to vectorize.  */
  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "vectorization factor = %d", vectorization_factor);
  if (vectorization_factor <= 1)
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: unsupported data-type");
      return false;
    }
  LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;

  return true;
}

/* Function vect_is_simple_iv_evolution.

   FORNOW: A simple evolution of an induction variable in the loop is
   considered a polynomial evolution with constant step.  */
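
/* For instance, for

       for (i = 3; i < N; i += 2)

   the access function of 'i' is the polynomial chrec {3, +, 2}_1 (assuming
   loop number 1): initial value 3, constant step 2, which this predicate
   accepts.  A step that itself evolves in the loop (a nested chrec) is
   rejected below.  */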
static bool
vect_is_simple_iv_evolution (unsigned loop_nb, tree access_fn, tree *init,
                             tree *step)
{
  tree init_expr;
  tree step_expr;
  tree evolution_part = evolution_part_in_loop_num (access_fn, loop_nb);

  /* When there is no evolution in this loop, the evolution function
     is not "simple".  */
  if (evolution_part == NULL_TREE)
    return false;

  /* When the evolution is a polynomial of degree >= 2
     the evolution function is not "simple".  */
  if (tree_is_chrec (evolution_part))
    return false;

  step_expr = evolution_part;
  init_expr = unshare_expr (initial_condition_in_loop_num (access_fn, loop_nb));

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "step: ");
      print_generic_expr (vect_dump, step_expr, TDF_SLIM);
      fprintf (vect_dump, ", init: ");
      print_generic_expr (vect_dump, init_expr, TDF_SLIM);
    }

  *init = init_expr;
  *step = step_expr;

  if (TREE_CODE (step_expr) != INTEGER_CST)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "step unknown.");
      return false;
    }

  return true;
}

/* Function vect_analyze_scalar_cycles_1.

   Examine the cross iteration def-use cycles of scalar variables
   in LOOP.  LOOP_VINFO represents the loop that is now being
   considered for vectorization (can be LOOP, or an outer-loop
   enclosing LOOP).  */
static void
vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
{
  basic_block bb = loop->header;
  tree dummy;
  VEC(gimple,heap) *worklist = VEC_alloc (gimple, heap, 64);
  gimple_stmt_iterator gsi;
  bool double_reduc;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_analyze_scalar_cycles ===");

  /* First - identify all inductions.  Reduction detection assumes that all the
     inductions have been identified, therefore, this order must not be
     changed.  */
  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);
      tree access_fn = NULL;
      tree def = PHI_RESULT (phi);
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);

      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "Analyze phi: ");
          print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
        }

      /* Skip virtual phi's.  The data dependences that are associated with
         virtual defs/uses (i.e., memory accesses) are analyzed elsewhere.  */
      if (!is_gimple_reg (SSA_NAME_VAR (def)))
        continue;

      STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_unknown_def_type;

      /* Analyze the evolution function.  */
      access_fn = analyze_scalar_evolution (loop, def);
      if (access_fn && vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "Access function of PHI: ");
          print_generic_expr (vect_dump, access_fn, TDF_SLIM);
        }

      if (!access_fn
          || !vect_is_simple_iv_evolution (loop->num, access_fn,
                                           &dummy, &dummy))
        {
          VEC_safe_push (gimple, heap, worklist, phi);
          continue;
        }

      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "Detected induction.");
      STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_induction_def;
    }

  /* Second - identify all reductions and nested cycles.  */
  while (VEC_length (gimple, worklist) > 0)
    {
      gimple phi = VEC_pop (gimple, worklist);
      tree def = PHI_RESULT (phi);
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);
      gimple reduc_stmt;
      bool nested_cycle;

      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "Analyze phi: ");
          print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
        }

      gcc_assert (is_gimple_reg (SSA_NAME_VAR (def)));
      gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_unknown_def_type);

      nested_cycle = (loop != LOOP_VINFO_LOOP (loop_vinfo));
      reduc_stmt = vect_force_simple_reduction (loop_vinfo, phi, !nested_cycle,
                                                &double_reduc);
      if (reduc_stmt)
        {
          if (double_reduc)
            {
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "Detected double reduction.");

              STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_double_reduction_def;
              STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
                                                    vect_double_reduction_def;
            }
          else
            {
              if (nested_cycle)
                {
                  if (vect_print_dump_info (REPORT_DETAILS))
                    fprintf (vect_dump, "Detected vectorizable nested cycle.");

                  STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_nested_cycle;
                  STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
                                                             vect_nested_cycle;
                }
              else
                {
                  if (vect_print_dump_info (REPORT_DETAILS))
                    fprintf (vect_dump, "Detected reduction.");

                  STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_reduction_def;
                  STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
                                                           vect_reduction_def;
                  /* Store the reduction cycles for possible vectorization in
                     loop-aware SLP.  */
                  VEC_safe_push (gimple, heap,
                                 LOOP_VINFO_REDUCTIONS (loop_vinfo),
                                 reduc_stmt);
                }
            }
        }
      else
        if (vect_print_dump_info (REPORT_DETAILS))
          fprintf (vect_dump, "Unknown def-use cycle pattern.");
    }

  VEC_free (gimple, heap, worklist);
}

/* Function vect_analyze_scalar_cycles.

   Examine the cross iteration def-use cycles of scalar variables, by
   analyzing the loop-header PHIs of scalar variables; Classify each
   cycle as one of the following: invariant, induction, reduction, unknown.
   We do that for the loop represented by LOOP_VINFO, and also for its
   inner-loop, if it exists.
   Examples for scalar cycles:

   Example1: reduction:

              loop1:
              for (i=0; i<N; i++)
                 sum += a[i];

   Example2: induction:

              loop2:
              for (i=0; i<N; i++)
                 a[i] = i;  */
static void
vect_analyze_scalar_cycles (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);

  vect_analyze_scalar_cycles_1 (loop_vinfo, loop);

  /* When vectorizing an outer-loop, the inner-loop is executed sequentially.
     Reductions in such inner-loop therefore have different properties than
     the reductions in the nest that gets vectorized:
     1. When vectorized, they are executed in the same order as in the original
        scalar loop, so we can't change the order of computation when
        vectorizing them.
     2. FIXME: Inner-loop reductions can be used in the inner-loop, so the
        current checks are too strict.  */

  if (loop->inner)
    vect_analyze_scalar_cycles_1 (loop_vinfo, loop->inner);
}

/* Function vect_get_loop_niters.

   Determine how many iterations the loop is executed.
   If an expression that represents the number of iterations
   can be constructed, place it in NUMBER_OF_ITERATIONS.
   Return the loop exit condition.  */
static gimple
vect_get_loop_niters (struct loop *loop, tree *number_of_iterations)
{
  tree niters;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== get_loop_niters ===");

  niters = number_of_exit_cond_executions (loop);

  if (niters != NULL_TREE
      && niters != chrec_dont_know)
    {
      *number_of_iterations = niters;

      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "==> get_loop_niters:");
          print_generic_expr (vect_dump, *number_of_iterations, TDF_SLIM);
        }
    }

  return get_loop_exit_condition (loop);
}

/* Function bb_in_loop_p

   Used as predicate for dfs order traversal of the loop bbs.  */
static bool
bb_in_loop_p (const_basic_block bb, const void *data)
{
  const struct loop *const loop = (const struct loop *)data;
  if (flow_bb_inside_loop_p (loop, bb))
    return true;
  return false;
}

/* Function new_loop_vec_info.

   Create and initialize a new loop_vec_info struct for LOOP, as well as
   stmt_vec_info structs for all the stmts in LOOP.  */
static loop_vec_info
new_loop_vec_info (struct loop *loop)
{
  loop_vec_info res;
  basic_block *bbs;
  gimple_stmt_iterator si;
  unsigned int i, nbbs;

  res = (loop_vec_info) xcalloc (1, sizeof (struct _loop_vec_info));
  LOOP_VINFO_LOOP (res) = loop;

  bbs = get_loop_body (loop);

  /* Create/Update stmt_info for all stmts in the loop.  */
  for (i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = bbs[i];

      /* BBs in a nested inner-loop will have been already processed (because
         we will have called vect_analyze_loop_form for any nested inner-loop).
         Therefore, for stmts in an inner-loop we just want to update the
         STMT_VINFO_LOOP_VINFO field of their stmt_info to point to the new
         loop_info of the outer-loop we are currently considering to vectorize
         (instead of the loop_info of the inner-loop).
         For stmts in other BBs we need to create a stmt_info from scratch.  */
      if (bb->loop_father != loop)
        {
          /* Inner-loop bb.  */
          gcc_assert (loop->inner && bb->loop_father == loop->inner);
          for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple phi = gsi_stmt (si);
              stmt_vec_info stmt_info = vinfo_for_stmt (phi);
              loop_vec_info inner_loop_vinfo =
                STMT_VINFO_LOOP_VINFO (stmt_info);
              gcc_assert (loop->inner == LOOP_VINFO_LOOP (inner_loop_vinfo));
              STMT_VINFO_LOOP_VINFO (stmt_info) = res;
            }
          for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple stmt = gsi_stmt (si);
              stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
              loop_vec_info inner_loop_vinfo =
                STMT_VINFO_LOOP_VINFO (stmt_info);
              gcc_assert (loop->inner == LOOP_VINFO_LOOP (inner_loop_vinfo));
              STMT_VINFO_LOOP_VINFO (stmt_info) = res;
            }
        }
      else
        {
          /* bb in current nest.  */
          for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple phi = gsi_stmt (si);
              gimple_set_uid (phi, 0);
              set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, res, NULL));
            }

          for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple stmt = gsi_stmt (si);
              gimple_set_uid (stmt, 0);
              set_vinfo_for_stmt (stmt, new_stmt_vec_info (stmt, res, NULL));
            }
        }
    }

  /* CHECKME: We want to visit all BBs before their successors (except for
     latch blocks, for which this assertion wouldn't hold).  In the simple
     case of the loop forms we allow, a dfs order of the BBs would be the same
     as reversed postorder traversal, so we are safe.  */

  free (bbs);
  bbs = XCNEWVEC (basic_block, loop->num_nodes);
  nbbs = dfs_enumerate_from (loop->header, 0, bb_in_loop_p,
                             bbs, loop->num_nodes, loop);
  gcc_assert (nbbs == loop->num_nodes);

  LOOP_VINFO_BBS (res) = bbs;
  LOOP_VINFO_NITERS (res) = NULL;
  LOOP_VINFO_NITERS_UNCHANGED (res) = NULL;
  LOOP_VINFO_COST_MODEL_MIN_ITERS (res) = 0;
  LOOP_VINFO_VECTORIZABLE_P (res) = 0;
  LOOP_PEELING_FOR_ALIGNMENT (res) = 0;
  LOOP_VINFO_VECT_FACTOR (res) = 0;
  LOOP_VINFO_DATAREFS (res) = VEC_alloc (data_reference_p, heap, 10);
  LOOP_VINFO_DDRS (res) = VEC_alloc (ddr_p, heap, 10 * 10);
  LOOP_VINFO_UNALIGNED_DR (res) = NULL;
  LOOP_VINFO_MAY_MISALIGN_STMTS (res) =
    VEC_alloc (gimple, heap,
               PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIGNMENT_CHECKS));
  LOOP_VINFO_MAY_ALIAS_DDRS (res) =
    VEC_alloc (ddr_p, heap,
               PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS));
  LOOP_VINFO_STRIDED_STORES (res) = VEC_alloc (gimple, heap, 10);
  LOOP_VINFO_REDUCTIONS (res) = VEC_alloc (gimple, heap, 10);
  LOOP_VINFO_SLP_INSTANCES (res) = VEC_alloc (slp_instance, heap, 10);
  LOOP_VINFO_SLP_UNROLLING_FACTOR (res) = 1;

  return res;
}

/* Function destroy_loop_vec_info.

   Free LOOP_VINFO struct, as well as all the stmt_vec_info structs of all the
   stmts in the loop.  */
void
destroy_loop_vec_info (loop_vec_info loop_vinfo, bool clean_stmts)
{
  struct loop *loop;
  basic_block *bbs;
  int nbbs;
  gimple_stmt_iterator si;
  int j;
  VEC (slp_instance, heap) *slp_instances;
  slp_instance instance;

  if (!loop_vinfo)
    return;

  loop = LOOP_VINFO_LOOP (loop_vinfo);

  bbs = LOOP_VINFO_BBS (loop_vinfo);
  nbbs = loop->num_nodes;

  if (!clean_stmts)
    {
      free (LOOP_VINFO_BBS (loop_vinfo));
      free_data_refs (LOOP_VINFO_DATAREFS (loop_vinfo));
      free_dependence_relations (LOOP_VINFO_DDRS (loop_vinfo));
      VEC_free (gimple, heap, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo));

      free (loop_vinfo);
      loop->aux = NULL;
      return;
    }

  for (j = 0; j < nbbs; j++)
    {
      basic_block bb = bbs[j];
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        free_stmt_vec_info (gsi_stmt (si));

      for (si = gsi_start_bb (bb); !gsi_end_p (si); )
        {
          gimple stmt = gsi_stmt (si);
          stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

          if (stmt_info)
            {
              /* Check if this is a "pattern stmt" (introduced by the
                 vectorizer during the pattern recognition pass).  */
              bool remove_stmt_p = false;
              gimple orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
              if (orig_stmt)
                {
                  stmt_vec_info orig_stmt_info = vinfo_for_stmt (orig_stmt);
                  if (orig_stmt_info
                      && STMT_VINFO_IN_PATTERN_P (orig_stmt_info))
                    remove_stmt_p = true;
                }

              /* Free stmt_vec_info.  */
              free_stmt_vec_info (stmt);

              /* Remove dead "pattern stmts".  */
              if (remove_stmt_p)
                gsi_remove (&si, true);
            }
          gsi_next (&si);
        }
    }

  free (LOOP_VINFO_BBS (loop_vinfo));
  free_data_refs (LOOP_VINFO_DATAREFS (loop_vinfo));
  free_dependence_relations (LOOP_VINFO_DDRS (loop_vinfo));
  VEC_free (gimple, heap, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo));
  VEC_free (ddr_p, heap, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo));
  slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
  for (j = 0; VEC_iterate (slp_instance, slp_instances, j, instance); j++)
    vect_free_slp_instance (instance);

  VEC_free (slp_instance, heap, LOOP_VINFO_SLP_INSTANCES (loop_vinfo));
  VEC_free (gimple, heap, LOOP_VINFO_STRIDED_STORES (loop_vinfo));
  VEC_free (gimple, heap, LOOP_VINFO_REDUCTIONS (loop_vinfo));

  free (loop_vinfo);
  loop->aux = NULL;
}

/* Function vect_analyze_loop_1.

   Apply a set of analyses on LOOP, and create a loop_vec_info struct
   for it.  The different analyses will record information in the
   loop_vec_info struct.  This is a subset of the analyses applied in
   vect_analyze_loop, to be applied on an inner-loop nested in the loop
   that is now considered for (outer-loop) vectorization.  */
static loop_vec_info
vect_analyze_loop_1 (struct loop *loop)
{
  loop_vec_info loop_vinfo;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "===== analyze_loop_nest_1 =====");

  /* Check the CFG characteristics of the loop (nesting, entry/exit, etc.).  */

  loop_vinfo = vect_analyze_loop_form (loop);
  if (!loop_vinfo)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad inner-loop form.");
      return NULL;
    }

  return loop_vinfo;
}

/* Function vect_analyze_loop_form.

   Verify that certain CFG restrictions hold, including:
   - the loop has a pre-header
   - the loop has a single entry and exit
   - the loop exit condition is simple enough, and the number of iterations
     can be analyzed (a countable loop).  */
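
/* For instance, a simple counted loop such as

       for (i = 0; i < n; i++)
         a[i] = b[i];

   satisfies these restrictions: single entry and exit, and a computable
   number of iterations.  A loop with an additional data-dependent 'break'
   in its body has multiple exits and is rejected by the checks below.  */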
loop_vec_info
vect_analyze_loop_form (struct loop *loop)
{
  loop_vec_info loop_vinfo;
  gimple loop_cond;
  tree number_of_iterations = NULL;
  loop_vec_info inner_loop_vinfo = NULL;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_analyze_loop_form ===");

  /* Different restrictions apply when we are considering an inner-most loop,
     vs. an outer (nested) loop.
     (FORNOW. May want to relax some of these restrictions in the future).  */

  if (!loop->inner)
    {
      /* Inner-most loop.  We currently require that the number of BBs is
         exactly 2 (the header and latch).  Vectorizable inner-most loops
         look like this:

                        (pre-header)
                           |
                          header <--------+
                           | |            |
                           | +--> latch --+
                           |
                        (exit-bb)  */

      if (loop->num_nodes != 2)
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: control flow in loop.");
          return NULL;
        }

      if (empty_block_p (loop->header))
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: empty loop.");
          return NULL;
        }
    }
  else
    {
      struct loop *innerloop = loop->inner;
      edge entryedge;

      /* Nested loop.  We currently require that the loop is doubly-nested,
         contains a single inner loop, and the number of BBs is exactly 5.
         Vectorizable outer-loops look like this:

                        (pre-header)
                           |
                          header <---+
                           |         |
                          inner-loop |
                           |         |
                          tail ------+
                           |
                        (exit-bb)

         The inner-loop has the properties expected of inner-most loops
         as described above.  */

      if ((loop->inner)->inner || (loop->inner)->next)
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: multiple nested loops.");
          return NULL;
        }

      /* Analyze the inner-loop.  */
      inner_loop_vinfo = vect_analyze_loop_1 (loop->inner);
      if (!inner_loop_vinfo)
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: Bad inner loop.");
          return NULL;
        }

      if (!expr_invariant_in_loop_p (loop,
                                     LOOP_VINFO_NITERS (inner_loop_vinfo)))
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump,
                     "not vectorized: inner-loop count not invariant.");
          destroy_loop_vec_info (inner_loop_vinfo, true);
          return NULL;
        }

      if (loop->num_nodes != 5)
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: control flow in loop.");
          destroy_loop_vec_info (inner_loop_vinfo, true);
          return NULL;
        }

      gcc_assert (EDGE_COUNT (innerloop->header->preds) == 2);
      entryedge = EDGE_PRED (innerloop->header, 0);
      if (EDGE_PRED (innerloop->header, 0)->src == innerloop->latch)
        entryedge = EDGE_PRED (innerloop->header, 1);

      if (entryedge->src != loop->header
          || !single_exit (innerloop)
          || single_exit (innerloop)->dest != EDGE_PRED (loop->latch, 0)->src)
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: unsupported outerloop form.");
          destroy_loop_vec_info (inner_loop_vinfo, true);
          return NULL;
        }

      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "Considering outer-loop vectorization.");
    }

  if (!single_exit (loop)
      || EDGE_COUNT (loop->header->preds) != 2)
    {
      if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
        {
          if (!single_exit (loop))
            fprintf (vect_dump, "not vectorized: multiple exits.");
          else if (EDGE_COUNT (loop->header->preds) != 2)
            fprintf (vect_dump, "not vectorized: too many incoming edges.");
        }
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  /* We assume that the loop exit condition is at the end of the loop, i.e.,
     that the loop is represented as a do-while (with a proper if-guard
     before the loop if needed), where the loop header contains all the
     executable statements, and the latch is empty.  */
  if (!empty_block_p (loop->latch)
      || !gimple_seq_empty_p (phi_nodes (loop->latch)))
    {
      if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
        fprintf (vect_dump, "not vectorized: unexpected loop form.");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  /* Make sure there exists a single-predecessor exit bb:  */
  if (!single_pred_p (single_exit (loop)->dest))
    {
      edge e = single_exit (loop);
      if (!(e->flags & EDGE_ABNORMAL))
        {
          split_loop_exit_edge (e);
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "split exit edge.");
        }
      else
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: abnormal loop exit edge.");
          if (inner_loop_vinfo)
            destroy_loop_vec_info (inner_loop_vinfo, true);
          return NULL;
        }
    }

  loop_cond = vect_get_loop_niters (loop, &number_of_iterations);
  if (!loop_cond)
    {
      if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
        fprintf (vect_dump, "not vectorized: complicated exit condition.");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  if (!number_of_iterations)
    {
      if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
        fprintf (vect_dump,
                 "not vectorized: number of iterations cannot be computed.");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  if (chrec_contains_undetermined (number_of_iterations))
    {
      if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
        fprintf (vect_dump, "Infinite number of iterations.");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  if (!NITERS_KNOWN_P (number_of_iterations))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "Symbolic number of iterations is ");
          print_generic_expr (vect_dump, number_of_iterations, TDF_DETAILS);
        }
    }
  else if (TREE_INT_CST_LOW (number_of_iterations) == 0)
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: number of iterations = 0.");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, false);
      return NULL;
    }

  loop_vinfo = new_loop_vec_info (loop);
  LOOP_VINFO_NITERS (loop_vinfo) = number_of_iterations;
  LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = number_of_iterations;

  STMT_VINFO_TYPE (vinfo_for_stmt (loop_cond)) = loop_exit_ctrl_vec_info_type;

  /* CHECKME: May want to keep it around in the future.  */
  if (inner_loop_vinfo)
    destroy_loop_vec_info (inner_loop_vinfo, false);

  gcc_assert (!loop->aux);
  loop->aux = loop_vinfo;
  return loop_vinfo;
}

/* Function vect_analyze_loop_operations.

   Scan the loop stmts and make sure they are all vectorizable.  */
static bool
vect_analyze_loop_operations (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  unsigned int vectorization_factor = 0;
  int i;
  gimple phi;
  stmt_vec_info stmt_info;
  bool need_to_vectorize = false;
  int min_profitable_iters;
  int min_scalar_loop_bound;
  unsigned int th;
  bool only_slp_in_loop = true, ok;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_analyze_loop_operations ===");

  gcc_assert (LOOP_VINFO_VECT_FACTOR (loop_vinfo));
  vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);

  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];

      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        {
          phi = gsi_stmt (si);
          ok = true;

          stmt_info = vinfo_for_stmt (phi);
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "examining phi: ");
              print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
            }

          if (! is_loop_header_bb_p (bb))
            {
              /* Inner-loop loop-closed exit phi in outer-loop vectorization
                 (i.e., a phi in the tail of the outer-loop).
                 FORNOW: we currently don't support the case that these phis
                 are not used in the outerloop (unless it is double reduction,
                 i.e., this phi is vect_reduction_def), because this case
                 would require actually doing something here.  */
              if ((!STMT_VINFO_RELEVANT_P (stmt_info)
                   || STMT_VINFO_LIVE_P (stmt_info))
                  && STMT_VINFO_DEF_TYPE (stmt_info)
                     != vect_double_reduction_def)
                {
                  if (vect_print_dump_info (REPORT_DETAILS))
                    fprintf (vect_dump,
                             "Unsupported loop-closed phi in outer-loop.");
                  return false;
                }
              continue;
            }

          gcc_assert (stmt_info);

          if (STMT_VINFO_LIVE_P (stmt_info))
            {
              /* FORNOW: not yet supported.  */
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                fprintf (vect_dump, "not vectorized: value used after loop.");
              return false;
            }

          if (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope
              && STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def)
            {
              /* A scalar-dependence cycle that we don't support.  */
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                fprintf (vect_dump, "not vectorized: scalar dependence cycle.");
              return false;
            }

          if (STMT_VINFO_RELEVANT_P (stmt_info))
            {
              need_to_vectorize = true;
              if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def)
                ok = vectorizable_induction (phi, NULL, NULL);
            }

          if (!ok)
            {
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                {
                  fprintf (vect_dump,
                           "not vectorized: relevant phi not supported: ");
                  print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
                }
              return false;
            }
        }

      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
        {
          gimple stmt = gsi_stmt (si);
          stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

          gcc_assert (stmt_info);

          if (!vect_analyze_stmt (stmt, &need_to_vectorize, NULL))
            return false;

          if ((STMT_VINFO_RELEVANT_P (stmt_info)
               || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
              && !PURE_SLP_STMT (stmt_info))
            /* STMT needs both SLP and loop-based vectorization.  */
            only_slp_in_loop = false;
        }
    } /* bbs */

  /* All operations in the loop are either irrelevant (deal with loop
     control, or dead), or only used outside the loop and can be moved
     out of the loop (e.g. invariants, inductions).  The loop can be
     optimized away by scalar optimizations.  We're better off not
     touching this loop.  */
  if (!need_to_vectorize)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump,
                 "All the computation can be taken out of the loop.");
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump,
                 "not vectorized: redundant loop. no profit to vectorize.");
      return false;
    }

  /* If all the stmts in the loop can be SLPed, we perform only SLP, and the
     vectorization factor of the loop is the unrolling factor required by the
     SLP instances.  If that unrolling factor is 1, we say that we perform
     pure SLP on the loop - cross iteration parallelism is not exploited.  */
  if (only_slp_in_loop)
    vectorization_factor = LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo);
  else
    vectorization_factor = least_common_multiple (vectorization_factor,
                                LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo));

  LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;

  if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      && vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump,
             "vectorization_factor = %d, niters = " HOST_WIDE_INT_PRINT_DEC,
             vectorization_factor, LOOP_VINFO_INT_NITERS (loop_vinfo));

  if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      && (LOOP_VINFO_INT_NITERS (loop_vinfo) < vectorization_factor))
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: iteration count too small.");
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "not vectorized: iteration count smaller than "
                 "vectorization factor.");
      return false;
    }

  /* Analyze cost.  Decide if worth while to vectorize.  */

  /* Once VF is set, SLP costs should be updated since the number of created
     vector stmts depends on VF.  */
  vect_update_slp_costs_according_to_vf (loop_vinfo);

  min_profitable_iters = vect_estimate_min_profitable_iters (loop_vinfo);
  LOOP_VINFO_COST_MODEL_MIN_ITERS (loop_vinfo) = min_profitable_iters;

  if (min_profitable_iters < 0)
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: vectorization not profitable.");
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "not vectorized: vector version will never be "
                 "profitable.");
      return false;
    }

  min_scalar_loop_bound = ((PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND)
                            * vectorization_factor) - 1);

  /* Use the cost model only if it is more conservative than the user specified
     threshold.  */

  th = (unsigned) min_scalar_loop_bound;
  if (min_profitable_iters
      && (!min_scalar_loop_bound
          || min_profitable_iters > min_scalar_loop_bound))
    th = (unsigned) min_profitable_iters;

  if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      && LOOP_VINFO_INT_NITERS (loop_vinfo) <= th)
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: vectorization not "
                 "profitable.");
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "not vectorized: iteration count smaller than "
                 "user specified loop bound parameter or minimum "
                 "profitable iterations (whichever is more conservative).");
      return false;
    }

  if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      || LOOP_VINFO_INT_NITERS (loop_vinfo) % vectorization_factor != 0
      || LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "epilog loop required.");
      if (!vect_can_advance_ivs_p (loop_vinfo))
        {
          if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
            fprintf (vect_dump,
                     "not vectorized: can't create epilog loop 1.");
          return false;
        }
      if (!slpeel_can_duplicate_loop_p (loop, single_exit (loop)))
        {
          if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
            fprintf (vect_dump,
                     "not vectorized: can't create epilog loop 2.");
          return false;
        }
    }

  return true;
}

/* Function vect_analyze_loop.

   Apply a set of analyses on LOOP, and create a loop_vec_info struct
   for it.  The different analyses will record information in the
   loop_vec_info struct.  */
loop_vec_info
vect_analyze_loop (struct loop *loop)
{
  bool ok;
  loop_vec_info loop_vinfo;
  int max_vf = MAX_VECTORIZATION_FACTOR;
  int min_vf = 2;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "===== analyze_loop_nest =====");

  if (loop_outer (loop)
      && loop_vec_info_for_loop (loop_outer (loop))
      && LOOP_VINFO_VECTORIZABLE_P (loop_vec_info_for_loop (loop_outer (loop))))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "outer-loop already vectorized.");
      return NULL;
    }

  /* Check the CFG characteristics of the loop (nesting, entry/exit, etc.).  */

  loop_vinfo = vect_analyze_loop_form (loop);
  if (!loop_vinfo)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad loop form.");
      return NULL;
    }

  /* Find all data references in the loop (which correspond to vdefs/vuses)
     and analyze their evolution in the loop.  Also adjust the minimal
     vectorization factor according to the loads and stores.

     FORNOW: Handle only simple array references whose
     alignment can be forced, and aligned pointer-references.  */

  ok = vect_analyze_data_refs (loop_vinfo, NULL, &min_vf);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data references.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  /* Classify all cross-iteration scalar data-flow cycles.
     Cross-iteration cycles caused by virtual phis are analyzed separately.  */

  vect_analyze_scalar_cycles (loop_vinfo);

  vect_pattern_recog (loop_vinfo);

  /* Data-flow analysis to detect stmts that do not need to be vectorized.  */

  ok = vect_mark_stmts_to_be_vectorized (loop_vinfo);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "unexpected pattern.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  /* Analyze data dependences between the data-refs in the loop
     and adjust the maximum vectorization factor according to
     the dependences.
     FORNOW: fail at the first data dependence that we encounter.  */

  ok = vect_analyze_data_ref_dependences (loop_vinfo, NULL, &max_vf);
  if (!ok
      || max_vf < min_vf)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data dependence.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  ok = vect_determine_vectorization_factor (loop_vinfo);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "can't determine vectorization factor.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }
  if (max_vf < LOOP_VINFO_VECT_FACTOR (loop_vinfo))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data dependence.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  /* Analyze the alignment of the data-refs in the loop.
     Fail if a data reference is found that cannot be vectorized.  */

  ok = vect_analyze_data_refs_alignment (loop_vinfo, NULL);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data alignment.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  /* Analyze the access patterns of the data-refs in the loop (consecutive,
     complex, etc.).  FORNOW: Only handle consecutive access pattern.  */

  ok = vect_analyze_data_ref_accesses (loop_vinfo, NULL);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data access.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  /* Prune the list of ddrs to be tested at run-time by versioning for alias.
     It is important to call pruning after vect_analyze_data_ref_accesses,
     since we use grouping information gathered by interleaving analysis.  */
  ok = vect_prune_runtime_alias_test_list (loop_vinfo);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "too long list of versioning for alias "
                            "run-time tests.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  /* Check the SLP opportunities in the loop, analyze and build SLP trees.  */
  ok = vect_analyze_slp (loop_vinfo, NULL);
  if (ok)
    {
      /* Decide which possible SLP instances to SLP.  */
      vect_make_slp_decision (loop_vinfo);

      /* Find stmts that need to be both vectorized and SLPed.  */
      vect_detect_hybrid_slp (loop_vinfo);
    }

  /* This pass will decide on using loop versioning and/or loop peeling in
     order to enhance the alignment of data references in the loop.  */

  ok = vect_enhance_data_refs_alignment (loop_vinfo);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data alignment.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  /* Scan all the operations in the loop and make sure they are
     vectorizable.  */

  ok = vect_analyze_loop_operations (loop_vinfo);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad operation or unsupported loop bound.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  LOOP_VINFO_VECTORIZABLE_P (loop_vinfo) = 1;

  return loop_vinfo;
}

/* Function reduction_code_for_scalar_code

   Input:
   CODE - tree_code of a reduction operation.

   Output:
   REDUC_CODE - the corresponding tree-code to be used to reduce the
      vector of partial results into a single scalar result (which
      will also reside in a vector), or ERROR_MARK if the operation is
      a supported reduction operation, but does not have such a tree-code.

   Return FALSE if CODE currently cannot be vectorized as a reduction.  */
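
/* For example, a sum reduction (PLUS_EXPR) maps to REDUC_PLUS_EXPR, which
   collapses the vector of partial sums into a single scalar sum; MULT_EXPR
   is accepted but yields ERROR_MARK, so the partial results must then be
   combined by other means in the reduction epilogue.  */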
static bool
reduction_code_for_scalar_code (enum tree_code code,
                                enum tree_code *reduc_code)
{
  switch (code)
    {
      case MAX_EXPR:
        *reduc_code = REDUC_MAX_EXPR;
        return true;

      case MIN_EXPR:
        *reduc_code = REDUC_MIN_EXPR;
        return true;

      case PLUS_EXPR:
        *reduc_code = REDUC_PLUS_EXPR;
        return true;

      case MULT_EXPR:
      case MINUS_EXPR:
      case BIT_IOR_EXPR:
      case BIT_XOR_EXPR:
      case BIT_AND_EXPR:
        *reduc_code = ERROR_MARK;
        return true;

      default:
        return false;
    }
}

/* Error reporting helper for vect_is_simple_reduction below.  GIMPLE statement
   STMT is printed with a message MSG.  */
static void
report_vect_op (gimple stmt, const char *msg)
{
  fprintf (vect_dump, "%s", msg);
  print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
}

/* Function vect_is_simple_reduction_1

   (1) Detect a cross-iteration def-use cycle that represents a simple
   reduction computation.  We look for the following pattern:

   loop_header:
     a1 = phi < a0, a2 >
     a3 = ...
     a2 = operation (a3, a1)

   such that:
   1. operation is commutative and associative and it is safe to
      change the order of the computation (if CHECK_REDUCTION is true)
   2. no uses for a2 in the loop (a2 is used out of the loop)
   3. no uses of a1 in the loop besides the reduction operation.

   Condition 1 is tested here.
   Conditions 2,3 are tested in vect_mark_stmts_to_be_vectorized.

   (2) Detect a cross-iteration def-use cycle in nested loops, i.e.,
   nested cycles, if CHECK_REDUCTION is false.

   (3) Detect cycles of phi nodes in outer-loop vectorization, i.e., double
   reductions:

   a1 = phi < a0, a2 >
   inner loop (def of a3)
   a2 = phi < a3 >

   If MODIFY is true it tries also to rework the code in-place to enable
   detection of more reduction patterns.  For the time being we rewrite
   "res -= RHS" into "res += -RHS" when it seems worthwhile.  */
1623 static gimple
1624 vect_is_simple_reduction_1 (loop_vec_info loop_info, gimple phi,
1625 bool check_reduction, bool *double_reduc,
1626 bool modify)
1628 struct loop *loop = (gimple_bb (phi))->loop_father;
1629 struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
1630 edge latch_e = loop_latch_edge (loop);
1631 tree loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
1632 gimple def_stmt, def1 = NULL, def2 = NULL;
1633 enum tree_code orig_code, code;
1634 tree op1, op2, op3 = NULL_TREE, op4 = NULL_TREE;
1635 tree type;
1636 int nloop_uses;
1637 tree name;
1638 imm_use_iterator imm_iter;
1639 use_operand_p use_p;
1640 bool phi_def;
1642 *double_reduc = false;
1644 /* If CHECK_REDUCTION is true, we assume inner-most loop vectorization,
1645 otherwise, we assume outer loop vectorization. */
1646 gcc_assert ((check_reduction && loop == vect_loop)
1647 || (!check_reduction && flow_loop_nested_p (vect_loop, loop)));
1649 name = PHI_RESULT (phi);
1650 nloop_uses = 0;
1651 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
1653 gimple use_stmt = USE_STMT (use_p);
1654 if (is_gimple_debug (use_stmt))
1655 continue;
1656 if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))
1657 && vinfo_for_stmt (use_stmt)
1658 && !is_pattern_stmt_p (vinfo_for_stmt (use_stmt)))
1659 nloop_uses++;
1660 if (nloop_uses > 1)
1662 if (vect_print_dump_info (REPORT_DETAILS))
1663 fprintf (vect_dump, "reduction used in loop.");
1664 return NULL;
1668 if (TREE_CODE (loop_arg) != SSA_NAME)
1670 if (vect_print_dump_info (REPORT_DETAILS))
1672 fprintf (vect_dump, "reduction: not ssa_name: ");
1673 print_generic_expr (vect_dump, loop_arg, TDF_SLIM);
1675 return NULL;
1678 def_stmt = SSA_NAME_DEF_STMT (loop_arg);
1679 if (!def_stmt)
1681 if (vect_print_dump_info (REPORT_DETAILS))
1682 fprintf (vect_dump, "reduction: no def_stmt.");
1683 return NULL;
1686 if (!is_gimple_assign (def_stmt) && gimple_code (def_stmt) != GIMPLE_PHI)
1688 if (vect_print_dump_info (REPORT_DETAILS))
1689 print_gimple_stmt (vect_dump, def_stmt, 0, TDF_SLIM);
1690 return NULL;
1693 if (is_gimple_assign (def_stmt))
1695 name = gimple_assign_lhs (def_stmt);
1696 phi_def = false;
1698 else
1700 name = PHI_RESULT (def_stmt);
1701 phi_def = true;
1704 nloop_uses = 0;
1705 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
1707 gimple use_stmt = USE_STMT (use_p);
1708 if (is_gimple_debug (use_stmt))
1709 continue;
1710 if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))
1711 && vinfo_for_stmt (use_stmt)
1712 && !is_pattern_stmt_p (vinfo_for_stmt (use_stmt)))
1713 nloop_uses++;
1714 if (nloop_uses > 1)
1716 if (vect_print_dump_info (REPORT_DETAILS))
1717 fprintf (vect_dump, "reduction used in loop.");
1718 return NULL;
1722 /* If DEF_STMT is a phi node itself, we expect it to have a single argument
1723 defined in the inner loop. */
1724 if (phi_def)
1726 op1 = PHI_ARG_DEF (def_stmt, 0);
1728 if (gimple_phi_num_args (def_stmt) != 1
1729 || TREE_CODE (op1) != SSA_NAME)
1731 if (vect_print_dump_info (REPORT_DETAILS))
1732 fprintf (vect_dump, "unsupported phi node definition.");
1734 return NULL;
1737 def1 = SSA_NAME_DEF_STMT (op1);
1738 if (flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
1739 && loop->inner
1740 && flow_bb_inside_loop_p (loop->inner, gimple_bb (def1))
1741 && is_gimple_assign (def1))
1743 if (vect_print_dump_info (REPORT_DETAILS))
1744 report_vect_op (def_stmt, "detected double reduction: ");
1746 *double_reduc = true;
1747 return def_stmt;
1750 return NULL;
1753 code = orig_code = gimple_assign_rhs_code (def_stmt);
1755 /* We can handle "res -= x[i]", which is non-associative by
1756 simply rewriting this into "res += -x[i]". Avoid changing
1757 gimple instruction for the first simple tests and only do this
1758 if we're allowed to change code at all. */
1759 if (code == MINUS_EXPR && modify)
1760 code = PLUS_EXPR;
1762 if (check_reduction
1763 && (!commutative_tree_code (code) || !associative_tree_code (code)))
1765 if (vect_print_dump_info (REPORT_DETAILS))
1766 report_vect_op (def_stmt, "reduction: not commutative/associative: ");
1767 return NULL;
1770 if (get_gimple_rhs_class (code) != GIMPLE_BINARY_RHS)
1772 if (code != COND_EXPR)
1774 if (vect_print_dump_info (REPORT_DETAILS))
1775 report_vect_op (def_stmt, "reduction: not binary operation: ");
1777 return NULL;
1780 op3 = TREE_OPERAND (gimple_assign_rhs1 (def_stmt), 0);
1781 if (COMPARISON_CLASS_P (op3))
1783 op4 = TREE_OPERAND (op3, 1);
1784 op3 = TREE_OPERAND (op3, 0);
1787 op1 = TREE_OPERAND (gimple_assign_rhs1 (def_stmt), 1);
1788 op2 = TREE_OPERAND (gimple_assign_rhs1 (def_stmt), 2);
1790 if (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op2) != SSA_NAME)
1792 if (vect_print_dump_info (REPORT_DETAILS))
1793 report_vect_op (def_stmt, "reduction: uses not ssa_names: ");
1795 return NULL;
1798 else
1800 op1 = gimple_assign_rhs1 (def_stmt);
1801 op2 = gimple_assign_rhs2 (def_stmt);
1803 if (TREE_CODE (op1) != SSA_NAME || TREE_CODE (op2) != SSA_NAME)
1805 if (vect_print_dump_info (REPORT_DETAILS))
1806 report_vect_op (def_stmt, "reduction: uses not ssa_names: ");
1808 return NULL;
1812 type = TREE_TYPE (gimple_assign_lhs (def_stmt));
1813 if ((TREE_CODE (op1) == SSA_NAME
1814 && !types_compatible_p (type,TREE_TYPE (op1)))
1815 || (TREE_CODE (op2) == SSA_NAME
1816 && !types_compatible_p (type, TREE_TYPE (op2)))
1817 || (op3 && TREE_CODE (op3) == SSA_NAME
1818 && !types_compatible_p (type, TREE_TYPE (op3)))
1819 || (op4 && TREE_CODE (op4) == SSA_NAME
1820 && !types_compatible_p (type, TREE_TYPE (op4))))
1822 if (vect_print_dump_info (REPORT_DETAILS))
1824 fprintf (vect_dump, "reduction: multiple types: operation type: ");
1825 print_generic_expr (vect_dump, type, TDF_SLIM);
1826 fprintf (vect_dump, ", operands types: ");
1827 print_generic_expr (vect_dump, TREE_TYPE (op1), TDF_SLIM);
1828 fprintf (vect_dump, ",");
1829 print_generic_expr (vect_dump, TREE_TYPE (op2), TDF_SLIM);
1830 if (op3)
1832 fprintf (vect_dump, ",");
1833 print_generic_expr (vect_dump, TREE_TYPE (op3), TDF_SLIM);
1836 if (op4)
1838 fprintf (vect_dump, ",");
1839 print_generic_expr (vect_dump, TREE_TYPE (op4), TDF_SLIM);
1843 return NULL;
1846 /* Check that it's ok to change the order of the computation.
1847 Generally, when vectorizing a reduction we change the order of the
1848 computation. This may change the behavior of the program in some
1849 cases, so we need to check that this is ok. One exception is when
1850 vectorizing an outer-loop: the inner-loop is executed sequentially,
1851 and therefore vectorizing reductions in the inner-loop during
1852 outer-loop vectorization is safe. */
1854 /* CHECKME: check for !flag_finite_math_only too? */
1855 if (SCALAR_FLOAT_TYPE_P (type) && !flag_associative_math
1856 && check_reduction)
1858 /* Changing the order of operations changes the semantics. */
1859 if (vect_print_dump_info (REPORT_DETAILS))
1860 report_vect_op (def_stmt, "reduction: unsafe fp math optimization: ");
1861 return NULL;
1863 else if (INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type)
1864 && check_reduction)
1866 /* Changing the order of operations changes the semantics. */
1867 if (vect_print_dump_info (REPORT_DETAILS))
1868 report_vect_op (def_stmt, "reduction: unsafe int math optimization: ");
1869 return NULL;
1871 else if (SAT_FIXED_POINT_TYPE_P (type) && check_reduction)
1873 /* Changing the order of operations changes the semantics. */
1874 if (vect_print_dump_info (REPORT_DETAILS))
1875 report_vect_op (def_stmt,
1876 "reduction: unsafe fixed-point math optimization: ");
1877 return NULL;
1880 /* If we detected "res -= x[i]" earlier, rewrite it into
1881 "res += -x[i]" now. If this turns out to be useless reassoc
1882 will clean it up again. */
1883 if (orig_code == MINUS_EXPR)
1885 tree rhs = gimple_assign_rhs2 (def_stmt);
1886 tree negrhs = make_ssa_name (SSA_NAME_VAR (rhs), NULL);
1887 gimple negate_stmt = gimple_build_assign_with_ops (NEGATE_EXPR, negrhs,
1888 rhs, NULL);
1889 gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
1890 set_vinfo_for_stmt (negate_stmt, new_stmt_vec_info (negate_stmt,
1891 loop_info, NULL));
1892 gsi_insert_before (&gsi, negate_stmt, GSI_NEW_STMT);
1893 gimple_assign_set_rhs2 (def_stmt, negrhs);
1894 gimple_assign_set_rhs_code (def_stmt, PLUS_EXPR);
1895 update_stmt (def_stmt);
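/* At the source level the rewrite above corresponds to (a sketch,
   not code the compiler emits as such):

     for (i = 0; i < n; i++)      for (i = 0; i < n; i++)
       res -= x[i];         ==>     { tmp = -x[i]; res += tmp; }

   which restores a commutative/associative reduction operation.  */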
1898 /* Reduction is safe. We're dealing with one of the following:
1899 1) integer arithmetic and no trapv
1900 2) floating point arithmetic, and special flags permit this optimization
1901 3) nested cycle (i.e., outer loop vectorization). */
1902 if (TREE_CODE (op1) == SSA_NAME)
1903 def1 = SSA_NAME_DEF_STMT (op1);
1905 if (TREE_CODE (op2) == SSA_NAME)
1906 def2 = SSA_NAME_DEF_STMT (op2);
1908 if (code != COND_EXPR
1909 && (!def1 || !def2 || gimple_nop_p (def1) || gimple_nop_p (def2)))
1911 if (vect_print_dump_info (REPORT_DETAILS))
1912 report_vect_op (def_stmt, "reduction: no defs for operands: ");
1913 return NULL;
1916 /* Check that one def is the reduction def, defined by PHI, and that
1917 the other def is either defined in the loop ("vect_internal_def")
1918 or is an induction (defined by a loop-header phi-node). */
1920 if (def2 && def2 == phi
1921 && (code == COND_EXPR
1922 || (def1 && flow_bb_inside_loop_p (loop, gimple_bb (def1))
1923 && (is_gimple_assign (def1)
1924 || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1))
1925 == vect_induction_def
1926 || (gimple_code (def1) == GIMPLE_PHI
1927 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1))
1928 == vect_internal_def
1929 && !is_loop_header_bb_p (gimple_bb (def1)))))))
1931 if (vect_print_dump_info (REPORT_DETAILS))
1932 report_vect_op (def_stmt, "detected reduction: ");
1933 return def_stmt;
1935 else if (def1 && def1 == phi
1936 && (code == COND_EXPR
1937 || (def2 && flow_bb_inside_loop_p (loop, gimple_bb (def2))
1938 && (is_gimple_assign (def2)
1939 || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2))
1940 == vect_induction_def
1941 || (gimple_code (def2) == GIMPLE_PHI
1942 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2))
1943 == vect_internal_def
1944 && !is_loop_header_bb_p (gimple_bb (def2)))))))
1946 if (check_reduction)
1948 /* Swap operands (just for simplicity - so that the rest of the code
1949 can assume that the reduction variable is always the last (second)
1950 argument). */
1951 if (vect_print_dump_info (REPORT_DETAILS))
1952 report_vect_op (def_stmt,
1953 "detected reduction: need to swap operands: ");
1955 swap_tree_operands (def_stmt, gimple_assign_rhs1_ptr (def_stmt),
1956 gimple_assign_rhs2_ptr (def_stmt));
1958 else
1960 if (vect_print_dump_info (REPORT_DETAILS))
1961 report_vect_op (def_stmt, "detected reduction: ");
1964 return def_stmt;
1966 else
1968 if (vect_print_dump_info (REPORT_DETAILS))
1969 report_vect_op (def_stmt, "reduction: unknown pattern: ");
1971 return NULL;
1975 /* Wrapper around vect_is_simple_reduction_1, which won't modify code
1976 in-place. Arguments are as for that function. */
1978 static gimple
1979 vect_is_simple_reduction (loop_vec_info loop_info, gimple phi,
1980 bool check_reduction, bool *double_reduc)
1982 return vect_is_simple_reduction_1 (loop_info, phi, check_reduction,
1983 double_reduc, false);
1986 /* Wrapper around vect_is_simple_reduction_1, which will modify code
1987 in-place if it enables detection of more reductions. Arguments
1988 are as for that function. */
1990 gimple
1991 vect_force_simple_reduction (loop_vec_info loop_info, gimple phi,
1992 bool check_reduction, bool *double_reduc)
1994 return vect_is_simple_reduction_1 (loop_info, phi, check_reduction,
1995 double_reduc, true);
1998 /* Function vect_estimate_min_profitable_iters
2000 Return the number of iterations required for the vector version of the
2001 loop to be profitable relative to the cost of the scalar version of the
2002 loop.
2004 TODO: Take profile info into account before making vectorization
2005 decisions, if available. */
2007 int
2008 vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo)
2010 int i;
2011 int min_profitable_iters;
2012 int peel_iters_prologue;
2013 int peel_iters_epilogue;
2014 int vec_inside_cost = 0;
2015 int vec_outside_cost = 0;
2016 int scalar_single_iter_cost = 0;
2017 int scalar_outside_cost = 0;
2018 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2019 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2020 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
2021 int nbbs = loop->num_nodes;
2022 int byte_misalign = LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo);
2023 int peel_guard_costs = 0;
2024 int innerloop_iters = 0, factor;
2025 VEC (slp_instance, heap) *slp_instances;
2026 slp_instance instance;
2028 /* Cost model disabled. */
2029 if (!flag_vect_cost_model)
2031 if (vect_print_dump_info (REPORT_COST))
2032 fprintf (vect_dump, "cost model disabled.");
2033 return 0;
2036 /* Requires loop versioning tests to handle misalignment. */
2037 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
2039 /* FIXME: Make cost depend on complexity of individual check. */
2040 vec_outside_cost +=
2041 VEC_length (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo));
2042 if (vect_print_dump_info (REPORT_COST))
2043 fprintf (vect_dump, "cost model: Adding cost of checks for loop "
2044 "versioning to treat misalignment.\n");
2047 /* Requires loop versioning with alias checks. */
2048 if (LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
2050 /* FIXME: Make cost depend on complexity of individual check. */
2051 vec_outside_cost +=
2052 VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo));
2053 if (vect_print_dump_info (REPORT_COST))
2054 fprintf (vect_dump, "cost model: Adding cost of checks for loop "
2055 "versioning aliasing.\n");
2058 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
2059 || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
2060 vec_outside_cost += TARG_COND_TAKEN_BRANCH_COST;
2062 /* Count statements in the scalar loop. Use this as the scalar cost of a
2063 single iteration for now.
2065 TODO: Add outer loop support.
2067 TODO: Consider assigning different costs to different scalar
2068 statements. */
2070 /* FORNOW. */
2071 if (loop->inner)
2072 innerloop_iters = 50; /* FIXME */
2074 for (i = 0; i < nbbs; i++)
2076 gimple_stmt_iterator si;
2077 basic_block bb = bbs[i];
2079 if (bb->loop_father == loop->inner)
2080 factor = innerloop_iters;
2081 else
2082 factor = 1;
2084 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
2086 gimple stmt = gsi_stmt (si);
2087 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2088 /* Skip stmts that are not vectorized inside the loop. */
2089 if (!STMT_VINFO_RELEVANT_P (stmt_info)
2090 && (!STMT_VINFO_LIVE_P (stmt_info)
2091 || STMT_VINFO_DEF_TYPE (stmt_info) != vect_reduction_def))
2092 continue;
2093 scalar_single_iter_cost += cost_for_stmt (stmt) * factor;
2094 vec_inside_cost += STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info) * factor;
2095 /* FIXME: for stmts in the inner-loop in outer-loop vectorization,
2096 some of the "outside" costs are generated inside the outer-loop. */
2097 vec_outside_cost += STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info);
2101 /* Add additional cost for the peeled instructions in prologue and epilogue
2102 loop.
2104 FORNOW: If we don't know the value of peel_iters for prologue or epilogue
2105 at compile-time, we assume it's vf/2 (the worst case would be vf-1).
2107 TODO: Build an expression that represents peel_iters for prologue and
2108 epilogue to be used in a run-time test. */
2110 if (byte_misalign < 0)
2112 peel_iters_prologue = vf/2;
2113 if (vect_print_dump_info (REPORT_COST))
2114 fprintf (vect_dump, "cost model: "
2115 "prologue peel iters set to vf/2.");
2117 /* If peeling for alignment is unknown, the loop bound of the main loop
2118 becomes unknown. */
2119 peel_iters_epilogue = vf/2;
2120 if (vect_print_dump_info (REPORT_COST))
2121 fprintf (vect_dump, "cost model: "
2122 "epilogue peel iters set to vf/2 because "
2123 "peeling for alignment is unknown .");
2125 /* If peeled iterations are unknown, count a taken branch and a not taken
2126 branch per peeled loop. Even if scalar loop iterations are known,
2127 vector iterations are not known since peeled prologue iterations are
2128 not known. Hence guards remain the same. */
2129 peel_guard_costs += 2 * (TARG_COND_TAKEN_BRANCH_COST
2130 + TARG_COND_NOT_TAKEN_BRANCH_COST);
2132 else
2134 if (byte_misalign)
2136 struct data_reference *dr = LOOP_VINFO_UNALIGNED_DR (loop_vinfo);
2137 int element_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr))));
2138 tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (DR_STMT (dr)));
2139 int nelements = TYPE_VECTOR_SUBPARTS (vectype);
2141 peel_iters_prologue = nelements - (byte_misalign / element_size);
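/* E.g. (illustrative numbers): for V4SI, nelements = 4 and
   element_size = 4, so a known misalignment of 8 bytes peels
   4 - 8/4 = 2 scalar iterations in the prologue.  */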
2143 else
2144 peel_iters_prologue = 0;
2146 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
2148 peel_iters_epilogue = vf/2;
2149 if (vect_print_dump_info (REPORT_COST))
2150 fprintf (vect_dump, "cost model: "
2151 "epilogue peel iters set to vf/2 because "
2152 "loop iterations are unknown .");
2154 /* If peeled iterations are known but the number of scalar loop
2155 iterations is unknown, count a taken branch per peeled loop. */
2156 peel_guard_costs += 2 * TARG_COND_TAKEN_BRANCH_COST;
2159 else
2161 int niters = LOOP_VINFO_INT_NITERS (loop_vinfo);
2162 peel_iters_prologue = niters < peel_iters_prologue ?
2163 niters : peel_iters_prologue;
2164 peel_iters_epilogue = (niters - peel_iters_prologue) % vf;
2168 vec_outside_cost += (peel_iters_prologue * scalar_single_iter_cost)
2169 + (peel_iters_epilogue * scalar_single_iter_cost)
2170 + peel_guard_costs;
2172 /* FORNOW: The scalar outside cost is incremented in one of the
2173 following ways:
2175 1. The vectorizer checks for alignment and aliasing and generates
2176 a condition that allows dynamic vectorization. A cost model
2177 check is ANDed with the versioning condition. Hence the scalar code
2178 path now has the added cost of the versioning check.
2180 if (cost > th & versioning_check)
2181 jmp to vector code
2183 Hence the run-time scalar cost is incremented by a not-taken branch cost.
2185 2. The vectorizer then checks if a prologue is required. If the
2186 cost model check was not already done during versioning, it has to
2187 be done before the prologue check.
2189 if (cost <= th)
2190 prologue = scalar_iters
2191 if (prologue == 0)
2192 jmp to vector code
2193 else
2194 execute prologue
2195 if (prologue == num_iters)
2196 go to exit
2198 Hence the run-time scalar cost is incremented by a taken branch,
2199 plus a not-taken branch, plus a taken branch cost.
2201 3. The vectorizer then checks if an epilogue is required. If the
2202 cost model check was not already done during the prologue check, it
2203 has to be done with the epilogue check.
2205 if (prologue == 0)
2206 jmp to vector code
2207 else
2208 execute prologue
2209 if (prologue == num_iters)
2210 go to exit
2211 vector code:
2212 if ((cost <= th) | (scalar_iters-prologue-epilogue == 0))
2213 jmp to epilogue
2215 Hence the run-time scalar cost should be incremented by 2 taken
2216 branches.
2218 TODO: The back end may reorder the BBs differently and reverse
2219 conditions/branch directions. Change the estimates below to
2220 something more reasonable. */
2222 /* If the number of iterations is known and we do not do versioning, we can
2223 decide whether to vectorize at compile time. Hence the scalar version
2224 does not carry cost model guard costs. */
2225 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
2226 || LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
2227 || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
2229 /* Cost model check occurs at versioning. */
2230 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
2231 || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
2232 scalar_outside_cost += TARG_COND_NOT_TAKEN_BRANCH_COST;
2233 else
2235 /* Cost model check occurs at prologue generation. */
2236 if (LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0)
2237 scalar_outside_cost += 2 * TARG_COND_TAKEN_BRANCH_COST
2238 + TARG_COND_NOT_TAKEN_BRANCH_COST;
2239 /* Cost model check occurs at epilogue generation. */
2240 else
2241 scalar_outside_cost += 2 * TARG_COND_TAKEN_BRANCH_COST;
2245 /* Add SLP costs. */
2246 slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
2247 for (i = 0; VEC_iterate (slp_instance, slp_instances, i, instance); i++)
2249 vec_outside_cost += SLP_INSTANCE_OUTSIDE_OF_LOOP_COST (instance);
2250 vec_inside_cost += SLP_INSTANCE_INSIDE_OF_LOOP_COST (instance);
2253 /* Calculate number of iterations required to make the vector version
2254 profitable, relative to the loop bodies only. The following condition
2255 must hold true:
2256 SIC * niters + SOC > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC
2257 where
2258 SIC = scalar iteration cost, VIC = vector iteration cost,
2259 VOC = vector outside cost, VF = vectorization factor,
2260 PL_ITERS = prologue iterations, EP_ITERS = epilogue iterations,
2261 SOC = scalar outside cost for run time cost model check. */
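/* A worked example with made-up costs (not from any real target):
   SIC = 4, VIC = 6, VF = 4, VOC = 10, SOC = 0 and no peeling gives
     niters > ((VOC - SOC) * VF) / (SIC * VF - VIC) = 40 / 10 = 4,
   and since niters = 4 makes both sides equal, the code below bumps
   the result to 5 before applying the VF lower bound.  */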
2263 if ((scalar_single_iter_cost * vf) > vec_inside_cost)
2265 if (vec_outside_cost <= 0)
2266 min_profitable_iters = 1;
2267 else
2269 min_profitable_iters = ((vec_outside_cost - scalar_outside_cost) * vf
2270 - vec_inside_cost * peel_iters_prologue
2271 - vec_inside_cost * peel_iters_epilogue)
2272 / ((scalar_single_iter_cost * vf)
2273 - vec_inside_cost);
2275 if ((scalar_single_iter_cost * vf * min_profitable_iters)
2276 <= ((vec_inside_cost * min_profitable_iters)
2277 + ((vec_outside_cost - scalar_outside_cost) * vf)))
2278 min_profitable_iters++;
2281 /* vector version will never be profitable. */
2282 else
2284 if (vect_print_dump_info (REPORT_COST))
2285 fprintf (vect_dump, "cost model: the vector iteration cost = %d "
2286 "divided by the scalar iteration cost = %d "
2287 "is greater or equal to the vectorization factor = %d.",
2288 vec_inside_cost, scalar_single_iter_cost, vf);
2289 return -1;
2292 if (vect_print_dump_info (REPORT_COST))
2294 fprintf (vect_dump, "Cost model analysis: \n");
2295 fprintf (vect_dump, " Vector inside of loop cost: %d\n",
2296 vec_inside_cost);
2297 fprintf (vect_dump, " Vector outside of loop cost: %d\n",
2298 vec_outside_cost);
2299 fprintf (vect_dump, " Scalar iteration cost: %d\n",
2300 scalar_single_iter_cost);
2301 fprintf (vect_dump, " Scalar outside cost: %d\n", scalar_outside_cost);
2302 fprintf (vect_dump, " prologue iterations: %d\n",
2303 peel_iters_prologue);
2304 fprintf (vect_dump, " epilogue iterations: %d\n",
2305 peel_iters_epilogue);
2306 fprintf (vect_dump, " Calculated minimum iters for profitability: %d\n",
2307 min_profitable_iters);
2310 min_profitable_iters =
2311 min_profitable_iters < vf ? vf : min_profitable_iters;
2313 /* Because the condition we create is:
2314 if (niters <= min_profitable_iters)
2315 then skip the vectorized loop. */
2316 min_profitable_iters--;
2318 if (vect_print_dump_info (REPORT_COST))
2319 fprintf (vect_dump, " Profitability threshold = %d\n",
2320 min_profitable_iters);
2322 return min_profitable_iters;
2326 /* TODO: There is a close dependency between vect_model_*_cost and the
2327 vectorizable_* functions. Design this better to avoid maintenance issues. */
2329 /* Function vect_model_reduction_cost.
2331 Models cost for a reduction operation, including the vector ops
2332 generated within the strip-mine loop, the initial definition before
2333 the loop, and the epilogue code that must be generated. */
2335 static bool
2336 vect_model_reduction_cost (stmt_vec_info stmt_info, enum tree_code reduc_code,
2337 int ncopies)
2339 int outer_cost = 0;
2340 enum tree_code code;
2341 optab optab;
2342 tree vectype;
2343 gimple stmt, orig_stmt;
2344 tree reduction_op;
2345 enum machine_mode mode;
2346 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2347 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2350 /* Cost of reduction op inside loop. */
2351 STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info) += ncopies * TARG_VEC_STMT_COST;
2353 stmt = STMT_VINFO_STMT (stmt_info);
2355 switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
2357 case GIMPLE_SINGLE_RHS:
2358 gcc_assert (TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt)) == ternary_op);
2359 reduction_op = TREE_OPERAND (gimple_assign_rhs1 (stmt), 2);
2360 break;
2361 case GIMPLE_UNARY_RHS:
2362 reduction_op = gimple_assign_rhs1 (stmt);
2363 break;
2364 case GIMPLE_BINARY_RHS:
2365 reduction_op = gimple_assign_rhs2 (stmt);
2366 break;
2367 default:
2368 gcc_unreachable ();
2371 vectype = get_vectype_for_scalar_type (TREE_TYPE (reduction_op));
2372 if (!vectype)
2374 if (vect_print_dump_info (REPORT_COST))
2376 fprintf (vect_dump, "unsupported data-type ");
2377 print_generic_expr (vect_dump, TREE_TYPE (reduction_op), TDF_SLIM);
2379 return false;
2382 mode = TYPE_MODE (vectype);
2383 orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
2385 if (!orig_stmt)
2386 orig_stmt = STMT_VINFO_STMT (stmt_info);
2388 code = gimple_assign_rhs_code (orig_stmt);
2390 /* Add in cost for initial definition. */
2391 outer_cost += TARG_SCALAR_TO_VEC_COST;
2393 /* Determine cost of epilogue code.
2395 We have a reduction operator that will reduce the vector in one statement.
2396 Also requires scalar extract. */
2398 if (!nested_in_vect_loop_p (loop, orig_stmt))
2400 if (reduc_code != ERROR_MARK)
2401 outer_cost += TARG_VEC_STMT_COST + TARG_VEC_TO_SCALAR_COST;
2402 else
2404 int vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1);
2405 tree bitsize =
2406 TYPE_SIZE (TREE_TYPE (gimple_assign_lhs (orig_stmt)));
2407 int element_bitsize = tree_low_cst (bitsize, 1);
2408 int nelements = vec_size_in_bits / element_bitsize;
2410 optab = optab_for_tree_code (code, vectype, optab_default);
2412 /* We have a whole vector shift available. */
2413 if (VECTOR_MODE_P (mode)
2414 && optab_handler (optab, mode)->insn_code != CODE_FOR_nothing
2415 && optab_handler (vec_shr_optab, mode)->insn_code != CODE_FOR_nothing)
2416 /* Final reduction via vector shifts and the reduction operator. Also
2417 requires scalar extract. */
2418 outer_cost += ((exact_log2(nelements) * 2) * TARG_VEC_STMT_COST
2419 + TARG_VEC_TO_SCALAR_COST);
2420 else
2421 /* Use extracts and reduction op for final reduction. For N elements,
2422 we have N extracts and N-1 reduction ops. */
2423 outer_cost += ((nelements + nelements - 1) * TARG_VEC_STMT_COST);
2427 STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info) = outer_cost;
2429 if (vect_print_dump_info (REPORT_COST))
2430 fprintf (vect_dump, "vect_model_reduction_cost: inside_cost = %d, "
2431 "outside_cost = %d .", STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info),
2432 STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info));
2434 return true;
2438 /* Function vect_model_induction_cost.
2440 Models cost for induction operations. */
2442 static void
2443 vect_model_induction_cost (stmt_vec_info stmt_info, int ncopies)
2445 /* loop cost for vec_loop. */
2446 STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info) = ncopies * TARG_VEC_STMT_COST;
2447 /* prologue cost for vec_init and vec_step. */
2448 STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info) = 2 * TARG_SCALAR_TO_VEC_COST;
2450 if (vect_print_dump_info (REPORT_COST))
2451 fprintf (vect_dump, "vect_model_induction_cost: inside_cost = %d, "
2452 "outside_cost = %d .", STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info),
2453 STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info));
2457 /* Function get_initial_def_for_induction
2459 Input:
2460 STMT - a stmt that performs an induction operation in the loop.
2461 IV_PHI - the initial value of the induction variable
2463 Output:
2464 Return a vector variable, initialized with the first VF values of
2465 the induction variable. E.g., for an iv with IV_PHI='X' and
2466 evolution S, for a vector of 4 units, we want to return:
2467 [X, X + S, X + 2*S, X + 3*S]. */
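/* E.g. (hypothetical source loop, for illustration only):

     j = 7;
     for (i = 0; i < n; i++)
       { a[i] = j; j += 3; }

   Here X = 7, S = 3; with VF = 4 the returned vector is
   [7, 10, 13, 16], and the step vector built below is
   [VF*S, VF*S, VF*S, VF*S] = [12, 12, 12, 12].  */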
2469 static tree
2470 get_initial_def_for_induction (gimple iv_phi)
2472 stmt_vec_info stmt_vinfo = vinfo_for_stmt (iv_phi);
2473 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
2474 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2475 tree scalar_type = TREE_TYPE (gimple_phi_result (iv_phi));
2476 tree vectype;
2477 int nunits;
2478 edge pe = loop_preheader_edge (loop);
2479 struct loop *iv_loop;
2480 basic_block new_bb;
2481 tree vec, vec_init, vec_step, t;
2482 tree access_fn;
2483 tree new_var;
2484 tree new_name;
2485 gimple init_stmt, induction_phi, new_stmt;
2486 tree induc_def, vec_def, vec_dest;
2487 tree init_expr, step_expr;
2488 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2489 int i;
2490 bool ok;
2491 int ncopies;
2492 tree expr;
2493 stmt_vec_info phi_info = vinfo_for_stmt (iv_phi);
2494 bool nested_in_vect_loop = false;
2495 gimple_seq stmts = NULL;
2496 imm_use_iterator imm_iter;
2497 use_operand_p use_p;
2498 gimple exit_phi;
2499 edge latch_e;
2500 tree loop_arg;
2501 gimple_stmt_iterator si;
2502 basic_block bb = gimple_bb (iv_phi);
2503 tree stepvectype;
2505 vectype = get_vectype_for_scalar_type (scalar_type);
2506 gcc_assert (vectype);
2507 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2508 ncopies = vf / nunits;
2510 gcc_assert (phi_info);
2511 gcc_assert (ncopies >= 1);
2513 /* Find the first insertion point in the BB. */
2514 si = gsi_after_labels (bb);
2516 if (INTEGRAL_TYPE_P (scalar_type))
2517 step_expr = build_int_cst (scalar_type, 0);
2518 else if (POINTER_TYPE_P (scalar_type))
2519 step_expr = build_int_cst (sizetype, 0);
2520 else
2521 step_expr = build_real (scalar_type, dconst0);
2523 /* Is phi in an inner-loop, while vectorizing an enclosing outer-loop? */
2524 if (nested_in_vect_loop_p (loop, iv_phi))
2526 nested_in_vect_loop = true;
2527 iv_loop = loop->inner;
2529 else
2530 iv_loop = loop;
2531 gcc_assert (iv_loop == (gimple_bb (iv_phi))->loop_father);
2533 latch_e = loop_latch_edge (iv_loop);
2534 loop_arg = PHI_ARG_DEF_FROM_EDGE (iv_phi, latch_e);
2536 access_fn = analyze_scalar_evolution (iv_loop, PHI_RESULT (iv_phi));
2537 gcc_assert (access_fn);
2538 ok = vect_is_simple_iv_evolution (iv_loop->num, access_fn,
2539 &init_expr, &step_expr);
2540 gcc_assert (ok);
2541 pe = loop_preheader_edge (iv_loop);
2543 /* Create the vector that holds the initial_value of the induction. */
2544 if (nested_in_vect_loop)
2546 /* iv_loop is nested in the loop to be vectorized. init_expr has already
2547 been created during vectorization of previous stmts; we obtain it from
2548 the STMT_VINFO_VEC_STMT of the defining stmt. */
2549 tree iv_def = PHI_ARG_DEF_FROM_EDGE (iv_phi,
2550 loop_preheader_edge (iv_loop));
2551 vec_init = vect_get_vec_def_for_operand (iv_def, iv_phi, NULL);
2553 else
2555 /* iv_loop is the loop to be vectorized. Create:
2556 vec_init = [X, X+S, X+2*S, X+3*S] (S = step_expr, X = init_expr) */
2557 new_var = vect_get_new_vect_var (scalar_type, vect_scalar_var, "var_");
2558 add_referenced_var (new_var);
2560 new_name = force_gimple_operand (init_expr, &stmts, false, new_var);
2561 if (stmts)
2563 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
2564 gcc_assert (!new_bb);
2567 t = NULL_TREE;
2568 t = tree_cons (NULL_TREE, init_expr, t);
2569 for (i = 1; i < nunits; i++)
2571 /* Create: new_name_i = new_name + step_expr */
2572 enum tree_code code = POINTER_TYPE_P (scalar_type)
2573 ? POINTER_PLUS_EXPR : PLUS_EXPR;
2574 init_stmt = gimple_build_assign_with_ops (code, new_var,
2575 new_name, step_expr);
2576 new_name = make_ssa_name (new_var, init_stmt);
2577 gimple_assign_set_lhs (init_stmt, new_name);
2579 new_bb = gsi_insert_on_edge_immediate (pe, init_stmt);
2580 gcc_assert (!new_bb);
2582 if (vect_print_dump_info (REPORT_DETAILS))
2584 fprintf (vect_dump, "created new init_stmt: ");
2585 print_gimple_stmt (vect_dump, init_stmt, 0, TDF_SLIM);
2587 t = tree_cons (NULL_TREE, new_name, t);
2589 /* Create a vector from [new_name_0, new_name_1, ..., new_name_nunits-1] */
2590 vec = build_constructor_from_list (vectype, nreverse (t));
2591 vec_init = vect_init_vector (iv_phi, vec, vectype, NULL);
2595 /* Create the vector that holds the step of the induction. */
2596 if (nested_in_vect_loop)
2597 /* iv_loop is nested in the loop to be vectorized. Generate:
2598 vec_step = [S, S, S, S] */
2599 new_name = step_expr;
2600 else
2602 /* iv_loop is the loop to be vectorized. Generate:
2603 vec_step = [VF*S, VF*S, VF*S, VF*S] */
2604 expr = build_int_cst (TREE_TYPE (step_expr), vf);
2605 new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
2606 expr, step_expr);
2609 t = NULL_TREE;
2610 for (i = 0; i < nunits; i++)
2611 t = tree_cons (NULL_TREE, unshare_expr (new_name), t);
2612 gcc_assert (CONSTANT_CLASS_P (new_name));
2613 stepvectype = get_vectype_for_scalar_type (TREE_TYPE (new_name));
2614 gcc_assert (stepvectype);
2615 vec = build_vector (stepvectype, t);
2616 vec_step = vect_init_vector (iv_phi, vec, stepvectype, NULL);
2619 /* Create the following def-use cycle:
2620 loop prolog:
2621 vec_init = ...
2622 vec_step = ...
2623 loop:
2624 vec_iv = PHI <vec_init, vec_loop>
2626 STMT
2628 vec_loop = vec_iv + vec_step; */
2630 /* Create the induction-phi that defines the induction-operand. */
2631 vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_");
2632 add_referenced_var (vec_dest);
2633 induction_phi = create_phi_node (vec_dest, iv_loop->header);
2634 set_vinfo_for_stmt (induction_phi,
2635 new_stmt_vec_info (induction_phi, loop_vinfo, NULL));
2636 induc_def = PHI_RESULT (induction_phi);
2638 /* Create the iv update inside the loop */
2639 new_stmt = gimple_build_assign_with_ops (PLUS_EXPR, vec_dest,
2640 induc_def, vec_step);
2641 vec_def = make_ssa_name (vec_dest, new_stmt);
2642 gimple_assign_set_lhs (new_stmt, vec_def);
2643 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
2644 set_vinfo_for_stmt (new_stmt, new_stmt_vec_info (new_stmt, loop_vinfo,
2645 NULL));
2647 /* Set the arguments of the phi node: */
2648 add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION);
2649 add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
2650 UNKNOWN_LOCATION);
2653 /* In case the vectorization factor (VF) is bigger than the number
2654 of elements that we can fit in a vectype (nunits), we have to generate
2655 more than one vector stmt - i.e., we need to "unroll" the
2656 vector stmt by a factor VF/nunits. For more details see documentation
2657 in vectorizable_operation. */
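/* E.g. (illustration): with X = 0, S = 1, VF = 8 and V4SI (nunits = 4,
   so ncopies = 2), the first copy holds [0, 1, 2, 3] and the second is
   obtained below by adding [nunits*S, ...] = [4, 4, 4, 4], giving
   [4, 5, 6, 7].  */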
2659 if (ncopies > 1)
2661 stmt_vec_info prev_stmt_vinfo;
2662 /* FORNOW. This restriction should be relaxed. */
2663 gcc_assert (!nested_in_vect_loop);
2665 /* Create the vector that holds the step of the induction. */
2666 expr = build_int_cst (TREE_TYPE (step_expr), nunits);
2667 new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
2668 expr, step_expr);
2669 t = NULL_TREE;
2670 for (i = 0; i < nunits; i++)
2671 t = tree_cons (NULL_TREE, unshare_expr (new_name), t);
2672 gcc_assert (CONSTANT_CLASS_P (new_name));
2673 vec = build_vector (stepvectype, t);
2674 vec_step = vect_init_vector (iv_phi, vec, stepvectype, NULL);
2676 vec_def = induc_def;
2677 prev_stmt_vinfo = vinfo_for_stmt (induction_phi);
2678 for (i = 1; i < ncopies; i++)
2680 /* vec_i = vec_prev + vec_step */
2681 new_stmt = gimple_build_assign_with_ops (PLUS_EXPR, vec_dest,
2682 vec_def, vec_step);
2683 vec_def = make_ssa_name (vec_dest, new_stmt);
2684 gimple_assign_set_lhs (new_stmt, vec_def);
2686 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
2687 set_vinfo_for_stmt (new_stmt,
2688 new_stmt_vec_info (new_stmt, loop_vinfo, NULL));
2689 STMT_VINFO_RELATED_STMT (prev_stmt_vinfo) = new_stmt;
2690 prev_stmt_vinfo = vinfo_for_stmt (new_stmt);
2694 if (nested_in_vect_loop)
2696 /* Find the loop-closed exit-phi of the induction, and record
2697 the final vector of induction results: */
2698 exit_phi = NULL;
2699 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
2701 if (!flow_bb_inside_loop_p (iv_loop, gimple_bb (USE_STMT (use_p))))
2703 exit_phi = USE_STMT (use_p);
2704 break;
2707 if (exit_phi)
2709 stmt_vec_info stmt_vinfo = vinfo_for_stmt (exit_phi);
2710 /* FORNOW. Currently not supporting the case that an inner-loop induction
2711 is not used in the outer-loop (i.e., is used only outside the outer-loop). */
2712 gcc_assert (STMT_VINFO_RELEVANT_P (stmt_vinfo)
2713 && !STMT_VINFO_LIVE_P (stmt_vinfo));
2715 STMT_VINFO_VEC_STMT (stmt_vinfo) = new_stmt;
2716 if (vect_print_dump_info (REPORT_DETAILS))
2718 fprintf (vect_dump, "vector of inductions after inner-loop:");
2719 print_gimple_stmt (vect_dump, new_stmt, 0, TDF_SLIM);
2725 if (vect_print_dump_info (REPORT_DETAILS))
2727 fprintf (vect_dump, "transform induction: created def-use cycle: ");
2728 print_gimple_stmt (vect_dump, induction_phi, 0, TDF_SLIM);
2729 fprintf (vect_dump, "\n");
2730 print_gimple_stmt (vect_dump, SSA_NAME_DEF_STMT (vec_def), 0, TDF_SLIM);
2733 STMT_VINFO_VEC_STMT (phi_info) = induction_phi;
2734 return induc_def;
2738 /* Function get_initial_def_for_reduction
2740 Input:
2741 STMT - a stmt that performs a reduction operation in the loop.
2742 INIT_VAL - the initial value of the reduction variable
2744 Output:
2745 ADJUSTMENT_DEF - a tree that holds a value to be added to the final result
2746 of the reduction (used for adjusting the epilog - see below).
2747 Return a vector variable, initialized according to the operation that STMT
2748 performs. This vector will be used as the initial value of the
2749 vector of partial results.
2751 Option1 (adjust in epilog): Initialize the vector as follows:
2752 add/bit or/xor: [0,0,...,0,0]
2753 mult/bit and: [1,1,...,1,1]
2754 min/max/cond_expr: [init_val,init_val,..,init_val,init_val]
2755 and when necessary (e.g. add/mult case) let the caller know
2756 that it needs to adjust the result by init_val.
2758 Option2: Initialize the vector as follows:
2759 add/bit or/xor: [init_val,0,0,...,0]
2760 mult/bit and: [init_val,1,1,...,1]
2761 min/max/cond_expr: [init_val,init_val,...,init_val]
2762 and no adjustments are needed.
2764 For example, for the following code:
2766 s = init_val;
2767 for (i=0;i<n;i++)
2768 s = s + a[i];
2770 STMT is 's = s + a[i]', and the reduction variable is 's'.
2771 For a vector of 4 units, we want to return either [0,0,0,init_val],
2772 or [0,0,0,0] and let the caller know that it needs to adjust
2773 the result at the end by 'init_val'.
2775 FORNOW, we use the 'adjust in epilog' scheme (Option1) if ADJUSTMENT_DEF
2776 is not NULL, because this way the initialization vector is simpler (the
2777 same element in all entries), and Option2 otherwise.
2779 A cost model should help decide between these two schemes. */
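/* E.g., for a product reduction (illustration only):

     s = 5;
     for (i = 0; i < n; i++)
       s = s * a[i];

   with VF = 4, Option1 initializes the vector of partial results to
   [1,1,1,1] and sets ADJUSTMENT_DEF to 5, so the epilog multiplies the
   reduced result by 5; Option2 would use [5,1,1,1] with no adjustment.  */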
2781 tree
2782 get_initial_def_for_reduction (gimple stmt, tree init_val,
2783 tree *adjustment_def)
2785 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
2786 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
2787 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2788 tree scalar_type = TREE_TYPE (init_val);
2789 tree vectype = get_vectype_for_scalar_type (scalar_type);
2790 int nunits;
2791 enum tree_code code = gimple_assign_rhs_code (stmt);
2792 tree def_for_init;
2793 tree init_def;
2794 tree t = NULL_TREE;
2795 int i;
2796 bool nested_in_vect_loop = false;
2797 tree init_value;
2798 REAL_VALUE_TYPE real_init_val = dconst0;
2799 int int_init_val = 0;
2800 gimple def_stmt = NULL;
2802 gcc_assert (vectype);
2803 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2805 gcc_assert (POINTER_TYPE_P (scalar_type) || INTEGRAL_TYPE_P (scalar_type)
2806 || SCALAR_FLOAT_TYPE_P (scalar_type));
2808 if (nested_in_vect_loop_p (loop, stmt))
2809 nested_in_vect_loop = true;
2810 else
2811 gcc_assert (loop == (gimple_bb (stmt))->loop_father);
2813 /* In case of double reduction we only create a vector variable to be put
2814 in the reduction phi node. The actual statement creation is done in
2815 vect_create_epilog_for_reduction. */
2816 if (adjustment_def && nested_in_vect_loop
2817 && TREE_CODE (init_val) == SSA_NAME
2818 && (def_stmt = SSA_NAME_DEF_STMT (init_val))
2819 && gimple_code (def_stmt) == GIMPLE_PHI
2820 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
2821 && vinfo_for_stmt (def_stmt)
2822 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
2823 == vect_double_reduction_def)
2825 *adjustment_def = NULL;
2826 return vect_create_destination_var (init_val, vectype);
2829 if (TREE_CONSTANT (init_val))
2831 if (SCALAR_FLOAT_TYPE_P (scalar_type))
2832 init_value = build_real (scalar_type, TREE_REAL_CST (init_val));
2833 else
2834 init_value = build_int_cst (scalar_type, TREE_INT_CST_LOW (init_val));
2836 else
2837 init_value = init_val;
2839 switch (code)
2841 case WIDEN_SUM_EXPR:
2842 case DOT_PROD_EXPR:
2843 case PLUS_EXPR:
2844 case MINUS_EXPR:
2845 case BIT_IOR_EXPR:
2846 case BIT_XOR_EXPR:
2847 case MULT_EXPR:
2848 case BIT_AND_EXPR:
2849 /* ADJUSTMENT_DEF is NULL when called from
2850 vect_create_epilog_for_reduction to vectorize a double reduction. */
2851 if (adjustment_def)
2853 if (nested_in_vect_loop)
2854 *adjustment_def = vect_get_vec_def_for_operand (init_val, stmt,
2855 NULL);
2856 else
2857 *adjustment_def = init_val;
2860 if (code == MULT_EXPR || code == BIT_AND_EXPR)
2862 real_init_val = dconst1;
2863 int_init_val = 1;
2866 if (SCALAR_FLOAT_TYPE_P (scalar_type))
2867 def_for_init = build_real (scalar_type, real_init_val);
2868 else
2869 def_for_init = build_int_cst (scalar_type, int_init_val);
2871 /* Create a vector of '0' or '1' except the first element. */
2872 for (i = nunits - 2; i >= 0; --i)
2873 t = tree_cons (NULL_TREE, def_for_init, t);
2875 /* Option1: the first element is '0' or '1' as well. */
2876 if (adjustment_def)
2878 t = tree_cons (NULL_TREE, def_for_init, t);
2879 init_def = build_vector (vectype, t);
2880 break;
2883 /* Option2: the first element is INIT_VAL. */
2884 t = tree_cons (NULL_TREE, init_value, t);
2885 if (TREE_CONSTANT (init_val))
2886 init_def = build_vector (vectype, t);
2887 else
2888 init_def = build_constructor_from_list (vectype, t);
2890 break;
2892 case MIN_EXPR:
2893 case MAX_EXPR:
2894 case COND_EXPR:
2895 if (adjustment_def)
2897 *adjustment_def = NULL_TREE;
2898 init_def = vect_get_vec_def_for_operand (init_val, stmt, NULL);
2899 break;
2902 for (i = nunits - 1; i >= 0; --i)
2903 t = tree_cons (NULL_TREE, init_value, t);
2905 if (TREE_CONSTANT (init_val))
2906 init_def = build_vector (vectype, t);
2907 else
2908 init_def = build_constructor_from_list (vectype, t);
2910 break;
2912 default:
2913 gcc_unreachable ();
2916 return init_def;
2920 /* Function vect_create_epilog_for_reduction
2922 Create code at the loop-epilog to finalize the result of a reduction
2923 computation.
2925 VECT_DEFS is list of vector of partial results, i.e., the lhs's of vector
2926 reduction statements.
2927 STMT is the scalar reduction stmt that is being vectorized.
2928 NCOPIES is > 1 in case the vectorization factor (VF) is bigger than the
2929 number of elements that we can fit in a vectype (nunits). In this case
2930 we have to generate more than one vector stmt - i.e., we need to "unroll"
2931 the vector stmt by a factor VF/nunits. For more details see documentation
2932 in vectorizable_operation.
2933 REDUC_CODE is the tree-code for the epilog reduction.
2934 REDUCTION_PHIS is a list of the phi-nodes that carry the reduction
2935 computation.
2936 REDUC_INDEX is the index of the operand in the right hand side of the
2937 statement that is defined by REDUCTION_PHI.
2938 DOUBLE_REDUC is TRUE if double reduction phi nodes should be handled.
2939 SLP_NODE is an SLP node containing a group of reduction statements. The
2940 first one in this group is STMT.
2942 This function:
2943 1. Creates the reduction def-use cycles: sets the arguments for
2944 REDUCTION_PHIS:
2945 The loop-entry argument is the vectorized initial-value of the reduction.
2946 The loop-latch argument is taken from VECT_DEFS - the vector of partial
2947 sums.
2948 2. "Reduces" each vector of partial results VECT_DEFS into a single result,
2949 by applying the operation specified by REDUC_CODE if available, or by
2950 other means (whole-vector shifts or a scalar loop).
2951 The function also creates a new phi node at the loop exit to preserve
2952 loop-closed form, as illustrated below.
2954 The flow at the entry to this function:
2956 loop:
2957 vec_def = phi <null, null> # REDUCTION_PHI
2958 VECT_DEF = vector_stmt # vectorized form of STMT
2959 s_loop = scalar_stmt # (scalar) STMT
2960 loop_exit:
2961 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
2962 use <s_out0>
2963 use <s_out0>
2965 The above is transformed by this function into:
2967 loop:
2968 vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI
2969 VECT_DEF = vector_stmt # vectorized form of STMT
2970 s_loop = scalar_stmt # (scalar) STMT
2971 loop_exit:
2972 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
2973 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
2974 v_out2 = reduce <v_out1>
2975 s_out3 = extract_field <v_out2, 0>
2976 s_out4 = adjust_result <s_out3>
2977 use <s_out4>
2978 use <s_out4>
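/* E.g., for a V4SI sum with partial sums [s0, s1, s2, s3] in v_out1,
   the "reduce" step computes s0+s1+s2+s3 either with one reduction
   operation (scheme 1 below), with whole-vector shifts (scheme 2,
   sketched here for illustration):

     v = v_out1 >> 8 bytes;  v = v + v_out1;   // [s0+s2, s1+s3, x, x]
     w = v >> 4 bytes;       v = v + w;        // [s0+s1+s2+s3, x, x, x]
     s = v[0];

   or with 4 field extracts and 3 scalar adds (scheme 3).  */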
2981 static void
2982 vect_create_epilog_for_reduction (VEC (tree, heap) *vect_defs, gimple stmt,
2983 int ncopies, enum tree_code reduc_code,
2984 VEC (gimple, heap) *reduction_phis,
2985 int reduc_index, bool double_reduc,
2986 slp_tree slp_node)
2988 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2989 stmt_vec_info prev_phi_info;
2990 tree vectype;
2991 enum machine_mode mode;
2992 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2993 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo), *outer_loop = NULL;
2994 basic_block exit_bb;
2995 tree scalar_dest;
2996 tree scalar_type;
2997 gimple new_phi = NULL, phi;
2998 gimple_stmt_iterator exit_gsi;
2999 tree vec_dest;
3000 tree new_temp = NULL_TREE, new_dest, new_name, new_scalar_dest;
3001 gimple epilog_stmt = NULL;
3002 enum tree_code code = gimple_assign_rhs_code (stmt);
3003 gimple exit_phi;
3004 tree bitsize, bitpos;
3005 tree adjustment_def = NULL;
3006 tree vec_initial_def = NULL;
3007 tree reduction_op, expr, def;
3008 tree orig_name, scalar_result;
3009 imm_use_iterator imm_iter;
3010 use_operand_p use_p;
3011 bool extract_scalar_result = false;
3012 gimple use_stmt, orig_stmt, reduction_phi = NULL;
3013 bool nested_in_vect_loop = false;
3014 VEC (gimple, heap) *new_phis = NULL;
3015 enum vect_def_type dt = vect_unknown_def_type;
3016 int j, i;
3017 VEC (tree, heap) *scalar_results = NULL;
3018 unsigned int group_size = 1, k, ratio;
3019 VEC (tree, heap) *vec_initial_defs = NULL;
3020 VEC (gimple, heap) *phis;
3022 if (slp_node)
3023 group_size = VEC_length (gimple, SLP_TREE_SCALAR_STMTS (slp_node));
3025 if (nested_in_vect_loop_p (loop, stmt))
3027 outer_loop = loop;
3028 loop = loop->inner;
3029 nested_in_vect_loop = true;
3030 gcc_assert (!slp_node);
3033 switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
3035 case GIMPLE_SINGLE_RHS:
3036 gcc_assert (TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt))
3037 == ternary_op);
3038 reduction_op = TREE_OPERAND (gimple_assign_rhs1 (stmt), reduc_index);
3039 break;
3040 case GIMPLE_UNARY_RHS:
3041 reduction_op = gimple_assign_rhs1 (stmt);
3042 break;
3043 case GIMPLE_BINARY_RHS:
3044 reduction_op = reduc_index ?
3045 gimple_assign_rhs2 (stmt) : gimple_assign_rhs1 (stmt);
3046 break;
3047 default:
3048 gcc_unreachable ();
3051 vectype = get_vectype_for_scalar_type (TREE_TYPE (reduction_op));
3052 gcc_assert (vectype);
3053 mode = TYPE_MODE (vectype);
3055 /* 1. Create the reduction def-use cycle:
3056 Set the arguments of REDUCTION_PHIS, i.e., transform
3058 loop:
3059 vec_def = phi <null, null> # REDUCTION_PHI
3060 VECT_DEF = vector_stmt # vectorized form of STMT
3063 into:
3065 loop:
3066 vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI
3067 VECT_DEF = vector_stmt # vectorized form of STMT
3070 (in case of SLP, do it for all the phis). */
3072 /* Get the loop-entry arguments. */
3073 if (slp_node)
3074 vect_get_slp_defs (slp_node, &vec_initial_defs, NULL, reduc_index);
3075 else
3077 vec_initial_defs = VEC_alloc (tree, heap, 1);
3078 /* For the case of reduction, vect_get_vec_def_for_operand returns
3079 the scalar def before the loop, that defines the initial value
3080 of the reduction variable. */
3081 vec_initial_def = vect_get_vec_def_for_operand (reduction_op, stmt,
3082 &adjustment_def);
3083 VEC_quick_push (tree, vec_initial_defs, vec_initial_def);
3086 /* Set phi nodes arguments. */
3087 for (i = 0; VEC_iterate (gimple, reduction_phis, i, phi); i++)
3089 tree vec_init_def = VEC_index (tree, vec_initial_defs, i);
3090 tree def = VEC_index (tree, vect_defs, i);
3091 for (j = 0; j < ncopies; j++)
3093 /* Set the loop-entry arg of the reduction-phi. */
3094 add_phi_arg (phi, vec_init_def, loop_preheader_edge (loop),
3095 UNKNOWN_LOCATION);
3097 /* Set the loop-latch arg for the reduction-phi. */
3098 if (j > 0)
3099 def = vect_get_vec_def_for_stmt_copy (vect_unknown_def_type, def);
3101 add_phi_arg (phi, def, loop_latch_edge (loop), UNKNOWN_LOCATION);
3103 if (vect_print_dump_info (REPORT_DETAILS))
3105 fprintf (vect_dump, "transform reduction: created def-use"
3106 " cycle: ");
3107 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
3108 fprintf (vect_dump, "\n");
3109 print_gimple_stmt (vect_dump, SSA_NAME_DEF_STMT (def), 0,
3110 TDF_SLIM);
3113 phi = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi));
3117 VEC_free (tree, heap, vec_initial_defs);
3119 /* 2. Create epilog code.
3120 The reduction epilog code operates across the elements of the vector
3121 of partial results computed by the vectorized loop.
3122 The reduction epilog code consists of:
3124 step 1: compute the scalar result in a vector (v_out2)
3125 step 2: extract the scalar result (s_out3) from the vector (v_out2)
3126 step 3: adjust the scalar result (s_out3) if needed.
3128 Step 1 can be accomplished using one of the following three schemes:
3129 (scheme 1) using reduc_code, if available.
3130 (scheme 2) using whole-vector shifts, if available.
3131 (scheme 3) using a scalar loop. In this case steps 1+2 above are
3132 combined.
3134 The overall epilog code looks like this:
3136 s_out0 = phi <s_loop> # original EXIT_PHI
3137 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
3138 v_out2 = reduce <v_out1> # step 1
3139 s_out3 = extract_field <v_out2, 0> # step 2
3140 s_out4 = adjust_result <s_out3> # step 3
3142 (step 3 is optional, and steps 1 and 2 may be combined).
3143 Lastly, the uses of s_out0 are replaced by s_out4. */
3146 /* 2.1 Create new loop-exit-phis to preserve loop-closed form:
3147 v_out1 = phi <VECT_DEF>
3148 Store them in NEW_PHIS. */
3150 exit_bb = single_exit (loop)->dest;
3151 prev_phi_info = NULL;
3152 new_phis = VEC_alloc (gimple, heap, VEC_length (tree, vect_defs));
3153 for (i = 0; VEC_iterate (tree, vect_defs, i, def); i++)
3155 for (j = 0; j < ncopies; j++)
3157 phi = create_phi_node (SSA_NAME_VAR (def), exit_bb);
3158 set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, loop_vinfo, NULL));
3159 if (j == 0)
3160 VEC_quick_push (gimple, new_phis, phi);
3161 else
3163 def = vect_get_vec_def_for_stmt_copy (dt, def);
3164 STMT_VINFO_RELATED_STMT (prev_phi_info) = phi;
3167 SET_PHI_ARG_DEF (phi, single_exit (loop)->dest_idx, def);
3168 prev_phi_info = vinfo_for_stmt (phi);
3172 exit_gsi = gsi_after_labels (exit_bb);
3174 /* 2.2 Get the relevant tree-code to use in the epilog for schemes 2,3
3175 (i.e. when reduc_code is not available) and in the final adjustment
3176 code (if needed). Also get the original scalar reduction variable as
3177 defined in the loop. In case STMT is a "pattern-stmt" (i.e. - it
3178 represents a reduction pattern), the tree-code and scalar-def are
3179 taken from the original stmt that the pattern-stmt (STMT) replaces.
3180 Otherwise (it is a regular reduction) - the tree-code and scalar-def
3181 are taken from STMT. */
3183 orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
3184 if (!orig_stmt)
3186 /* Regular reduction */
3187 orig_stmt = stmt;
3189 else
3191 /* Reduction pattern */
3192 stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt);
3193 gcc_assert (STMT_VINFO_IN_PATTERN_P (stmt_vinfo));
3194 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo) == stmt);
3197 code = gimple_assign_rhs_code (orig_stmt);
3198 /* For MINUS_EXPR the initial vector is [init_val,0,...,0]; therefore,
3199 partial results are added and not subtracted. */
3200 if (code == MINUS_EXPR)
3201 code = PLUS_EXPR;
3203 scalar_dest = gimple_assign_lhs (orig_stmt);
3204 scalar_type = TREE_TYPE (scalar_dest);
3205 scalar_results = VEC_alloc (tree, heap, group_size);
3206 new_scalar_dest = vect_create_destination_var (scalar_dest, NULL);
3207 bitsize = TYPE_SIZE (scalar_type);
3209 /* In case this is a reduction in an inner-loop while vectorizing an outer
3210 loop - we don't need to extract a single scalar result at the end of the
3211 inner-loop (unless it is double reduction, i.e., the use of reduction is
3212 outside the outer-loop). The final vector of partial results will be used
3213 in the vectorized outer-loop, or reduced to a scalar result at the end of
3214 the outer-loop. */
3215 if (nested_in_vect_loop && !double_reduc)
3216 goto vect_finalize_reduction;
3218 /* 2.3 Create the reduction code, using one of the three schemes described
3219 above. In SLP we simply need to extract all the elements from the
3220 vector (without reducing them), so we use scalar shifts. */
3221 if (reduc_code != ERROR_MARK && !slp_node)
3223 tree tmp;
3225 /*** Case 1: Create:
3226 v_out2 = reduc_expr <v_out1> */
3228 if (vect_print_dump_info (REPORT_DETAILS))
3229 fprintf (vect_dump, "Reduce using direct vector reduction.");
3231 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3232 new_phi = VEC_index (gimple, new_phis, 0);
3233 tmp = build1 (reduc_code, vectype, PHI_RESULT (new_phi));
3234 epilog_stmt = gimple_build_assign (vec_dest, tmp);
3235 new_temp = make_ssa_name (vec_dest, epilog_stmt);
3236 gimple_assign_set_lhs (epilog_stmt, new_temp);
3237 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3239 extract_scalar_result = true;
3241 else
3243 enum tree_code shift_code = ERROR_MARK;
3244 bool have_whole_vector_shift = true;
3245 int bit_offset;
3246 int element_bitsize = tree_low_cst (bitsize, 1);
3247 int vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1);
3248 tree vec_temp;
3250 if (optab_handler (vec_shr_optab, mode)->insn_code != CODE_FOR_nothing)
3251 shift_code = VEC_RSHIFT_EXPR;
3252 else
3253 have_whole_vector_shift = false;
3255 /* Regardless of whether we have a whole vector shift, if we're
3256 emulating the operation via tree-vect-generic, we don't want
3257 to use it. Only the first round of the reduction is likely
3258 to still be profitable via emulation. */
3259 /* ??? It might be better to emit a reduction tree code here, so that
3260 tree-vect-generic can expand the first round via bit tricks. */
3261 if (!VECTOR_MODE_P (mode))
3262 have_whole_vector_shift = false;
3263 else
3265 optab optab = optab_for_tree_code (code, vectype, optab_default);
3266 if (optab_handler (optab, mode)->insn_code == CODE_FOR_nothing)
3267 have_whole_vector_shift = false;
3270 if (have_whole_vector_shift && !slp_node)
3272 /*** Case 2: Create:
3273 for (offset = VS/2; offset >= element_size; offset/=2)
3275 Create: va' = vec_shift <va, offset>
3276 Create: va = vop <va, va'>
3277 } */
3279 if (vect_print_dump_info (REPORT_DETAILS))
3280 fprintf (vect_dump, "Reduce using vector shifts");
3282 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3283 new_phi = VEC_index (gimple, new_phis, 0);
3284 new_temp = PHI_RESULT (new_phi);
3285 for (bit_offset = vec_size_in_bits/2;
3286 bit_offset >= element_bitsize;
3287 bit_offset /= 2)
3289 tree bitpos = size_int (bit_offset);
3291 epilog_stmt = gimple_build_assign_with_ops (shift_code,
3292 vec_dest, new_temp, bitpos);
3293 new_name = make_ssa_name (vec_dest, epilog_stmt);
3294 gimple_assign_set_lhs (epilog_stmt, new_name);
3295 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3297 epilog_stmt = gimple_build_assign_with_ops (code, vec_dest,
3298 new_name, new_temp);
3299 new_temp = make_ssa_name (vec_dest, epilog_stmt);
3300 gimple_assign_set_lhs (epilog_stmt, new_temp);
3301 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3304 extract_scalar_result = true;
3306 else
3308 tree rhs;
3310 /*** Case 3: Create:
3311 s = extract_field <v_out2, 0>
3312 for (offset = element_size;
3313 offset < vector_size;
3314 offset += element_size;)
3316 Create: s' = extract_field <v_out2, offset>
3317 Create: s = op <s, s'> // For non SLP cases
3318 } */
3320 if (vect_print_dump_info (REPORT_DETAILS))
3321 fprintf (vect_dump, "Reduce using scalar code. ");
3323 vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1);
3324 for (i = 0; VEC_iterate (gimple, new_phis, i, new_phi); i++)
3326 vec_temp = PHI_RESULT (new_phi);
3327 rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp, bitsize,
3328 bitsize_zero_node);
3329 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
3330 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
3331 gimple_assign_set_lhs (epilog_stmt, new_temp);
3332 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3334 /* In SLP we don't need to apply the reduction operation, so we just
3335 collect s' values in SCALAR_RESULTS. */
3336 if (slp_node)
3337 VEC_safe_push (tree, heap, scalar_results, new_temp);
3339 for (bit_offset = element_bitsize;
3340 bit_offset < vec_size_in_bits;
3341 bit_offset += element_bitsize)
3343 tree bitpos = bitsize_int (bit_offset);
3344 tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp,
3345 bitsize, bitpos);
3347 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
3348 new_name = make_ssa_name (new_scalar_dest, epilog_stmt);
3349 gimple_assign_set_lhs (epilog_stmt, new_name);
3350 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3352 if (slp_node)
3354 /* In SLP we don't need to apply the reduction operation, so
3355 we just collect s' values in SCALAR_RESULTS. */
3356 new_temp = new_name;
3357 VEC_safe_push (tree, heap, scalar_results, new_name);
3359 else
3361 epilog_stmt = gimple_build_assign_with_ops (code,
3362 new_scalar_dest, new_name, new_temp);
3363 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
3364 gimple_assign_set_lhs (epilog_stmt, new_temp);
3365 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3370 /* The only case where we need to reduce scalar results in SLP is
3371 unrolling. If the size of SCALAR_RESULTS is greater than
3372 GROUP_SIZE, we reduce them by combining elements modulo
3373 GROUP_SIZE. */
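/* E.g. (illustration): with GROUP_SIZE = 2 and unrolled scalar results
   [r0, r1, r2, r3], the loop below folds r2 into r0 and r3 into r1,
   leaving one combined result per group member.  */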
3374 if (slp_node)
3376 tree res, first_res, new_res;
3377 gimple new_stmt;
3379 /* Reduce multiple scalar results in case of SLP unrolling. */
3380 for (j = group_size; VEC_iterate (tree, scalar_results, j, res);
3381 j++)
3383 first_res = VEC_index (tree, scalar_results, j % group_size);
3384 new_stmt = gimple_build_assign_with_ops (code,
3385 new_scalar_dest, first_res, res);
3386 new_res = make_ssa_name (new_scalar_dest, new_stmt);
3387 gimple_assign_set_lhs (new_stmt, new_res);
3388 gsi_insert_before (&exit_gsi, new_stmt, GSI_SAME_STMT);
3389 VEC_replace (tree, scalar_results, j % group_size, new_res);
3392 else
3393 /* Not SLP - we have one scalar to keep in SCALAR_RESULTS. */
3394 VEC_safe_push (tree, heap, scalar_results, new_temp);
3396 extract_scalar_result = false;
3400 /* 2.4 Extract the final scalar result. Create:
3401 s_out3 = extract_field <v_out2, bitpos> */
3403 if (extract_scalar_result)
3405 tree rhs;
3407 if (vect_print_dump_info (REPORT_DETAILS))
3408 fprintf (vect_dump, "extract scalar result");
3410 if (BYTES_BIG_ENDIAN)
3411 bitpos = size_binop (MULT_EXPR,
3412 bitsize_int (TYPE_VECTOR_SUBPARTS (vectype) - 1),
3413 TYPE_SIZE (scalar_type));
3414 else
3415 bitpos = bitsize_zero_node;
3417 rhs = build3 (BIT_FIELD_REF, scalar_type, new_temp, bitsize, bitpos);
3418 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
3419 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
3420 gimple_assign_set_lhs (epilog_stmt, new_temp);
3421 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3422 VEC_safe_push (tree, heap, scalar_results, new_temp);
3425 vect_finalize_reduction:
3427 /* 2.5 Adjust the final result by the initial value of the reduction
3428 variable. (When such adjustment is not needed, then
3429 'adjustment_def' is zero). For example, if code is PLUS we create:
3430 new_temp = loop_exit_def + adjustment_def */
3432 if (adjustment_def)
3434 gcc_assert (!slp_node);
3435 if (nested_in_vect_loop)
3437 new_phi = VEC_index (gimple, new_phis, 0);
3438 gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) == VECTOR_TYPE);
3439 expr = build2 (code, vectype, PHI_RESULT (new_phi), adjustment_def);
3440 new_dest = vect_create_destination_var (scalar_dest, vectype);
3442 else
3444 new_temp = VEC_index (tree, scalar_results, 0);
3445 gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) != VECTOR_TYPE);
3446 expr = build2 (code, scalar_type, new_temp, adjustment_def);
3447 new_dest = vect_create_destination_var (scalar_dest, scalar_type);
3450 epilog_stmt = gimple_build_assign (new_dest, expr);
3451 new_temp = make_ssa_name (new_dest, epilog_stmt);
3452 gimple_assign_set_lhs (epilog_stmt, new_temp);
3453 SSA_NAME_DEF_STMT (new_temp) = epilog_stmt;
3454 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3455 if (nested_in_vect_loop)
3457 set_vinfo_for_stmt (epilog_stmt,
3458 new_stmt_vec_info (epilog_stmt, loop_vinfo,
3459 NULL));
3460 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (epilog_stmt)) =
3461 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (new_phi));
3463 if (!double_reduc)
3464 VEC_quick_push (tree, scalar_results, new_temp);
3465 else
3466 VEC_replace (tree, scalar_results, 0, new_temp);
3468 else
3469 VEC_replace (tree, scalar_results, 0, new_temp);
3471 VEC_replace (gimple, new_phis, 0, epilog_stmt);
3474 /* 2.6 Handle the loop-exit phis. Replace the uses of scalar loop-exit
3475 phis with new adjusted scalar results, i.e., replace use <s_out0>
3476 with use <s_out4>.
3478 Transform:
3479 loop_exit:
3480 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
3481 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
3482 v_out2 = reduce <v_out1>
3483 s_out3 = extract_field <v_out2, 0>
3484 s_out4 = adjust_result <s_out3>
3485 use <s_out0>
3486 use <s_out0>
3488 into:
3490 loop_exit:
3491 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
3492 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
3493 v_out2 = reduce <v_out1>
3494 s_out3 = extract_field <v_out2, 0>
3495 s_out4 = adjust_result <s_out3>
3496 use <s_out4>
3497 use <s_out4> */
3499 /* In SLP we may have several statements in NEW_PHIS and REDUCTION_PHIS (in
3500 case that GROUP_SIZE is greater than the vectorization factor). Therefore, we
3501 need to match SCALAR_RESULTS with corresponding statements. The first
3502 (GROUP_SIZE / number of new vector stmts) scalar results correspond to
3503 the first vector stmt, etc.
3504 (RATIO is equal to (GROUP_SIZE / number of new vector stmts)). */
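/* E.g. (illustration): with GROUP_SIZE = 4 and two new vector stmts,
   RATIO = 2: scalar results 0 and 1 map to the first vector stmt,
   results 2 and 3 to the second.  */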
3505 if (group_size > VEC_length (gimple, new_phis))
3507 ratio = group_size / VEC_length (gimple, new_phis);
3508 gcc_assert (!(group_size % VEC_length (gimple, new_phis)));
3510 else
3511 ratio = 1;
3513 for (k = 0; k < group_size; k++)
3515 if (k % ratio == 0)
3517 epilog_stmt = VEC_index (gimple, new_phis, k / ratio);
3518 reduction_phi = VEC_index (gimple, reduction_phis, k / ratio);
3521 if (slp_node)
3523 gimple current_stmt = VEC_index (gimple,
3524 SLP_TREE_SCALAR_STMTS (slp_node), k);
3526 orig_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (current_stmt));
3527 /* SLP statements can't participate in patterns. */
3528 gcc_assert (!orig_stmt);
3529 scalar_dest = gimple_assign_lhs (current_stmt);
3532 phis = VEC_alloc (gimple, heap, 3);
3533 /* Find the loop-closed-use at the loop exit of the original scalar
3534 result. (The reduction result is expected to have two immediate uses -
3535 one at the latch block, and one at the loop exit). */
3536 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
3537 if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p))))
3538 VEC_safe_push (gimple, heap, phis, USE_STMT (use_p));
3540 /* We expect to have found an exit_phi because of loop-closed-ssa
3541 form. */
3542 gcc_assert (!VEC_empty (gimple, phis));
3544 for (i = 0; VEC_iterate (gimple, phis, i, exit_phi); i++)
3546 if (outer_loop)
3548 stmt_vec_info exit_phi_vinfo = vinfo_for_stmt (exit_phi);
3549 gimple vect_phi;
3551 /* FORNOW. We do not yet support the case where an inner-loop
3552 reduction is not used in the outer-loop (but is only used outside the
3553 outer-loop), unless it is a double reduction. */
3554 gcc_assert ((STMT_VINFO_RELEVANT_P (exit_phi_vinfo)
3555 && !STMT_VINFO_LIVE_P (exit_phi_vinfo))
3556 || double_reduc);
3558 STMT_VINFO_VEC_STMT (exit_phi_vinfo) = epilog_stmt;
3559 if (!double_reduc
3560 || STMT_VINFO_DEF_TYPE (exit_phi_vinfo)
3561 != vect_double_reduction_def)
3562 continue;
3564 /* Handle double reduction:
3566 stmt1: s1 = phi <s0, s2> - double reduction phi (outer loop)
3567 stmt2: s3 = phi <s1, s4> - (regular) reduc phi (inner loop)
3568 stmt3: s4 = use (s3) - (regular) reduc stmt (inner loop)
3569 stmt4: s2 = phi <s4> - double reduction stmt (outer loop)
3571 At that point the regular reduction (stmt2 and stmt3) is
3572 already vectorized, as well as the exit phi node, stmt4.
3573 Here we vectorize the phi node of double reduction, stmt1, and
3574 update all relevant statements. */
3576 /* Go through all the uses of s2 to find the double reduction phi
3577 node, i.e., stmt1 above. */
3578 orig_name = PHI_RESULT (exit_phi);
3579 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
3581 stmt_vec_info use_stmt_vinfo = vinfo_for_stmt (use_stmt);
3582 stmt_vec_info new_phi_vinfo;
3583 tree vect_phi_init, preheader_arg, vect_phi_res, init_def;
3584 basic_block bb = gimple_bb (use_stmt);
3585 gimple use;
3587 /* Check that USE_STMT is really a double reduction phi
3588 node. */
3589 if (gimple_code (use_stmt) != GIMPLE_PHI
3590 || gimple_phi_num_args (use_stmt) != 2
3591 || !use_stmt_vinfo
3592 || STMT_VINFO_DEF_TYPE (use_stmt_vinfo)
3593 != vect_double_reduction_def
3594 || bb->loop_father != outer_loop)
3595 continue;
3597 /* Create vector phi node for double reduction:
3598 vs1 = phi <vs0, vs2>
3599 vs1 was created previously in this function by a call to
3600 vect_get_vec_def_for_operand and is stored in
3601 vec_initial_def;
3602 vs2 is defined by EPILOG_STMT, the vectorized EXIT_PHI;
3603 vs0 is created here. */
3605 /* Create vector phi node. */
3606 vect_phi = create_phi_node (vec_initial_def, bb);
3607 new_phi_vinfo = new_stmt_vec_info (vect_phi,
3608 loop_vec_info_for_loop (outer_loop), NULL);
3609 set_vinfo_for_stmt (vect_phi, new_phi_vinfo);
3611 /* Create vs0 - initial def of the double reduction phi. */
3612 preheader_arg = PHI_ARG_DEF_FROM_EDGE (use_stmt,
3613 loop_preheader_edge (outer_loop));
3614 init_def = get_initial_def_for_reduction (stmt,
3615 preheader_arg, NULL);
3616 vect_phi_init = vect_init_vector (use_stmt, init_def,
3617 vectype, NULL);
3619 /* Update phi node arguments with vs0 and vs2. */
3620 add_phi_arg (vect_phi, vect_phi_init,
3621 loop_preheader_edge (outer_loop),
3622 UNKNOWN_LOCATION);
3623 add_phi_arg (vect_phi, PHI_RESULT (epilog_stmt),
3624 loop_latch_edge (outer_loop), UNKNOWN_LOCATION);
3625 if (vect_print_dump_info (REPORT_DETAILS))
3627 fprintf (vect_dump, "created double reduction phi "
3628 "node: ");
3629 print_gimple_stmt (vect_dump, vect_phi, 0, TDF_SLIM);
3632 vect_phi_res = PHI_RESULT (vect_phi);
3634 /* Replace the use, i.e., set the correct vs1 in the regular
3635 reduction phi node. FORNOW, NCOPIES is always 1, so the
3636 loop is redundant. */
3637 use = reduction_phi;
3638 for (j = 0; j < ncopies; j++)
3640 edge pr_edge = loop_preheader_edge (loop);
3641 SET_PHI_ARG_DEF (use, pr_edge->dest_idx, vect_phi_res);
3642 use = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (use));
3647 /* Replace the uses: */
3648 orig_name = PHI_RESULT (exit_phi);
3649 scalar_result = VEC_index (tree, scalar_results, k);
3650 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
3651 FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
3652 SET_USE (use_p, scalar_result);
3655 VEC_free (gimple, heap, phis);
3658 VEC_free (tree, heap, scalar_results);
3659 VEC_free (gimple, heap, new_phis);
3663 /* Function vectorizable_reduction.
3665 Check if STMT performs a reduction operation that can be vectorized.
3666 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3667 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
3668 Return FALSE if not a vectorizable STMT, TRUE otherwise.
3670 This function also handles reduction idioms (patterns) that have been
3671 recognized in advance during vect_pattern_recog. In this case, STMT may be
3672 of this form:
3673 X = pattern_expr (arg0, arg1, ..., X)
3674 and its STMT_VINFO_RELATED_STMT points to the last stmt in the original
3675 sequence that had been detected and replaced by the pattern-stmt (STMT).
3677 In some cases of reduction patterns, the type of the reduction variable X is
3678 different from the type of the other arguments of STMT.
3679 In such cases, the vectype that is used when transforming STMT into a vector
3680 stmt is different from the vectype that is used to determine the
3681 vectorization factor, because it consists of a different number of elements
3682 than the actual number of elements that are being operated upon in parallel.
3684 For example, consider an accumulation of shorts into an int accumulator.
3685 On some targets it's possible to vectorize this pattern operating on 8
3686 shorts at a time (hence, the vectype for purposes of determining the
3687 vectorization factor should be V8HI); on the other hand, the vectype that
3688 is used to create the vector form is actually V4SI (the type of the result).
3690 Upon entry to this function, STMT_VINFO_VECTYPE records the vectype that
3691 indicates the actual level of parallelism (V8HI in the example), so that
3692 the right vectorization factor is derived. This vectype
3693 corresponds to the type of arguments to the reduction stmt, and should *NOT*
3694 be used to create the vectorized stmt. The right vectype for the vectorized
3695 stmt is obtained from the type of the result X:
3696 get_vectype_for_scalar_type (TREE_TYPE (X))
3698 This means that, contrary to "regular" reductions (or "regular" stmts in
3699 general), the following equation:
3700 STMT_VINFO_VECTYPE == get_vectype_for_scalar_type (TREE_TYPE (X))
3701 does *NOT* necessarily hold for reduction patterns. */
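/* For instance, continuing the widen_sum example above: the
   vectorization factor is derived from V8HI (eight shorts per vector
   iteration), while the vector stmt itself is created with
   get_vectype_for_scalar_type (TREE_TYPE (X)) == V4SI, the type of the
   int accumulator. */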
3703 bool
3704 vectorizable_reduction (gimple stmt, gimple_stmt_iterator *gsi,
3705 gimple *vec_stmt, slp_tree slp_node)
3707 tree vec_dest;
3708 tree scalar_dest;
3709 tree loop_vec_def0 = NULL_TREE, loop_vec_def1 = NULL_TREE;
3710 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3711 tree vectype_out = STMT_VINFO_VECTYPE (stmt_info);
3712 tree vectype_in = NULL_TREE;
3713 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3714 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
3715 enum tree_code code, orig_code, epilog_reduc_code;
3716 enum machine_mode vec_mode;
3717 int op_type;
3718 optab optab, reduc_optab;
3719 tree new_temp = NULL_TREE;
3720 tree def;
3721 gimple def_stmt;
3722 enum vect_def_type dt;
3723 gimple new_phi = NULL;
3724 tree scalar_type;
3725 bool is_simple_use;
3726 gimple orig_stmt;
3727 stmt_vec_info orig_stmt_info;
3728 tree expr = NULL_TREE;
3729 int i;
3730 int ncopies;
3731 int epilog_copies;
3732 stmt_vec_info prev_stmt_info, prev_phi_info;
3733 bool single_defuse_cycle = false;
3734 tree reduc_def = NULL_TREE;
3735 gimple new_stmt = NULL;
3736 int j;
3737 tree ops[3];
3738 bool nested_cycle = false, found_nested_cycle_def = false;
3739 gimple reduc_def_stmt = NULL;
3740 /* The default is that the reduction variable is the last operand in the
statement. */
3741 int reduc_index = 2;
3742 bool double_reduc = false, dummy;
3743 basic_block def_bb;
3744 struct loop * def_stmt_loop, *outer_loop = NULL;
3745 tree def_arg;
3746 gimple def_arg_stmt;
3747 VEC (tree, heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL, *vect_defs = NULL;
3748 VEC (gimple, heap) *phis = NULL;
3749 int vec_num;
3750 tree def0, def1;
3752 if (nested_in_vect_loop_p (loop, stmt))
3754 outer_loop = loop;
3755 loop = loop->inner;
3756 nested_cycle = true;
3759 /* 1. Is vectorizable reduction? */
3760 /* Not supportable if the reduction variable is used in the loop. */
3761 if (STMT_VINFO_RELEVANT (stmt_info) > vect_used_in_outer)
3762 return false;
3764 /* Reductions that are not used even in an enclosing outer-loop
3765 are expected to be "live" (used outside the loop). */
3766 if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_scope
3767 && !STMT_VINFO_LIVE_P (stmt_info))
3768 return false;
3770 /* Make sure it was already recognized as a reduction computation. */
3771 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_reduction_def
3772 && STMT_VINFO_DEF_TYPE (stmt_info) != vect_nested_cycle)
3773 return false;
3775 /* 2. Has this been recognized as a reduction pattern?
3777 Check if STMT represents a pattern that has been recognized
3778 in earlier analysis stages. For stmts that represent a pattern,
3779 the STMT_VINFO_RELATED_STMT field records the last stmt in
3780 the original sequence that constitutes the pattern. */
3782 orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
3783 if (orig_stmt)
3785 orig_stmt_info = vinfo_for_stmt (orig_stmt);
3786 gcc_assert (STMT_VINFO_RELATED_STMT (orig_stmt_info) == stmt);
3787 gcc_assert (STMT_VINFO_IN_PATTERN_P (orig_stmt_info));
3788 gcc_assert (!STMT_VINFO_IN_PATTERN_P (stmt_info));
3791 /* 3. Check the operands of the operation. The first operands are defined
3792 inside the loop body. The last operand is the reduction variable,
3793 which is defined by the loop-header-phi. */
3795 gcc_assert (is_gimple_assign (stmt));
3797 /* Flatten the RHS. */
3798 switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
3800 case GIMPLE_SINGLE_RHS:
3801 op_type = TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt));
3802 if (op_type == ternary_op)
3804 tree rhs = gimple_assign_rhs1 (stmt);
3805 ops[0] = TREE_OPERAND (rhs, 0);
3806 ops[1] = TREE_OPERAND (rhs, 1);
3807 ops[2] = TREE_OPERAND (rhs, 2);
3808 code = TREE_CODE (rhs);
3810 else
3811 return false;
3812 break;
3814 case GIMPLE_BINARY_RHS:
3815 code = gimple_assign_rhs_code (stmt);
3816 op_type = TREE_CODE_LENGTH (code);
3817 gcc_assert (op_type == binary_op);
3818 ops[0] = gimple_assign_rhs1 (stmt);
3819 ops[1] = gimple_assign_rhs2 (stmt);
3820 break;
3822 case GIMPLE_UNARY_RHS:
3823 return false;
3825 default:
3826 gcc_unreachable ();
3829 scalar_dest = gimple_assign_lhs (stmt);
3830 scalar_type = TREE_TYPE (scalar_dest);
3831 if (!POINTER_TYPE_P (scalar_type) && !INTEGRAL_TYPE_P (scalar_type)
3832 && !SCALAR_FLOAT_TYPE_P (scalar_type))
3833 return false;
3835 /* All uses but the last are expected to be defined in the loop.
3836 The last use is the reduction variable. In the case of a nested cycle
3837 this assumption does not hold: we use reduc_index to record the index
3838 of the reduction variable. */
3839 for (i = 0; i < op_type-1; i++)
3841 tree tem;
3843 /* The condition of COND_EXPR is checked in vectorizable_condition(). */
3844 if (i == 0 && code == COND_EXPR)
3845 continue;
3847 is_simple_use = vect_is_simple_use_1 (ops[i], loop_vinfo, NULL,
3848 &def_stmt, &def, &dt, &tem);
3849 if (!vectype_in)
3850 vectype_in = tem;
3851 gcc_assert (is_simple_use);
3852 if (dt != vect_internal_def
3853 && dt != vect_external_def
3854 && dt != vect_constant_def
3855 && dt != vect_induction_def
3856 && !(dt == vect_nested_cycle && nested_cycle))
3857 return false;
3859 if (dt == vect_nested_cycle)
3861 found_nested_cycle_def = true;
3862 reduc_def_stmt = def_stmt;
3863 reduc_index = i;
3867 is_simple_use = vect_is_simple_use (ops[i], loop_vinfo, NULL, &def_stmt,
3868 &def, &dt);
3869 gcc_assert (is_simple_use);
3870 gcc_assert (dt == vect_reduction_def
3871 || dt == vect_nested_cycle
3872 || ((dt == vect_internal_def || dt == vect_external_def
3873 || dt == vect_constant_def || dt == vect_induction_def)
3874 && nested_cycle && found_nested_cycle_def));
3875 if (!found_nested_cycle_def)
3876 reduc_def_stmt = def_stmt;
3878 gcc_assert (gimple_code (reduc_def_stmt) == GIMPLE_PHI);
3879 if (orig_stmt)
3880 gcc_assert (orig_stmt == vect_is_simple_reduction (loop_vinfo,
3881 reduc_def_stmt,
3882 !nested_cycle,
3883 &dummy));
3884 else
3885 gcc_assert (stmt == vect_is_simple_reduction (loop_vinfo, reduc_def_stmt,
3886 !nested_cycle, &dummy));
3888 if (STMT_VINFO_LIVE_P (vinfo_for_stmt (reduc_def_stmt)))
3889 return false;
3891 if (slp_node)
3892 ncopies = 1;
3893 else
3894 ncopies = (LOOP_VINFO_VECT_FACTOR (loop_vinfo)
3895 / TYPE_VECTOR_SUBPARTS (vectype_in));
3897 gcc_assert (ncopies >= 1);
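/* For example (numbers chosen for illustration): a vectorization factor
   of 8 with a V4SI vectype_in (4 subparts) gives ncopies == 2, i.e. two
   vector stmts are generated for the one scalar reduction stmt. */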
3899 vec_mode = TYPE_MODE (vectype_in);
3901 if (code == COND_EXPR)
3903 if (!vectorizable_condition (stmt, gsi, NULL, ops[reduc_index], 0))
3905 if (vect_print_dump_info (REPORT_DETAILS))
3906 fprintf (vect_dump, "unsupported condition in reduction");
3908 return false;
3911 else
3913 /* 4. Supportable by target? */
3915 /* 4.1. Check support for the operation in the loop. */
3916 optab = optab_for_tree_code (code, vectype_in, optab_default);
3917 if (!optab)
3919 if (vect_print_dump_info (REPORT_DETAILS))
3920 fprintf (vect_dump, "no optab.");
3922 return false;
3925 if (optab_handler (optab, vec_mode)->insn_code == CODE_FOR_nothing)
3927 if (vect_print_dump_info (REPORT_DETAILS))
3928 fprintf (vect_dump, "op not supported by target.");
3930 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
3931 || LOOP_VINFO_VECT_FACTOR (loop_vinfo)
3932 < vect_min_worthwhile_factor (code))
3933 return false;
3935 if (vect_print_dump_info (REPORT_DETAILS))
3936 fprintf (vect_dump, "proceeding using word mode.");
3939 /* Worthwhile without SIMD support? */
3940 if (!VECTOR_MODE_P (TYPE_MODE (vectype_in))
3941 && LOOP_VINFO_VECT_FACTOR (loop_vinfo)
3942 < vect_min_worthwhile_factor (code))
3944 if (vect_print_dump_info (REPORT_DETAILS))
3945 fprintf (vect_dump, "not worthwhile without SIMD support.");
3947 return false;
3951 /* 4.2. Check support for the epilog operation.
3953 If STMT represents a reduction pattern, then the type of the
3954 reduction variable may be different than the type of the rest
3955 of the arguments. For example, consider the case of accumulation
3956 of shorts into an int accumulator. The original code:
3957 S1: int_a = (int) short_a;
3958 orig_stmt-> S2: int_acc = plus <int_a ,int_acc>;
3960 was replaced with:
3961 STMT: int_acc = widen_sum <short_a, int_acc>
3963 This means that:
3964 1. The tree-code that is used to create the vector operation in the
3965 epilog code (that reduces the partial results) is not the
3966 tree-code of STMT, but is rather the tree-code of the original
3967 stmt from the pattern that STMT is replacing. I.e., in the example
3968 above we want to use 'widen_sum' in the loop, but 'plus' in the
3969 epilog.
3970 2. The type (mode) we use to check available target support
3971 for the vector operation to be created in the *epilog*, is
3972 determined by the type of the reduction variable (in the example
3973 above we'd check this: plus_optab[vect_int_mode]).
3974 However the type (mode) we use to check available target support
3975 for the vector operation to be created *inside the loop*, is
3976 determined by the type of the other arguments to STMT (in the
3977 example we'd check this: widen_sum_optab[vect_short_mode]).
3979 This is contrary to "regular" reductions, in which the types of all
3980 the arguments are the same as the type of the reduction variable.
3981 For "regular" reductions we can therefore use the same vector type
3982 (and also the same tree-code) when generating the epilog code and
3983 when generating the code inside the loop. */
3985 if (orig_stmt)
3987 /* This is a reduction pattern: get the vectype from the type of the
3988 reduction variable, and get the tree-code from orig_stmt. */
3989 orig_code = gimple_assign_rhs_code (orig_stmt);
3990 gcc_assert (vectype_out);
3991 vec_mode = TYPE_MODE (vectype_out);
3993 else
3995 /* Regular reduction: the same vectype and tree-code that are used for
3996 the vector code inside the loop can be used for the epilog code. */
3997 orig_code = code;
4000 if (nested_cycle)
4002 def_bb = gimple_bb (reduc_def_stmt);
4003 def_stmt_loop = def_bb->loop_father;
4004 def_arg = PHI_ARG_DEF_FROM_EDGE (reduc_def_stmt,
4005 loop_preheader_edge (def_stmt_loop));
4006 if (TREE_CODE (def_arg) == SSA_NAME
4007 && (def_arg_stmt = SSA_NAME_DEF_STMT (def_arg))
4008 && gimple_code (def_arg_stmt) == GIMPLE_PHI
4009 && flow_bb_inside_loop_p (outer_loop, gimple_bb (def_arg_stmt))
4010 && vinfo_for_stmt (def_arg_stmt)
4011 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_arg_stmt))
4012 == vect_double_reduction_def)
4013 double_reduc = true;
4016 epilog_reduc_code = ERROR_MARK;
4017 if (reduction_code_for_scalar_code (orig_code, &epilog_reduc_code))
4019 reduc_optab = optab_for_tree_code (epilog_reduc_code, vectype_out,
4020 optab_default);
4021 if (!reduc_optab)
4023 if (vect_print_dump_info (REPORT_DETAILS))
4024 fprintf (vect_dump, "no optab for reduction.");
4026 epilog_reduc_code = ERROR_MARK;
4029 if (reduc_optab
4030 && optab_handler (reduc_optab, vec_mode)->insn_code
4031 == CODE_FOR_nothing)
4033 if (vect_print_dump_info (REPORT_DETAILS))
4034 fprintf (vect_dump, "reduc op not supported by target.");
4036 epilog_reduc_code = ERROR_MARK;
4039 else
4041 if (!nested_cycle || double_reduc)
4043 if (vect_print_dump_info (REPORT_DETAILS))
4044 fprintf (vect_dump, "no reduc code for scalar code.");
4046 return false;
4050 if (double_reduc && ncopies > 1)
4052 if (vect_print_dump_info (REPORT_DETAILS))
4053 fprintf (vect_dump, "multiple types in double reduction");
4055 return false;
4058 if (!vec_stmt) /* transformation not required. */
4060 STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
4061 if (!vect_model_reduction_cost (stmt_info, epilog_reduc_code, ncopies))
4062 return false;
4063 return true;
4066 /** Transform. **/
4068 if (vect_print_dump_info (REPORT_DETAILS))
4069 fprintf (vect_dump, "transform reduction.");
4071 /* FORNOW: Multiple types are not supported for condition. */
4072 if (code == COND_EXPR)
4073 gcc_assert (ncopies == 1);
4075 /* Create the destination vector */
4076 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
4078 /* In case the vectorization factor (VF) is bigger than the number
4079 of elements that we can fit in a vectype (nunits), we have to generate
4080 more than one vector stmt - i.e., we need to "unroll" the
4081 vector stmt by a factor VF/nunits. For more details see documentation
4082 in vectorizable_operation. */
4084 /* If the reduction is used in an outer loop we need to generate
4085 VF intermediate results, like so (e.g. for ncopies=2):
4086 r0 = phi (init, r0)
4087 r1 = phi (init, r1)
4088 r0 = x0 + r0;
4089 r1 = x1 + r1;
4090 (i.e. we generate VF results in 2 registers).
4091 In this case we have a separate def-use cycle for each copy, and therefore
4092 for each copy we get the vector def for the reduction variable from the
4093 respective phi node created for this copy.
4095 Otherwise (the reduction is unused in the loop nest), we can combine
4096 the intermediate results, like so (e.g. for ncopies=2):
4097 r = phi (init, r)
4098 r = x0 + r;
4099 r = x1 + r;
4100 (i.e. we generate VF/2 results in a single register).
4101 In this case for each copy we get the vector def for the reduction variable
4102 from the vectorized reduction operation generated in the previous iteration.
4105 if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_scope)
4107 single_defuse_cycle = true;
4108 epilog_copies = 1;
4110 else
4111 epilog_copies = ncopies;
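/* E.g., for ncopies == 2: if the reduction is unused in the loop nest we
   get a single def-use cycle (one phi, one epilog copy); if it is used in
   the outer loop, each of the two copies gets its own phi and its own
   epilog copy. */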
4113 prev_stmt_info = NULL;
4114 prev_phi_info = NULL;
4115 if (slp_node)
4117 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
4118 gcc_assert (TYPE_VECTOR_SUBPARTS (vectype_out)
4119 == TYPE_VECTOR_SUBPARTS (vectype_in));
4121 else
4123 vec_num = 1;
4124 vec_oprnds0 = VEC_alloc (tree, heap, 1);
4125 if (op_type == ternary_op)
4126 vec_oprnds1 = VEC_alloc (tree, heap, 1);
4129 phis = VEC_alloc (gimple, heap, vec_num);
4130 vect_defs = VEC_alloc (tree, heap, vec_num);
4131 if (!slp_node)
4132 VEC_quick_push (tree, vect_defs, NULL_TREE);
4134 for (j = 0; j < ncopies; j++)
4136 if (j == 0 || !single_defuse_cycle)
4138 for (i = 0; i < vec_num; i++)
4140 /* Create the reduction-phi that defines the reduction
4141 operand. */
4142 new_phi = create_phi_node (vec_dest, loop->header);
4143 set_vinfo_for_stmt (new_phi,
4144 new_stmt_vec_info (new_phi, loop_vinfo,
4145 NULL));
4146 if (j == 0 || slp_node)
4147 VEC_quick_push (gimple, phis, new_phi);
4151 if (code == COND_EXPR)
4153 gcc_assert (!slp_node);
4154 vectorizable_condition (stmt, gsi, vec_stmt,
4155 PHI_RESULT (VEC_index (gimple, phis, 0)),
4156 reduc_index);
4157 /* Multiple types are not supported for condition. */
4158 break;
4161 /* Handle uses. */
4162 if (j == 0)
4164 if (slp_node)
4165 vect_get_slp_defs (slp_node, &vec_oprnds0, &vec_oprnds1, -1);
4166 else
4168 loop_vec_def0 = vect_get_vec_def_for_operand (ops[!reduc_index],
4169 stmt, NULL);
4170 VEC_quick_push (tree, vec_oprnds0, loop_vec_def0);
4171 if (op_type == ternary_op)
4173 if (reduc_index == 0)
4174 loop_vec_def1 = vect_get_vec_def_for_operand (ops[2], stmt,
4175 NULL);
4176 else
4177 loop_vec_def1 = vect_get_vec_def_for_operand (ops[1], stmt,
4178 NULL);
4180 VEC_quick_push (tree, vec_oprnds1, loop_vec_def1);
4184 else
4186 if (!slp_node)
4188 enum vect_def_type dt = vect_unknown_def_type; /* Dummy */
4189 loop_vec_def0 = vect_get_vec_def_for_stmt_copy (dt, loop_vec_def0);
4190 VEC_replace (tree, vec_oprnds0, 0, loop_vec_def0);
4191 if (op_type == ternary_op)
4193 loop_vec_def1 = vect_get_vec_def_for_stmt_copy (dt,
4194 loop_vec_def1);
4195 VEC_replace (tree, vec_oprnds1, 0, loop_vec_def1);
4199 if (single_defuse_cycle)
4200 reduc_def = gimple_assign_lhs (new_stmt);
4202 STMT_VINFO_RELATED_STMT (prev_phi_info) = new_phi;
4205 for (i = 0; VEC_iterate (tree, vec_oprnds0, i, def0); i++)
4207 if (slp_node)
4208 reduc_def = PHI_RESULT (VEC_index (gimple, phis, i));
4209 else
4211 if (!single_defuse_cycle || j == 0)
4212 reduc_def = PHI_RESULT (new_phi);
4215 def1 = ((op_type == ternary_op)
4216 ? VEC_index (tree, vec_oprnds1, i) : NULL);
4217 if (op_type == binary_op)
4219 if (reduc_index == 0)
4220 expr = build2 (code, vectype_out, reduc_def, def0);
4221 else
4222 expr = build2 (code, vectype_out, def0, reduc_def);
4224 else
4226 if (reduc_index == 0)
4227 expr = build3 (code, vectype_out, reduc_def, def0, def1);
4228 else
4230 if (reduc_index == 1)
4231 expr = build3 (code, vectype_out, def0, reduc_def, def1);
4232 else
4233 expr = build3 (code, vectype_out, def0, def1, reduc_def);
4237 new_stmt = gimple_build_assign (vec_dest, expr);
4238 new_temp = make_ssa_name (vec_dest, new_stmt);
4239 gimple_assign_set_lhs (new_stmt, new_temp);
4240 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4241 if (slp_node)
4243 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
4244 VEC_quick_push (tree, vect_defs, new_temp);
4246 else
4247 VEC_replace (tree, vect_defs, 0, new_temp);
4250 if (slp_node)
4251 continue;
4253 if (j == 0)
4254 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4255 else
4256 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4258 prev_stmt_info = vinfo_for_stmt (new_stmt);
4259 prev_phi_info = vinfo_for_stmt (new_phi);
4262 /* Finalize the reduction-phi (set its arguments) and create the
4263 epilog reduction code. */
4264 if ((!single_defuse_cycle || code == COND_EXPR) && !slp_node)
4266 new_temp = gimple_assign_lhs (*vec_stmt);
4267 VEC_replace (tree, vect_defs, 0, new_temp);
4270 vect_create_epilog_for_reduction (vect_defs, stmt, epilog_copies,
4271 epilog_reduc_code, phis, reduc_index,
4272 double_reduc, slp_node);
4274 VEC_free (gimple, heap, phis);
4275 VEC_free (tree, heap, vec_oprnds0);
4276 if (vec_oprnds1)
4277 VEC_free (tree, heap, vec_oprnds1);
4279 return true;
4282 /* Function vect_min_worthwhile_factor.
4284 For a loop where we could vectorize the operation indicated by CODE,
4285 return the minimum vectorization factor that makes it worthwhile
4286 to use generic vectors. */
4288 vect_min_worthwhile_factor (enum tree_code code)
4290 switch (code)
4292 case PLUS_EXPR:
4293 case MINUS_EXPR:
4294 case NEGATE_EXPR:
4295 return 4;
4297 case BIT_AND_EXPR:
4298 case BIT_IOR_EXPR:
4299 case BIT_XOR_EXPR:
4300 case BIT_NOT_EXPR:
4301 return 2;
4303 default:
4304 return INT_MAX;
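/* Thus, for instance, a PLUS_EXPR loop is considered worthwhile with
   generic (word-mode) vectors only when the vectorization factor is at
   least 4, while tree codes not listed above (INT_MAX) are never
   considered worthwhile without real SIMD support. */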
4309 /* Function vectorizable_induction.
4311 Check if PHI performs an induction computation that can be vectorized.
4312 If VEC_STMT is also passed, vectorize the induction PHI: create a vectorized
4313 phi to replace it, put it in VEC_STMT, and add it to the same basic block.
4314 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
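/* For instance (a conceptual sketch, not the exact IL): given the scalar
   induction i = PHI <0, i + 1> and VF == 4, the vectorized form is the
   vector phi vi = PHI <{0,1,2,3}, vi + {4,4,4,4}>, which is what
   get_initial_def_for_induction builds below. */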
4316 bool
4317 vectorizable_induction (gimple phi, gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
4318 gimple *vec_stmt)
4320 stmt_vec_info stmt_info = vinfo_for_stmt (phi);
4321 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4322 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4323 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
4324 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
4325 int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
4326 tree vec_def;
4328 gcc_assert (ncopies >= 1);
4329 /* FORNOW. This restriction should be relaxed. */
4330 if (nested_in_vect_loop_p (loop, phi) && ncopies > 1)
4332 if (vect_print_dump_info (REPORT_DETAILS))
4333 fprintf (vect_dump, "multiple types in nested loop.");
4334 return false;
4337 if (!STMT_VINFO_RELEVANT_P (stmt_info))
4338 return false;
4340 /* FORNOW: SLP not supported. */
4341 if (STMT_SLP_TYPE (stmt_info))
4342 return false;
4344 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def);
4346 if (gimple_code (phi) != GIMPLE_PHI)
4347 return false;
4349 if (!vec_stmt) /* transformation not required. */
4351 STMT_VINFO_TYPE (stmt_info) = induc_vec_info_type;
4352 if (vect_print_dump_info (REPORT_DETAILS))
4353 fprintf (vect_dump, "=== vectorizable_induction ===");
4354 vect_model_induction_cost (stmt_info, ncopies);
4355 return true;
4358 /** Transform. **/
4360 if (vect_print_dump_info (REPORT_DETAILS))
4361 fprintf (vect_dump, "transform induction phi.");
4363 vec_def = get_initial_def_for_induction (phi);
4364 *vec_stmt = SSA_NAME_DEF_STMT (vec_def);
4365 return true;
4368 /* Function vectorizable_live_operation.
4370 STMT computes a value that is used outside the loop. Check if
4371 it can be supported. */
4373 bool
4374 vectorizable_live_operation (gimple stmt,
4375 gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
4376 gimple *vec_stmt ATTRIBUTE_UNUSED)
4378 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4379 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4380 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
4381 int i;
4382 int op_type;
4383 tree op;
4384 tree def;
4385 gimple def_stmt;
4386 enum vect_def_type dt;
4387 enum tree_code code;
4388 enum gimple_rhs_class rhs_class;
4390 gcc_assert (STMT_VINFO_LIVE_P (stmt_info));
4392 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def)
4393 return false;
4395 if (!is_gimple_assign (stmt))
4396 return false;
4398 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
4399 return false;
4401 /* FORNOW. CHECKME. */
4402 if (nested_in_vect_loop_p (loop, stmt))
4403 return false;
4405 code = gimple_assign_rhs_code (stmt);
4406 op_type = TREE_CODE_LENGTH (code);
4407 rhs_class = get_gimple_rhs_class (code);
4408 gcc_assert (rhs_class != GIMPLE_UNARY_RHS || op_type == unary_op);
4409 gcc_assert (rhs_class != GIMPLE_BINARY_RHS || op_type == binary_op);
4411 /* FORNOW: support only if all uses are invariant. This means
4412 that the scalar operations can remain in place, unvectorized.
4413 The original last scalar value that they compute will be used. */
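/* E.g., for 't = x + y' with loop-invariant 'x' and 'y', where 't' is
   used after the loop, the scalar stmt can stay in place and the value
   of 't' after the last iteration is used as-is. */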
4415 for (i = 0; i < op_type; i++)
4417 if (rhs_class == GIMPLE_SINGLE_RHS)
4418 op = TREE_OPERAND (gimple_op (stmt, 1), i);
4419 else
4420 op = gimple_op (stmt, i + 1);
4421 if (op
4422 && !vect_is_simple_use (op, loop_vinfo, NULL, &def_stmt, &def, &dt))
4424 if (vect_print_dump_info (REPORT_DETAILS))
4425 fprintf (vect_dump, "use not simple.");
4426 return false;
4429 if (dt != vect_external_def && dt != vect_constant_def)
4430 return false;
4433 /* No transformation is required for the cases we currently support. */
4434 return true;
4437 /* Kill any debug uses outside LOOP of SSA names defined in STMT. */
4439 static void
4440 vect_loop_kill_debug_uses (struct loop *loop, gimple stmt)
4442 ssa_op_iter op_iter;
4443 imm_use_iterator imm_iter;
4444 def_operand_p def_p;
4445 gimple ustmt;
4447 FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
4449 FOR_EACH_IMM_USE_STMT (ustmt, imm_iter, DEF_FROM_PTR (def_p))
4451 basic_block bb;
4453 if (!is_gimple_debug (ustmt))
4454 continue;
4456 bb = gimple_bb (ustmt);
4458 if (!flow_bb_inside_loop_p (loop, bb))
4460 if (gimple_debug_bind_p (ustmt))
4462 if (vect_print_dump_info (REPORT_DETAILS))
4463 fprintf (vect_dump, "killing debug use");
4465 gimple_debug_bind_reset_value (ustmt);
4466 update_stmt (ustmt);
4468 else
4469 gcc_unreachable ();
4475 /* Function vect_transform_loop.
4477 The analysis phase has determined that the loop is vectorizable.
4478 Vectorize the loop - create vectorized stmts to replace the scalar
4479 stmts in the loop, and update the loop exit condition. */
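/* In outline: peel/version the loop as decided by the analysis phase,
   walk every basic block of the loop vectorizing the relevant phis and
   stmts (scheduling whole SLP instances when the first SLP stmt is
   reached), and finally make the vectorized loop iterate RATIO (niters
   divided by VF) times via slpeel_make_loop_iterate_ntimes. */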
4481 void
4482 vect_transform_loop (loop_vec_info loop_vinfo)
4484 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
4485 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
4486 int nbbs = loop->num_nodes;
4487 gimple_stmt_iterator si;
4488 int i;
4489 tree ratio = NULL;
4490 int vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
4491 bool strided_store;
4492 bool slp_scheduled = false;
4493 unsigned int nunits;
4494 tree cond_expr = NULL_TREE;
4495 gimple_seq cond_expr_stmt_list = NULL;
4496 bool do_peeling_for_loop_bound;
4498 if (vect_print_dump_info (REPORT_DETAILS))
4499 fprintf (vect_dump, "=== vect_transform_loop ===");
4501 /* Peel the loop if there are data refs with unknown alignment.
4502 Only one store data ref with unknown alignment is allowed. */
4504 if (LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo))
4505 vect_do_peeling_for_alignment (loop_vinfo);
4507 do_peeling_for_loop_bound
4508 = (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
4509 || (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
4510 && LOOP_VINFO_INT_NITERS (loop_vinfo) % vectorization_factor != 0));
4512 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
4513 || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
4514 vect_loop_versioning (loop_vinfo,
4515 !do_peeling_for_loop_bound,
4516 &cond_expr, &cond_expr_stmt_list);
4518 /* If the loop has a symbolic number of iterations 'n' (i.e. it is not a
4519 compile-time constant), or it is a constant that is not divisible by the
4520 vectorization factor, then an epilog loop needs to be created.
4521 We therefore duplicate the loop: the original loop will be vectorized,
4522 and will compute the first (n/VF) iterations. The second copy of the loop
4523 will remain scalar and will compute the remaining (n%VF) iterations.
4524 (VF is the vectorization factor). */
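/* For example (illustrative numbers): for n == 103 and VF == 8, the
   vectorized loop executes 103 / 8 == 12 iterations and the scalar
   epilog loop computes the remaining 103 % 8 == 7 iterations. */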
4526 if (do_peeling_for_loop_bound)
4527 vect_do_peeling_for_loop_bound (loop_vinfo, &ratio,
4528 cond_expr, cond_expr_stmt_list);
4529 else
4530 ratio = build_int_cst (TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo)),
4531 LOOP_VINFO_INT_NITERS (loop_vinfo) / vectorization_factor);
4533 /* 1) Make sure the loop header has exactly two entries
4534 2) Make sure we have a preheader basic block. */
4536 gcc_assert (EDGE_COUNT (loop->header->preds) == 2);
4538 split_edge (loop_preheader_edge (loop));
4540 /* FORNOW: the vectorizer supports only loops whose body consists
4541 of one basic block (header + empty latch). When the vectorizer
4542 supports more involved loop forms, the order in which the BBs are
4543 traversed will need to be reconsidered. */
4545 for (i = 0; i < nbbs; i++)
4547 basic_block bb = bbs[i];
4548 stmt_vec_info stmt_info;
4549 gimple phi;
4551 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
4553 phi = gsi_stmt (si);
4554 if (vect_print_dump_info (REPORT_DETAILS))
4556 fprintf (vect_dump, "------>vectorizing phi: ");
4557 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
4559 stmt_info = vinfo_for_stmt (phi);
4560 if (!stmt_info)
4561 continue;
4563 if (MAY_HAVE_DEBUG_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
4564 vect_loop_kill_debug_uses (loop, phi);
4566 if (!STMT_VINFO_RELEVANT_P (stmt_info)
4567 && !STMT_VINFO_LIVE_P (stmt_info))
4568 continue;
4570 if ((TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info))
4571 != (unsigned HOST_WIDE_INT) vectorization_factor)
4572 && vect_print_dump_info (REPORT_DETAILS))
4573 fprintf (vect_dump, "multiple-types.");
4575 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def)
4577 if (vect_print_dump_info (REPORT_DETAILS))
4578 fprintf (vect_dump, "transform phi.");
4579 vect_transform_stmt (phi, NULL, NULL, NULL, NULL);
4583 for (si = gsi_start_bb (bb); !gsi_end_p (si);)
4585 gimple stmt = gsi_stmt (si);
4586 bool is_store;
4588 if (vect_print_dump_info (REPORT_DETAILS))
4590 fprintf (vect_dump, "------>vectorizing statement: ");
4591 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4594 stmt_info = vinfo_for_stmt (stmt);
4596 /* Vector stmts created in the outer-loop during vectorization of
4597 stmts in an inner-loop may not have a stmt_info, and do not
4598 need to be vectorized. */
4599 if (!stmt_info)
4601 gsi_next (&si);
4602 continue;
4605 if (MAY_HAVE_DEBUG_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
4606 vect_loop_kill_debug_uses (loop, stmt);
4608 if (!STMT_VINFO_RELEVANT_P (stmt_info)
4609 && !STMT_VINFO_LIVE_P (stmt_info))
4611 gsi_next (&si);
4612 continue;
4615 gcc_assert (STMT_VINFO_VECTYPE (stmt_info));
4616 nunits =
4617 (unsigned int) TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
4618 if (!STMT_SLP_TYPE (stmt_info)
4619 && nunits != (unsigned int) vectorization_factor
4620 && vect_print_dump_info (REPORT_DETAILS))
4621 /* For SLP, VF is set according to the unrolling factor, and not
4622 to the vector size, hence for SLP this print is not valid. */
4623 fprintf (vect_dump, "multiple-types.");
4625 /* SLP. Schedule all the SLP instances when the first SLP stmt is
4626 reached. */
4627 if (STMT_SLP_TYPE (stmt_info))
4629 if (!slp_scheduled)
4631 slp_scheduled = true;
4633 if (vect_print_dump_info (REPORT_DETAILS))
4634 fprintf (vect_dump, "=== scheduling SLP instances ===");
4636 vect_schedule_slp (loop_vinfo, NULL);
4639 /* Hybrid SLP stmts must be vectorized in addition to SLP. */
4640 if (!vinfo_for_stmt (stmt) || PURE_SLP_STMT (stmt_info))
4642 gsi_next (&si);
4643 continue;
4647 /* -------- vectorize statement ------------ */
4648 if (vect_print_dump_info (REPORT_DETAILS))
4649 fprintf (vect_dump, "transform statement.");
4651 strided_store = false;
4652 is_store = vect_transform_stmt (stmt, &si, &strided_store, NULL, NULL);
4653 if (is_store)
4655 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
4657 /* Interleaving. If IS_STORE is TRUE, the vectorization of the
4658 interleaving chain was completed - free all the stores in
4659 the chain. */
4660 vect_remove_stores (DR_GROUP_FIRST_DR (stmt_info));
4661 gsi_remove (&si, true);
4662 continue;
4664 else
4666 /* Free the attached stmt_vec_info and remove the stmt. */
4667 free_stmt_vec_info (stmt);
4668 gsi_remove (&si, true);
4669 continue;
4672 gsi_next (&si);
4673 } /* stmts in BB */
4674 } /* BBs in loop */
4676 slpeel_make_loop_iterate_ntimes (loop, ratio);
4678 /* The memory tags and pointers in vectorized statements need to
4679 have their SSA forms updated. FIXME, why can't this be delayed
4680 until all the loops have been transformed? */
4681 update_ssa (TODO_update_ssa);
4683 if (vect_print_dump_info (REPORT_VECTORIZED_LOCATIONS))
4684 fprintf (vect_dump, "LOOP VECTORIZED.");
4685 if (loop->inner && vect_print_dump_info (REPORT_VECTORIZED_LOCATIONS))
4686 fprintf (vect_dump, "OUTER LOOP VECTORIZED.");