/* Loop Vectorization
   Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com> and
   Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "ggc.h"
#include "tree.h"
#include "basic-block.h"
#include "tree-pretty-print.h"
#include "gimple-pretty-print.h"
#include "tree-flow.h"
#include "tree-dump.h"
#include "cfgloop.h"
#include "cfglayout.h"
#include "expr.h"
#include "recog.h"
#include "optabs.h"
#include "params.h"
#include "toplev.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
#include "tree-vectorizer.h"
#include "target.h"

/* Loop Vectorization Pass.

   This pass tries to vectorize loops.

   For example, the vectorizer transforms the following simple loop:

        short a[N]; short b[N]; short c[N]; int i;

        for (i=0; i<N; i++){
          a[i] = b[i] + c[i];
        }

   as if it had been manually vectorized by rewriting the source code into:

        typedef int __attribute__((mode(V8HI))) v8hi;
        short a[N]; short b[N]; short c[N]; int i;
        v8hi *pa = (v8hi*)a, *pb = (v8hi*)b, *pc = (v8hi*)c;
        v8hi va, vb, vc;

        for (i=0; i<N/8; i++){
          vb = pb[i];
          vc = pc[i];
          va = vb + vc;
          pa[i] = va;
        }

   The main entry to this pass is vectorize_loops(), in which
   the vectorizer applies a set of analyses on a given set of loops,
   followed by the actual vectorization transformation for the loops that
   had successfully passed the analysis phase.
   Throughout this pass we make a distinction between two types of
   data: scalars (which are represented by SSA_NAMES), and memory references
   ("data-refs").  These two types of data require different handling both
   during analysis and transformation.  The types of data-refs that the
   vectorizer currently supports are ARRAY_REFS whose base is an array DECL
   (not a pointer), and INDIRECT_REFS through pointers; both array and pointer
   accesses are required to have a simple (consecutive) access pattern.

   Analysis phase:
   ===============
   The driver for the analysis phase is vect_analyze_loop().
   It applies a set of analyses, some of which rely on the scalar evolution
   analyzer (scev) developed by Sebastian Pop.

   During the analysis phase the vectorizer records some information
   per stmt in a "stmt_vec_info" struct which is attached to each stmt in the
   loop, as well as general information about the loop as a whole, which is
   recorded in a "loop_vec_info" struct attached to each loop.

   Transformation phase:
   =====================
   The loop transformation phase scans all the stmts in the loop, and
   creates a vector stmt (or a sequence of stmts) for each scalar stmt S in
   the loop that needs to be vectorized.  It inserts the vector code sequence
   just before the scalar stmt S, and records a pointer to the vector code
   in STMT_VINFO_VEC_STMT (stmt_info) (stmt_info is the stmt_vec_info struct
   attached to S).  This pointer will be used for the vectorization of
   following stmts which use the def of stmt S.  Stmt S is removed if it
   writes to memory; otherwise, we rely on dead code elimination for
   removing it.

   For example, say stmt S1 was vectorized into stmt VS1:

   VS1: vb = px[i];
   S1:  b = x[i];    STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
   S2:  a = b;

   To vectorize stmt S2, the vectorizer first finds the stmt that defines
   the operand 'b' (S1), and gets the relevant vector def 'vb' from the
   vector stmt VS1 pointed to by STMT_VINFO_VEC_STMT (stmt_info (S1)).  The
   resulting sequence would be:

   VS1: vb = px[i];
   S1:  b = x[i];    STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
   VS2: va = vb;
   S2:  a = b;       STMT_VINFO_VEC_STMT (stmt_info (S2)) = VS2

   Operands that are not SSA_NAMEs, are data-refs that appear in
   load/store operations (like 'x[i]' in S1), and are handled differently.

   Target modeling:
   =================
   Currently the only target specific information that is used is the
   size of the vector (in bytes) - "UNITS_PER_SIMD_WORD".  Targets that can
   support different vector sizes will, for now, need to specify one value
   for "UNITS_PER_SIMD_WORD".  More flexibility will be added in the future.

   Since we only vectorize operations whose vector form can be
   expressed using existing tree codes, to verify that an operation is
   supported, the vectorizer checks the relevant optab at the relevant
   machine_mode (e.g., optab_handler (add_optab, V8HImode)->insn_code).  If
   the value found is CODE_FOR_nothing, then there's no target support, and
   we can't vectorize the stmt.

   For additional information on this project see:
   http://gcc.gnu.org/projects/tree-ssa/vectorization.html
*/

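/* Illustrative sketch (exposition only, kept out of the build): the shape
   of the optab query described above.  The vectorizer asks whether the
   target provides an instruction for a given operation in a given vector
   mode; CODE_FOR_nothing means it does not.  */
#if 0
static bool
example_target_supports_v8hi_add (void)
{
  /* True iff the target can add two V8HImode vectors directly.  */
  return optab_handler (add_optab, V8HImode)->insn_code != CODE_FOR_nothing;
}
#endif
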
/* Function vect_determine_vectorization_factor

   Determine the vectorization factor (VF).  VF is the number of data elements
   that are operated upon in parallel in a single iteration of the vectorized
   loop.  For example, when vectorizing a loop that operates on 4-byte
   elements, on a target with vector size (VS) of 16 bytes, the VF is set to
   4, since 4 elements can fit in a single vector register.

   We currently support vectorization of loops in which all types operated
   upon are of the same size.  Therefore this function currently sets VF
   according to the size of the types operated upon, and fails if there are
   multiple sizes in the loop.

   VF is also the factor by which the loop iterations are strip-mined, e.g.:
   original loop:
        for (i=0; i<N; i++){
          a[i] = b[i] + c[i];
        }

   vectorized loop:
        for (i=0; i<N; i+=VF){
          a[i:VF] = b[i:VF] + c[i:VF];
        }
*/

static bool
vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  unsigned int vectorization_factor = 0;
  tree scalar_type;
  gimple phi;
  tree vectype;
  unsigned int nunits;
  stmt_vec_info stmt_info;
  int i;
  HOST_WIDE_INT dummy;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_determine_vectorization_factor ===");

  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];

      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        {
          phi = gsi_stmt (si);
          stmt_info = vinfo_for_stmt (phi);
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "==> examining phi: ");
              print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
            }

          gcc_assert (stmt_info);

          if (STMT_VINFO_RELEVANT_P (stmt_info))
            {
              gcc_assert (!STMT_VINFO_VECTYPE (stmt_info));
              scalar_type = TREE_TYPE (PHI_RESULT (phi));

              if (vect_print_dump_info (REPORT_DETAILS))
                {
                  fprintf (vect_dump, "get vectype for scalar type: ");
                  print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
                }

              vectype = get_vectype_for_scalar_type (scalar_type);
              if (!vectype)
                {
                  if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                    {
                      fprintf (vect_dump,
                               "not vectorized: unsupported data-type ");
                      print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
                    }
                  return false;
                }
              STMT_VINFO_VECTYPE (stmt_info) = vectype;

              if (vect_print_dump_info (REPORT_DETAILS))
                {
                  fprintf (vect_dump, "vectype: ");
                  print_generic_expr (vect_dump, vectype, TDF_SLIM);
                }

              nunits = TYPE_VECTOR_SUBPARTS (vectype);
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "nunits = %d", nunits);

              if (!vectorization_factor
                  || (nunits > vectorization_factor))
                vectorization_factor = nunits;
            }
        }

      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
        {
          tree vf_vectype;
          gimple stmt = gsi_stmt (si);
          stmt_info = vinfo_for_stmt (stmt);

          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "==> examining statement: ");
              print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
            }

          gcc_assert (stmt_info);

          /* Skip stmts which do not need to be vectorized.  */
          if (!STMT_VINFO_RELEVANT_P (stmt_info)
              && !STMT_VINFO_LIVE_P (stmt_info))
            {
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "skip.");
              continue;
            }

          if (gimple_get_lhs (stmt) == NULL_TREE)
            {
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                {
                  fprintf (vect_dump, "not vectorized: irregular stmt.");
                  print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
                }
              return false;
            }

          if (VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))))
            {
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                {
                  fprintf (vect_dump, "not vectorized: vector stmt in loop:");
                  print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
                }
              return false;
            }

          if (STMT_VINFO_VECTYPE (stmt_info))
            {
              /* The only case when a vectype had been already set is for
                 stmts that contain a dataref, or for "pattern-stmts" (stmts
                 generated by the vectorizer to represent/replace a certain
                 idiom).  */
              gcc_assert (STMT_VINFO_DATA_REF (stmt_info)
                          || is_pattern_stmt_p (stmt_info));
              vectype = STMT_VINFO_VECTYPE (stmt_info);
            }
          else
            {
              gcc_assert (!STMT_VINFO_DATA_REF (stmt_info)
                          && !is_pattern_stmt_p (stmt_info));

              scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
              if (vect_print_dump_info (REPORT_DETAILS))
                {
                  fprintf (vect_dump, "get vectype for scalar type: ");
                  print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
                }
              vectype = get_vectype_for_scalar_type (scalar_type);
              if (!vectype)
                {
                  if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                    {
                      fprintf (vect_dump,
                               "not vectorized: unsupported data-type ");
                      print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
                    }
                  return false;
                }

              STMT_VINFO_VECTYPE (stmt_info) = vectype;
            }

          /* The vectorization factor is according to the smallest
             scalar type (or the largest vector size, but we only
             support one vector size per loop).  */
          scalar_type = vect_get_smallest_scalar_type (stmt, &dummy,
                                                       &dummy);
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "get vectype for scalar type: ");
              print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
            }
          vf_vectype = get_vectype_for_scalar_type (scalar_type);
          if (!vf_vectype)
            {
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                {
                  fprintf (vect_dump,
                           "not vectorized: unsupported data-type ");
                  print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
                }
              return false;
            }

          if ((GET_MODE_SIZE (TYPE_MODE (vectype))
               != GET_MODE_SIZE (TYPE_MODE (vf_vectype))))
            {
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                {
                  fprintf (vect_dump,
                           "not vectorized: different sized vector "
                           "types in statement, ");
                  print_generic_expr (vect_dump, vectype, TDF_SLIM);
                  fprintf (vect_dump, " and ");
                  print_generic_expr (vect_dump, vf_vectype, TDF_SLIM);
                }
              return false;
            }

          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "vectype: ");
              print_generic_expr (vect_dump, vf_vectype, TDF_SLIM);
            }

          nunits = TYPE_VECTOR_SUBPARTS (vf_vectype);
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "nunits = %d", nunits);

          if (!vectorization_factor
              || (nunits > vectorization_factor))
            vectorization_factor = nunits;
        }
    }

  /* TODO: Analyze cost.  Decide if worth while to vectorize.  */
  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "vectorization factor = %d", vectorization_factor);
  if (vectorization_factor <= 1)
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: unsupported data-type");
      return false;
    }
  LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;

  return true;
}

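/* Worked example for the function above (exposition only, assuming a target
   with 16-byte vector registers and the standard mode names): a statement
   operating on 2-byte "short" elements gets vectype V8HI, so nunits = 8; one
   operating on 4-byte "int" elements gets V4SI, so nunits = 4.  Since the
   loop over all stmts keeps the maximum, a loop containing both ends up
   with VF = 8.  */
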
/* Function vect_is_simple_iv_evolution.

   FORNOW: A simple evolution of an induction variable in the loop is
   considered a polynomial evolution with constant step.  */

static bool
vect_is_simple_iv_evolution (unsigned loop_nb, tree access_fn, tree * init,
                             tree * step)
{
  tree init_expr;
  tree step_expr;
  tree evolution_part = evolution_part_in_loop_num (access_fn, loop_nb);

  /* When there is no evolution in this loop, the evolution function
     is not "simple".  */
  if (evolution_part == NULL_TREE)
    return false;

  /* When the evolution is a polynomial of degree >= 2
     the evolution function is not "simple".  */
  if (tree_is_chrec (evolution_part))
    return false;

  step_expr = evolution_part;
  init_expr = unshare_expr (initial_condition_in_loop_num (access_fn, loop_nb));

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "step: ");
      print_generic_expr (vect_dump, step_expr, TDF_SLIM);
      fprintf (vect_dump, ", init: ");
      print_generic_expr (vect_dump, init_expr, TDF_SLIM);
    }

  *init = init_expr;
  *step = step_expr;

  if (TREE_CODE (step_expr) != INTEGER_CST)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "step unknown.");
      return false;
    }

  return true;
}

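/* Example (exposition only): for "for (i = 0; i < n; i++)" scev represents
   i by the polynomial chrec {0, +, 1}_loop; its evolution part is the
   constant 1, so the function above accepts it with *init == 0 and
   *step == 1.  If the step itself varies across iterations (the evolution
   part is again a chrec), tree_is_chrec fires and the access function is
   rejected as not "simple".  */
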
/* Function vect_analyze_scalar_cycles_1.

   Examine the cross iteration def-use cycles of scalar variables
   in LOOP.  LOOP_VINFO represents the loop that is now being
   considered for vectorization (can be LOOP, or an outer-loop
   enclosing LOOP).  */

static void
vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
{
  basic_block bb = loop->header;
  tree dummy;
  VEC(gimple,heap) *worklist = VEC_alloc (gimple, heap, 64);
  gimple_stmt_iterator gsi;
  bool double_reduc;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_analyze_scalar_cycles ===");

  /* First - identify all inductions.  Reduction detection assumes that all
     the inductions have been identified, therefore, this order must not be
     changed.  */
  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);
      tree access_fn = NULL;
      tree def = PHI_RESULT (phi);
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);

      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "Analyze phi: ");
          print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
        }

      /* Skip virtual phi's.  The data dependences that are associated with
         virtual defs/uses (i.e., memory accesses) are analyzed elsewhere.  */
      if (!is_gimple_reg (SSA_NAME_VAR (def)))
        continue;

      STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_unknown_def_type;

      /* Analyze the evolution function.  */
      access_fn = analyze_scalar_evolution (loop, def);
      if (access_fn && vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "Access function of PHI: ");
          print_generic_expr (vect_dump, access_fn, TDF_SLIM);
        }

      if (!access_fn
          || !vect_is_simple_iv_evolution (loop->num, access_fn,
                                           &dummy, &dummy))
        {
          VEC_safe_push (gimple, heap, worklist, phi);
          continue;
        }

      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "Detected induction.");
      STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_induction_def;
    }


  /* Second - identify all reductions and nested cycles.  */
  while (VEC_length (gimple, worklist) > 0)
    {
      gimple phi = VEC_pop (gimple, worklist);
      tree def = PHI_RESULT (phi);
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);
      gimple reduc_stmt;
      bool nested_cycle;

      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "Analyze phi: ");
          print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
        }

      gcc_assert (is_gimple_reg (SSA_NAME_VAR (def)));
      gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_unknown_def_type);

      nested_cycle = (loop != LOOP_VINFO_LOOP (loop_vinfo));
      reduc_stmt = vect_force_simple_reduction (loop_vinfo, phi, !nested_cycle,
                                                &double_reduc);
      if (reduc_stmt)
        {
          if (double_reduc)
            {
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "Detected double reduction.");

              STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_double_reduction_def;
              STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
                                                    vect_double_reduction_def;
            }
          else
            {
              if (nested_cycle)
                {
                  if (vect_print_dump_info (REPORT_DETAILS))
                    fprintf (vect_dump, "Detected vectorizable nested cycle.");

                  STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_nested_cycle;
                  STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
                                                             vect_nested_cycle;
                }
              else
                {
                  if (vect_print_dump_info (REPORT_DETAILS))
                    fprintf (vect_dump, "Detected reduction.");

                  STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_reduction_def;
                  STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
                                                           vect_reduction_def;
                  /* Store the reduction cycles for possible vectorization in
                     loop-aware SLP.  */
                  VEC_safe_push (gimple, heap,
                                 LOOP_VINFO_REDUCTIONS (loop_vinfo),
                                 reduc_stmt);
                }
            }
        }
      else
        if (vect_print_dump_info (REPORT_DETAILS))
          fprintf (vect_dump, "Unknown def-use cycle pattern.");
    }

  VEC_free (gimple, heap, worklist);
}

/* Function vect_analyze_scalar_cycles.

   Examine the cross iteration def-use cycles of scalar variables, by
   analyzing the loop-header PHIs of scalar variables; Classify each
   cycle as one of the following: invariant, induction, reduction, unknown.
   We do that for the loop represented by LOOP_VINFO, and also for its
   inner-loop, if it exists.
   Examples for scalar cycles:

   Example1: reduction:

              loop1:
              for (i=0; i<N; i++)
                 sum += a[i];

   Example2: induction:

              loop2:
              for (i=0; i<N; i++)
                 a[i] = i;  */

static void
vect_analyze_scalar_cycles (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);

  vect_analyze_scalar_cycles_1 (loop_vinfo, loop);

  /* When vectorizing an outer-loop, the inner-loop is executed sequentially.
     Reductions in such an inner-loop therefore have different properties
     than the reductions in the nest that gets vectorized:
     1. When vectorized, they are executed in the same order as in the
        original scalar loop, so we can't change the order of computation
        when vectorizing them.
     2. FIXME: Inner-loop reductions can be used in the inner-loop, so the
        current checks are too strict.  */

  if (loop->inner)
    vect_analyze_scalar_cycles_1 (loop_vinfo, loop->inner);
}

/* Function vect_get_loop_niters.

   Determine how many iterations the loop is executed.
   If an expression that represents the number of iterations
   can be constructed, place it in NUMBER_OF_ITERATIONS.
   Return the loop exit condition.  */

static gimple
vect_get_loop_niters (struct loop *loop, tree *number_of_iterations)
{
  tree niters;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== get_loop_niters ===");

  niters = number_of_exit_cond_executions (loop);

  if (niters != NULL_TREE
      && niters != chrec_dont_know)
    {
      *number_of_iterations = niters;

      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "==> get_loop_niters:");
          print_generic_expr (vect_dump, *number_of_iterations, TDF_SLIM);
        }
    }

  return get_loop_exit_condition (loop);
}

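/* Example (exposition only): for a counted loop derived from
   "for (i = 0; i < n; i++)" in the do-while form the vectorizer requires,
   number_of_exit_cond_executions typically yields the expression "n"
   (the exit test runs once per iteration, assuming the loop guard
   guarantees n >= 1), and the returned statement is the GIMPLE_COND
   ending the loop header.  */
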
/* Function bb_in_loop_p

   Used as predicate for dfs order traversal of the loop bbs.  */

static bool
bb_in_loop_p (const_basic_block bb, const void *data)
{
  const struct loop *const loop = (const struct loop *)data;
  if (flow_bb_inside_loop_p (loop, bb))
    return true;
  return false;
}

/* Function new_loop_vec_info.

   Create and initialize a new loop_vec_info struct for LOOP, as well as
   stmt_vec_info structs for all the stmts in LOOP.  */

static loop_vec_info
new_loop_vec_info (struct loop *loop)
{
  loop_vec_info res;
  basic_block *bbs;
  gimple_stmt_iterator si;
  unsigned int i, nbbs;

  res = (loop_vec_info) xcalloc (1, sizeof (struct _loop_vec_info));
  LOOP_VINFO_LOOP (res) = loop;

  bbs = get_loop_body (loop);

  /* Create/Update stmt_info for all stmts in the loop.  */
  for (i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = bbs[i];

      /* BBs in a nested inner-loop will have been already processed (because
         we will have called vect_analyze_loop_form for any nested inner-loop).
         Therefore, for stmts in an inner-loop we just want to update the
         STMT_VINFO_LOOP_VINFO field of their stmt_info to point to the new
         loop_info of the outer-loop we are currently considering to vectorize
         (instead of the loop_info of the inner-loop).
         For stmts in other BBs we need to create a stmt_info from scratch.  */
      if (bb->loop_father != loop)
        {
          /* Inner-loop bb.  */
          gcc_assert (loop->inner && bb->loop_father == loop->inner);
          for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple phi = gsi_stmt (si);
              stmt_vec_info stmt_info = vinfo_for_stmt (phi);
              loop_vec_info inner_loop_vinfo =
                STMT_VINFO_LOOP_VINFO (stmt_info);
              gcc_assert (loop->inner == LOOP_VINFO_LOOP (inner_loop_vinfo));
              STMT_VINFO_LOOP_VINFO (stmt_info) = res;
            }
          for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple stmt = gsi_stmt (si);
              stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
              loop_vec_info inner_loop_vinfo =
                STMT_VINFO_LOOP_VINFO (stmt_info);
              gcc_assert (loop->inner == LOOP_VINFO_LOOP (inner_loop_vinfo));
              STMT_VINFO_LOOP_VINFO (stmt_info) = res;
            }
        }
      else
        {
          /* bb in current nest.  */
          for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple phi = gsi_stmt (si);
              gimple_set_uid (phi, 0);
              set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, res, NULL));
            }

          for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple stmt = gsi_stmt (si);
              gimple_set_uid (stmt, 0);
              set_vinfo_for_stmt (stmt, new_stmt_vec_info (stmt, res, NULL));
            }
        }
    }

  /* CHECKME: We want to visit all BBs before their successors (except for
     latch blocks, for which this assertion wouldn't hold).  In the simple
     case of the loop forms we allow, a dfs order of the BBs would be the same
     as reversed postorder traversal, so we are safe.  */

  free (bbs);
  bbs = XCNEWVEC (basic_block, loop->num_nodes);
  nbbs = dfs_enumerate_from (loop->header, 0, bb_in_loop_p,
                             bbs, loop->num_nodes, loop);
  gcc_assert (nbbs == loop->num_nodes);

  LOOP_VINFO_BBS (res) = bbs;
  LOOP_VINFO_NITERS (res) = NULL;
  LOOP_VINFO_NITERS_UNCHANGED (res) = NULL;
  LOOP_VINFO_COST_MODEL_MIN_ITERS (res) = 0;
  LOOP_VINFO_VECTORIZABLE_P (res) = 0;
  LOOP_PEELING_FOR_ALIGNMENT (res) = 0;
  LOOP_VINFO_VECT_FACTOR (res) = 0;
  LOOP_VINFO_DATAREFS (res) = VEC_alloc (data_reference_p, heap, 10);
  LOOP_VINFO_DDRS (res) = VEC_alloc (ddr_p, heap, 10 * 10);
  LOOP_VINFO_UNALIGNED_DR (res) = NULL;
  LOOP_VINFO_MAY_MISALIGN_STMTS (res) =
    VEC_alloc (gimple, heap,
               PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIGNMENT_CHECKS));
  LOOP_VINFO_MAY_ALIAS_DDRS (res) =
    VEC_alloc (ddr_p, heap,
               PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS));
  LOOP_VINFO_STRIDED_STORES (res) = VEC_alloc (gimple, heap, 10);
  LOOP_VINFO_REDUCTIONS (res) = VEC_alloc (gimple, heap, 10);
  LOOP_VINFO_SLP_INSTANCES (res) = VEC_alloc (slp_instance, heap, 10);
  LOOP_VINFO_SLP_UNROLLING_FACTOR (res) = 1;

  return res;
}

/* Function destroy_loop_vec_info.

   Free LOOP_VINFO struct, as well as all the stmt_vec_info structs of all the
   stmts in the loop.  */

void
destroy_loop_vec_info (loop_vec_info loop_vinfo, bool clean_stmts)
{
  struct loop *loop;
  basic_block *bbs;
  int nbbs;
  gimple_stmt_iterator si;
  int j;
  VEC (slp_instance, heap) *slp_instances;
  slp_instance instance;

  if (!loop_vinfo)
    return;

  loop = LOOP_VINFO_LOOP (loop_vinfo);

  bbs = LOOP_VINFO_BBS (loop_vinfo);
  nbbs = loop->num_nodes;

  if (!clean_stmts)
    {
      free (LOOP_VINFO_BBS (loop_vinfo));
      free_data_refs (LOOP_VINFO_DATAREFS (loop_vinfo));
      free_dependence_relations (LOOP_VINFO_DDRS (loop_vinfo));
      VEC_free (gimple, heap, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo));

      free (loop_vinfo);
      loop->aux = NULL;
      return;
    }

  for (j = 0; j < nbbs; j++)
    {
      basic_block bb = bbs[j];
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        free_stmt_vec_info (gsi_stmt (si));

      for (si = gsi_start_bb (bb); !gsi_end_p (si); )
        {
          gimple stmt = gsi_stmt (si);
          stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

          if (stmt_info)
            {
              /* Check if this is a "pattern stmt" (introduced by the
                 vectorizer during the pattern recognition pass).  */
              bool remove_stmt_p = false;
              gimple orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
              if (orig_stmt)
                {
                  stmt_vec_info orig_stmt_info = vinfo_for_stmt (orig_stmt);
                  if (orig_stmt_info
                      && STMT_VINFO_IN_PATTERN_P (orig_stmt_info))
                    remove_stmt_p = true;
                }

              /* Free stmt_vec_info.  */
              free_stmt_vec_info (stmt);

              /* Remove dead "pattern stmts".  */
              if (remove_stmt_p)
                gsi_remove (&si, true);
            }
          gsi_next (&si);
        }
    }

  free (LOOP_VINFO_BBS (loop_vinfo));
  free_data_refs (LOOP_VINFO_DATAREFS (loop_vinfo));
  free_dependence_relations (LOOP_VINFO_DDRS (loop_vinfo));
  VEC_free (gimple, heap, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo));
  VEC_free (ddr_p, heap, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo));
  slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
  for (j = 0; VEC_iterate (slp_instance, slp_instances, j, instance); j++)
    vect_free_slp_instance (instance);

  VEC_free (slp_instance, heap, LOOP_VINFO_SLP_INSTANCES (loop_vinfo));
  VEC_free (gimple, heap, LOOP_VINFO_STRIDED_STORES (loop_vinfo));
  VEC_free (gimple, heap, LOOP_VINFO_REDUCTIONS (loop_vinfo));

  free (loop_vinfo);
  loop->aux = NULL;
}

/* Function vect_analyze_loop_1.

   Apply a set of analyses on LOOP, and create a loop_vec_info struct
   for it.  The different analyses will record information in the
   loop_vec_info struct.  This is a subset of the analyses applied in
   vect_analyze_loop, to be applied on an inner-loop nested in the loop
   that is now considered for (outer-loop) vectorization.  */

static loop_vec_info
vect_analyze_loop_1 (struct loop *loop)
{
  loop_vec_info loop_vinfo;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "===== analyze_loop_nest_1 =====");

  /* Check the CFG characteristics of the loop (nesting, entry/exit, etc.).  */

  loop_vinfo = vect_analyze_loop_form (loop);
  if (!loop_vinfo)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad inner-loop form.");
      return NULL;
    }

  return loop_vinfo;
}

/* Function vect_analyze_loop_form.

   Verify that certain CFG restrictions hold, including:
   - the loop has a pre-header
   - the loop has a single entry and exit
   - the loop exit condition is simple enough, and the number of iterations
     can be analyzed (a countable loop).  */

loop_vec_info
vect_analyze_loop_form (struct loop *loop)
{
  loop_vec_info loop_vinfo;
  gimple loop_cond;
  tree number_of_iterations = NULL;
  loop_vec_info inner_loop_vinfo = NULL;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_analyze_loop_form ===");

  /* Different restrictions apply when we are considering an inner-most loop,
     vs. an outer (nested) loop.
     (FORNOW. May want to relax some of these restrictions in the future).  */

  if (!loop->inner)
    {
      /* Inner-most loop.  We currently require that the number of BBs is
         exactly 2 (the header and latch).  Vectorizable inner-most loops
         look like this:

                        (pre-header)
                           |
                          header <--------+
                           | |            |
                           | +--> latch --+
                           |
                        (exit-bb)  */

      if (loop->num_nodes != 2)
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: control flow in loop.");
          return NULL;
        }

      if (empty_block_p (loop->header))
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: empty loop.");
          return NULL;
        }
    }
  else
    {
      struct loop *innerloop = loop->inner;
      edge entryedge;

      /* Nested loop.  We currently require that the loop is doubly-nested,
         contains a single inner loop, and the number of BBs is exactly 5.
         Vectorizable outer-loops look like this:

                        (pre-header)
                           |
                          header <---+
                           |         |
                          inner-loop |
                           |         |
                          tail ------+
                           |
                        (exit-bb)

         The inner-loop has the properties expected of inner-most loops
         as described above.  */

      if ((loop->inner)->inner || (loop->inner)->next)
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: multiple nested loops.");
          return NULL;
        }

      /* Analyze the inner-loop.  */
      inner_loop_vinfo = vect_analyze_loop_1 (loop->inner);
      if (!inner_loop_vinfo)
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: Bad inner loop.");
          return NULL;
        }

      if (!expr_invariant_in_loop_p (loop,
                                     LOOP_VINFO_NITERS (inner_loop_vinfo)))
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump,
                     "not vectorized: inner-loop count not invariant.");
          destroy_loop_vec_info (inner_loop_vinfo, true);
          return NULL;
        }

      if (loop->num_nodes != 5)
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: control flow in loop.");
          destroy_loop_vec_info (inner_loop_vinfo, true);
          return NULL;
        }

      gcc_assert (EDGE_COUNT (innerloop->header->preds) == 2);
      entryedge = EDGE_PRED (innerloop->header, 0);
      if (EDGE_PRED (innerloop->header, 0)->src == innerloop->latch)
        entryedge = EDGE_PRED (innerloop->header, 1);

      if (entryedge->src != loop->header
          || !single_exit (innerloop)
          || single_exit (innerloop)->dest != EDGE_PRED (loop->latch, 0)->src)
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: unsupported outerloop form.");
          destroy_loop_vec_info (inner_loop_vinfo, true);
          return NULL;
        }

      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "Considering outer-loop vectorization.");
    }

  if (!single_exit (loop)
      || EDGE_COUNT (loop->header->preds) != 2)
    {
      if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
        {
          if (!single_exit (loop))
            fprintf (vect_dump, "not vectorized: multiple exits.");
          else if (EDGE_COUNT (loop->header->preds) != 2)
            fprintf (vect_dump, "not vectorized: too many incoming edges.");
        }
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  /* We assume that the loop exit condition is at the end of the loop, i.e.,
     that the loop is represented as a do-while (with a proper if-guard
     before the loop if needed), where the loop header contains all the
     executable statements, and the latch is empty.  */
  if (!empty_block_p (loop->latch)
      || !gimple_seq_empty_p (phi_nodes (loop->latch)))
    {
      if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
        fprintf (vect_dump, "not vectorized: unexpected loop form.");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  /* Make sure there exists a single-predecessor exit bb:  */
  if (!single_pred_p (single_exit (loop)->dest))
    {
      edge e = single_exit (loop);
      if (!(e->flags & EDGE_ABNORMAL))
        {
          split_loop_exit_edge (e);
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "split exit edge.");
        }
      else
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: abnormal loop exit edge.");
          if (inner_loop_vinfo)
            destroy_loop_vec_info (inner_loop_vinfo, true);
          return NULL;
        }
    }

  loop_cond = vect_get_loop_niters (loop, &number_of_iterations);
  if (!loop_cond)
    {
      if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
        fprintf (vect_dump, "not vectorized: complicated exit condition.");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  if (!number_of_iterations)
    {
      if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
        fprintf (vect_dump,
                 "not vectorized: number of iterations cannot be computed.");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  if (chrec_contains_undetermined (number_of_iterations))
    {
      if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
        fprintf (vect_dump, "Infinite number of iterations.");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  if (!NITERS_KNOWN_P (number_of_iterations))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "Symbolic number of iterations is ");
          print_generic_expr (vect_dump, number_of_iterations, TDF_DETAILS);
        }
    }
  else if (TREE_INT_CST_LOW (number_of_iterations) == 0)
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: number of iterations = 0.");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, false);
      return NULL;
    }

  loop_vinfo = new_loop_vec_info (loop);
  LOOP_VINFO_NITERS (loop_vinfo) = number_of_iterations;
  LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = number_of_iterations;

  STMT_VINFO_TYPE (vinfo_for_stmt (loop_cond)) = loop_exit_ctrl_vec_info_type;

  /* CHECKME: May want to keep it around in the future.  */
  if (inner_loop_vinfo)
    destroy_loop_vec_info (inner_loop_vinfo, false);

  gcc_assert (!loop->aux);
  loop->aux = loop_vinfo;
  return loop_vinfo;
}

/* Get cost by calling cost target builtin.  */

static inline int
vect_get_cost (enum vect_cost_for_stmt type_of_cost)
{
  return targetm.vectorize.builtin_vectorization_cost (type_of_cost);
}

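/* Usage sketch (exposition only): the argument selects the kind of statement
   being costed, e.g.

     int scalar_cost = vect_get_cost (scalar_stmt);
     int vector_cost = vect_get_cost (vector_stmt);

   The values are abstract, target-defined units, meaningful only relative
   to each other; the cost model compares sums of them to decide whether
   vectorization is profitable.  */
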
/* Function vect_analyze_loop_operations.

   Scan the loop stmts and make sure they are all vectorizable.  */

static bool
vect_analyze_loop_operations (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  unsigned int vectorization_factor = 0;
  int i;
  gimple phi;
  stmt_vec_info stmt_info;
  bool need_to_vectorize = false;
  int min_profitable_iters;
  int min_scalar_loop_bound;
  unsigned int th;
  bool only_slp_in_loop = true, ok;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_analyze_loop_operations ===");

  gcc_assert (LOOP_VINFO_VECT_FACTOR (loop_vinfo));
  vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);

  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];

      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        {
          phi = gsi_stmt (si);
          ok = true;

          stmt_info = vinfo_for_stmt (phi);
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "examining phi: ");
              print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
            }

          if (! is_loop_header_bb_p (bb))
            {
              /* Inner-loop loop-closed exit phi in outer-loop vectorization
                 (i.e., a phi in the tail of the outer-loop).
                 FORNOW: we currently don't support the case that these phis
                 are not used in the outerloop (unless it is double reduction,
                 i.e., this phi is vect_reduction_def), because this case
                 requires to actually do something here.  */
              if ((!STMT_VINFO_RELEVANT_P (stmt_info)
                   || STMT_VINFO_LIVE_P (stmt_info))
                  && STMT_VINFO_DEF_TYPE (stmt_info)
                     != vect_double_reduction_def)
                {
                  if (vect_print_dump_info (REPORT_DETAILS))
                    fprintf (vect_dump,
                             "Unsupported loop-closed phi in outer-loop.");
                  return false;
                }
              continue;
            }

          gcc_assert (stmt_info);

          if (STMT_VINFO_LIVE_P (stmt_info))
            {
              /* FORNOW: not yet supported.  */
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                fprintf (vect_dump, "not vectorized: value used after loop.");
              return false;
            }

          if (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope
              && STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def)
            {
              /* A scalar-dependence cycle that we don't support.  */
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                fprintf (vect_dump,
                         "not vectorized: scalar dependence cycle.");
              return false;
            }

          if (STMT_VINFO_RELEVANT_P (stmt_info))
            {
              need_to_vectorize = true;
              if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def)
                ok = vectorizable_induction (phi, NULL, NULL);
            }

          if (!ok)
            {
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                {
                  fprintf (vect_dump,
                           "not vectorized: relevant phi not supported: ");
                  print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
                }
              return false;
            }
        }

      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
        {
          gimple stmt = gsi_stmt (si);
          stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

          gcc_assert (stmt_info);

          if (!vect_analyze_stmt (stmt, &need_to_vectorize, NULL))
            return false;

          if ((STMT_VINFO_RELEVANT_P (stmt_info)
               || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
              && !PURE_SLP_STMT (stmt_info))
            /* STMT needs both SLP and loop-based vectorization.  */
            only_slp_in_loop = false;
        }
    } /* bbs */

  /* All operations in the loop are either irrelevant (deal with loop
     control, or dead), or only used outside the loop and can be moved
     out of the loop (e.g. invariants, inductions).  The loop can be
     optimized away by scalar optimizations.  We're better off not
     touching this loop.  */
  if (!need_to_vectorize)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump,
                 "All the computation can be taken out of the loop.");
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump,
                 "not vectorized: redundant loop. no profit to vectorize.");
      return false;
    }

  /* If all the stmts in the loop can be SLPed, we perform only SLP, and the
     vectorization factor of the loop is the unrolling factor required by
     the SLP instances.  If that unrolling factor is 1, we say that we
     perform pure SLP on the loop - cross iteration parallelism is not
     exploited.  */
  if (only_slp_in_loop)
    vectorization_factor = LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo);
  else
    vectorization_factor = least_common_multiple (vectorization_factor,
                                LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo));
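
  /* Example (exposition only): if loop-based analysis settled on VF = 4 and
     the SLP instances require an unrolling factor of 8, the combined factor
     is least_common_multiple (4, 8) = 8; with factors 4 and 2 it stays 4.  */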

  LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;

  if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      && vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump,
             "vectorization_factor = %d, niters = " HOST_WIDE_INT_PRINT_DEC,
             vectorization_factor, LOOP_VINFO_INT_NITERS (loop_vinfo));

  if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      && (LOOP_VINFO_INT_NITERS (loop_vinfo) < vectorization_factor))
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: iteration count too small.");
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "not vectorized: iteration count smaller than "
                 "vectorization factor.");
      return false;
    }

  /* Analyze cost.  Decide if worth while to vectorize.  */

  /* Once VF is set, SLP costs should be updated since the number of created
     vector stmts depends on VF.  */
  vect_update_slp_costs_according_to_vf (loop_vinfo);

  min_profitable_iters = vect_estimate_min_profitable_iters (loop_vinfo);
  LOOP_VINFO_COST_MODEL_MIN_ITERS (loop_vinfo) = min_profitable_iters;

  if (min_profitable_iters < 0)
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: vectorization not profitable.");
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "not vectorized: vector version will never be "
                 "profitable.");
      return false;
    }

  min_scalar_loop_bound = ((PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND)
                            * vectorization_factor) - 1);

  /* Use the cost model only if it is more conservative than the user
     specified threshold.  */

  th = (unsigned) min_scalar_loop_bound;
  if (min_profitable_iters
      && (!min_scalar_loop_bound
          || min_profitable_iters > min_scalar_loop_bound))
    th = (unsigned) min_profitable_iters;
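
  /* Worked example (exposition only, assumed values): with
     PARAM_MIN_VECT_LOOP_BOUND = 2 and VF = 4, the user-derived bound is
     2 * 4 - 1 = 7 iterations; if the cost model instead estimates that 10
     iterations are needed to profit, TH becomes 10 (the more conservative
     of the two), and shorter known-trip-count loops are rejected below.  */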

  if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      && LOOP_VINFO_INT_NITERS (loop_vinfo) <= th)
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: vectorization not "
                 "profitable.");
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "not vectorized: iteration count smaller than "
                 "user specified loop bound parameter or minimum "
                 "profitable iterations (whichever is more conservative).");
      return false;
    }

  if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      || LOOP_VINFO_INT_NITERS (loop_vinfo) % vectorization_factor != 0
      || LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "epilog loop required.");
      if (!vect_can_advance_ivs_p (loop_vinfo))
        {
          if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
            fprintf (vect_dump,
                     "not vectorized: can't create epilog loop 1.");
          return false;
        }
      if (!slpeel_can_duplicate_loop_p (loop, single_exit (loop)))
        {
          if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
            fprintf (vect_dump,
                     "not vectorized: can't create epilog loop 2.");
          return false;
        }
    }

  return true;
}

/* Function vect_analyze_loop.

   Apply a set of analyses on LOOP, and create a loop_vec_info struct
   for it.  The different analyses will record information in the
   loop_vec_info struct.  */
loop_vec_info
vect_analyze_loop (struct loop *loop)
{
  bool ok;
  loop_vec_info loop_vinfo;
  int max_vf = MAX_VECTORIZATION_FACTOR;
  int min_vf = 2;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "===== analyze_loop_nest =====");

  if (loop_outer (loop)
      && loop_vec_info_for_loop (loop_outer (loop))
      && LOOP_VINFO_VECTORIZABLE_P (loop_vec_info_for_loop (loop_outer (loop))))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "outer-loop already vectorized.");
      return NULL;
    }

  /* Check the CFG characteristics of the loop (nesting, entry/exit, etc.).  */

  loop_vinfo = vect_analyze_loop_form (loop);
  if (!loop_vinfo)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad loop form.");
      return NULL;
    }

  /* Find all data references in the loop (which correspond to vdefs/vuses)
     and analyze their evolution in the loop.  Also adjust the minimal
     vectorization factor according to the loads and stores.

     FORNOW: Handle only simple array references, whose alignment can be
     forced, and aligned pointer-references.  */

  ok = vect_analyze_data_refs (loop_vinfo, NULL, &min_vf);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data references.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  /* Classify all cross-iteration scalar data-flow cycles.
     Cross-iteration cycles caused by virtual phis are analyzed
     separately.  */

  vect_analyze_scalar_cycles (loop_vinfo);

  vect_pattern_recog (loop_vinfo);

  /* Data-flow analysis to detect stmts that do not need to be vectorized.  */

  ok = vect_mark_stmts_to_be_vectorized (loop_vinfo);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "unexpected pattern.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  /* Analyze data dependences between the data-refs in the loop
     and adjust the maximum vectorization factor according to
     the dependences.
     FORNOW: fail at the first data dependence that we encounter.  */

  ok = vect_analyze_data_ref_dependences (loop_vinfo, NULL, &max_vf);
  if (!ok
      || max_vf < min_vf)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data dependence.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  ok = vect_determine_vectorization_factor (loop_vinfo);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "can't determine vectorization factor.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }
  if (max_vf < LOOP_VINFO_VECT_FACTOR (loop_vinfo))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data dependence.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  /* Analyze the alignment of the data-refs in the loop.
     Fail if a data reference is found that cannot be vectorized.  */

  ok = vect_analyze_data_refs_alignment (loop_vinfo, NULL);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data alignment.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  /* Analyze the access patterns of the data-refs in the loop (consecutive,
     complex, etc.).  FORNOW: Only handle consecutive access pattern.  */

  ok = vect_analyze_data_ref_accesses (loop_vinfo, NULL);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data access.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  /* Prune the list of ddrs to be tested at run-time by versioning for alias.
     It is important to call pruning after vect_analyze_data_ref_accesses,
     since we use grouping information gathered by interleaving analysis.  */
  ok = vect_prune_runtime_alias_test_list (loop_vinfo);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "too long list of versioning for alias "
                 "run-time tests.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  /* Check the SLP opportunities in the loop, analyze and build SLP trees.  */
  ok = vect_analyze_slp (loop_vinfo, NULL);
  if (ok)
    {
      /* Decide which possible SLP instances to SLP.  */
      vect_make_slp_decision (loop_vinfo);

      /* Find stmts that need to be both vectorized and SLPed.  */
      vect_detect_hybrid_slp (loop_vinfo);
    }

  /* This pass will decide on using loop versioning and/or loop peeling in
     order to enhance the alignment of data references in the loop.  */

  ok = vect_enhance_data_refs_alignment (loop_vinfo);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data alignment.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  /* Scan all the operations in the loop and make sure they are
     vectorizable.  */

  ok = vect_analyze_loop_operations (loop_vinfo);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad operation or unsupported loop bound.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  LOOP_VINFO_VECTORIZABLE_P (loop_vinfo) = 1;

  return loop_vinfo;
}

/* Function reduction_code_for_scalar_code

   Input:
   CODE - tree_code of a reduction operation.

   Output:
   REDUC_CODE - the corresponding tree-code to be used to reduce the
      vector of partial results into a single scalar result (which
      will also reside in a vector), or ERROR_MARK if the operation is
      a supported reduction operation but does not have such a tree-code.

   Return FALSE if CODE currently cannot be vectorized as a reduction.  */

static bool
reduction_code_for_scalar_code (enum tree_code code,
                                enum tree_code *reduc_code)
{
  switch (code)
    {
      case MAX_EXPR:
        *reduc_code = REDUC_MAX_EXPR;
        return true;

      case MIN_EXPR:
        *reduc_code = REDUC_MIN_EXPR;
        return true;

      case PLUS_EXPR:
        *reduc_code = REDUC_PLUS_EXPR;
        return true;

      case MULT_EXPR:
      case MINUS_EXPR:
      case BIT_IOR_EXPR:
      case BIT_XOR_EXPR:
      case BIT_AND_EXPR:
        *reduc_code = ERROR_MARK;
        return true;

      default:
        return false;
    }
}

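/* Usage sketch (exposition only): for "sum += a[i]" the statement code is
   PLUS_EXPR, so the lookup yields REDUC_PLUS_EXPR and the vector of partial
   sums can be collapsed by one target reduction operation.  For MULT_EXPR
   the function returns true with ERROR_MARK: the reduction is still
   vectorizable, but the final combining step has to be open-coded (e.g.
   with whole-vector shifts and element extraction) instead of using a
   dedicated reduction tree-code.  */
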
/* Error reporting helper for vect_is_simple_reduction below.  GIMPLE
   statement STMT is printed with a message MSG.  */

static void
report_vect_op (gimple stmt, const char *msg)
{
  fprintf (vect_dump, "%s", msg);
  print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
}

/* Function vect_is_simple_reduction_1

   (1) Detect a cross-iteration def-use cycle that represents a simple
   reduction computation.  We look for the following pattern:

   loop_header:
     a1 = phi < a0, a2 >
     a3 = ...
     a2 = operation (a3, a1)

   such that:
   1. operation is commutative and associative and it is safe to
      change the order of the computation (if CHECK_REDUCTION is true)
   2. no uses for a2 in the loop (a2 is used out of the loop)
   3. no uses of a1 in the loop besides the reduction operation.

   Condition 1 is tested here.
   Conditions 2,3 are tested in vect_mark_stmts_to_be_vectorized.

   (2) Detect a cross-iteration def-use cycle in nested loops, i.e.,
   nested cycles, if CHECK_REDUCTION is false.

   (3) Detect cycles of phi nodes in outer-loop vectorization, i.e., double
   reductions:

     a1 = phi < a0, a2 >
     inner loop (def of a3)
     a2 = phi < a3 >

   If MODIFY is true it tries also to rework the code in-place to enable
   detection of more reduction patterns.  For the time being we rewrite
   "res -= RHS" into "res += -RHS" when it seems worthwhile.  */

1632 static gimple
1633 vect_is_simple_reduction_1 (loop_vec_info loop_info, gimple phi,
1634 bool check_reduction, bool *double_reduc,
1635 bool modify)
1637 struct loop *loop = (gimple_bb (phi))->loop_father;
1638 struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
1639 edge latch_e = loop_latch_edge (loop);
1640 tree loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
1641 gimple def_stmt, def1 = NULL, def2 = NULL;
1642 enum tree_code orig_code, code;
1643 tree op1, op2, op3 = NULL_TREE, op4 = NULL_TREE;
1644 tree type;
1645 int nloop_uses;
1646 tree name;
1647 imm_use_iterator imm_iter;
1648 use_operand_p use_p;
1649 bool phi_def;
1651 *double_reduc = false;
1653 /* If CHECK_REDUCTION is true, we assume inner-most loop vectorization,
1654 otherwise, we assume outer loop vectorization. */
1655 gcc_assert ((check_reduction && loop == vect_loop)
1656 || (!check_reduction && flow_loop_nested_p (vect_loop, loop)));
1658 name = PHI_RESULT (phi);
1659 nloop_uses = 0;
1660 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
1662 gimple use_stmt = USE_STMT (use_p);
1663 if (is_gimple_debug (use_stmt))
1664 continue;
1665 if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))
1666 && vinfo_for_stmt (use_stmt)
1667 && !is_pattern_stmt_p (vinfo_for_stmt (use_stmt)))
1668 nloop_uses++;
1669 if (nloop_uses > 1)
1671 if (vect_print_dump_info (REPORT_DETAILS))
1672 fprintf (vect_dump, "reduction used in loop.");
1673 return NULL;
1677 if (TREE_CODE (loop_arg) != SSA_NAME)
1679 if (vect_print_dump_info (REPORT_DETAILS))
1681 fprintf (vect_dump, "reduction: not ssa_name: ");
1682 print_generic_expr (vect_dump, loop_arg, TDF_SLIM);
1684 return NULL;
1687 def_stmt = SSA_NAME_DEF_STMT (loop_arg);
1688 if (!def_stmt)
1690 if (vect_print_dump_info (REPORT_DETAILS))
1691 fprintf (vect_dump, "reduction: no def_stmt.");
1692 return NULL;
1695 if (!is_gimple_assign (def_stmt) && gimple_code (def_stmt) != GIMPLE_PHI)
1697 if (vect_print_dump_info (REPORT_DETAILS))
1698 print_gimple_stmt (vect_dump, def_stmt, 0, TDF_SLIM);
1699 return NULL;
1702 if (is_gimple_assign (def_stmt))
1704 name = gimple_assign_lhs (def_stmt);
1705 phi_def = false;
1707 else
1709 name = PHI_RESULT (def_stmt);
1710 phi_def = true;
1713 nloop_uses = 0;
1714 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
1716 gimple use_stmt = USE_STMT (use_p);
1717 if (is_gimple_debug (use_stmt))
1718 continue;
1719 if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))
1720 && vinfo_for_stmt (use_stmt)
1721 && !is_pattern_stmt_p (vinfo_for_stmt (use_stmt)))
1722 nloop_uses++;
1723 if (nloop_uses > 1)
1725 if (vect_print_dump_info (REPORT_DETAILS))
1726 fprintf (vect_dump, "reduction used in loop.");
1727 return NULL;
1731 /* If DEF_STMT is a phi node itself, we expect it to have a single argument
1732 defined in the inner loop. */
1733 if (phi_def)
1735 op1 = PHI_ARG_DEF (def_stmt, 0);
1737 if (gimple_phi_num_args (def_stmt) != 1
1738 || TREE_CODE (op1) != SSA_NAME)
1740 if (vect_print_dump_info (REPORT_DETAILS))
1741 fprintf (vect_dump, "unsupported phi node definition.");
1743 return NULL;
1746 def1 = SSA_NAME_DEF_STMT (op1);
1747 if (flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
1748 && loop->inner
1749 && flow_bb_inside_loop_p (loop->inner, gimple_bb (def1))
1750 && is_gimple_assign (def1))
1752 if (vect_print_dump_info (REPORT_DETAILS))
1753 report_vect_op (def_stmt, "detected double reduction: ");
1755 *double_reduc = true;
1756 return def_stmt;
1759 return NULL;
1762 code = orig_code = gimple_assign_rhs_code (def_stmt);
1764 /* We can handle "res -= x[i]", which is non-associative by
1765 simply rewriting this into "res += -x[i]". Avoid changing
1766 gimple instruction for the first simple tests and only do this
1767 if we're allowed to change code at all. */
1768 if (code == MINUS_EXPR && modify)
1769 code = PLUS_EXPR;
1771 if (check_reduction
1772 && (!commutative_tree_code (code) || !associative_tree_code (code)))
1774 if (vect_print_dump_info (REPORT_DETAILS))
1775 report_vect_op (def_stmt, "reduction: not commutative/associative: ");
1776 return NULL;
1779 if (get_gimple_rhs_class (code) != GIMPLE_BINARY_RHS)
1781 if (code != COND_EXPR)
1783 if (vect_print_dump_info (REPORT_DETAILS))
1784 report_vect_op (def_stmt, "reduction: not binary operation: ");
1786 return NULL;
1789 op3 = TREE_OPERAND (gimple_assign_rhs1 (def_stmt), 0);
1790 if (COMPARISON_CLASS_P (op3))
1792 op4 = TREE_OPERAND (op3, 1);
1793 op3 = TREE_OPERAND (op3, 0);
1796 op1 = TREE_OPERAND (gimple_assign_rhs1 (def_stmt), 1);
1797 op2 = TREE_OPERAND (gimple_assign_rhs1 (def_stmt), 2);
1799 if (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op2) != SSA_NAME)
1801 if (vect_print_dump_info (REPORT_DETAILS))
1802 report_vect_op (def_stmt, "reduction: uses not ssa_names: ");
1804 return NULL;
1807 else
1809 op1 = gimple_assign_rhs1 (def_stmt);
1810 op2 = gimple_assign_rhs2 (def_stmt);
1812 if (TREE_CODE (op1) != SSA_NAME || TREE_CODE (op2) != SSA_NAME)
1814 if (vect_print_dump_info (REPORT_DETAILS))
1815 report_vect_op (def_stmt, "reduction: uses not ssa_names: ");
1817 return NULL;
1821 type = TREE_TYPE (gimple_assign_lhs (def_stmt));
1822 if ((TREE_CODE (op1) == SSA_NAME
1823 && !types_compatible_p (type,TREE_TYPE (op1)))
1824 || (TREE_CODE (op2) == SSA_NAME
1825 && !types_compatible_p (type, TREE_TYPE (op2)))
1826 || (op3 && TREE_CODE (op3) == SSA_NAME
1827 && !types_compatible_p (type, TREE_TYPE (op3)))
1828 || (op4 && TREE_CODE (op4) == SSA_NAME
1829 && !types_compatible_p (type, TREE_TYPE (op4))))
1831 if (vect_print_dump_info (REPORT_DETAILS))
1833 fprintf (vect_dump, "reduction: multiple types: operation type: ");
1834 print_generic_expr (vect_dump, type, TDF_SLIM);
1835 fprintf (vect_dump, ", operands types: ");
1836 print_generic_expr (vect_dump, TREE_TYPE (op1), TDF_SLIM);
1837 fprintf (vect_dump, ",");
1838 print_generic_expr (vect_dump, TREE_TYPE (op2), TDF_SLIM);
1839 if (op3)
1841 fprintf (vect_dump, ",");
1842 print_generic_expr (vect_dump, TREE_TYPE (op3), TDF_SLIM);
1845 if (op4)
1847 fprintf (vect_dump, ",");
1848 print_generic_expr (vect_dump, TREE_TYPE (op4), TDF_SLIM);
1852 return NULL;
1855 /* Check that it's ok to change the order of the computation.
1856 Generally, when vectorizing a reduction we change the order of the
1857 computation. This may change the behavior of the program in some
1858 cases, so we need to check that this is ok. One exception is when
1859 vectorizing an outer-loop: the inner-loop is executed sequentially,
1860 and therefore vectorizing reductions in the inner-loop during
1861 outer-loop vectorization is safe. */
1863 /* CHECKME: check for !flag_finite_math_only too? */
1864 if (SCALAR_FLOAT_TYPE_P (type) && !flag_associative_math
1865 && check_reduction)
1867 /* Changing the order of operations changes the semantics. */
1868 if (vect_print_dump_info (REPORT_DETAILS))
1869 report_vect_op (def_stmt, "reduction: unsafe fp math optimization: ");
1870 return NULL;
1872 else if (INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type)
1873 && check_reduction)
1875 /* Changing the order of operations changes the semantics. */
1876 if (vect_print_dump_info (REPORT_DETAILS))
1877 report_vect_op (def_stmt, "reduction: unsafe int math optimization: ");
1878 return NULL;
1880 else if (SAT_FIXED_POINT_TYPE_P (type) && check_reduction)
1882 /* Changing the order of operations changes the semantics. */
1883 if (vect_print_dump_info (REPORT_DETAILS))
1884 report_vect_op (def_stmt,
1885 "reduction: unsafe fixed-point math optimization: ");
1886 return NULL;
1889 /* If we detected "res -= x[i]" earlier, rewrite it into
1890 "res += -x[i]" now. If this turns out to be useless, reassoc
1891 will clean it up again. */
1892 if (orig_code == MINUS_EXPR)
1894 tree rhs = gimple_assign_rhs2 (def_stmt);
1895 tree negrhs = make_ssa_name (SSA_NAME_VAR (rhs), NULL);
1896 gimple negate_stmt = gimple_build_assign_with_ops (NEGATE_EXPR, negrhs,
1897 rhs, NULL);
1898 gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
1899 set_vinfo_for_stmt (negate_stmt, new_stmt_vec_info (negate_stmt,
1900 loop_info, NULL));
1901 gsi_insert_before (&gsi, negate_stmt, GSI_NEW_STMT);
1902 gimple_assign_set_rhs2 (def_stmt, negrhs);
1903 gimple_assign_set_rhs_code (def_stmt, PLUS_EXPR);
1904 update_stmt (def_stmt);
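/* Sketch of the rewrite performed above (the SSA names are
hypothetical):
before: res_2 = res_1 - _5;
after: neg_6 = -_5;
res_2 = res_1 + neg_6;
The negate stmt gets its own stmt_vec_info so that later analysis
can treat it like any other loop stmt. */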
1907 /* Reduction is safe. We're dealing with one of the following:
1908 1) integer arithmetic and no trapv
1909 2) floating point arithmetic, and special flags permit this optimization
1910 3) nested cycle (i.e., outer loop vectorization). */
1911 if (TREE_CODE (op1) == SSA_NAME)
1912 def1 = SSA_NAME_DEF_STMT (op1);
1914 if (TREE_CODE (op2) == SSA_NAME)
1915 def2 = SSA_NAME_DEF_STMT (op2);
1917 if (code != COND_EXPR
1918 && (!def1 || !def2 || gimple_nop_p (def1) || gimple_nop_p (def2)))
1920 if (vect_print_dump_info (REPORT_DETAILS))
1921 report_vect_op (def_stmt, "reduction: no defs for operands: ");
1922 return NULL;
1925 /* Check that one def is the reduction def, defined by PHI, and
1926 that the other def is either defined in the loop ("vect_internal_def")
1927 or is an induction (defined by a loop-header phi-node). */
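/* Illustrative gimple shape of the expected pattern (names assumed):
sum_1 = PHI <sum_init(preheader), sum_2(latch)>  <-- PHI
...
t_3 = ...;                  <-- vect_internal_def or induction
sum_2 = t_3 + sum_1;        <-- DEF_STMT: def2 is the PHI,
def1 is defined inside the loop. */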
1929 if (def2 && def2 == phi
1930 && (code == COND_EXPR
1931 || (def1 && flow_bb_inside_loop_p (loop, gimple_bb (def1))
1932 && (is_gimple_assign (def1)
1933 || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1))
1934 == vect_induction_def
1935 || (gimple_code (def1) == GIMPLE_PHI
1936 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1))
1937 == vect_internal_def
1938 && !is_loop_header_bb_p (gimple_bb (def1)))))))
1940 if (vect_print_dump_info (REPORT_DETAILS))
1941 report_vect_op (def_stmt, "detected reduction: ");
1942 return def_stmt;
1944 else if (def1 && def1 == phi
1945 && (code == COND_EXPR
1946 || (def2 && flow_bb_inside_loop_p (loop, gimple_bb (def2))
1947 && (is_gimple_assign (def2)
1948 || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2))
1949 == vect_induction_def
1950 || (gimple_code (def2) == GIMPLE_PHI
1951 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2))
1952 == vect_internal_def
1953 && !is_loop_header_bb_p (gimple_bb (def2)))))))
1955 if (check_reduction)
1957 /* Swap operands (just for simplicity - so that the rest of the code
1958 can assume that the reduction variable is always the last (second)
1959 argument). */
1960 if (vect_print_dump_info (REPORT_DETAILS))
1961 report_vect_op (def_stmt,
1962 "detected reduction: need to swap operands: ");
1964 swap_tree_operands (def_stmt, gimple_assign_rhs1_ptr (def_stmt),
1965 gimple_assign_rhs2_ptr (def_stmt));
1967 else
1969 if (vect_print_dump_info (REPORT_DETAILS))
1970 report_vect_op (def_stmt, "detected reduction: ");
1973 return def_stmt;
1975 else
1977 if (vect_print_dump_info (REPORT_DETAILS))
1978 report_vect_op (def_stmt, "reduction: unknown pattern: ");
1980 return NULL;
1984 /* Wrapper around vect_is_simple_reduction_1, which won't modify code
1985 in place. The arguments are as for that function. */
1987 static gimple
1988 vect_is_simple_reduction (loop_vec_info loop_info, gimple phi,
1989 bool check_reduction, bool *double_reduc)
1991 return vect_is_simple_reduction_1 (loop_info, phi, check_reduction,
1992 double_reduc, false);
1995 /* Wrapper around vect_is_simple_reduction_1, which will modify code
1996 in place if that enables the detection of more reductions. The
1997 arguments are as for that function. */
1999 gimple
2000 vect_force_simple_reduction (loop_vec_info loop_info, gimple phi,
2001 bool check_reduction, bool *double_reduc)
2003 return vect_is_simple_reduction_1 (loop_info, phi, check_reduction,
2004 double_reduc, true);
2007 /* Function vect_estimate_min_profitable_iters
2009 Return the number of iterations required for the vector version of the
2010 loop to be profitable relative to the cost of the scalar version of the
2011 loop.
2013 TODO: Take profile info into account before making vectorization
2014 decisions, if available. */
2016 int
2017 vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo)
2019 int i;
2020 int min_profitable_iters;
2021 int peel_iters_prologue;
2022 int peel_iters_epilogue;
2023 int vec_inside_cost = 0;
2024 int vec_outside_cost = 0;
2025 int scalar_single_iter_cost = 0;
2026 int scalar_outside_cost = 0;
2027 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2028 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2029 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
2030 int nbbs = loop->num_nodes;
2031 int byte_misalign = LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo);
2032 int peel_guard_costs = 0;
2033 int innerloop_iters = 0, factor;
2034 VEC (slp_instance, heap) *slp_instances;
2035 slp_instance instance;
2037 /* Cost model disabled. */
2038 if (!flag_vect_cost_model)
2040 if (vect_print_dump_info (REPORT_COST))
2041 fprintf (vect_dump, "cost model disabled.");
2042 return 0;
2045 /* Requires loop versioning tests to handle misalignment. */
2046 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
2048 /* FIXME: Make cost depend on complexity of individual check. */
2049 vec_outside_cost +=
2050 VEC_length (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo));
2051 if (vect_print_dump_info (REPORT_COST))
2052 fprintf (vect_dump, "cost model: Adding cost of checks for loop "
2053 "versioning to treat misalignment.\n");
2056 /* Requires loop versioning with alias checks. */
2057 if (LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
2059 /* FIXME: Make cost depend on complexity of individual check. */
2060 vec_outside_cost +=
2061 VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo));
2062 if (vect_print_dump_info (REPORT_COST))
2063 fprintf (vect_dump, "cost model: Adding cost of checks for loop "
2064 "versioning aliasing.\n");
2067 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
2068 || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
2069 vec_outside_cost += vect_get_cost (cond_branch_taken);
2071 /* Count statements in the scalar loop. We use this as the scalar cost
2072 of a single iteration for now.
2074 TODO: Add outer loop support.
2076 TODO: Consider assigning different costs to different scalar
2077 statements. */
2079 /* FORNOW. */
2080 if (loop->inner)
2081 innerloop_iters = 50; /* FIXME */
2083 for (i = 0; i < nbbs; i++)
2085 gimple_stmt_iterator si;
2086 basic_block bb = bbs[i];
2088 if (bb->loop_father == loop->inner)
2089 factor = innerloop_iters;
2090 else
2091 factor = 1;
2093 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
2095 gimple stmt = gsi_stmt (si);
2096 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2097 /* Skip stmts that are not vectorized inside the loop. */
2098 if (!STMT_VINFO_RELEVANT_P (stmt_info)
2099 && (!STMT_VINFO_LIVE_P (stmt_info)
2100 || STMT_VINFO_DEF_TYPE (stmt_info) != vect_reduction_def))
2101 continue;
2102 scalar_single_iter_cost += cost_for_stmt (stmt) * factor;
2103 vec_inside_cost += STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info) * factor;
2104 /* FIXME: for stmts in the inner-loop in outer-loop vectorization,
2105 some of the "outside" costs are generated inside the outer-loop. */
2106 vec_outside_cost += STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info);
2110 /* Add the additional cost of the peeled instructions in the prologue
2111 and epilogue loops.
2113 FORNOW: If we don't know the value of peel_iters for prologue or epilogue
2114 at compile time, we assume it's vf/2 (the worst case would be vf-1).
2116 TODO: Build an expression that represents peel_iters for prologue and
2117 epilogue to be used in a run-time test. */
2119 if (byte_misalign < 0)
2121 peel_iters_prologue = vf/2;
2122 if (vect_print_dump_info (REPORT_COST))
2123 fprintf (vect_dump, "cost model: "
2124 "prologue peel iters set to vf/2.");
2126 /* If peeling for alignment is unknown, the loop bound of the main
2127 loop becomes unknown. */
2128 peel_iters_epilogue = vf/2;
2129 if (vect_print_dump_info (REPORT_COST))
2130 fprintf (vect_dump, "cost model: "
2131 "epilogue peel iters set to vf/2 because "
2132 "peeling for alignment is unknown .");
2134 /* If peeled iterations are unknown, count a taken branch and a not-taken
2135 branch per peeled loop. Even if the scalar loop iterations are known,
2136 the vector iterations are not, since the peeled prologue iterations are
2137 not known. Hence the guards remain the same. */
2138 peel_guard_costs += 2 * (vect_get_cost (cond_branch_taken)
2139 + vect_get_cost (cond_branch_not_taken));
2141 else
2143 if (byte_misalign)
2145 struct data_reference *dr = LOOP_VINFO_UNALIGNED_DR (loop_vinfo);
2146 int element_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr))));
2147 tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (DR_STMT (dr)));
2148 int nelements = TYPE_VECTOR_SUBPARTS (vectype);
2150 peel_iters_prologue = nelements - (byte_misalign / element_size);
2152 else
2153 peel_iters_prologue = 0;
2155 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
2157 peel_iters_epilogue = vf/2;
2158 if (vect_print_dump_info (REPORT_COST))
2159 fprintf (vect_dump, "cost model: "
2160 "epilogue peel iters set to vf/2 because "
2161 "loop iterations are unknown .");
2163 /* If peeled iterations are known but the number of scalar loop
2164 iterations is unknown, count a taken branch per peeled loop. */
2165 peel_guard_costs += 2 * vect_get_cost (cond_branch_taken);
2167 else
2169 int niters = LOOP_VINFO_INT_NITERS (loop_vinfo);
2170 peel_iters_prologue = niters < peel_iters_prologue ?
2171 niters : peel_iters_prologue;
2172 peel_iters_epilogue = (niters - peel_iters_prologue) % vf;
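/* Worked example (illustrative numbers): for V4SI data (nelements = 4,
element_size = 4) and byte_misalign = 8, the prologue peels
4 - 8/4 = 2 scalar iterations; with niters = 23 and vf = 4 the
epilogue then peels (23 - 2) % 4 = 1 iteration, leaving 20 iterations
(5 vector iterations) for the main loop. */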
2176 vec_outside_cost += (peel_iters_prologue * scalar_single_iter_cost)
2177 + (peel_iters_epilogue * scalar_single_iter_cost)
2178 + peel_guard_costs;
2180 /* FORNOW: The scalar outside cost is incremented in one of the
2181 following ways:
2183 1. The vectorizer checks for alignment and aliasing and generates
2184 a condition that allows dynamic vectorization. A cost model
2185 check is ANDed with the versioning condition. Hence the scalar code
2186 path now has the added cost of the versioning check.
2188 if (cost > th && versioning_check)
2189 jmp to vector code
2191 Hence the run-time scalar cost is incremented by a not-taken branch cost.
2193 2. The vectorizer then checks if a prologue is required. If the
2194 cost model check was not done before during versioning, it has to
2195 be done before the prologue check.
2197 if (cost <= th)
2198 prologue = scalar_iters
2199 if (prologue == 0)
2200 jmp to vector code
2201 else
2202 execute prologue
2203 if (prologue == num_iters)
2204 go to exit
2206 Hence the run-time scalar cost is incremented by a taken branch,
2207 plus a not-taken branch, plus a taken branch cost.
2209 3. The vectorizer then checks if an epilogue is required. If the
2210 cost model check was not done before during prologue check, it
2211 has to be done with the epilogue check.
2213 if (prologue == 0)
2214 jmp to vector code
2215 else
2216 execute prologue
2217 if (prologue == num_iters)
2218 go to exit
2219 vector code:
2220 if ((cost <= th) | (scalar_iters-prologue-epilogue == 0))
2221 jmp to epilogue
2223 Hence the run-time scalar cost should be incremented by 2 taken
2224 branches.
2226 TODO: The back end may order the BBs differently and reverse
2227 conditions/branch directions. Change the estimates below to
2228 something more reasonable. */
2230 /* If the number of iterations is known and we do not do versioning, we can
2231 decide whether to vectorize at compile time. Hence the scalar version
2232 does not carry cost model guard costs. */
2233 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
2234 || LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
2235 || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
2237 /* Cost model check occurs at versioning. */
2238 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
2239 || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
2240 scalar_outside_cost += vect_get_cost (cond_branch_not_taken);
2241 else
2243 /* Cost model check occurs at prologue generation. */
2244 if (LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0)
2245 scalar_outside_cost += 2 * vect_get_cost (cond_branch_taken)
2246 + vect_get_cost (cond_branch_not_taken);
2247 /* Cost model check occurs at epilogue generation. */
2248 else
2249 scalar_outside_cost += 2 * vect_get_cost (cond_branch_taken);
2253 /* Add SLP costs. */
2254 slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
2255 for (i = 0; VEC_iterate (slp_instance, slp_instances, i, instance); i++)
2257 vec_outside_cost += SLP_INSTANCE_OUTSIDE_OF_LOOP_COST (instance);
2258 vec_inside_cost += SLP_INSTANCE_INSIDE_OF_LOOP_COST (instance);
2261 /* Calculate number of iterations required to make the vector version
2262 profitable, relative to the loop bodies only. The following condition
2263 must hold true:
2264 SIC * niters + SOC > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC
2265 where
2266 SIC = scalar iteration cost, VIC = vector iteration cost,
2267 VOC = vector outside cost, VF = vectorization factor,
2268 PL_ITERS = prologue iterations, EP_ITERS = epilogue iterations,
2269 SOC = scalar outside cost for run time cost model check. */
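/* Worked example (illustrative numbers): with SIC = 4, VIC = 6,
VF = 4, VOC = 40, SOC = 4 and PL_ITERS = EP_ITERS = 0, the condition
4 * niters + 4 > 6 * (niters / 4) + 40 first holds at niters = 15.
The code below computes the same threshold as
((VOC - SOC) * VF - VIC * (PL_ITERS + EP_ITERS)) / (SIC * VF - VIC)
= 144 / 10 = 14 in integer division, and then increments it once
because 14 does not yet strictly satisfy the inequality. */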
2271 if ((scalar_single_iter_cost * vf) > vec_inside_cost)
2273 if (vec_outside_cost <= 0)
2274 min_profitable_iters = 1;
2275 else
2277 min_profitable_iters = ((vec_outside_cost - scalar_outside_cost) * vf
2278 - vec_inside_cost * peel_iters_prologue
2279 - vec_inside_cost * peel_iters_epilogue)
2280 / ((scalar_single_iter_cost * vf)
2281 - vec_inside_cost);
2283 if ((scalar_single_iter_cost * vf * min_profitable_iters)
2284 <= ((vec_inside_cost * min_profitable_iters)
2285 + ((vec_outside_cost - scalar_outside_cost) * vf)))
2286 min_profitable_iters++;
2289 /* The vector version will never be profitable. */
2290 else
2292 if (vect_print_dump_info (REPORT_COST))
2293 fprintf (vect_dump, "cost model: the vector iteration cost = %d "
2294 "divided by the scalar iteration cost = %d "
2295 "is greater or equal to the vectorization factor = %d.",
2296 vec_inside_cost, scalar_single_iter_cost, vf);
2297 return -1;
2300 if (vect_print_dump_info (REPORT_COST))
2302 fprintf (vect_dump, "Cost model analysis: \n");
2303 fprintf (vect_dump, " Vector inside of loop cost: %d\n",
2304 vec_inside_cost);
2305 fprintf (vect_dump, " Vector outside of loop cost: %d\n",
2306 vec_outside_cost);
2307 fprintf (vect_dump, " Scalar iteration cost: %d\n",
2308 scalar_single_iter_cost);
2309 fprintf (vect_dump, " Scalar outside cost: %d\n", scalar_outside_cost);
2310 fprintf (vect_dump, " prologue iterations: %d\n",
2311 peel_iters_prologue);
2312 fprintf (vect_dump, " epilogue iterations: %d\n",
2313 peel_iters_epilogue);
2314 fprintf (vect_dump, " Calculated minimum iters for profitability: %d\n",
2315 min_profitable_iters);
2318 min_profitable_iters =
2319 min_profitable_iters < vf ? vf : min_profitable_iters;
2321 /* Because the condition we create is:
2322 if (niters <= min_profitable_iters)
2323 then skip the vectorized loop. */
2324 min_profitable_iters--;
2326 if (vect_print_dump_info (REPORT_COST))
2327 fprintf (vect_dump, " Profitability threshold = %d\n",
2328 min_profitable_iters);
2330 return min_profitable_iters;
2334 /* TODO: There is a close dependency between the vect_model_*_cost and
2335 vectorizable_* functions. Redesign to avoid maintenance issues. */
2337 /* Function vect_model_reduction_cost.
2339 Models cost for a reduction operation, including the vector ops
2340 generated within the strip-mine loop, the initial definition before
2341 the loop, and the epilogue code that must be generated. */
2343 static bool
2344 vect_model_reduction_cost (stmt_vec_info stmt_info, enum tree_code reduc_code,
2345 int ncopies)
2347 int outer_cost = 0;
2348 enum tree_code code;
2349 optab optab;
2350 tree vectype;
2351 gimple stmt, orig_stmt;
2352 tree reduction_op;
2353 enum machine_mode mode;
2354 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2355 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2358 /* Cost of reduction op inside loop. */
2359 STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info)
2360 += ncopies * vect_get_cost (vector_stmt);
2362 stmt = STMT_VINFO_STMT (stmt_info);
2364 switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
2366 case GIMPLE_SINGLE_RHS:
2367 gcc_assert (TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt)) == ternary_op);
2368 reduction_op = TREE_OPERAND (gimple_assign_rhs1 (stmt), 2);
2369 break;
2370 case GIMPLE_UNARY_RHS:
2371 reduction_op = gimple_assign_rhs1 (stmt);
2372 break;
2373 case GIMPLE_BINARY_RHS:
2374 reduction_op = gimple_assign_rhs2 (stmt);
2375 break;
2376 default:
2377 gcc_unreachable ();
2380 vectype = get_vectype_for_scalar_type (TREE_TYPE (reduction_op));
2381 if (!vectype)
2383 if (vect_print_dump_info (REPORT_COST))
2385 fprintf (vect_dump, "unsupported data-type ");
2386 print_generic_expr (vect_dump, TREE_TYPE (reduction_op), TDF_SLIM);
2388 return false;
2391 mode = TYPE_MODE (vectype);
2392 orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
2394 if (!orig_stmt)
2395 orig_stmt = STMT_VINFO_STMT (stmt_info);
2397 code = gimple_assign_rhs_code (orig_stmt);
2399 /* Add in cost for initial definition. */
2400 outer_cost += vect_get_cost (scalar_to_vec);
2402 /* Determine cost of the epilogue code.
2404 We have a reduction operator that will reduce the vector in one statement.
2405 This also requires a scalar extract. */
2407 if (!nested_in_vect_loop_p (loop, orig_stmt))
2409 if (reduc_code != ERROR_MARK)
2410 outer_cost += vect_get_cost (vector_stmt)
2411 + vect_get_cost (vec_to_scalar);
2412 else
2414 int vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1);
2415 tree bitsize =
2416 TYPE_SIZE (TREE_TYPE (gimple_assign_lhs (orig_stmt)));
2417 int element_bitsize = tree_low_cst (bitsize, 1);
2418 int nelements = vec_size_in_bits / element_bitsize;
2420 optab = optab_for_tree_code (code, vectype, optab_default);
2422 /* We have a whole vector shift available. */
2423 if (VECTOR_MODE_P (mode)
2424 && optab_handler (optab, mode)->insn_code != CODE_FOR_nothing
2425 && optab_handler (vec_shr_optab, mode)->insn_code != CODE_FOR_nothing)
2426 /* Final reduction via vector shifts and the reduction operator. Also
2427 requires scalar extract. */
2428 outer_cost += ((exact_log2 (nelements) * 2)
2429 * vect_get_cost (vector_stmt)
2430 + vect_get_cost (vec_to_scalar));
2431 else
2432 /* Use extracts and reduction op for final reduction. For N elements,
2433 we have N extracts and N-1 reduction ops. */
2434 outer_cost += ((nelements + nelements - 1)
2435 * vect_get_cost (vector_stmt));
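/* For example (illustrative): with nelements = 4 the shift scheme
costs exact_log2 (4) * 2 = 4 vector stmts (two shift/op pairs) plus
one vec_to_scalar extract, while the fallback costs 4 + 3 = 7
vector stmt units (4 extracts plus 3 scalar reduction ops). */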
2439 STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info) = outer_cost;
2441 if (vect_print_dump_info (REPORT_COST))
2442 fprintf (vect_dump, "vect_model_reduction_cost: inside_cost = %d, "
2443 "outside_cost = %d .", STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info),
2444 STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info));
2446 return true;
2450 /* Function vect_model_induction_cost.
2452 Models cost for induction operations. */
2454 static void
2455 vect_model_induction_cost (stmt_vec_info stmt_info, int ncopies)
2457 /* loop cost for vec_loop. */
2458 STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info)
2459 = ncopies * vect_get_cost (vector_stmt);
2460 /* prologue cost for vec_init and vec_step. */
2461 STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info)
2462 = 2 * vect_get_cost (scalar_to_vec);
2464 if (vect_print_dump_info (REPORT_COST))
2465 fprintf (vect_dump, "vect_model_induction_cost: inside_cost = %d, "
2466 "outside_cost = %d .", STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info),
2467 STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info));
2471 /* Function get_initial_def_for_induction
2473 Input:
2474 STMT - a stmt that performs an induction operation in the loop.
2475 IV_PHI - the loop-header phi node of the induction variable.
2477 Output:
2478 Return a vector variable, initialized with the first VF values of
2479 the induction variable. E.g., for an iv with IV_PHI='X' and
2480 evolution S, for a vector of 4 units, we want to return:
2481 [X, X + S, X + 2*S, X + 3*S]. */
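/* For example (illustrative): for a scalar iv with initial value 0
and step S = 1, vectorized with a 4-unit vectype, this function
builds vec_init = {0, 1, 2, 3}, and the loop body then advances it
by vec_step = {4, 4, 4, 4} per vector iteration. */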
2483 static tree
2484 get_initial_def_for_induction (gimple iv_phi)
2486 stmt_vec_info stmt_vinfo = vinfo_for_stmt (iv_phi);
2487 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
2488 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2489 tree scalar_type = TREE_TYPE (gimple_phi_result (iv_phi));
2490 tree vectype;
2491 int nunits;
2492 edge pe = loop_preheader_edge (loop);
2493 struct loop *iv_loop;
2494 basic_block new_bb;
2495 tree vec, vec_init, vec_step, t;
2496 tree access_fn;
2497 tree new_var;
2498 tree new_name;
2499 gimple init_stmt, induction_phi, new_stmt;
2500 tree induc_def, vec_def, vec_dest;
2501 tree init_expr, step_expr;
2502 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2503 int i;
2504 bool ok;
2505 int ncopies;
2506 tree expr;
2507 stmt_vec_info phi_info = vinfo_for_stmt (iv_phi);
2508 bool nested_in_vect_loop = false;
2509 gimple_seq stmts = NULL;
2510 imm_use_iterator imm_iter;
2511 use_operand_p use_p;
2512 gimple exit_phi;
2513 edge latch_e;
2514 tree loop_arg;
2515 gimple_stmt_iterator si;
2516 basic_block bb = gimple_bb (iv_phi);
2517 tree stepvectype;
2519 vectype = get_vectype_for_scalar_type (scalar_type);
2520 gcc_assert (vectype);
2521 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2522 ncopies = vf / nunits;
2524 gcc_assert (phi_info);
2525 gcc_assert (ncopies >= 1);
2527 /* Find the first insertion point in the BB. */
2528 si = gsi_after_labels (bb);
2530 if (INTEGRAL_TYPE_P (scalar_type))
2531 step_expr = build_int_cst (scalar_type, 0);
2532 else if (POINTER_TYPE_P (scalar_type))
2533 step_expr = build_int_cst (sizetype, 0);
2534 else
2535 step_expr = build_real (scalar_type, dconst0);
2537 /* Is phi in an inner-loop, while vectorizing an enclosing outer-loop? */
2538 if (nested_in_vect_loop_p (loop, iv_phi))
2540 nested_in_vect_loop = true;
2541 iv_loop = loop->inner;
2543 else
2544 iv_loop = loop;
2545 gcc_assert (iv_loop == (gimple_bb (iv_phi))->loop_father);
2547 latch_e = loop_latch_edge (iv_loop);
2548 loop_arg = PHI_ARG_DEF_FROM_EDGE (iv_phi, latch_e);
2550 access_fn = analyze_scalar_evolution (iv_loop, PHI_RESULT (iv_phi));
2551 gcc_assert (access_fn);
2552 ok = vect_is_simple_iv_evolution (iv_loop->num, access_fn,
2553 &init_expr, &step_expr);
2554 gcc_assert (ok);
2555 pe = loop_preheader_edge (iv_loop);
2557 /* Create the vector that holds the initial_value of the induction. */
2558 if (nested_in_vect_loop)
2560 /* iv_loop is nested in the loop to be vectorized. init_expr has already
2561 been created during vectorization of previous stmts; we obtain it from
2562 the STMT_VINFO_VEC_STMT of the defining stmt. */
2563 tree iv_def = PHI_ARG_DEF_FROM_EDGE (iv_phi,
2564 loop_preheader_edge (iv_loop));
2565 vec_init = vect_get_vec_def_for_operand (iv_def, iv_phi, NULL);
2567 else
2569 /* iv_loop is the loop to be vectorized. Create:
2570 vec_init = [X, X+S, X+2*S, X+3*S] (S = step_expr, X = init_expr) */
2571 new_var = vect_get_new_vect_var (scalar_type, vect_scalar_var, "var_");
2572 add_referenced_var (new_var);
2574 new_name = force_gimple_operand (init_expr, &stmts, false, new_var);
2575 if (stmts)
2577 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
2578 gcc_assert (!new_bb);
2581 t = NULL_TREE;
2582 t = tree_cons (NULL_TREE, init_expr, t);
2583 for (i = 1; i < nunits; i++)
2585 /* Create: new_name_i = new_name + step_expr */
2586 enum tree_code code = POINTER_TYPE_P (scalar_type)
2587 ? POINTER_PLUS_EXPR : PLUS_EXPR;
2588 init_stmt = gimple_build_assign_with_ops (code, new_var,
2589 new_name, step_expr);
2590 new_name = make_ssa_name (new_var, init_stmt);
2591 gimple_assign_set_lhs (init_stmt, new_name);
2593 new_bb = gsi_insert_on_edge_immediate (pe, init_stmt);
2594 gcc_assert (!new_bb);
2596 if (vect_print_dump_info (REPORT_DETAILS))
2598 fprintf (vect_dump, "created new init_stmt: ");
2599 print_gimple_stmt (vect_dump, init_stmt, 0, TDF_SLIM);
2601 t = tree_cons (NULL_TREE, new_name, t);
2603 /* Create a vector from [new_name_0, new_name_1, ..., new_name_nunits-1] */
2604 vec = build_constructor_from_list (vectype, nreverse (t));
2605 vec_init = vect_init_vector (iv_phi, vec, vectype, NULL);
2609 /* Create the vector that holds the step of the induction. */
2610 if (nested_in_vect_loop)
2611 /* iv_loop is nested in the loop to be vectorized. Generate:
2612 vec_step = [S, S, S, S] */
2613 new_name = step_expr;
2614 else
2616 /* iv_loop is the loop to be vectorized. Generate:
2617 vec_step = [VF*S, VF*S, VF*S, VF*S] */
2618 expr = build_int_cst (TREE_TYPE (step_expr), vf);
2619 new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
2620 expr, step_expr);
2623 t = NULL_TREE;
2624 for (i = 0; i < nunits; i++)
2625 t = tree_cons (NULL_TREE, unshare_expr (new_name), t);
2626 gcc_assert (CONSTANT_CLASS_P (new_name));
2627 stepvectype = get_vectype_for_scalar_type (TREE_TYPE (new_name));
2628 gcc_assert (stepvectype);
2629 vec = build_vector (stepvectype, t);
2630 vec_step = vect_init_vector (iv_phi, vec, stepvectype, NULL);
2633 /* Create the following def-use cycle:
2634 loop prolog:
2635 vec_init = ...
2636 vec_step = ...
2637 loop:
2638 vec_iv = PHI <vec_init, vec_loop>
2640 STMT
2642 vec_loop = vec_iv + vec_step; */
2644 /* Create the induction-phi that defines the induction-operand. */
2645 vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_");
2646 add_referenced_var (vec_dest);
2647 induction_phi = create_phi_node (vec_dest, iv_loop->header);
2648 set_vinfo_for_stmt (induction_phi,
2649 new_stmt_vec_info (induction_phi, loop_vinfo, NULL));
2650 induc_def = PHI_RESULT (induction_phi);
2652 /* Create the iv update inside the loop */
2653 new_stmt = gimple_build_assign_with_ops (PLUS_EXPR, vec_dest,
2654 induc_def, vec_step);
2655 vec_def = make_ssa_name (vec_dest, new_stmt);
2656 gimple_assign_set_lhs (new_stmt, vec_def);
2657 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
2658 set_vinfo_for_stmt (new_stmt, new_stmt_vec_info (new_stmt, loop_vinfo,
2659 NULL));
2661 /* Set the arguments of the phi node: */
2662 add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION);
2663 add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
2664 UNKNOWN_LOCATION);
2667 /* In case the vectorization factor (VF) is bigger than the number
2668 of elements that we can fit in a vectype (nunits), we have to generate
2669 more than one vector stmt - i.e., we need to "unroll" the
2670 vector stmt by a factor of VF/nunits. For more details see the
2671 documentation in vectorizable_operation. */
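/* For example (illustrative): with VF = 8 and a 4-unit vectype,
ncopies = 2. The loop-carried step built above is {8*S, ...}, and
below a second copy is created as the first plus {4*S, 4*S, 4*S, 4*S},
so the two vectors together hold the iv values of lanes 0..7. */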
2673 if (ncopies > 1)
2675 stmt_vec_info prev_stmt_vinfo;
2676 /* FORNOW. This restriction should be relaxed. */
2677 gcc_assert (!nested_in_vect_loop);
2679 /* Create the vector that holds the step of the induction. */
2680 expr = build_int_cst (TREE_TYPE (step_expr), nunits);
2681 new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
2682 expr, step_expr);
2683 t = NULL_TREE;
2684 for (i = 0; i < nunits; i++)
2685 t = tree_cons (NULL_TREE, unshare_expr (new_name), t);
2686 gcc_assert (CONSTANT_CLASS_P (new_name));
2687 vec = build_vector (stepvectype, t);
2688 vec_step = vect_init_vector (iv_phi, vec, stepvectype, NULL);
2690 vec_def = induc_def;
2691 prev_stmt_vinfo = vinfo_for_stmt (induction_phi);
2692 for (i = 1; i < ncopies; i++)
2694 /* vec_i = vec_prev + vec_step */
2695 new_stmt = gimple_build_assign_with_ops (PLUS_EXPR, vec_dest,
2696 vec_def, vec_step);
2697 vec_def = make_ssa_name (vec_dest, new_stmt);
2698 gimple_assign_set_lhs (new_stmt, vec_def);
2700 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
2701 set_vinfo_for_stmt (new_stmt,
2702 new_stmt_vec_info (new_stmt, loop_vinfo, NULL));
2703 STMT_VINFO_RELATED_STMT (prev_stmt_vinfo) = new_stmt;
2704 prev_stmt_vinfo = vinfo_for_stmt (new_stmt);
2708 if (nested_in_vect_loop)
2710 /* Find the loop-closed exit-phi of the induction, and record
2711 the final vector of induction results: */
2712 exit_phi = NULL;
2713 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
2715 if (!flow_bb_inside_loop_p (iv_loop, gimple_bb (USE_STMT (use_p))))
2717 exit_phi = USE_STMT (use_p);
2718 break;
2721 if (exit_phi)
2723 stmt_vec_info stmt_vinfo = vinfo_for_stmt (exit_phi);
2724 /* FORNOW. Currently not supporting the case that an inner-loop induction
2725 is not used in the outer-loop (i.e. only outside the outer-loop). */
2726 gcc_assert (STMT_VINFO_RELEVANT_P (stmt_vinfo)
2727 && !STMT_VINFO_LIVE_P (stmt_vinfo));
2729 STMT_VINFO_VEC_STMT (stmt_vinfo) = new_stmt;
2730 if (vect_print_dump_info (REPORT_DETAILS))
2732 fprintf (vect_dump, "vector of inductions after inner-loop:");
2733 print_gimple_stmt (vect_dump, new_stmt, 0, TDF_SLIM);
2739 if (vect_print_dump_info (REPORT_DETAILS))
2741 fprintf (vect_dump, "transform induction: created def-use cycle: ");
2742 print_gimple_stmt (vect_dump, induction_phi, 0, TDF_SLIM);
2743 fprintf (vect_dump, "\n");
2744 print_gimple_stmt (vect_dump, SSA_NAME_DEF_STMT (vec_def), 0, TDF_SLIM);
2747 STMT_VINFO_VEC_STMT (phi_info) = induction_phi;
2748 return induc_def;
2752 /* Function get_initial_def_for_reduction
2754 Input:
2755 STMT - a stmt that performs a reduction operation in the loop.
2756 INIT_VAL - the initial value of the reduction variable
2758 Output:
2759 ADJUSTMENT_DEF - a tree that holds a value to be added to the final result
2760 of the reduction (used for adjusting the epilog - see below).
2761 Return a vector variable, initialized according to the operation that STMT
2762 performs. This vector will be used as the initial value of the
2763 vector of partial results.
2765 Option1 (adjust in epilog): Initialize the vector as follows:
2766 add/bit or/xor: [0,0,...,0,0]
2767 mult/bit and: [1,1,...,1,1]
2768 min/max/cond_expr: [init_val,init_val,..,init_val,init_val]
2769 and when necessary (e.g. add/mult case) let the caller know
2770 that it needs to adjust the result by init_val.
2772 Option2: Initialize the vector as follows:
2773 add/bit or/xor: [init_val,0,0,...,0]
2774 mult/bit and: [init_val,1,1,...,1]
2775 min/max/cond_expr: [init_val,init_val,...,init_val]
2776 and no adjustments are needed.
2778 For example, for the following code:
2780 s = init_val;
2781 for (i=0;i<n;i++)
2782 s = s + a[i];
2784 STMT is 's = s + a[i]', and the reduction variable is 's'.
2785 For a vector of 4 units, we want to return either [0,0,0,init_val],
2786 or [0,0,0,0] and let the caller know that it needs to adjust
2787 the result at the end by 'init_val'.
2789 FORNOW, we use the 'adjust in epilog' scheme (Option1) when
2790 ADJUSTMENT_DEF is not NULL, because its initialization vector is
2791 simpler (the same element in all entries); otherwise we use Option2.
2793 A cost model should help decide between these two schemes. */
2795 tree
2796 get_initial_def_for_reduction (gimple stmt, tree init_val,
2797 tree *adjustment_def)
2799 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
2800 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
2801 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2802 tree scalar_type = TREE_TYPE (init_val);
2803 tree vectype = get_vectype_for_scalar_type (scalar_type);
2804 int nunits;
2805 enum tree_code code = gimple_assign_rhs_code (stmt);
2806 tree def_for_init;
2807 tree init_def;
2808 tree t = NULL_TREE;
2809 int i;
2810 bool nested_in_vect_loop = false;
2811 tree init_value;
2812 REAL_VALUE_TYPE real_init_val = dconst0;
2813 int int_init_val = 0;
2814 gimple def_stmt = NULL;
2816 gcc_assert (vectype);
2817 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2819 gcc_assert (POINTER_TYPE_P (scalar_type) || INTEGRAL_TYPE_P (scalar_type)
2820 || SCALAR_FLOAT_TYPE_P (scalar_type));
2822 if (nested_in_vect_loop_p (loop, stmt))
2823 nested_in_vect_loop = true;
2824 else
2825 gcc_assert (loop == (gimple_bb (stmt))->loop_father);
2827 /* In case of double reduction we only create a vector variable to be put
2828 in the reduction phi node. The actual statement creation is done in
2829 vect_create_epilog_for_reduction. */
2830 if (adjustment_def && nested_in_vect_loop
2831 && TREE_CODE (init_val) == SSA_NAME
2832 && (def_stmt = SSA_NAME_DEF_STMT (init_val))
2833 && gimple_code (def_stmt) == GIMPLE_PHI
2834 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
2835 && vinfo_for_stmt (def_stmt)
2836 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
2837 == vect_double_reduction_def)
2839 *adjustment_def = NULL;
2840 return vect_create_destination_var (init_val, vectype);
2843 if (TREE_CONSTANT (init_val))
2845 if (SCALAR_FLOAT_TYPE_P (scalar_type))
2846 init_value = build_real (scalar_type, TREE_REAL_CST (init_val));
2847 else
2848 init_value = build_int_cst (scalar_type, TREE_INT_CST_LOW (init_val));
2850 else
2851 init_value = init_val;
2853 switch (code)
2855 case WIDEN_SUM_EXPR:
2856 case DOT_PROD_EXPR:
2857 case PLUS_EXPR:
2858 case MINUS_EXPR:
2859 case BIT_IOR_EXPR:
2860 case BIT_XOR_EXPR:
2861 case MULT_EXPR:
2862 case BIT_AND_EXPR:
2863 /* ADJUSTMENT_DEF is NULL when called from
2864 vect_create_epilog_for_reduction to vectorize a double reduction. */
2865 if (adjustment_def)
2867 if (nested_in_vect_loop)
2868 *adjustment_def = vect_get_vec_def_for_operand (init_val, stmt,
2869 NULL);
2870 else
2871 *adjustment_def = init_val;
2874 if (code == MULT_EXPR || code == BIT_AND_EXPR)
2876 real_init_val = dconst1;
2877 int_init_val = 1;
2880 if (SCALAR_FLOAT_TYPE_P (scalar_type))
2881 def_for_init = build_real (scalar_type, real_init_val);
2882 else
2883 def_for_init = build_int_cst (scalar_type, int_init_val);
2885 /* Create a vector of '0's or '1's for all but the first element. */
2886 for (i = nunits - 2; i >= 0; --i)
2887 t = tree_cons (NULL_TREE, def_for_init, t);
2889 /* Option1: the first element is '0' or '1' as well. */
2890 if (adjustment_def)
2892 t = tree_cons (NULL_TREE, def_for_init, t);
2893 init_def = build_vector (vectype, t);
2894 break;
2897 /* Option2: the first element is INIT_VAL. */
2898 t = tree_cons (NULL_TREE, init_value, t);
2899 if (TREE_CONSTANT (init_val))
2900 init_def = build_vector (vectype, t);
2901 else
2902 init_def = build_constructor_from_list (vectype, t);
2904 break;
2906 case MIN_EXPR:
2907 case MAX_EXPR:
2908 case COND_EXPR:
2909 if (adjustment_def)
2911 *adjustment_def = NULL_TREE;
2912 init_def = vect_get_vec_def_for_operand (init_val, stmt, NULL);
2913 break;
2916 for (i = nunits - 1; i >= 0; --i)
2917 t = tree_cons (NULL_TREE, init_value, t);
2919 if (TREE_CONSTANT (init_val))
2920 init_def = build_vector (vectype, t);
2921 else
2922 init_def = build_constructor_from_list (vectype, t);
2924 break;
2926 default:
2927 gcc_unreachable ();
2930 return init_def;
2934 /* Function vect_create_epilog_for_reduction
2936 Create code at the loop-epilog to finalize the result of a reduction
2937 computation.
2939 VECT_DEFS is list of vector of partial results, i.e., the lhs's of vector
2940 reduction statements.
2941 STMT is the scalar reduction stmt that is being vectorized.
2942 NCOPIES is > 1 in case the vectorization factor (VF) is bigger than the
2943 number of elements that we can fit in a vectype (nunits). In this case
2944 we have to generate more than one vector stmt - i.e., we need to "unroll"
2945 the vector stmt by a factor of VF/nunits. For more details see the
2946 documentation in vectorizable_operation.
2947 REDUC_CODE is the tree-code for the epilog reduction.
2948 REDUCTION_PHIS is a list of the phi-nodes that carry the reduction
2949 computation.
2950 REDUC_INDEX is the index of the operand in the right hand side of the
2951 statement that is defined by REDUCTION_PHI.
2952 DOUBLE_REDUC is TRUE if double reduction phi nodes should be handled.
2953 SLP_NODE is an SLP node containing a group of reduction statements. The
2954 first one in this group is STMT.
2956 This function:
2957 1. Creates the reduction def-use cycles: sets the arguments for
2958 REDUCTION_PHIS:
2959 The loop-entry argument is the vectorized initial-value of the reduction.
2960 The loop-latch argument is taken from VECT_DEFS - the vector of partial
2961 sums.
2962 2. "Reduces" each vector of partial results VECT_DEFS into a single result,
2963 by applying the operation specified by REDUC_CODE if available, or by
2964 other means (whole-vector shifts or a scalar loop).
2965 The function also creates a new phi node at the loop exit to preserve
2966 loop-closed form, as illustrated below.
2968 The flow at the entry to this function:
2970 loop:
2971 vec_def = phi <null, null> # REDUCTION_PHI
2972 VECT_DEF = vector_stmt # vectorized form of STMT
2973 s_loop = scalar_stmt # (scalar) STMT
2974 loop_exit:
2975 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
2976 use <s_out0>
2977 use <s_out0>
2979 The above is transformed by this function into:
2981 loop:
2982 vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI
2983 VECT_DEF = vector_stmt # vectorized form of STMT
2984 s_loop = scalar_stmt # (scalar) STMT
2985 loop_exit:
2986 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
2987 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
2988 v_out2 = reduce <v_out1>
2989 s_out3 = extract_field <v_out2, 0>
2990 s_out4 = adjust_result <s_out3>
2991 use <s_out4>
2992 use <s_out4>
2995 static void
2996 vect_create_epilog_for_reduction (VEC (tree, heap) *vect_defs, gimple stmt,
2997 int ncopies, enum tree_code reduc_code,
2998 VEC (gimple, heap) *reduction_phis,
2999 int reduc_index, bool double_reduc,
3000 slp_tree slp_node)
3002 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3003 stmt_vec_info prev_phi_info;
3004 tree vectype;
3005 enum machine_mode mode;
3006 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3007 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo), *outer_loop = NULL;
3008 basic_block exit_bb;
3009 tree scalar_dest;
3010 tree scalar_type;
3011 gimple new_phi = NULL, phi;
3012 gimple_stmt_iterator exit_gsi;
3013 tree vec_dest;
3014 tree new_temp = NULL_TREE, new_dest, new_name, new_scalar_dest;
3015 gimple epilog_stmt = NULL;
3016 enum tree_code code = gimple_assign_rhs_code (stmt);
3017 gimple exit_phi;
3018 tree bitsize, bitpos;
3019 tree adjustment_def = NULL;
3020 tree vec_initial_def = NULL;
3021 tree reduction_op, expr, def;
3022 tree orig_name, scalar_result;
3023 imm_use_iterator imm_iter;
3024 use_operand_p use_p;
3025 bool extract_scalar_result = false;
3026 gimple use_stmt, orig_stmt, reduction_phi = NULL;
3027 bool nested_in_vect_loop = false;
3028 VEC (gimple, heap) *new_phis = NULL;
3029 enum vect_def_type dt = vect_unknown_def_type;
3030 int j, i;
3031 VEC (tree, heap) *scalar_results = NULL;
3032 unsigned int group_size = 1, k, ratio;
3033 VEC (tree, heap) *vec_initial_defs = NULL;
3034 VEC (gimple, heap) *phis;
3036 if (slp_node)
3037 group_size = VEC_length (gimple, SLP_TREE_SCALAR_STMTS (slp_node));
3039 if (nested_in_vect_loop_p (loop, stmt))
3041 outer_loop = loop;
3042 loop = loop->inner;
3043 nested_in_vect_loop = true;
3044 gcc_assert (!slp_node);
3047 switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
3049 case GIMPLE_SINGLE_RHS:
3050 gcc_assert (TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt))
3051 == ternary_op);
3052 reduction_op = TREE_OPERAND (gimple_assign_rhs1 (stmt), reduc_index);
3053 break;
3054 case GIMPLE_UNARY_RHS:
3055 reduction_op = gimple_assign_rhs1 (stmt);
3056 break;
3057 case GIMPLE_BINARY_RHS:
3058 reduction_op = reduc_index ?
3059 gimple_assign_rhs2 (stmt) : gimple_assign_rhs1 (stmt);
3060 break;
3061 default:
3062 gcc_unreachable ();
3065 vectype = get_vectype_for_scalar_type (TREE_TYPE (reduction_op));
3066 gcc_assert (vectype);
3067 mode = TYPE_MODE (vectype);
3069 /* 1. Create the reduction def-use cycle:
3070 Set the arguments of REDUCTION_PHIS, i.e., transform
3072 loop:
3073 vec_def = phi <null, null> # REDUCTION_PHI
3074 VECT_DEF = vector_stmt # vectorized form of STMT
3077 into:
3079 loop:
3080 vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI
3081 VECT_DEF = vector_stmt # vectorized form of STMT
3084 (in case of SLP, do it for all the phis). */
3086 /* Get the loop-entry arguments. */
3087 if (slp_node)
3088 vect_get_slp_defs (slp_node, &vec_initial_defs, NULL, reduc_index);
3089 else
3091 vec_initial_defs = VEC_alloc (tree, heap, 1);
3092 /* For the case of reduction, vect_get_vec_def_for_operand returns
3093 the scalar def before the loop, that defines the initial value
3094 of the reduction variable. */
3095 vec_initial_def = vect_get_vec_def_for_operand (reduction_op, stmt,
3096 &adjustment_def);
3097 VEC_quick_push (tree, vec_initial_defs, vec_initial_def);
3100 /* Set phi nodes arguments. */
3101 for (i = 0; VEC_iterate (gimple, reduction_phis, i, phi); i++)
3103 tree vec_init_def = VEC_index (tree, vec_initial_defs, i);
3104 tree def = VEC_index (tree, vect_defs, i);
3105 for (j = 0; j < ncopies; j++)
3107 /* Set the loop-entry arg of the reduction-phi. */
3108 add_phi_arg (phi, vec_init_def, loop_preheader_edge (loop),
3109 UNKNOWN_LOCATION);
3111 /* Set the loop-latch arg for the reduction-phi. */
3112 if (j > 0)
3113 def = vect_get_vec_def_for_stmt_copy (vect_unknown_def_type, def);
3115 add_phi_arg (phi, def, loop_latch_edge (loop), UNKNOWN_LOCATION);
3117 if (vect_print_dump_info (REPORT_DETAILS))
3119 fprintf (vect_dump, "transform reduction: created def-use"
3120 " cycle: ");
3121 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
3122 fprintf (vect_dump, "\n");
3123 print_gimple_stmt (vect_dump, SSA_NAME_DEF_STMT (def), 0,
3124 TDF_SLIM);
3127 phi = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi));
3131 VEC_free (tree, heap, vec_initial_defs);
3133 /* 2. Create epilog code.
3134 The reduction epilog code operates across the elements of the vector
3135 of partial results computed by the vectorized loop.
3136 The reduction epilog code consists of:
3138 step 1: compute the scalar result in a vector (v_out2)
3139 step 2: extract the scalar result (s_out3) from the vector (v_out2)
3140 step 3: adjust the scalar result (s_out3) if needed.
3142 Step 1 can be accomplished using one of the following three schemes:
3143 (scheme 1) using reduc_code, if available.
3144 (scheme 2) using whole-vector shifts, if available.
3145 (scheme 3) using a scalar loop. In this case steps 1+2 above are
3146 combined.
3148 The overall epilog code looks like this:
3150 s_out0 = phi <s_loop> # original EXIT_PHI
3151 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
3152 v_out2 = reduce <v_out1> # step 1
3153 s_out3 = extract_field <v_out2, 0> # step 2
3154 s_out4 = adjust_result <s_out3> # step 3
3156 (step 3 is optional, and steps 1 and 2 may be combined).
3157 Lastly, the uses of s_out0 are replaced by s_out4. */
3160 /* 2.1 Create new loop-exit-phis to preserve loop-closed form:
3161 v_out1 = phi <VECT_DEF>
3162 Store them in NEW_PHIS. */
3164 exit_bb = single_exit (loop)->dest;
3165 prev_phi_info = NULL;
3166 new_phis = VEC_alloc (gimple, heap, VEC_length (tree, vect_defs));
3167 for (i = 0; VEC_iterate (tree, vect_defs, i, def); i++)
3169 for (j = 0; j < ncopies; j++)
3171 phi = create_phi_node (SSA_NAME_VAR (def), exit_bb);
3172 set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, loop_vinfo, NULL));
3173 if (j == 0)
3174 VEC_quick_push (gimple, new_phis, phi);
3175 else
3177 def = vect_get_vec_def_for_stmt_copy (dt, def);
3178 STMT_VINFO_RELATED_STMT (prev_phi_info) = phi;
3181 SET_PHI_ARG_DEF (phi, single_exit (loop)->dest_idx, def);
3182 prev_phi_info = vinfo_for_stmt (phi);
3186 exit_gsi = gsi_after_labels (exit_bb);
3188 /* 2.2 Get the relevant tree-code to use in the epilog for schemes 2,3
3189 (i.e. when reduc_code is not available) and in the final adjustment
3190 code (if needed). Also get the original scalar reduction variable as
3191 defined in the loop. In case STMT is a "pattern-stmt" (i.e. - it
3192 represents a reduction pattern), the tree-code and scalar-def are
3193 taken from the original stmt that the pattern-stmt (STMT) replaces.
3194 Otherwise (it is a regular reduction) - the tree-code and scalar-def
3195 are taken from STMT. */
3197 orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
3198 if (!orig_stmt)
3200 /* Regular reduction */
3201 orig_stmt = stmt;
3203 else
3205 /* Reduction pattern */
3206 stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt);
3207 gcc_assert (STMT_VINFO_IN_PATTERN_P (stmt_vinfo));
3208 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo) == stmt);
3211 code = gimple_assign_rhs_code (orig_stmt);
3212 /* For MINUS_EXPR the initial vector is [init_val,0,...,0], therefore,
3213 partial results are added and not subtracted. */
3214 if (code == MINUS_EXPR)
3215 code = PLUS_EXPR;
3217 scalar_dest = gimple_assign_lhs (orig_stmt);
3218 scalar_type = TREE_TYPE (scalar_dest);
3219 scalar_results = VEC_alloc (tree, heap, group_size);
3220 new_scalar_dest = vect_create_destination_var (scalar_dest, NULL);
3221 bitsize = TYPE_SIZE (scalar_type);
3223 /* In case this is a reduction in an inner-loop while vectorizing an outer
3224 loop - we don't need to extract a single scalar result at the end of the
3225 inner-loop (unless it is a double reduction, i.e., the use of the reduction is
3226 outside the outer-loop). The final vector of partial results will be used
3227 in the vectorized outer-loop, or reduced to a scalar result at the end of
3228 the outer-loop. */
3229 if (nested_in_vect_loop && !double_reduc)
3230 goto vect_finalize_reduction;
3232 /* 2.3 Create the reduction code, using one of the three schemes described
3233 above. In SLP we simply need to extract all the elements from the
3234 vector (without reducing them), so we use the element extracts of case 3. */
3235 if (reduc_code != ERROR_MARK && !slp_node)
3237 tree tmp;
3239 /*** Case 1: Create:
3240 v_out2 = reduc_expr <v_out1> */
3242 if (vect_print_dump_info (REPORT_DETAILS))
3243 fprintf (vect_dump, "Reduce using direct vector reduction.");
3245 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3246 new_phi = VEC_index (gimple, new_phis, 0);
3247 tmp = build1 (reduc_code, vectype, PHI_RESULT (new_phi));
3248 epilog_stmt = gimple_build_assign (vec_dest, tmp);
3249 new_temp = make_ssa_name (vec_dest, epilog_stmt);
3250 gimple_assign_set_lhs (epilog_stmt, new_temp);
3251 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3253 extract_scalar_result = true;
3255 else
3257 enum tree_code shift_code = ERROR_MARK;
3258 bool have_whole_vector_shift = true;
3259 int bit_offset;
3260 int element_bitsize = tree_low_cst (bitsize, 1);
3261 int vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1);
3262 tree vec_temp;
3264 if (optab_handler (vec_shr_optab, mode)->insn_code != CODE_FOR_nothing)
3265 shift_code = VEC_RSHIFT_EXPR;
3266 else
3267 have_whole_vector_shift = false;
3269 /* Regardless of whether we have a whole vector shift, if we're
3270 emulating the operation via tree-vect-generic, we don't want
3271 to use it. Only the first round of the reduction is likely
3272 to still be profitable via emulation. */
3273 /* ??? It might be better to emit a reduction tree code here, so that
3274 tree-vect-generic can expand the first round via bit tricks. */
3275 if (!VECTOR_MODE_P (mode))
3276 have_whole_vector_shift = false;
3277 else
3279 optab optab = optab_for_tree_code (code, vectype, optab_default);
3280 if (optab_handler (optab, mode)->insn_code == CODE_FOR_nothing)
3281 have_whole_vector_shift = false;
3284 if (have_whole_vector_shift && !slp_node)
3286 /*** Case 2: Create:
3287 for (offset = VS/2; offset >= element_size; offset/=2)
3289 Create: va' = vec_shift <va, offset>
3290 Create: va = vop <va, va'>
3291 } */
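/* Illustrative trace (lane placement depends on the target): for a
128-bit V4SI partial-sum vector {a, b, c, d},
shift by 64 bits: {c, d, _, _};  vop: {a+c, b+d, _, _}
shift by 32 bits: {b+d, _, _, _};  vop: {a+b+c+d, _, _, _}
after which element 0 holds the scalar result. */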
3293 if (vect_print_dump_info (REPORT_DETAILS))
3294 fprintf (vect_dump, "Reduce using vector shifts");
3296 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3297 new_phi = VEC_index (gimple, new_phis, 0);
3298 new_temp = PHI_RESULT (new_phi);
3299 for (bit_offset = vec_size_in_bits/2;
3300 bit_offset >= element_bitsize;
3301 bit_offset /= 2)
3303 tree bitpos = size_int (bit_offset);
3305 epilog_stmt = gimple_build_assign_with_ops (shift_code,
3306 vec_dest, new_temp, bitpos);
3307 new_name = make_ssa_name (vec_dest, epilog_stmt);
3308 gimple_assign_set_lhs (epilog_stmt, new_name);
3309 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3311 epilog_stmt = gimple_build_assign_with_ops (code, vec_dest,
3312 new_name, new_temp);
3313 new_temp = make_ssa_name (vec_dest, epilog_stmt);
3314 gimple_assign_set_lhs (epilog_stmt, new_temp);
3315 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3318 extract_scalar_result = true;
3320 else
3322 tree rhs;
3324 /*** Case 3: Create:
3325 s = extract_field <v_out2, 0>
3326 for (offset = element_size;
3327 offset < vector_size;
3328 offset += element_size;)
3330 Create: s' = extract_field <v_out2, offset>
3331 Create: s = op <s, s'> // For non SLP cases
3332 } */
3334 if (vect_print_dump_info (REPORT_DETAILS))
3335 fprintf (vect_dump, "Reduce using scalar code. ");
3337 vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1);
3338 for (i = 0; VEC_iterate (gimple, new_phis, i, new_phi); i++)
3340 vec_temp = PHI_RESULT (new_phi);
3341 rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp, bitsize,
3342 bitsize_zero_node);
3343 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
3344 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
3345 gimple_assign_set_lhs (epilog_stmt, new_temp);
3346 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3348 /* In SLP we don't need to apply the reduction operation, so we just
3349 collect the s' values in SCALAR_RESULTS. */
3350 if (slp_node)
3351 VEC_safe_push (tree, heap, scalar_results, new_temp);
3353 for (bit_offset = element_bitsize;
3354 bit_offset < vec_size_in_bits;
3355 bit_offset += element_bitsize)
3357 tree bitpos = bitsize_int (bit_offset);
3358 tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp,
3359 bitsize, bitpos);
3361 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
3362 new_name = make_ssa_name (new_scalar_dest, epilog_stmt);
3363 gimple_assign_set_lhs (epilog_stmt, new_name);
3364 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3366 if (slp_node)
3368 /* In SLP we don't need to apply the reduction operation, so
3369 we just collect the s' values in SCALAR_RESULTS. */
3370 new_temp = new_name;
3371 VEC_safe_push (tree, heap, scalar_results, new_name);
3373 else
3375 epilog_stmt = gimple_build_assign_with_ops (code,
3376 new_scalar_dest, new_name, new_temp);
3377 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
3378 gimple_assign_set_lhs (epilog_stmt, new_temp);
3379 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3384 /* The only case where we need to reduce scalar results in SLP is
3385 unrolling. If the size of SCALAR_RESULTS is greater than
3386 GROUP_SIZE, we reduce them by combining elements modulo
3387 GROUP_SIZE. */
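/* For example (illustrative): with GROUP_SIZE = 2 and four extracted
scalars r0, r1, r2, r3 (two unrolled copies of the group), the loop
below computes r0 op r2 into slot 0 and r1 op r3 into slot 1, i.e.
element J is folded into slot J % GROUP_SIZE. */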
3388 if (slp_node)
3390 tree res, first_res, new_res;
3391 gimple new_stmt;
3393 /* Reduce multiple scalar results in case of SLP unrolling. */
3394 for (j = group_size; VEC_iterate (tree, scalar_results, j, res);
3395 j++)
3397 first_res = VEC_index (tree, scalar_results, j % group_size);
3398 new_stmt = gimple_build_assign_with_ops (code,
3399 new_scalar_dest, first_res, res);
3400 new_res = make_ssa_name (new_scalar_dest, new_stmt);
3401 gimple_assign_set_lhs (new_stmt, new_res);
3402 gsi_insert_before (&exit_gsi, new_stmt, GSI_SAME_STMT);
3403 VEC_replace (tree, scalar_results, j % group_size, new_res);
3406 else
3407 /* Not SLP - we have one scalar to keep in SCALAR_RESULTS. */
3408 VEC_safe_push (tree, heap, scalar_results, new_temp);
3410 extract_scalar_result = false;
3414 /* 2.4 Extract the final scalar result. Create:
3415 s_out3 = extract_field <v_out2, bitpos> */
3417 if (extract_scalar_result)
3419 tree rhs;
3421 if (vect_print_dump_info (REPORT_DETAILS))
3422 fprintf (vect_dump, "extract scalar result");
3424 if (BYTES_BIG_ENDIAN)
3425 bitpos = size_binop (MULT_EXPR,
3426 bitsize_int (TYPE_VECTOR_SUBPARTS (vectype) - 1),
3427 TYPE_SIZE (scalar_type));
3428 else
3429 bitpos = bitsize_zero_node;
3431 rhs = build3 (BIT_FIELD_REF, scalar_type, new_temp, bitsize, bitpos);
3432 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
3433 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
3434 gimple_assign_set_lhs (epilog_stmt, new_temp);
3435 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3436 VEC_safe_push (tree, heap, scalar_results, new_temp);
3439 vect_finalize_reduction:
3441 /* 2.5 Adjust the final result by the initial value of the reduction
3442 variable. (When such an adjustment is not needed,
3443 'adjustment_def' is NULL.) For example, if code is PLUS we create:
3444 new_temp = loop_exit_def + adjustment_def */
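/* For example (illustrative): for 's = 10; for (...) s += a[i];' the
vector of partial sums was initialized to {0, 0, 0, 0} and
ADJUSTMENT_DEF holds the scalar 10, which is added back to the
reduced result here. */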
3446 if (adjustment_def)
3448 gcc_assert (!slp_node);
3449 if (nested_in_vect_loop)
3451 new_phi = VEC_index (gimple, new_phis, 0);
3452 gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) == VECTOR_TYPE);
3453 expr = build2 (code, vectype, PHI_RESULT (new_phi), adjustment_def);
3454 new_dest = vect_create_destination_var (scalar_dest, vectype);
3456 else
3458 new_temp = VEC_index (tree, scalar_results, 0);
3459 gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) != VECTOR_TYPE);
3460 expr = build2 (code, scalar_type, new_temp, adjustment_def);
3461 new_dest = vect_create_destination_var (scalar_dest, scalar_type);
3464 epilog_stmt = gimple_build_assign (new_dest, expr);
3465 new_temp = make_ssa_name (new_dest, epilog_stmt);
3466 gimple_assign_set_lhs (epilog_stmt, new_temp);
3467 SSA_NAME_DEF_STMT (new_temp) = epilog_stmt;
3468 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
3469 if (nested_in_vect_loop)
3471 set_vinfo_for_stmt (epilog_stmt,
3472 new_stmt_vec_info (epilog_stmt, loop_vinfo,
3473 NULL));
3474 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (epilog_stmt)) =
3475 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (new_phi));
3477 if (!double_reduc)
3478 VEC_quick_push (tree, scalar_results, new_temp);
3479 else
3480 VEC_replace (tree, scalar_results, 0, new_temp);
3482 else
3483 VEC_replace (tree, scalar_results, 0, new_temp);
3485 VEC_replace (gimple, new_phis, 0, epilog_stmt);
3488 /* 2.6 Handle the loop-exit phis. Replace the uses of scalar loop-exit
3489 phis with new adjusted scalar results, i.e., replace use <s_out0>
3490 with use <s_out4>.
3492 Transform:
3493 loop_exit:
3494 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
3495 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
3496 v_out2 = reduce <v_out1>
3497 s_out3 = extract_field <v_out2, 0>
3498 s_out4 = adjust_result <s_out3>
3499 use <s_out0>
3500 use <s_out0>
3502 into:
3504 loop_exit:
3505 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
3506 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
3507 v_out2 = reduce <v_out1>
3508 s_out3 = extract_field <v_out2, 0>
3509 s_out4 = adjust_result <s_out3>
3510 use <s_out4>
3511 use <s_out4> */
3513 /* In SLP we may have several statements in NEW_PHIS and REDUCTION_PHIS (in
3514 case GROUP_SIZE is greater than the vectorization factor). Therefore, we
3515 need to match SCALAR_RESULTS with the corresponding statements. The first
3516 (GROUP_SIZE / number of new vector stmts) scalar results correspond to
3517 the first vector stmt, etc.
3518 (RATIO is equal to (GROUP_SIZE / number of new vector stmts)). */
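/* For example (illustrative): with GROUP_SIZE = 4 and two stmts in
NEW_PHIS, RATIO = 2: scalar results 0 and 1 belong to the first
vector stmt and reduction phi, results 2 and 3 to the second. */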
3519 if (group_size > VEC_length (gimple, new_phis))
3521 ratio = group_size / VEC_length (gimple, new_phis);
3522 gcc_assert (!(group_size % VEC_length (gimple, new_phis)));
3524 else
3525 ratio = 1;
3527 for (k = 0; k < group_size; k++)
3529 if (k % ratio == 0)
3531 epilog_stmt = VEC_index (gimple, new_phis, k / ratio);
3532 reduction_phi = VEC_index (gimple, reduction_phis, k / ratio);
3535 if (slp_node)
3537 gimple current_stmt = VEC_index (gimple,
3538 SLP_TREE_SCALAR_STMTS (slp_node), k);
3540 orig_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (current_stmt));
3541 /* SLP statements can't participate in patterns. */
3542 gcc_assert (!orig_stmt);
3543 scalar_dest = gimple_assign_lhs (current_stmt);
3546 phis = VEC_alloc (gimple, heap, 3);
3547 /* Find the loop-closed-use at the loop exit of the original scalar
3548 result. (The reduction result is expected to have two immediate uses -
3549 one at the latch block, and one at the loop exit). */
3550 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
3551 if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p))))
3552 VEC_safe_push (gimple, heap, phis, USE_STMT (use_p));
3554 /* We expect to have found an exit_phi because of loop-closed-ssa
3555 form. */
3556 gcc_assert (!VEC_empty (gimple, phis));
3558 for (i = 0; VEC_iterate (gimple, phis, i, exit_phi); i++)
3560 if (outer_loop)
3562 stmt_vec_info exit_phi_vinfo = vinfo_for_stmt (exit_phi);
3563 gimple vect_phi;
3565 /* FORNOW. Currently not supporting the case that an inner-loop
3566 reduction is not used in the outer-loop (but only outside the
3567 outer-loop), unless it is a double reduction. */
3568 gcc_assert ((STMT_VINFO_RELEVANT_P (exit_phi_vinfo)
3569 && !STMT_VINFO_LIVE_P (exit_phi_vinfo))
3570 || double_reduc);
3572 STMT_VINFO_VEC_STMT (exit_phi_vinfo) = epilog_stmt;
3573 if (!double_reduc
3574 || STMT_VINFO_DEF_TYPE (exit_phi_vinfo)
3575 != vect_double_reduction_def)
3576 continue;
3578 /* Handle double reduction:
3580 stmt1: s1 = phi <s0, s2> - double reduction phi (outer loop)
3581 stmt2: s3 = phi <s1, s4> - (regular) reduc phi (inner loop)
3582 stmt3: s4 = use (s3) - (regular) reduc stmt (inner loop)
3583 stmt4: s2 = phi <s4> - double reduction stmt (outer loop)
3585 At that point the regular reduction (stmt2 and stmt3) is
3586 already vectorized, as well as the exit phi node, stmt4.
3587 Here we vectorize the phi node of double reduction, stmt1, and
3588 update all relevant statements. */
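/* A hedged source-level sketch of code that can give rise to a double
   reduction when the outer loop is vectorized (illustrative only):

     int s = 0;
     for (j = 0; j < M; j++)      <-- outer loop (stmt1, stmt4)
       for (i = 0; i < N; i++)    <-- inner loop (stmt2, stmt3)
         s += a[j][i];

   The inner loop reduces into s, and that inner reduction is itself
   carried around the outer loop.  */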
3590 /* Go through all the uses of s2 to find double reduction phi
3591 node, i.e., stmt1 above. */
3592 orig_name = PHI_RESULT (exit_phi);
3593 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
3595 stmt_vec_info use_stmt_vinfo = vinfo_for_stmt (use_stmt);
3596 stmt_vec_info new_phi_vinfo;
3597 tree vect_phi_init, preheader_arg, vect_phi_res, init_def;
3598 basic_block bb = gimple_bb (use_stmt);
3599 gimple use;
3601 /* Check that USE_STMT is really a double reduction phi
3602 node. */
3603 if (gimple_code (use_stmt) != GIMPLE_PHI
3604 || gimple_phi_num_args (use_stmt) != 2
3605 || !use_stmt_vinfo
3606 || STMT_VINFO_DEF_TYPE (use_stmt_vinfo)
3607 != vect_double_reduction_def
3608 || bb->loop_father != outer_loop)
3609 continue;
3611 /* Create vector phi node for double reduction:
3612 vs1 = phi <vs0, vs2>
3613 vs1 was created previously in this function by a call to
3614 vect_get_vec_def_for_operand and is stored in
3615 vec_initial_def;
3616 vs2 is defined by EPILOG_STMT, the vectorized EXIT_PHI;
3617 vs0 is created here. */
3619 /* Create vector phi node. */
3620 vect_phi = create_phi_node (vec_initial_def, bb);
3621 new_phi_vinfo = new_stmt_vec_info (vect_phi,
3622 loop_vec_info_for_loop (outer_loop), NULL);
3623 set_vinfo_for_stmt (vect_phi, new_phi_vinfo);
3625 /* Create vs0 - initial def of the double reduction phi. */
3626 preheader_arg = PHI_ARG_DEF_FROM_EDGE (use_stmt,
3627 loop_preheader_edge (outer_loop));
3628 init_def = get_initial_def_for_reduction (stmt,
3629 preheader_arg, NULL);
3630 vect_phi_init = vect_init_vector (use_stmt, init_def,
3631 vectype, NULL);
3633 /* Update phi node arguments with vs0 and vs2. */
3634 add_phi_arg (vect_phi, vect_phi_init,
3635 loop_preheader_edge (outer_loop),
3636 UNKNOWN_LOCATION);
3637 add_phi_arg (vect_phi, PHI_RESULT (epilog_stmt),
3638 loop_latch_edge (outer_loop), UNKNOWN_LOCATION);
3639 if (vect_print_dump_info (REPORT_DETAILS))
3641 fprintf (vect_dump, "created double reduction phi "
3642 "node: ");
3643 print_gimple_stmt (vect_dump, vect_phi, 0, TDF_SLIM);
3646 vect_phi_res = PHI_RESULT (vect_phi);
3648 /* Replace the use, i.e., set the correct vs1 in the regular
3649 reduction phi node. FORNOW, NCOPIES is always 1, so the
3650 loop is redundant. */
3651 use = reduction_phi;
3652 for (j = 0; j < ncopies; j++)
3654 edge pr_edge = loop_preheader_edge (loop);
3655 SET_PHI_ARG_DEF (use, pr_edge->dest_idx, vect_phi_res);
3656 use = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (use));
3661 /* Replace the uses: */
3662 orig_name = PHI_RESULT (exit_phi);
3663 scalar_result = VEC_index (tree, scalar_results, k);
3664 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
3665 FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
3666 SET_USE (use_p, scalar_result);
3669 VEC_free (gimple, heap, phis);
3672 VEC_free (tree, heap, scalar_results);
3673 VEC_free (gimple, heap, new_phis);
3677 /* Function vectorizable_reduction.
3679 Check if STMT performs a reduction operation that can be vectorized.
3680 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3681 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
3682 Return FALSE if not a vectorizable STMT, TRUE otherwise.
3684 This function also handles reduction idioms (patterns) that have been
3685 recognized in advance during vect_pattern_recog. In this case, STMT may be
3686 of this form:
3687 X = pattern_expr (arg0, arg1, ..., X)
3688 and its STMT_VINFO_RELATED_STMT points to the last stmt in the original
3689 sequence that had been detected and replaced by the pattern-stmt (STMT).
3691 In some cases of reduction patterns, the type of the reduction variable X is
3692 different from the type of the other arguments of STMT.
3693 In such cases, the vectype that is used when transforming STMT into a vector
3694 stmt is different from the vectype that is used to determine the
3695 vectorization factor, because it consists of a different number of elements
3696 than the actual number of elements that are being operated upon in parallel.
3698 For example, consider an accumulation of shorts into an int accumulator.
3699 On some targets it's possible to vectorize this pattern operating on 8
3700 shorts at a time (hence, the vectype for purposes of determining the
3701 vectorization factor should be V8HI); on the other hand, the vectype that
3702 is used to create the vector form is actually V4SI (the type of the result).
3704 Upon entry to this function, STMT_VINFO_VECTYPE records the vectype that
3705 indicates the actual level of parallelism (V8HI in the example), so
3706 that the right vectorization factor is derived. This vectype
3707 corresponds to the type of arguments to the reduction stmt, and should *NOT*
3708 be used to create the vectorized stmt. The right vectype for the vectorized
3709 stmt is obtained from the type of the result X:
3710 get_vectype_for_scalar_type (TREE_TYPE (X))
3712 This means that, contrary to "regular" reductions (or "regular" stmts in
3713 general), the following equation:
3714 STMT_VINFO_VECTYPE == get_vectype_for_scalar_type (TREE_TYPE (X))
3715 does *NOT* necessarily hold for reduction patterns. */
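/* As a hedged illustration of such a pattern (an example, not taken from
   vect_pattern_recog itself):

     short a[N];
     int sum = 0;
     for (i = 0; i < N; i++)
       sum += a[i];    <-- detected as X = widen_sum <a[i], X>

   The vectype recorded for computing the vectorization factor is V8HI
   (eight shorts at a time), while the vectorized stmt itself produces
   V4SI values, i.e. get_vectype_for_scalar_type (TREE_TYPE (X)).  */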
3717 bool
3718 vectorizable_reduction (gimple stmt, gimple_stmt_iterator *gsi,
3719 gimple *vec_stmt, slp_tree slp_node)
3721 tree vec_dest;
3722 tree scalar_dest;
3723 tree loop_vec_def0 = NULL_TREE, loop_vec_def1 = NULL_TREE;
3724 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3725 tree vectype_out = STMT_VINFO_VECTYPE (stmt_info);
3726 tree vectype_in = NULL_TREE;
3727 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3728 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
3729 enum tree_code code, orig_code, epilog_reduc_code;
3730 enum machine_mode vec_mode;
3731 int op_type;
3732 optab optab, reduc_optab;
3733 tree new_temp = NULL_TREE;
3734 tree def;
3735 gimple def_stmt;
3736 enum vect_def_type dt;
3737 gimple new_phi = NULL;
3738 tree scalar_type;
3739 bool is_simple_use;
3740 gimple orig_stmt;
3741 stmt_vec_info orig_stmt_info;
3742 tree expr = NULL_TREE;
3743 int i;
3744 int ncopies;
3745 int epilog_copies;
3746 stmt_vec_info prev_stmt_info, prev_phi_info;
3747 bool single_defuse_cycle = false;
3748 tree reduc_def = NULL_TREE;
3749 gimple new_stmt = NULL;
3750 int j;
3751 tree ops[3];
3752 bool nested_cycle = false, found_nested_cycle_def = false;
3753 gimple reduc_def_stmt = NULL;
3754 /* The default is that the reduction variable is the last one in the statement. */
3755 int reduc_index = 2;
3756 bool double_reduc = false, dummy;
3757 basic_block def_bb;
3758 struct loop * def_stmt_loop, *outer_loop = NULL;
3759 tree def_arg;
3760 gimple def_arg_stmt;
3761 VEC (tree, heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL, *vect_defs = NULL;
3762 VEC (gimple, heap) *phis = NULL;
3763 int vec_num;
3764 tree def0, def1;
3766 if (nested_in_vect_loop_p (loop, stmt))
3768 outer_loop = loop;
3769 loop = loop->inner;
3770 nested_cycle = true;
3773 /* 1. Is vectorizable reduction? */
3774 /* Not supportable if the reduction variable is used in the loop. */
3775 if (STMT_VINFO_RELEVANT (stmt_info) > vect_used_in_outer)
3776 return false;
3778 /* Reductions that are not used even in an enclosing outer-loop
3779 are expected to be "live" (used out of the loop). */
3780 if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_scope
3781 && !STMT_VINFO_LIVE_P (stmt_info))
3782 return false;
3784 /* Make sure it was already recognized as a reduction computation. */
3785 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_reduction_def
3786 && STMT_VINFO_DEF_TYPE (stmt_info) != vect_nested_cycle)
3787 return false;
3789 /* 2. Has this been recognized as a reduction pattern?
3791 Check if STMT represents a pattern that has been recognized
3792 in earlier analysis stages. For stmts that represent a pattern,
3793 the STMT_VINFO_RELATED_STMT field records the last stmt in
3794 the original sequence that constitutes the pattern. */
3796 orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
3797 if (orig_stmt)
3799 orig_stmt_info = vinfo_for_stmt (orig_stmt);
3800 gcc_assert (STMT_VINFO_RELATED_STMT (orig_stmt_info) == stmt);
3801 gcc_assert (STMT_VINFO_IN_PATTERN_P (orig_stmt_info));
3802 gcc_assert (!STMT_VINFO_IN_PATTERN_P (stmt_info));
3805 /* 3. Check the operands of the operation. The first operands are defined
3806 inside the loop body. The last operand is the reduction variable,
3807 which is defined by the loop-header-phi. */
3809 gcc_assert (is_gimple_assign (stmt));
3811 /* Flatten RHS.  */
3812 switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
3814 case GIMPLE_SINGLE_RHS:
3815 op_type = TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt));
3816 if (op_type == ternary_op)
3818 tree rhs = gimple_assign_rhs1 (stmt);
3819 ops[0] = TREE_OPERAND (rhs, 0);
3820 ops[1] = TREE_OPERAND (rhs, 1);
3821 ops[2] = TREE_OPERAND (rhs, 2);
3822 code = TREE_CODE (rhs);
3824 else
3825 return false;
3826 break;
3828 case GIMPLE_BINARY_RHS:
3829 code = gimple_assign_rhs_code (stmt);
3830 op_type = TREE_CODE_LENGTH (code);
3831 gcc_assert (op_type == binary_op);
3832 ops[0] = gimple_assign_rhs1 (stmt);
3833 ops[1] = gimple_assign_rhs2 (stmt);
3834 break;
3836 case GIMPLE_UNARY_RHS:
3837 return false;
3839 default:
3840 gcc_unreachable ();
3843 scalar_dest = gimple_assign_lhs (stmt);
3844 scalar_type = TREE_TYPE (scalar_dest);
3845 if (!POINTER_TYPE_P (scalar_type) && !INTEGRAL_TYPE_P (scalar_type)
3846 && !SCALAR_FLOAT_TYPE_P (scalar_type))
3847 return false;
3849 /* All uses but the last are expected to be defined in the loop.
3850 The last use is the reduction variable. In case of a nested cycle this
3851 assumption does not hold: we use reduc_index to record the index of the
3852 reduction variable. */
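/* E.g. (a hedged illustration, names hypothetical): for s_2 = _1 + s_0
   the loop below checks only _1; s_0, defined by the loop-header phi,
   is the reduction variable handled by the checks that follow.  */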
3853 for (i = 0; i < op_type - 1; i++)
3855 tree tem;
3857 /* The condition of COND_EXPR is checked in vectorizable_condition(). */
3858 if (i == 0 && code == COND_EXPR)
3859 continue;
3861 is_simple_use = vect_is_simple_use_1 (ops[i], loop_vinfo, NULL,
3862 &def_stmt, &def, &dt, &tem);
3863 if (!vectype_in)
3864 vectype_in = tem;
3865 gcc_assert (is_simple_use);
3866 if (dt != vect_internal_def
3867 && dt != vect_external_def
3868 && dt != vect_constant_def
3869 && dt != vect_induction_def
3870 && !(dt == vect_nested_cycle && nested_cycle))
3871 return false;
3873 if (dt == vect_nested_cycle)
3875 found_nested_cycle_def = true;
3876 reduc_def_stmt = def_stmt;
3877 reduc_index = i;
3881 is_simple_use = vect_is_simple_use (ops[i], loop_vinfo, NULL, &def_stmt,
3882 &def, &dt);
3883 gcc_assert (is_simple_use);
3884 gcc_assert (dt == vect_reduction_def
3885 || dt == vect_nested_cycle
3886 || ((dt == vect_internal_def || dt == vect_external_def
3887 || dt == vect_constant_def || dt == vect_induction_def)
3888 && nested_cycle && found_nested_cycle_def));
3889 if (!found_nested_cycle_def)
3890 reduc_def_stmt = def_stmt;
3892 gcc_assert (gimple_code (reduc_def_stmt) == GIMPLE_PHI);
3893 if (orig_stmt)
3894 gcc_assert (orig_stmt == vect_is_simple_reduction (loop_vinfo,
3895 reduc_def_stmt,
3896 !nested_cycle,
3897 &dummy));
3898 else
3899 gcc_assert (stmt == vect_is_simple_reduction (loop_vinfo, reduc_def_stmt,
3900 !nested_cycle, &dummy));
3902 if (STMT_VINFO_LIVE_P (vinfo_for_stmt (reduc_def_stmt)))
3903 return false;
3905 if (slp_node)
3906 ncopies = 1;
3907 else
3908 ncopies = (LOOP_VINFO_VECT_FACTOR (loop_vinfo)
3909 / TYPE_VECTOR_SUBPARTS (vectype_in));
3911 gcc_assert (ncopies >= 1);
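/* E.g. (a hedged illustration): with a vectorization factor of 8 and
   vectype_in == V8HI, ncopies == 1; with vectype_in == V4SI it would be
   2, i.e. the reduction stmt is "unrolled" twice.  */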
3913 vec_mode = TYPE_MODE (vectype_in);
3915 if (code == COND_EXPR)
3917 if (!vectorizable_condition (stmt, gsi, NULL, ops[reduc_index], 0))
3919 if (vect_print_dump_info (REPORT_DETAILS))
3920 fprintf (vect_dump, "unsupported condition in reduction");
3922 return false;
3925 else
3927 /* 4. Supportable by target? */
3929 /* 4.1. Check support for the operation in the loop.  */
3930 optab = optab_for_tree_code (code, vectype_in, optab_default);
3931 if (!optab)
3933 if (vect_print_dump_info (REPORT_DETAILS))
3934 fprintf (vect_dump, "no optab.");
3936 return false;
3939 if (optab_handler (optab, vec_mode)->insn_code == CODE_FOR_nothing)
3941 if (vect_print_dump_info (REPORT_DETAILS))
3942 fprintf (vect_dump, "op not supported by target.");
3944 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
3945 || LOOP_VINFO_VECT_FACTOR (loop_vinfo)
3946 < vect_min_worthwhile_factor (code))
3947 return false;
3949 if (vect_print_dump_info (REPORT_DETAILS))
3950 fprintf (vect_dump, "proceeding using word mode.");
3953 /* Worthwhile without SIMD support? */
3954 if (!VECTOR_MODE_P (TYPE_MODE (vectype_in))
3955 && LOOP_VINFO_VECT_FACTOR (loop_vinfo)
3956 < vect_min_worthwhile_factor (code))
3958 if (vect_print_dump_info (REPORT_DETAILS))
3959 fprintf (vect_dump, "not worthwhile without SIMD support.");
3961 return false;
3965 /* 4.2. Check support for the epilog operation.
3967 If STMT represents a reduction pattern, then the type of the
3968 reduction variable may be different from the type of the rest
3969 of the arguments. For example, consider the case of accumulation
3970 of shorts into an int accumulator. The original code:
3971 S1: int_a = (int) short_a;
3972 orig_stmt-> S2: int_acc = plus <int_a ,int_acc>;
3974 was replaced with:
3975 STMT: int_acc = widen_sum <short_a, int_acc>
3977 This means that:
3978 1. The tree-code that is used to create the vector operation in the
3979 epilog code (that reduces the partial results) is not the
3980 tree-code of STMT, but is rather the tree-code of the original
3981 stmt from the pattern that STMT is replacing. I.e., in the example
3982 above we want to use 'widen_sum' in the loop, but 'plus' in the
3983 epilog.
3984 2. The type (mode) we use to check available target support
3985 for the vector operation to be created in the *epilog*, is
3986 determined by the type of the reduction variable (in the example
3987 above we'd check this: plus_optab[vect_int_mode]).
3988 However the type (mode) we use to check available target support
3989 for the vector operation to be created *inside the loop*, is
3990 determined by the type of the other arguments to STMT (in the
3991 example we'd check this: widen_sum_optab[vect_short_mode]).
3993 This is contrary to "regular" reductions, in which the types of all
3994 the arguments are the same as the type of the reduction variable.
3995 For "regular" reductions we can therefore use the same vector type
3996 (and also the same tree-code) when generating the epilog code and
3997 when generating the code inside the loop. */
3999 if (orig_stmt)
4001 /* This is a reduction pattern: get the vectype from the type of the
4002 reduction variable, and get the tree-code from orig_stmt. */
4003 orig_code = gimple_assign_rhs_code (orig_stmt);
4004 gcc_assert (vectype_out);
4005 vec_mode = TYPE_MODE (vectype_out);
4007 else
4009 /* Regular reduction: the same vectype and tree-code that are used for
4010 the vector code inside the loop can also be used for the epilog code. */
4011 orig_code = code;
4014 if (nested_cycle)
4016 def_bb = gimple_bb (reduc_def_stmt);
4017 def_stmt_loop = def_bb->loop_father;
4018 def_arg = PHI_ARG_DEF_FROM_EDGE (reduc_def_stmt,
4019 loop_preheader_edge (def_stmt_loop));
4020 if (TREE_CODE (def_arg) == SSA_NAME
4021 && (def_arg_stmt = SSA_NAME_DEF_STMT (def_arg))
4022 && gimple_code (def_arg_stmt) == GIMPLE_PHI
4023 && flow_bb_inside_loop_p (outer_loop, gimple_bb (def_arg_stmt))
4024 && vinfo_for_stmt (def_arg_stmt)
4025 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_arg_stmt))
4026 == vect_double_reduction_def)
4027 double_reduc = true;
4030 epilog_reduc_code = ERROR_MARK;
4031 if (reduction_code_for_scalar_code (orig_code, &epilog_reduc_code))
4033 reduc_optab = optab_for_tree_code (epilog_reduc_code, vectype_out,
4034 optab_default);
4035 if (!reduc_optab)
4037 if (vect_print_dump_info (REPORT_DETAILS))
4038 fprintf (vect_dump, "no optab for reduction.");
4040 epilog_reduc_code = ERROR_MARK;
4043 if (reduc_optab
4044 && optab_handler (reduc_optab, vec_mode)->insn_code
4045 == CODE_FOR_nothing)
4047 if (vect_print_dump_info (REPORT_DETAILS))
4048 fprintf (vect_dump, "reduc op not supported by target.");
4050 epilog_reduc_code = ERROR_MARK;
4053 else
4055 if (!nested_cycle || double_reduc)
4057 if (vect_print_dump_info (REPORT_DETAILS))
4058 fprintf (vect_dump, "no reduc code for scalar code.");
4060 return false;
4064 if (double_reduc && ncopies > 1)
4066 if (vect_print_dump_info (REPORT_DETAILS))
4067 fprintf (vect_dump, "multiple types in double reduction");
4069 return false;
4072 if (!vec_stmt) /* transformation not required. */
4074 STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
4075 if (!vect_model_reduction_cost (stmt_info, epilog_reduc_code, ncopies))
4076 return false;
4077 return true;
4080 /** Transform. **/
4082 if (vect_print_dump_info (REPORT_DETAILS))
4083 fprintf (vect_dump, "transform reduction.");
4085 /* FORNOW: Multiple types are not supported for condition. */
4086 if (code == COND_EXPR)
4087 gcc_assert (ncopies == 1);
4089 /* Create the destination vector */
4090 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
4092 /* In case the vectorization factor (VF) is bigger than the number
4093 of elements that we can fit in a vectype (nunits), we have to generate
4094 more than one vector stmt - i.e - we need to "unroll" the
4095 vector stmt by a factor VF/nunits. For more details see documentation
4096 in vectorizable_operation. */
4098 /* If the reduction is used in an outer loop we need to generate
4099 VF intermediate results, like so (e.g. for ncopies=2):
4100 r0 = phi (init, r0)
4101 r1 = phi (init, r1)
4102 r0 = x0 + r0;
4103 r1 = x1 + r1;
4104 (i.e. we generate VF results in 2 registers).
4105 In this case we have a separate def-use cycle for each copy, and therefore
4106 for each copy we get the vector def for the reduction variable from the
4107 respective phi node created for this copy.
4109 Otherwise (the reduction is unused in the loop nest), we can combine
4110 together intermediate results, like so (e.g. for ncopies=2):
4111 r = phi (init, r)
4112 r = x0 + r;
4113 r = x1 + r;
4114 (i.e. we generate VF/2 results in a single register).
4115 In this case for each copy we get the vector def for the reduction variable
4116 from the vectorized reduction operation generated in the previous iteration.
4119 if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_scope)
4121 single_defuse_cycle = true;
4122 epilog_copies = 1;
4124 else
4125 epilog_copies = ncopies;
4127 prev_stmt_info = NULL;
4128 prev_phi_info = NULL;
4129 if (slp_node)
4131 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
4132 gcc_assert (TYPE_VECTOR_SUBPARTS (vectype_out)
4133 == TYPE_VECTOR_SUBPARTS (vectype_in));
4135 else
4137 vec_num = 1;
4138 vec_oprnds0 = VEC_alloc (tree, heap, 1);
4139 if (op_type == ternary_op)
4140 vec_oprnds1 = VEC_alloc (tree, heap, 1);
4143 phis = VEC_alloc (gimple, heap, vec_num);
4144 vect_defs = VEC_alloc (tree, heap, vec_num);
4145 if (!slp_node)
4146 VEC_quick_push (tree, vect_defs, NULL_TREE);
4148 for (j = 0; j < ncopies; j++)
4150 if (j == 0 || !single_defuse_cycle)
4152 for (i = 0; i < vec_num; i++)
4154 /* Create the reduction-phi that defines the reduction
4155 operand. */
4156 new_phi = create_phi_node (vec_dest, loop->header);
4157 set_vinfo_for_stmt (new_phi,
4158 new_stmt_vec_info (new_phi, loop_vinfo,
4159 NULL));
4160 if (j == 0 || slp_node)
4161 VEC_quick_push (gimple, phis, new_phi);
4165 if (code == COND_EXPR)
4167 gcc_assert (!slp_node);
4168 vectorizable_condition (stmt, gsi, vec_stmt,
4169 PHI_RESULT (VEC_index (gimple, phis, 0)),
4170 reduc_index);
4171 /* Multiple types are not supported for condition. */
4172 break;
4175 /* Handle uses. */
4176 if (j == 0)
4178 if (slp_node)
4179 vect_get_slp_defs (slp_node, &vec_oprnds0, &vec_oprnds1, -1);
4180 else
4182 loop_vec_def0 = vect_get_vec_def_for_operand (ops[!reduc_index],
4183 stmt, NULL);
4184 VEC_quick_push (tree, vec_oprnds0, loop_vec_def0);
4185 if (op_type == ternary_op)
4187 if (reduc_index == 0)
4188 loop_vec_def1 = vect_get_vec_def_for_operand (ops[2], stmt,
4189 NULL);
4190 else
4191 loop_vec_def1 = vect_get_vec_def_for_operand (ops[1], stmt,
4192 NULL);
4194 VEC_quick_push (tree, vec_oprnds1, loop_vec_def1);
4198 else
4200 if (!slp_node)
4202 enum vect_def_type dt = vect_unknown_def_type; /* Dummy */
4203 loop_vec_def0 = vect_get_vec_def_for_stmt_copy (dt, loop_vec_def0);
4204 VEC_replace (tree, vec_oprnds0, 0, loop_vec_def0);
4205 if (op_type == ternary_op)
4207 loop_vec_def1 = vect_get_vec_def_for_stmt_copy (dt,
4208 loop_vec_def1);
4209 VEC_replace (tree, vec_oprnds1, 0, loop_vec_def1);
4213 if (single_defuse_cycle)
4214 reduc_def = gimple_assign_lhs (new_stmt);
4216 STMT_VINFO_RELATED_STMT (prev_phi_info) = new_phi;
4219 for (i = 0; VEC_iterate (tree, vec_oprnds0, i, def0); i++)
4221 if (slp_node)
4222 reduc_def = PHI_RESULT (VEC_index (gimple, phis, i));
4223 else
4225 if (!single_defuse_cycle || j == 0)
4226 reduc_def = PHI_RESULT (new_phi);
4229 def1 = ((op_type == ternary_op)
4230 ? VEC_index (tree, vec_oprnds1, i) : NULL);
4231 if (op_type == binary_op)
4233 if (reduc_index == 0)
4234 expr = build2 (code, vectype_out, reduc_def, def0);
4235 else
4236 expr = build2 (code, vectype_out, def0, reduc_def);
4238 else
4240 if (reduc_index == 0)
4241 expr = build3 (code, vectype_out, reduc_def, def0, def1);
4242 else
4244 if (reduc_index == 1)
4245 expr = build3 (code, vectype_out, def0, reduc_def, def1);
4246 else
4247 expr = build3 (code, vectype_out, def0, def1, reduc_def);
4251 new_stmt = gimple_build_assign (vec_dest, expr);
4252 new_temp = make_ssa_name (vec_dest, new_stmt);
4253 gimple_assign_set_lhs (new_stmt, new_temp);
4254 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4255 if (slp_node)
4257 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
4258 VEC_quick_push (tree, vect_defs, new_temp);
4260 else
4261 VEC_replace (tree, vect_defs, 0, new_temp);
4264 if (slp_node)
4265 continue;
4267 if (j == 0)
4268 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4269 else
4270 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4272 prev_stmt_info = vinfo_for_stmt (new_stmt);
4273 prev_phi_info = vinfo_for_stmt (new_phi);
4276 /* Finalize the reduction-phi (set its arguments) and create the
4277 epilog reduction code. */
4278 if ((!single_defuse_cycle || code == COND_EXPR) && !slp_node)
4280 new_temp = gimple_assign_lhs (*vec_stmt);
4281 VEC_replace (tree, vect_defs, 0, new_temp);
4284 vect_create_epilog_for_reduction (vect_defs, stmt, epilog_copies,
4285 epilog_reduc_code, phis, reduc_index,
4286 double_reduc, slp_node);
4288 VEC_free (gimple, heap, phis);
4289 VEC_free (tree, heap, vec_oprnds0);
4290 if (vec_oprnds1)
4291 VEC_free (tree, heap, vec_oprnds1);
4293 return true;
4296 /* Function vect_min_worthwhile_factor.
4298 For a loop where we could vectorize the operation indicated by CODE,
4299 return the minimum vectorization factor that makes it worthwhile
4300 to use generic vectors. */
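/* A hedged usage sketch: vectorizable_reduction above falls back to
   word-mode vectors only when
     LOOP_VINFO_VECT_FACTOR (loop_vinfo) >= vect_min_worthwhile_factor (code)
   holds, so e.g. a PLUS_EXPR loop needs a vectorization factor of at
   least 4 to be considered worthwhile without real SIMD support.  */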
4301 int
4302 vect_min_worthwhile_factor (enum tree_code code)
4304 switch (code)
4306 case PLUS_EXPR:
4307 case MINUS_EXPR:
4308 case NEGATE_EXPR:
4309 return 4;
4311 case BIT_AND_EXPR:
4312 case BIT_IOR_EXPR:
4313 case BIT_XOR_EXPR:
4314 case BIT_NOT_EXPR:
4315 return 2;
4317 default:
4318 return INT_MAX;
4323 /* Function vectorizable_induction
4325 Check if PHI performs an induction computation that can be vectorized.
4326 If VEC_STMT is also passed, vectorize the induction PHI: create a vectorized
4327 phi to replace it, put it in VEC_STMT, and add it to the same basic block.
4328 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
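/* A hedged sketch of the kind of induction this handles (illustrative
   only):

     for (i = 0; i < N; i++)
       a[i] = i;

   For V4SI the initial vector def is {0, 1, 2, 3} and each further copy
   adds the step vector {4, 4, 4, 4}; the details live in
   get_initial_def_for_induction.  */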
4330 bool
4331 vectorizable_induction (gimple phi, gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
4332 gimple *vec_stmt)
4334 stmt_vec_info stmt_info = vinfo_for_stmt (phi);
4335 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4336 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4337 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
4338 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
4339 int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
4340 tree vec_def;
4342 gcc_assert (ncopies >= 1);
4343 /* FORNOW. This restriction should be relaxed. */
4344 if (nested_in_vect_loop_p (loop, phi) && ncopies > 1)
4346 if (vect_print_dump_info (REPORT_DETAILS))
4347 fprintf (vect_dump, "multiple types in nested loop.");
4348 return false;
4351 if (!STMT_VINFO_RELEVANT_P (stmt_info))
4352 return false;
4354 /* FORNOW: SLP not supported. */
4355 if (STMT_SLP_TYPE (stmt_info))
4356 return false;
4358 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def);
4360 if (gimple_code (phi) != GIMPLE_PHI)
4361 return false;
4363 if (!vec_stmt) /* transformation not required. */
4365 STMT_VINFO_TYPE (stmt_info) = induc_vec_info_type;
4366 if (vect_print_dump_info (REPORT_DETAILS))
4367 fprintf (vect_dump, "=== vectorizable_induction ===");
4368 vect_model_induction_cost (stmt_info, ncopies);
4369 return true;
4372 /** Transform. **/
4374 if (vect_print_dump_info (REPORT_DETAILS))
4375 fprintf (vect_dump, "transform induction phi.");
4377 vec_def = get_initial_def_for_induction (phi);
4378 *vec_stmt = SSA_NAME_DEF_STMT (vec_def);
4379 return true;
4382 /* Function vectorizable_live_operation.
4384 STMT computes a value that is used outside the loop. Check if
4385 it can be supported. */
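/* A hedged example of a "live" stmt with only invariant uses (the single
   case currently supported, per the FORNOW below):

     for (i = 0; i < N; i++)
       {
         a[i] = b[i] + c[i];   <-- vectorized
         t = x + y;            <-- x, y loop-invariant; t used after loop
       }
     use (t);

   The scalar stmt computing t can remain in place; the last value it
   computes is what the use after the loop observes.  */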
4387 bool
4388 vectorizable_live_operation (gimple stmt,
4389 gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
4390 gimple *vec_stmt ATTRIBUTE_UNUSED)
4392 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4393 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4394 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
4395 int i;
4396 int op_type;
4397 tree op;
4398 tree def;
4399 gimple def_stmt;
4400 enum vect_def_type dt;
4401 enum tree_code code;
4402 enum gimple_rhs_class rhs_class;
4404 gcc_assert (STMT_VINFO_LIVE_P (stmt_info));
4406 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def)
4407 return false;
4409 if (!is_gimple_assign (stmt))
4410 return false;
4412 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
4413 return false;
4415 /* FORNOW. CHECKME. */
4416 if (nested_in_vect_loop_p (loop, stmt))
4417 return false;
4419 code = gimple_assign_rhs_code (stmt);
4420 op_type = TREE_CODE_LENGTH (code);
4421 rhs_class = get_gimple_rhs_class (code);
4422 gcc_assert (rhs_class != GIMPLE_UNARY_RHS || op_type == unary_op);
4423 gcc_assert (rhs_class != GIMPLE_BINARY_RHS || op_type == binary_op);
4425 /* FORNOW: support only if all uses are invariant. This means
4426 that the scalar operations can remain in place, unvectorized.
4427 The original last scalar value that they compute will be used. */
4429 for (i = 0; i < op_type; i++)
4431 if (rhs_class == GIMPLE_SINGLE_RHS)
4432 op = TREE_OPERAND (gimple_op (stmt, 1), i);
4433 else
4434 op = gimple_op (stmt, i + 1);
4435 if (op
4436 && !vect_is_simple_use (op, loop_vinfo, NULL, &def_stmt, &def, &dt))
4438 if (vect_print_dump_info (REPORT_DETAILS))
4439 fprintf (vect_dump, "use not simple.");
4440 return false;
4443 if (dt != vect_external_def && dt != vect_constant_def)
4444 return false;
4447 /* No transformation is required for the cases we currently support. */
4448 return true;
4451 /* Kill any debug uses outside LOOP of SSA names defined in STMT. */
4453 static void
4454 vect_loop_kill_debug_uses (struct loop *loop, gimple stmt)
4456 ssa_op_iter op_iter;
4457 imm_use_iterator imm_iter;
4458 def_operand_p def_p;
4459 gimple ustmt;
4461 FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
4463 FOR_EACH_IMM_USE_STMT (ustmt, imm_iter, DEF_FROM_PTR (def_p))
4465 basic_block bb;
4467 if (!is_gimple_debug (ustmt))
4468 continue;
4470 bb = gimple_bb (ustmt);
4472 if (!flow_bb_inside_loop_p (loop, bb))
4474 if (gimple_debug_bind_p (ustmt))
4476 if (vect_print_dump_info (REPORT_DETAILS))
4477 fprintf (vect_dump, "killing debug use");
4479 gimple_debug_bind_reset_value (ustmt);
4480 update_stmt (ustmt);
4482 else
4483 gcc_unreachable ();
4489 /* Function vect_transform_loop.
4491 The analysis phase has determined that the loop is vectorizable.
4492 Vectorize the loop - create vectorized stmts to replace the scalar
4493 stmts in the loop, and update the loop exit condition. */
4495 void
4496 vect_transform_loop (loop_vec_info loop_vinfo)
4498 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
4499 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
4500 int nbbs = loop->num_nodes;
4501 gimple_stmt_iterator si;
4502 int i;
4503 tree ratio = NULL;
4504 int vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
4505 bool strided_store;
4506 bool slp_scheduled = false;
4507 unsigned int nunits;
4508 tree cond_expr = NULL_TREE;
4509 gimple_seq cond_expr_stmt_list = NULL;
4510 bool do_peeling_for_loop_bound;
4512 if (vect_print_dump_info (REPORT_DETAILS))
4513 fprintf (vect_dump, "=== vect_transform_loop ===");
4515 /* Peel the loop if there are data refs with unknown alignment.
4516 Only one data ref with unknown alignment is allowed. */
4518 if (LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo))
4519 vect_do_peeling_for_alignment (loop_vinfo);
4521 do_peeling_for_loop_bound
4522 = (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
4523 || (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
4524 && LOOP_VINFO_INT_NITERS (loop_vinfo) % vectorization_factor != 0));
4526 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
4527 || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
4528 vect_loop_versioning (loop_vinfo,
4529 !do_peeling_for_loop_bound,
4530 &cond_expr, &cond_expr_stmt_list);
4532 /* If the loop has a symbolic number of iterations 'n' (i.e. it's not a
4533 compile-time constant), or it is a constant that is not divisible by the
4534 vectorization factor, then an epilog loop needs to be created.
4535 We therefore duplicate the loop: the original loop will be vectorized,
4536 and will compute the first (n/VF) iterations. The second copy of the loop
4537 will remain scalar and will compute the remaining (n%VF) iterations.
4538 (VF is the vectorization factor). */
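/* For instance (a hedged numeric illustration): with n == 1003 and
   VF == 4, the vectorized loop executes 250 iterations (covering 1000
   elements) and the scalar epilog loop computes the remaining 3.  */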
4540 if (do_peeling_for_loop_bound)
4541 vect_do_peeling_for_loop_bound (loop_vinfo, &ratio,
4542 cond_expr, cond_expr_stmt_list);
4543 else
4544 ratio = build_int_cst (TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo)),
4545 LOOP_VINFO_INT_NITERS (loop_vinfo) / vectorization_factor);
4547 /* 1) Make sure the loop header has exactly two entries
4548 2) Make sure we have a preheader basic block. */
4550 gcc_assert (EDGE_COUNT (loop->header->preds) == 2);
4552 split_edge (loop_preheader_edge (loop));
4554 /* FORNOW: the vectorizer supports only loops whose body consists
4555 of one basic block (header + empty latch). When the vectorizer
4556 supports more involved loop forms, the order in which the BBs are
4557 traversed needs to be reconsidered. */
4559 for (i = 0; i < nbbs; i++)
4561 basic_block bb = bbs[i];
4562 stmt_vec_info stmt_info;
4563 gimple phi;
4565 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
4567 phi = gsi_stmt (si);
4568 if (vect_print_dump_info (REPORT_DETAILS))
4570 fprintf (vect_dump, "------>vectorizing phi: ");
4571 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
4573 stmt_info = vinfo_for_stmt (phi);
4574 if (!stmt_info)
4575 continue;
4577 if (MAY_HAVE_DEBUG_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
4578 vect_loop_kill_debug_uses (loop, phi);
4580 if (!STMT_VINFO_RELEVANT_P (stmt_info)
4581 && !STMT_VINFO_LIVE_P (stmt_info))
4582 continue;
4584 if ((TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info))
4585 != (unsigned HOST_WIDE_INT) vectorization_factor)
4586 && vect_print_dump_info (REPORT_DETAILS))
4587 fprintf (vect_dump, "multiple-types.");
4589 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def)
4591 if (vect_print_dump_info (REPORT_DETAILS))
4592 fprintf (vect_dump, "transform phi.");
4593 vect_transform_stmt (phi, NULL, NULL, NULL, NULL);
4597 for (si = gsi_start_bb (bb); !gsi_end_p (si);)
4599 gimple stmt = gsi_stmt (si);
4600 bool is_store;
4602 if (vect_print_dump_info (REPORT_DETAILS))
4604 fprintf (vect_dump, "------>vectorizing statement: ");
4605 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4608 stmt_info = vinfo_for_stmt (stmt);
4610 /* vector stmts created in the outer-loop during vectorization of
4611 stmts in an inner-loop may not have a stmt_info, and do not
4612 need to be vectorized. */
4613 if (!stmt_info)
4615 gsi_next (&si);
4616 continue;
4619 if (MAY_HAVE_DEBUG_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
4620 vect_loop_kill_debug_uses (loop, stmt);
4622 if (!STMT_VINFO_RELEVANT_P (stmt_info)
4623 && !STMT_VINFO_LIVE_P (stmt_info))
4625 gsi_next (&si);
4626 continue;
4629 gcc_assert (STMT_VINFO_VECTYPE (stmt_info));
4630 nunits =
4631 (unsigned int) TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
4632 if (!STMT_SLP_TYPE (stmt_info)
4633 && nunits != (unsigned int) vectorization_factor
4634 && vect_print_dump_info (REPORT_DETAILS))
4635 /* For SLP, VF is set according to the unrolling factor, and not to
4636 the vector size, hence for SLP this print is not valid. */
4637 fprintf (vect_dump, "multiple-types.");
4639 /* SLP. Schedule all the SLP instances when the first SLP stmt is
4640 reached. */
4641 if (STMT_SLP_TYPE (stmt_info))
4643 if (!slp_scheduled)
4645 slp_scheduled = true;
4647 if (vect_print_dump_info (REPORT_DETAILS))
4648 fprintf (vect_dump, "=== scheduling SLP instances ===");
4650 vect_schedule_slp (loop_vinfo, NULL);
4653 /* Hybrid SLP stmts must be vectorized in addition to SLP. */
4654 if (!vinfo_for_stmt (stmt) || PURE_SLP_STMT (stmt_info))
4656 gsi_next (&si);
4657 continue;
4661 /* -------- vectorize statement ------------ */
4662 if (vect_print_dump_info (REPORT_DETAILS))
4663 fprintf (vect_dump, "transform statement.");
4665 strided_store = false;
4666 is_store = vect_transform_stmt (stmt, &si, &strided_store, NULL, NULL);
4667 if (is_store)
4669 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
4671 /* Interleaving. If IS_STORE is TRUE, the vectorization of the
4672 interleaving chain was completed - free all the stores in
4673 the chain. */
4674 vect_remove_stores (DR_GROUP_FIRST_DR (stmt_info));
4675 gsi_remove (&si, true);
4676 continue;
4678 else
4680 /* Free the attached stmt_vec_info and remove the stmt. */
4681 free_stmt_vec_info (stmt);
4682 gsi_remove (&si, true);
4683 continue;
4686 gsi_next (&si);
4687 } /* stmts in BB */
4688 } /* BBs in loop */
4690 slpeel_make_loop_iterate_ntimes (loop, ratio);
4692 /* The memory tags and pointers in vectorized statements need to
4693 have their SSA forms updated. FIXME, why can't this be delayed
4694 until all the loops have been transformed? */
4695 update_ssa (TODO_update_ssa);
4697 if (vect_print_dump_info (REPORT_VECTORIZED_LOCATIONS))
4698 fprintf (vect_dump, "LOOP VECTORIZED.");
4699 if (loop->inner && vect_print_dump_info (REPORT_VECTORIZED_LOCATIONS))
4700 fprintf (vect_dump, "OUTER LOOP VECTORIZED.");