gcc/tree-vect-loop.c
/* Loop Vectorization
   Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software
   Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com> and
   Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "ggc.h"
#include "tree.h"
#include "basic-block.h"
#include "diagnostic.h"
#include "tree-flow.h"
#include "tree-dump.h"
#include "cfgloop.h"
#include "cfglayout.h"
#include "expr.h"
#include "recog.h"
#include "optabs.h"
#include "params.h"
#include "toplev.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
#include "tree-vectorizer.h"
/* Loop Vectorization Pass.

   This pass tries to vectorize loops.

   For example, the vectorizer transforms the following simple loop:

        short a[N]; short b[N]; short c[N]; int i;

        for (i=0; i<N; i++){
          a[i] = b[i] + c[i];
        }

   as if it were manually vectorized by rewriting the source code into:

        typedef int __attribute__((mode(V8HI))) v8hi;
        short a[N]; short b[N]; short c[N]; int i;
        v8hi *pa = (v8hi*)a, *pb = (v8hi*)b, *pc = (v8hi*)c;
        v8hi va, vb, vc;

        for (i=0; i<N/8; i++){
          vb = pb[i];
          vc = pc[i];
          va = vb + vc;
          pa[i] = va;
        }

   The main entry to this pass is vectorize_loops(), in which
   the vectorizer applies a set of analyses on a given set of loops,
   followed by the actual vectorization transformation for the loops that
   had successfully passed the analysis phase.
   Throughout this pass we make a distinction between two types of
   data: scalars (which are represented by SSA_NAMEs), and memory references
   ("data-refs").  These two types of data require different handling both
   during analysis and transformation.  The types of data-refs that the
   vectorizer currently supports are ARRAY_REFs whose base is an array DECL
   (not a pointer), and INDIRECT_REFs through pointers; both array and pointer
   accesses are required to have a simple (consecutive) access pattern.

   Analysis phase:
   ===============
   The driver for the analysis phase is vect_analyze_loop().
   It applies a set of analyses, some of which rely on the scalar evolution
   analyzer (scev) developed by Sebastian Pop.

   During the analysis phase the vectorizer records some information
   per stmt in a "stmt_vec_info" struct which is attached to each stmt in the
   loop, as well as general information about the loop as a whole, which is
   recorded in a "loop_vec_info" struct attached to each loop.

   Transformation phase:
   =====================
   The loop transformation phase scans all the stmts in the loop, and
   creates a vector stmt (or a sequence of stmts) for each scalar stmt S in
   the loop that needs to be vectorized.  It inserts the vector code sequence
   just before the scalar stmt S, and records a pointer to the vector code
   in STMT_VINFO_VEC_STMT (stmt_info) (stmt_info is the stmt_vec_info struct
   attached to S).  This pointer will be used for the vectorization of following
   stmts which use the def of stmt S.  Stmt S is removed if it writes to memory;
   otherwise, we rely on dead code elimination for removing it.

   For example, say stmt S1 was vectorized into stmt VS1:

   VS1: vb = px[i];
   S1:  b = x[i];    STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
   S2:  a = b;

   To vectorize stmt S2, the vectorizer first finds the stmt that defines
   the operand 'b' (S1), and gets the relevant vector def 'vb' from the
   vector stmt VS1 pointed to by STMT_VINFO_VEC_STMT (stmt_info (S1)).  The
   resulting sequence would be:

   VS1: vb = px[i];
   S1:  b = x[i];    STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
   VS2: va = vb;
   S2:  a = b;       STMT_VINFO_VEC_STMT (stmt_info (S2)) = VS2

   Operands that are not SSA_NAMEs are data-refs that appear in
   load/store operations (like 'x[i]' in S1), and are handled differently.

   Target modeling:
   =================
   Currently the only target specific information that is used is the
   size of the vector (in bytes) - "UNITS_PER_SIMD_WORD".  Targets that can
   support different sizes of vectors will, for now, need to specify one value
   for "UNITS_PER_SIMD_WORD".  More flexibility will be added in the future.

   Since we only vectorize operations whose vector form can be
   expressed using existing tree codes, to verify that an operation is
   supported, the vectorizer checks the relevant optab at the relevant
   machine_mode (e.g., optab_handler (add_optab, V8HImode)->insn_code).  If
   the value found is CODE_FOR_nothing, then there's no target support, and
   we can't vectorize the stmt.

   For additional information on this project see:
   http://gcc.gnu.org/projects/tree-ssa/vectorization.html
*/
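/* For illustration only: a minimal sketch (not part of this file) of the
   optab-based support check described above, using a hypothetical helper
   name.  The real checks are spread across the vectorizable_* routines.

     static bool
     example_target_supports_vector_add_p (tree vectype)
     {
       optab op = optab_for_tree_code (PLUS_EXPR, vectype, optab_default);
       enum machine_mode vec_mode = TYPE_MODE (vectype);

       return (op
               && optab_handler (op, vec_mode)->insn_code != CODE_FOR_nothing);
     }

   If the insn_code is CODE_FOR_nothing, the stmt cannot be vectorized.  */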
/* Function vect_determine_vectorization_factor

   Determine the vectorization factor (VF).  VF is the number of data elements
   that are operated upon in parallel in a single iteration of the vectorized
   loop.  For example, when vectorizing a loop that operates on 4-byte elements,
   on a target with vector size (VS) of 16 bytes, the VF is set to 4, since 4
   elements can fit in a single vector register.

   We currently support vectorization of loops in which all types operated upon
   are of the same size.  Therefore this function currently sets VF according to
   the size of the types operated upon, and fails if there are multiple sizes
   in the loop.

   VF is also the factor by which the loop iterations are strip-mined, e.g.:
   original loop:
        for (i=0; i<N; i++){
          a[i] = b[i] + c[i];
        }

   vectorized loop:
        for (i=0; i<N; i+=VF){
          a[i:VF] = b[i:VF] + c[i:VF];
        }
*/

static bool
vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  unsigned int vectorization_factor = 0;
  tree scalar_type;
  gimple phi;
  tree vectype;
  unsigned int nunits;
  stmt_vec_info stmt_info;
  int i;
  HOST_WIDE_INT dummy;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_determine_vectorization_factor ===");

  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];

      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        {
          phi = gsi_stmt (si);
          stmt_info = vinfo_for_stmt (phi);
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "==> examining phi: ");
              print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
            }

          gcc_assert (stmt_info);

          if (STMT_VINFO_RELEVANT_P (stmt_info))
            {
              gcc_assert (!STMT_VINFO_VECTYPE (stmt_info));
              scalar_type = TREE_TYPE (PHI_RESULT (phi));

              if (vect_print_dump_info (REPORT_DETAILS))
                {
                  fprintf (vect_dump, "get vectype for scalar type: ");
                  print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
                }

              vectype = get_vectype_for_scalar_type (scalar_type);
              if (!vectype)
                {
                  if (vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS))
                    {
                      fprintf (vect_dump,
                               "not vectorized: unsupported data-type ");
                      print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
                    }
                  return false;
                }
              STMT_VINFO_VECTYPE (stmt_info) = vectype;

              if (vect_print_dump_info (REPORT_DETAILS))
                {
                  fprintf (vect_dump, "vectype: ");
                  print_generic_expr (vect_dump, vectype, TDF_SLIM);
                }

              nunits = TYPE_VECTOR_SUBPARTS (vectype);
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "nunits = %d", nunits);

              if (!vectorization_factor
                  || (nunits > vectorization_factor))
                vectorization_factor = nunits;
            }
        }

      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
        {
          gimple stmt = gsi_stmt (si);
          stmt_info = vinfo_for_stmt (stmt);

          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "==> examining statement: ");
              print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
            }

          gcc_assert (stmt_info);

          /* Skip stmts which do not need to be vectorized.  */
          if (!STMT_VINFO_RELEVANT_P (stmt_info)
              && !STMT_VINFO_LIVE_P (stmt_info))
            {
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "skip.");
              continue;
            }

          if (gimple_get_lhs (stmt) == NULL_TREE)
            {
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS))
                {
                  fprintf (vect_dump, "not vectorized: irregular stmt.");
                  print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
                }
              return false;
            }

          if (VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))))
            {
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS))
                {
                  fprintf (vect_dump, "not vectorized: vector stmt in loop:");
                  print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
                }
              return false;
            }

          if (STMT_VINFO_VECTYPE (stmt_info))
            {
              /* The only case when a vectype had been already set is for stmts
                 that contain a dataref, or for "pattern-stmts" (stmts generated
                 by the vectorizer to represent/replace a certain idiom).  */
              gcc_assert (STMT_VINFO_DATA_REF (stmt_info)
                          || is_pattern_stmt_p (stmt_info));
              vectype = STMT_VINFO_VECTYPE (stmt_info);
            }
          else
            {
              gcc_assert (! STMT_VINFO_DATA_REF (stmt_info)
                          && !is_pattern_stmt_p (stmt_info));
              scalar_type = vect_get_smallest_scalar_type (stmt, &dummy,
                                                           &dummy);
              if (vect_print_dump_info (REPORT_DETAILS))
                {
                  fprintf (vect_dump, "get vectype for scalar type: ");
                  print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
                }

              vectype = get_vectype_for_scalar_type (scalar_type);
              if (!vectype)
                {
                  if (vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS))
                    {
                      fprintf (vect_dump,
                               "not vectorized: unsupported data-type ");
                      print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
                    }
                  return false;
                }
              STMT_VINFO_VECTYPE (stmt_info) = vectype;
            }

          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "vectype: ");
              print_generic_expr (vect_dump, vectype, TDF_SLIM);
            }

          nunits = TYPE_VECTOR_SUBPARTS (vectype);
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "nunits = %d", nunits);

          if (!vectorization_factor
              || (nunits > vectorization_factor))
            vectorization_factor = nunits;
        }
    }

  /* TODO: Analyze cost.  Decide if worth while to vectorize.  */
  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "vectorization factor = %d", vectorization_factor);
  if (vectorization_factor <= 1)
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS))
        fprintf (vect_dump, "not vectorized: unsupported data-type");
      return false;
    }
  LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;

  return true;
}
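/* Illustration (not computed here, just an example): for a loop whose only
   computation is on 'short' values, on a target with 16-byte vectors,
   get_vectype_for_scalar_type returns a V8HI-like vectype,
   TYPE_VECTOR_SUBPARTS of that vectype is 8, and so the loop is
   strip-mined by VF = 8.  */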
/* Function vect_is_simple_iv_evolution.

   FORNOW: A simple evolution of an induction variable in the loop is
   considered a polynomial evolution with constant step.  */

static bool
vect_is_simple_iv_evolution (unsigned loop_nb, tree access_fn, tree * init,
                             tree * step)
{
  tree init_expr;
  tree step_expr;
  tree evolution_part = evolution_part_in_loop_num (access_fn, loop_nb);

  /* When there is no evolution in this loop, the evolution function
     is not "simple".  */
  if (evolution_part == NULL_TREE)
    return false;

  /* When the evolution is a polynomial of degree >= 2
     the evolution function is not "simple".  */
  if (tree_is_chrec (evolution_part))
    return false;

  step_expr = evolution_part;
  init_expr = unshare_expr (initial_condition_in_loop_num (access_fn, loop_nb));

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "step: ");
      print_generic_expr (vect_dump, step_expr, TDF_SLIM);
      fprintf (vect_dump, ", init: ");
      print_generic_expr (vect_dump, init_expr, TDF_SLIM);
    }

  *init = init_expr;
  *step = step_expr;

  if (TREE_CODE (step_expr) != INTEGER_CST)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "step unknown.");
      return false;
    }

  return true;
}
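/* An illustration (not from the pass itself): for the IV in

     for (i = 0; i < N; i += 3)  ...

   the scalar evolution of 'i' in the loop is the chrec {0, +, 3}_1, so
   evolution_part_in_loop_num returns the constant step 3 and
   initial_condition_in_loop_num returns the initial value 0; the evolution
   is "simple".  An IV whose step itself varies across iterations (a chrec
   of degree >= 2) or whose step is not an INTEGER_CST is rejected by the
   function above.  */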
/* Function vect_analyze_scalar_cycles_1.

   Examine the cross iteration def-use cycles of scalar variables
   in LOOP.  LOOP_VINFO represents the loop that is now being
   considered for vectorization (can be LOOP, or an outer-loop
   enclosing LOOP).  */

static void
vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
{
  basic_block bb = loop->header;
  tree dumy;
  VEC(gimple,heap) *worklist = VEC_alloc (gimple, heap, 64);
  gimple_stmt_iterator gsi;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_analyze_scalar_cycles ===");

  /* First - identify all inductions.  */
  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);
      tree access_fn = NULL;
      tree def = PHI_RESULT (phi);
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);

      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "Analyze phi: ");
          print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
        }

      /* Skip virtual phi's.  The data dependences that are associated with
         virtual defs/uses (i.e., memory accesses) are analyzed elsewhere.  */
      if (!is_gimple_reg (SSA_NAME_VAR (def)))
        continue;

      STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_unknown_def_type;

      /* Analyze the evolution function.  */
      access_fn = analyze_scalar_evolution (loop, def);
      if (access_fn && vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "Access function of PHI: ");
          print_generic_expr (vect_dump, access_fn, TDF_SLIM);
        }

      if (!access_fn
          || !vect_is_simple_iv_evolution (loop->num, access_fn, &dumy, &dumy))
        {
          VEC_safe_push (gimple, heap, worklist, phi);
          continue;
        }

      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "Detected induction.");
      STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_induction_def;
    }

  /* Second - identify all reductions.  */
  while (VEC_length (gimple, worklist) > 0)
    {
      gimple phi = VEC_pop (gimple, worklist);
      tree def = PHI_RESULT (phi);
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);
      gimple reduc_stmt;

      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "Analyze phi: ");
          print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
        }

      gcc_assert (is_gimple_reg (SSA_NAME_VAR (def)));
      gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_unknown_def_type);

      reduc_stmt = vect_is_simple_reduction (loop_vinfo, phi);
      if (reduc_stmt)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "Detected reduction.");
          STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_reduction_def;
          STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
                                                        vect_reduction_def;
        }
      else
        if (vect_print_dump_info (REPORT_DETAILS))
          fprintf (vect_dump, "Unknown def-use cycle pattern.");
    }

  VEC_free (gimple, heap, worklist);
  return;
}
/* Function vect_analyze_scalar_cycles.

   Examine the cross iteration def-use cycles of scalar variables, by
   analyzing the loop-header PHIs of scalar variables; Classify each
   cycle as one of the following: invariant, induction, reduction, unknown.
   We do that for the loop represented by LOOP_VINFO, and also for its
   inner-loop, if it exists.

   Examples for scalar cycles:

   Example1: reduction:

              loop1:
              for (i=0; i<N; i++)
                 sum += a[i];

   Example2: induction:

              loop2:
              for (i=0; i<N; i++)
                 a[i] = i;  */

static void
vect_analyze_scalar_cycles (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);

  vect_analyze_scalar_cycles_1 (loop_vinfo, loop);

  /* When vectorizing an outer-loop, the inner-loop is executed sequentially.
     Reductions in such an inner-loop therefore have different properties than
     the reductions in the nest that gets vectorized:
     1. When vectorized, they are executed in the same order as in the original
        scalar loop, so we can't change the order of computation when
        vectorizing them.
     2. FIXME: Inner-loop reductions can be used in the inner-loop, so the
        current checks are too strict.  */

  if (loop->inner)
    vect_analyze_scalar_cycles_1 (loop_vinfo, loop->inner);
}
/* Function vect_get_loop_niters.

   Determine how many iterations the loop is executed.
   If an expression that represents the number of iterations
   can be constructed, place it in NUMBER_OF_ITERATIONS.
   Return the loop exit condition.  */

static gimple
vect_get_loop_niters (struct loop *loop, tree *number_of_iterations)
{
  tree niters;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== get_loop_niters ===");

  niters = number_of_exit_cond_executions (loop);

  if (niters != NULL_TREE
      && niters != chrec_dont_know)
    {
      *number_of_iterations = niters;

      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "==> get_loop_niters:");
          print_generic_expr (vect_dump, *number_of_iterations, TDF_SLIM);
        }
    }

  return get_loop_exit_condition (loop);
}
/* Function bb_in_loop_p

   Used as predicate for dfs order traversal of the loop bbs.  */

static bool
bb_in_loop_p (const_basic_block bb, const void *data)
{
  const struct loop *const loop = (const struct loop *)data;
  if (flow_bb_inside_loop_p (loop, bb))
    return true;
  return false;
}
/* Function new_loop_vec_info.

   Create and initialize a new loop_vec_info struct for LOOP, as well as
   stmt_vec_info structs for all the stmts in LOOP.  */

static loop_vec_info
new_loop_vec_info (struct loop *loop)
{
  loop_vec_info res;
  basic_block *bbs;
  gimple_stmt_iterator si;
  unsigned int i, nbbs;

  res = (loop_vec_info) xcalloc (1, sizeof (struct _loop_vec_info));
  LOOP_VINFO_LOOP (res) = loop;

  bbs = get_loop_body (loop);

  /* Create/Update stmt_info for all stmts in the loop.  */
  for (i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = bbs[i];

      /* BBs in a nested inner-loop will have been already processed (because
         we will have called vect_analyze_loop_form for any nested inner-loop).
         Therefore, for stmts in an inner-loop we just want to update the
         STMT_VINFO_LOOP_VINFO field of their stmt_info to point to the new
         loop_info of the outer-loop we are currently considering to vectorize
         (instead of the loop_info of the inner-loop).
         For stmts in other BBs we need to create a stmt_info from scratch.  */
      if (bb->loop_father != loop)
        {
          /* Inner-loop bb.  */
          gcc_assert (loop->inner && bb->loop_father == loop->inner);
          for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple phi = gsi_stmt (si);
              stmt_vec_info stmt_info = vinfo_for_stmt (phi);
              loop_vec_info inner_loop_vinfo =
                STMT_VINFO_LOOP_VINFO (stmt_info);
              gcc_assert (loop->inner == LOOP_VINFO_LOOP (inner_loop_vinfo));
              STMT_VINFO_LOOP_VINFO (stmt_info) = res;
            }
          for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple stmt = gsi_stmt (si);
              stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
              loop_vec_info inner_loop_vinfo =
                STMT_VINFO_LOOP_VINFO (stmt_info);
              gcc_assert (loop->inner == LOOP_VINFO_LOOP (inner_loop_vinfo));
              STMT_VINFO_LOOP_VINFO (stmt_info) = res;
            }
        }
      else
        {
          /* bb in current nest.  */
          for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple phi = gsi_stmt (si);
              gimple_set_uid (phi, 0);
              set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, res));
            }

          for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple stmt = gsi_stmt (si);
              gimple_set_uid (stmt, 0);
              set_vinfo_for_stmt (stmt, new_stmt_vec_info (stmt, res));
            }
        }
    }

  /* CHECKME: We want to visit all BBs before their successors (except for
     latch blocks, for which this assertion wouldn't hold).  In the simple
     case of the loop forms we allow, a dfs order of the BBs would be the same
     as reversed postorder traversal, so we are safe.  */

  free (bbs);
  bbs = XCNEWVEC (basic_block, loop->num_nodes);
  nbbs = dfs_enumerate_from (loop->header, 0, bb_in_loop_p,
                             bbs, loop->num_nodes, loop);
  gcc_assert (nbbs == loop->num_nodes);

  LOOP_VINFO_BBS (res) = bbs;
  LOOP_VINFO_NITERS (res) = NULL;
  LOOP_VINFO_NITERS_UNCHANGED (res) = NULL;
  LOOP_VINFO_COST_MODEL_MIN_ITERS (res) = 0;
  LOOP_VINFO_VECTORIZABLE_P (res) = 0;
  LOOP_PEELING_FOR_ALIGNMENT (res) = 0;
  LOOP_VINFO_VECT_FACTOR (res) = 0;
  LOOP_VINFO_DATAREFS (res) = VEC_alloc (data_reference_p, heap, 10);
  LOOP_VINFO_DDRS (res) = VEC_alloc (ddr_p, heap, 10 * 10);
  LOOP_VINFO_UNALIGNED_DR (res) = NULL;
  LOOP_VINFO_MAY_MISALIGN_STMTS (res) =
    VEC_alloc (gimple, heap,
               PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIGNMENT_CHECKS));
  LOOP_VINFO_MAY_ALIAS_DDRS (res) =
    VEC_alloc (ddr_p, heap,
               PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS));
  LOOP_VINFO_STRIDED_STORES (res) = VEC_alloc (gimple, heap, 10);
  LOOP_VINFO_SLP_INSTANCES (res) = VEC_alloc (slp_instance, heap, 10);
  LOOP_VINFO_SLP_UNROLLING_FACTOR (res) = 1;

  return res;
}
/* Function destroy_loop_vec_info.

   Free LOOP_VINFO struct, as well as all the stmt_vec_info structs of all the
   stmts in the loop.  */

void
destroy_loop_vec_info (loop_vec_info loop_vinfo, bool clean_stmts)
{
  struct loop *loop;
  basic_block *bbs;
  int nbbs;
  gimple_stmt_iterator si;
  int j;
  VEC (slp_instance, heap) *slp_instances;
  slp_instance instance;

  if (!loop_vinfo)
    return;

  loop = LOOP_VINFO_LOOP (loop_vinfo);

  bbs = LOOP_VINFO_BBS (loop_vinfo);
  nbbs = loop->num_nodes;

  if (!clean_stmts)
    {
      free (LOOP_VINFO_BBS (loop_vinfo));
      free_data_refs (LOOP_VINFO_DATAREFS (loop_vinfo));
      free_dependence_relations (LOOP_VINFO_DDRS (loop_vinfo));
      VEC_free (gimple, heap, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo));

      free (loop_vinfo);
      loop->aux = NULL;
      return;
    }

  for (j = 0; j < nbbs; j++)
    {
      basic_block bb = bbs[j];
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        free_stmt_vec_info (gsi_stmt (si));

      for (si = gsi_start_bb (bb); !gsi_end_p (si); )
        {
          gimple stmt = gsi_stmt (si);
          stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

          if (stmt_info)
            {
              /* Check if this is a "pattern stmt" (introduced by the
                 vectorizer during the pattern recognition pass).  */
              bool remove_stmt_p = false;
              gimple orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
              if (orig_stmt)
                {
                  stmt_vec_info orig_stmt_info = vinfo_for_stmt (orig_stmt);
                  if (orig_stmt_info
                      && STMT_VINFO_IN_PATTERN_P (orig_stmt_info))
                    remove_stmt_p = true;
                }

              /* Free stmt_vec_info.  */
              free_stmt_vec_info (stmt);

              /* Remove dead "pattern stmts".  */
              if (remove_stmt_p)
                gsi_remove (&si, true);
            }
          gsi_next (&si);
        }
    }

  free (LOOP_VINFO_BBS (loop_vinfo));
  free_data_refs (LOOP_VINFO_DATAREFS (loop_vinfo));
  free_dependence_relations (LOOP_VINFO_DDRS (loop_vinfo));
  VEC_free (gimple, heap, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo));
  VEC_free (ddr_p, heap, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo));
  slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
  for (j = 0; VEC_iterate (slp_instance, slp_instances, j, instance); j++)
    vect_free_slp_instance (instance);

  VEC_free (slp_instance, heap, LOOP_VINFO_SLP_INSTANCES (loop_vinfo));
  VEC_free (gimple, heap, LOOP_VINFO_STRIDED_STORES (loop_vinfo));

  free (loop_vinfo);
  loop->aux = NULL;
}
/* Function vect_analyze_loop_1.

   Apply a set of analyses on LOOP, and create a loop_vec_info struct
   for it.  The different analyses will record information in the
   loop_vec_info struct.  This is a subset of the analyses applied in
   vect_analyze_loop, to be applied on an inner-loop nested in the loop
   that is now being considered for (outer-loop) vectorization.  */

static loop_vec_info
vect_analyze_loop_1 (struct loop *loop)
{
  loop_vec_info loop_vinfo;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "===== analyze_loop_nest_1 =====");

  /* Check the CFG characteristics of the loop (nesting, entry/exit, etc.).  */

  loop_vinfo = vect_analyze_loop_form (loop);
  if (!loop_vinfo)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad inner-loop form.");
      return NULL;
    }

  return loop_vinfo;
}
/* Function vect_analyze_loop_form.

   Verify that certain CFG restrictions hold, including:
   - the loop has a pre-header
   - the loop has a single entry and exit
   - the loop exit condition is simple enough, and the number of iterations
     can be analyzed (a countable loop).  */

loop_vec_info
vect_analyze_loop_form (struct loop *loop)
{
  loop_vec_info loop_vinfo;
  gimple loop_cond;
  tree number_of_iterations = NULL;
  loop_vec_info inner_loop_vinfo = NULL;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_analyze_loop_form ===");

  /* Different restrictions apply when we are considering an inner-most loop,
     vs. an outer (nested) loop.
     (FORNOW. May want to relax some of these restrictions in the future).  */

  if (!loop->inner)
    {
      /* Inner-most loop.  We currently require that the number of BBs is
         exactly 2 (the header and latch).  Vectorizable inner-most loops
         look like this:

                        (pre-header)
                           |
                          header <--------+
                           | |            |
                           | +--> latch --+
                           |
                        (exit-bb)  */

      if (loop->num_nodes != 2)
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: too many BBs in loop.");
          return NULL;
        }

      if (empty_block_p (loop->header))
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: empty loop.");
          return NULL;
        }
    }
  else
    {
      struct loop *innerloop = loop->inner;
      edge backedge, entryedge;

      /* Nested loop.  We currently require that the loop is doubly-nested,
         contains a single inner loop, and the number of BBs is exactly 5.
         Vectorizable outer-loops look like this:

                        (pre-header)
                           |
                          header <---+
                           |         |
                          inner-loop |
                           |         |
                          tail ------+
                           |
                        (exit-bb)

         The inner-loop has the properties expected of inner-most loops
         as described above.  */

      if ((loop->inner)->inner || (loop->inner)->next)
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: multiple nested loops.");
          return NULL;
        }

      /* Analyze the inner-loop.  */
      inner_loop_vinfo = vect_analyze_loop_1 (loop->inner);
      if (!inner_loop_vinfo)
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: Bad inner loop.");
          return NULL;
        }

      if (!expr_invariant_in_loop_p (loop,
                                     LOOP_VINFO_NITERS (inner_loop_vinfo)))
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump,
                     "not vectorized: inner-loop count not invariant.");
          destroy_loop_vec_info (inner_loop_vinfo, true);
          return NULL;
        }

      if (loop->num_nodes != 5)
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: too many BBs in loop.");
          destroy_loop_vec_info (inner_loop_vinfo, true);
          return NULL;
        }

      gcc_assert (EDGE_COUNT (innerloop->header->preds) == 2);
      backedge = EDGE_PRED (innerloop->header, 1);
      entryedge = EDGE_PRED (innerloop->header, 0);
      if (EDGE_PRED (innerloop->header, 0)->src == innerloop->latch)
        {
          backedge = EDGE_PRED (innerloop->header, 0);
          entryedge = EDGE_PRED (innerloop->header, 1);
        }

      if (entryedge->src != loop->header
          || !single_exit (innerloop)
          || single_exit (innerloop)->dest != EDGE_PRED (loop->latch, 0)->src)
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: unsupported outerloop form.");
          destroy_loop_vec_info (inner_loop_vinfo, true);
          return NULL;
        }

      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "Considering outer-loop vectorization.");
    }

  if (!single_exit (loop)
      || EDGE_COUNT (loop->header->preds) != 2)
    {
      if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
        {
          if (!single_exit (loop))
            fprintf (vect_dump, "not vectorized: multiple exits.");
          else if (EDGE_COUNT (loop->header->preds) != 2)
            fprintf (vect_dump, "not vectorized: too many incoming edges.");
        }
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  /* We assume that the loop exit condition is at the end of the loop, i.e.,
     that the loop is represented as a do-while (with a proper if-guard
     before the loop if needed), where the loop header contains all the
     executable statements, and the latch is empty.  */
  if (!empty_block_p (loop->latch)
      || phi_nodes (loop->latch))
    {
      if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
        fprintf (vect_dump, "not vectorized: unexpected loop form.");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  /* Make sure there exists a single-predecessor exit bb:  */
  if (!single_pred_p (single_exit (loop)->dest))
    {
      edge e = single_exit (loop);
      if (!(e->flags & EDGE_ABNORMAL))
        {
          split_loop_exit_edge (e);
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "split exit edge.");
        }
      else
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: abnormal loop exit edge.");
          if (inner_loop_vinfo)
            destroy_loop_vec_info (inner_loop_vinfo, true);
          return NULL;
        }
    }

  loop_cond = vect_get_loop_niters (loop, &number_of_iterations);
  if (!loop_cond)
    {
      if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
        fprintf (vect_dump, "not vectorized: complicated exit condition.");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  if (!number_of_iterations)
    {
      if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
        fprintf (vect_dump,
                 "not vectorized: number of iterations cannot be computed.");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  if (chrec_contains_undetermined (number_of_iterations))
    {
      if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
        fprintf (vect_dump, "Infinite number of iterations.");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  if (!NITERS_KNOWN_P (number_of_iterations))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "Symbolic number of iterations is ");
          print_generic_expr (vect_dump, number_of_iterations, TDF_DETAILS);
        }
    }
  else if (TREE_INT_CST_LOW (number_of_iterations) == 0)
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS))
        fprintf (vect_dump, "not vectorized: number of iterations = 0.");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, false);
      return NULL;
    }

  loop_vinfo = new_loop_vec_info (loop);
  LOOP_VINFO_NITERS (loop_vinfo) = number_of_iterations;
  LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = number_of_iterations;

  STMT_VINFO_TYPE (vinfo_for_stmt (loop_cond)) = loop_exit_ctrl_vec_info_type;

  /* CHECKME: May want to keep it around in the future.  */
  if (inner_loop_vinfo)
    destroy_loop_vec_info (inner_loop_vinfo, false);

  gcc_assert (!loop->aux);
  loop->aux = loop_vinfo;
  return loop_vinfo;
}
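/* For illustration (not part of the pass): a countable loop in the shape
   this function accepts.  Written back as source, the canonical do-while
   form described above looks like:

     if (n > 0)            (if-guard before the loop)
       {
         i = 0;
         do
           {
             a[i] = b[i];  (all executable stmts in the header)
             i++;
           }
         while (i < n);    (exit condition at the end; the latch is empty)
       }

   Here the loop has a single exit, the latch carries no statements or PHIs,
   and number_of_exit_cond_executions can compute 'n'.  */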
/* Function vect_analyze_loop.

   Apply a set of analyses on LOOP, and create a loop_vec_info struct
   for it.  The different analyses will record information in the
   loop_vec_info struct.  */
loop_vec_info
vect_analyze_loop (struct loop *loop)
{
  bool ok;
  loop_vec_info loop_vinfo;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "===== analyze_loop_nest =====");

  if (loop_outer (loop)
      && loop_vec_info_for_loop (loop_outer (loop))
      && LOOP_VINFO_VECTORIZABLE_P (loop_vec_info_for_loop (loop_outer (loop))))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "outer-loop already vectorized.");
      return NULL;
    }

  /* Check the CFG characteristics of the loop (nesting, entry/exit, etc.).  */

  loop_vinfo = vect_analyze_loop_form (loop);
  if (!loop_vinfo)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad loop form.");
      return NULL;
    }

  /* Find all data references in the loop (which correspond to vdefs/vuses)
     and analyze their evolution in the loop.

     FORNOW: Handle only simple array references whose
     alignment can be forced, and aligned pointer-references.  */

  ok = vect_analyze_data_refs (loop_vinfo);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data references.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  /* Classify all cross-iteration scalar data-flow cycles.
     Cross-iteration cycles caused by virtual phis are analyzed separately.  */

  vect_analyze_scalar_cycles (loop_vinfo);

  vect_pattern_recog (loop_vinfo);

  /* Data-flow analysis to detect stmts that do not need to be vectorized.  */

  ok = vect_mark_stmts_to_be_vectorized (loop_vinfo);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "unexpected pattern.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  /* Analyze the alignment of the data-refs in the loop.
     Fail if a data reference is found that cannot be vectorized.  */

  ok = vect_analyze_data_refs_alignment (loop_vinfo);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data alignment.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  ok = vect_determine_vectorization_factor (loop_vinfo);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "can't determine vectorization factor.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  /* Analyze data dependences between the data-refs in the loop.
     FORNOW: fail at the first data dependence that we encounter.  */

  ok = vect_analyze_data_ref_dependences (loop_vinfo);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data dependence.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  /* Analyze the access patterns of the data-refs in the loop (consecutive,
     complex, etc.).  FORNOW: Only handle consecutive access pattern.  */

  ok = vect_analyze_data_ref_accesses (loop_vinfo);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data access.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  /* Prune the list of ddrs to be tested at run-time by versioning for alias.
     It is important to call pruning after vect_analyze_data_ref_accesses,
     since we use grouping information gathered by interleaving analysis.  */
  ok = vect_prune_runtime_alias_test_list (loop_vinfo);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "too long list of versioning for alias "
                            "run-time tests.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  /* Check the SLP opportunities in the loop, analyze and build SLP trees.  */
  ok = vect_analyze_slp (loop_vinfo);
  if (ok)
    {
      /* Decide which possible SLP instances to SLP.  */
      vect_make_slp_decision (loop_vinfo);

      /* Find stmts that need to be both vectorized and SLPed.  */
      vect_detect_hybrid_slp (loop_vinfo);
    }

  /* This pass will decide on using loop versioning and/or loop peeling in
     order to enhance the alignment of data references in the loop.  */

  ok = vect_enhance_data_refs_alignment (loop_vinfo);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data alignment.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  /* Scan all the operations in the loop and make sure they are
     vectorizable.  */

  ok = vect_analyze_operations (loop_vinfo);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad operation or unsupported loop bound.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  LOOP_VINFO_VECTORIZABLE_P (loop_vinfo) = 1;

  return loop_vinfo;
}
/* Function reduction_code_for_scalar_code

   Input:
   CODE - tree_code of a reduction operation.

   Output:
   REDUC_CODE - the corresponding tree-code to be used to reduce the
      vector of partial results into a single scalar result (which
      will also reside in a vector).

   Return TRUE if a corresponding REDUC_CODE was found, FALSE otherwise.  */

static bool
reduction_code_for_scalar_code (enum tree_code code,
                                enum tree_code *reduc_code)
{
  switch (code)
    {
    case MAX_EXPR:
      *reduc_code = REDUC_MAX_EXPR;
      return true;

    case MIN_EXPR:
      *reduc_code = REDUC_MIN_EXPR;
      return true;

    case PLUS_EXPR:
      *reduc_code = REDUC_PLUS_EXPR;
      return true;

    default:
      return false;
    }
}
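/* Illustration (not part of the pass): for the reduction 'sum += a[i]' the
   scalar statement code is PLUS_EXPR, so a caller would do:

     enum tree_code reduc_code;
     bool found = reduction_code_for_scalar_code (PLUS_EXPR, &reduc_code);

   leaving found == true and reduc_code == REDUC_PLUS_EXPR: after the vector
   loop has accumulated a vector of partial sums, a single REDUC_PLUS_EXPR
   stmt reduces it to one scalar result (kept in a vector element).
   MINUS_EXPR, by contrast, has no REDUC_* counterpart above, and the
   function returns false.  */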
/* Error reporting helper for vect_is_simple_reduction below.  GIMPLE statement
   STMT is printed with a message MSG.  */

static void
report_vect_op (gimple stmt, const char *msg)
{
  fprintf (vect_dump, "%s", msg);
  print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
}
/* Function vect_is_simple_reduction

   Detect a cross-iteration def-use cycle that represents a simple
   reduction computation.  We look for the following pattern:

   loop_header:
     a1 = phi < a0, a2 >
     a3 = ...
     a2 = operation (a3, a1)

   such that:
   1. operation is commutative and associative and it is safe to
      change the order of the computation.
   2. no uses for a2 in the loop (a2 is used out of the loop)
   3. no uses of a1 in the loop besides the reduction operation.

   Condition 1 is tested here.
   Conditions 2,3 are tested in vect_mark_stmts_to_be_vectorized.  */

gimple
vect_is_simple_reduction (loop_vec_info loop_info, gimple phi)
{
  struct loop *loop = (gimple_bb (phi))->loop_father;
  struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
  edge latch_e = loop_latch_edge (loop);
  tree loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
  gimple def_stmt, def1, def2;
  enum tree_code code;
  tree op1, op2;
  tree type;
  int nloop_uses;
  tree name;
  imm_use_iterator imm_iter;
  use_operand_p use_p;

  gcc_assert (loop == vect_loop || flow_loop_nested_p (vect_loop, loop));

  name = PHI_RESULT (phi);
  nloop_uses = 0;
  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
    {
      gimple use_stmt = USE_STMT (use_p);
      if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))
          && vinfo_for_stmt (use_stmt)
          && !is_pattern_stmt_p (vinfo_for_stmt (use_stmt)))
        nloop_uses++;
      if (nloop_uses > 1)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "reduction used in loop.");
          return NULL;
        }
    }

  if (TREE_CODE (loop_arg) != SSA_NAME)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "reduction: not ssa_name: ");
          print_generic_expr (vect_dump, loop_arg, TDF_SLIM);
        }
      return NULL;
    }

  def_stmt = SSA_NAME_DEF_STMT (loop_arg);
  if (!def_stmt)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "reduction: no def_stmt.");
      return NULL;
    }

  if (!is_gimple_assign (def_stmt))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        print_gimple_stmt (vect_dump, def_stmt, 0, TDF_SLIM);
      return NULL;
    }

  name = gimple_assign_lhs (def_stmt);
  nloop_uses = 0;
  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
    {
      gimple use_stmt = USE_STMT (use_p);
      if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))
          && vinfo_for_stmt (use_stmt)
          && !is_pattern_stmt_p (vinfo_for_stmt (use_stmt)))
        nloop_uses++;
      if (nloop_uses > 1)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "reduction used in loop.");
          return NULL;
        }
    }

  code = gimple_assign_rhs_code (def_stmt);

  if (!commutative_tree_code (code) || !associative_tree_code (code))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        report_vect_op (def_stmt, "reduction: not commutative/associative: ");
      return NULL;
    }

  if (get_gimple_rhs_class (code) != GIMPLE_BINARY_RHS)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        report_vect_op (def_stmt, "reduction: not binary operation: ");
      return NULL;
    }

  op1 = gimple_assign_rhs1 (def_stmt);
  op2 = gimple_assign_rhs2 (def_stmt);
  if (TREE_CODE (op1) != SSA_NAME || TREE_CODE (op2) != SSA_NAME)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        report_vect_op (def_stmt, "reduction: uses not ssa_names: ");
      return NULL;
    }

  /* Check that it's ok to change the order of the computation.  */
  type = TREE_TYPE (gimple_assign_lhs (def_stmt));
  if (TYPE_MAIN_VARIANT (type) != TYPE_MAIN_VARIANT (TREE_TYPE (op1))
      || TYPE_MAIN_VARIANT (type) != TYPE_MAIN_VARIANT (TREE_TYPE (op2)))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "reduction: multiple types: operation type: ");
          print_generic_expr (vect_dump, type, TDF_SLIM);
          fprintf (vect_dump, ", operands types: ");
          print_generic_expr (vect_dump, TREE_TYPE (op1), TDF_SLIM);
          fprintf (vect_dump, ",");
          print_generic_expr (vect_dump, TREE_TYPE (op2), TDF_SLIM);
        }
      return NULL;
    }

  /* Generally, when vectorizing a reduction we change the order of the
     computation.  This may change the behavior of the program in some
     cases, so we need to check that this is ok.  One exception is when
     vectorizing an outer-loop: the inner-loop is executed sequentially,
     and therefore vectorizing reductions in the inner-loop during
     outer-loop vectorization is safe.  */

  /* CHECKME: check for !flag_finite_math_only too?  */
  if (SCALAR_FLOAT_TYPE_P (type) && !flag_associative_math
      && !nested_in_vect_loop_p (vect_loop, def_stmt))
    {
      /* Changing the order of operations changes the semantics.  */
      if (vect_print_dump_info (REPORT_DETAILS))
        report_vect_op (def_stmt, "reduction: unsafe fp math optimization: ");
      return NULL;
    }
  else if (INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type)
           && !nested_in_vect_loop_p (vect_loop, def_stmt))
    {
      /* Changing the order of operations changes the semantics.  */
      if (vect_print_dump_info (REPORT_DETAILS))
        report_vect_op (def_stmt, "reduction: unsafe int math optimization: ");
      return NULL;
    }
  else if (SAT_FIXED_POINT_TYPE_P (type))
    {
      /* Changing the order of operations changes the semantics.  */
      if (vect_print_dump_info (REPORT_DETAILS))
        report_vect_op (def_stmt,
                        "reduction: unsafe fixed-point math optimization: ");
      return NULL;
    }

  /* Reduction is safe.  We're dealing with one of the following:
     1) integer arithmetic and no trapv
     2) floating point arithmetic, and special flags permit this optimization.
   */
  def1 = SSA_NAME_DEF_STMT (op1);
  def2 = SSA_NAME_DEF_STMT (op2);
  if (!def1 || !def2 || gimple_nop_p (def1) || gimple_nop_p (def2))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        report_vect_op (def_stmt, "reduction: no defs for operands: ");
      return NULL;
    }


  /* Check that one def is the reduction def, defined by PHI,
     the other def is either defined in the loop ("vect_loop_def"),
     or it's an induction (defined by a loop-header phi-node).  */

  if (def2 == phi
      && flow_bb_inside_loop_p (loop, gimple_bb (def1))
      && (is_gimple_assign (def1)
          || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1)) == vect_induction_def
          || (gimple_code (def1) == GIMPLE_PHI
              && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1)) == vect_loop_def
              && !is_loop_header_bb_p (gimple_bb (def1)))))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        report_vect_op (def_stmt, "detected reduction:");
      return def_stmt;
    }
  else if (def1 == phi
           && flow_bb_inside_loop_p (loop, gimple_bb (def2))
           && (is_gimple_assign (def2)
               || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2)) == vect_induction_def
               || (gimple_code (def2) == GIMPLE_PHI
                   && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2)) == vect_loop_def
                   && !is_loop_header_bb_p (gimple_bb (def2)))))
    {
      /* Swap operands (just for simplicity - so that the rest of the code
         can assume that the reduction variable is always the last (second)
         argument).  */
      if (vect_print_dump_info (REPORT_DETAILS))
        report_vect_op (def_stmt,
                        "detected reduction: need to swap operands:");
      swap_tree_operands (def_stmt, gimple_assign_rhs1_ptr (def_stmt),
                          gimple_assign_rhs2_ptr (def_stmt));
      return def_stmt;
    }
  else
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        report_vect_op (def_stmt, "reduction: unknown pattern.");
      return NULL;
    }
}
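/* Illustration (not part of the pass): for 'sum += a[i]' the loop-closed
   def may be written as  sum_2 = sum_1 + t_3,  where sum_1 is defined by
   the loop-header PHI.  In that case def1 == phi above, so the operands
   are swapped to  sum_2 = t_3 + sum_1,  letting the rest of the vectorizer
   assume the reduction variable is always the second argument.  */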
/* Function vect_estimate_min_profitable_iters

   Return the number of iterations required for the vector version of the
   loop to be profitable relative to the cost of the scalar version of the
   loop.

   TODO: Take profile info into account before making vectorization
   decisions, if available.  */

int
vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo)
{
  int i;
  int min_profitable_iters;
  int peel_iters_prologue;
  int peel_iters_epilogue;
  int vec_inside_cost = 0;
  int vec_outside_cost = 0;
  int scalar_single_iter_cost = 0;
  int scalar_outside_cost = 0;
  int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  int nbbs = loop->num_nodes;
  int byte_misalign = LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo);
  int peel_guard_costs = 0;
  int innerloop_iters = 0, factor;
  VEC (slp_instance, heap) *slp_instances;
  slp_instance instance;

  /* Cost model disabled.  */
  if (!flag_vect_cost_model)
    {
      if (vect_print_dump_info (REPORT_COST))
        fprintf (vect_dump, "cost model disabled.");
      return 0;
    }

  /* Requires loop versioning tests to handle misalignment.  */
  if (VEC_length (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo)))
    {
      /* FIXME: Make cost depend on complexity of individual check.  */
      vec_outside_cost +=
        VEC_length (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo));
      if (vect_print_dump_info (REPORT_COST))
        fprintf (vect_dump, "cost model: Adding cost of checks for loop "
                 "versioning to treat misalignment.\n");
    }

  if (VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo)))
    {
      /* FIXME: Make cost depend on complexity of individual check.  */
      vec_outside_cost +=
        VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo));
      if (vect_print_dump_info (REPORT_COST))
        fprintf (vect_dump, "cost model: Adding cost of checks for loop "
                 "versioning aliasing.\n");
    }

  if (VEC_length (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo))
      || VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo)))
    {
      vec_outside_cost += TARG_COND_TAKEN_BRANCH_COST;
    }

  /* Count statements in scalar loop.  Using this as scalar cost for a single
     iteration for now.

     TODO: Add outer loop support.

     TODO: Consider assigning different costs to different scalar
     statements.  */

  /* FORNOW.  */
  if (loop->inner)
    innerloop_iters = 50; /* FIXME */

  for (i = 0; i < nbbs; i++)
    {
      gimple_stmt_iterator si;
      basic_block bb = bbs[i];

      if (bb->loop_father == loop->inner)
        factor = innerloop_iters;
      else
        factor = 1;

      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
        {
          gimple stmt = gsi_stmt (si);
          stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
          /* Skip stmts that are not vectorized inside the loop.  */
          if (!STMT_VINFO_RELEVANT_P (stmt_info)
              && (!STMT_VINFO_LIVE_P (stmt_info)
                  || STMT_VINFO_DEF_TYPE (stmt_info) != vect_reduction_def))
            continue;
          scalar_single_iter_cost += cost_for_stmt (stmt) * factor;
          vec_inside_cost += STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info) * factor;
          /* FIXME: for stmts in the inner-loop in outer-loop vectorization,
             some of the "outside" costs are generated inside the outer-loop.  */
          vec_outside_cost += STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info);
        }
    }

  /* Add additional cost for the peeled instructions in prologue and epilogue
     loop.

     FORNOW: If we don't know the value of peel_iters for prologue or epilogue
     at compile-time - we assume it's vf/2 (the worst would be vf-1).

     TODO: Build an expression that represents peel_iters for prologue and
     epilogue to be used in a run-time test.  */

  if (byte_misalign < 0)
    {
      peel_iters_prologue = vf/2;
      if (vect_print_dump_info (REPORT_COST))
        fprintf (vect_dump, "cost model: "
                 "prologue peel iters set to vf/2.");

      /* If peeling for alignment is unknown, loop bound of main loop becomes
         unknown.  */
      peel_iters_epilogue = vf/2;
      if (vect_print_dump_info (REPORT_COST))
        fprintf (vect_dump, "cost model: "
                 "epilogue peel iters set to vf/2 because "
                 "peeling for alignment is unknown.");

      /* If peeled iterations are unknown, count a taken branch and a not taken
         branch per peeled loop.  Even if scalar loop iterations are known,
         vector iterations are not known since peeled prologue iterations are
         not known.  Hence guards remain the same.  */
      peel_guard_costs += 2 * (TARG_COND_TAKEN_BRANCH_COST
                               + TARG_COND_NOT_TAKEN_BRANCH_COST);
    }
  else
    {
      if (byte_misalign)
        {
          struct data_reference *dr = LOOP_VINFO_UNALIGNED_DR (loop_vinfo);
          int element_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr))));
          tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (DR_STMT (dr)));
          int nelements = TYPE_VECTOR_SUBPARTS (vectype);

          peel_iters_prologue = nelements - (byte_misalign / element_size);
        }
      else
        peel_iters_prologue = 0;

      if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
        {
          peel_iters_epilogue = vf/2;
          if (vect_print_dump_info (REPORT_COST))
            fprintf (vect_dump, "cost model: "
                     "epilogue peel iters set to vf/2 because "
                     "loop iterations are unknown.");

          /* If peeled iterations are known but number of scalar loop
             iterations are unknown, count a taken branch per peeled loop.  */
          peel_guard_costs += 2 * TARG_COND_TAKEN_BRANCH_COST;
        }
      else
        {
          int niters = LOOP_VINFO_INT_NITERS (loop_vinfo);
          peel_iters_prologue = niters < peel_iters_prologue ?
                                niters : peel_iters_prologue;
          peel_iters_epilogue = (niters - peel_iters_prologue) % vf;
        }
    }

  vec_outside_cost += (peel_iters_prologue * scalar_single_iter_cost)
                      + (peel_iters_epilogue * scalar_single_iter_cost)
                      + peel_guard_costs;

  /* FORNOW: The scalar outside cost is incremented in one of the
     following ways:

     1. The vectorizer checks for alignment and aliasing and generates
     a condition that allows dynamic vectorization.  A cost model
     check is ANDED with the versioning condition.  Hence scalar code
     path now has the added cost of the versioning check.

       if (cost > th & versioning_check)
         jmp to vector code

     Hence run-time scalar is incremented by not-taken branch cost.

     2. The vectorizer then checks if a prologue is required.  If the
     cost model check was not done before during versioning, it has to
     be done before the prologue check.

       if (cost <= th)
         prologue = scalar_iters
       if (prologue == 0)
         jmp to vector code
       else
         execute prologue
       if (prologue == num_iters)
         go to exit

     Hence the run-time scalar cost is incremented by a taken branch,
     plus a not-taken branch, plus a taken branch cost.

     3. The vectorizer then checks if an epilogue is required.  If the
     cost model check was not done before during prologue check, it
     has to be done with the epilogue check.

       if (prologue == 0)
         jmp to vector code
       else
         execute prologue
       if (prologue == num_iters)
         go to exit
       vector code:
         if ((cost <= th) | (scalar_iters-prologue-epilogue == 0))
           jmp to epilogue

     Hence the run-time scalar cost should be incremented by 2 taken
     branches.

     TODO: The back end may reorder the BBS's differently and reverse
     conditions/branch directions.  Change the estimates below to
     something more reasonable.  */

  /* If the number of iterations is known and we do not do versioning, we can
     decide whether to vectorize at compile time.  Hence the scalar version
     does not carry cost model guard costs.  */
  if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      || VEC_length (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo))
      || VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo)))
    {
      /* Cost model check occurs at versioning.  */
      if (VEC_length (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo))
          || VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo)))
        scalar_outside_cost += TARG_COND_NOT_TAKEN_BRANCH_COST;
      else
        {
          /* Cost model check occurs at prologue generation.  */
          if (LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0)
            scalar_outside_cost += 2 * TARG_COND_TAKEN_BRANCH_COST
              + TARG_COND_NOT_TAKEN_BRANCH_COST;
          /* Cost model check occurs at epilogue generation.  */
          else
            scalar_outside_cost += 2 * TARG_COND_TAKEN_BRANCH_COST;
        }
    }

  /* Add SLP costs.  */
  slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
  for (i = 0; VEC_iterate (slp_instance, slp_instances, i, instance); i++)
    {
      vec_outside_cost += SLP_INSTANCE_OUTSIDE_OF_LOOP_COST (instance);
      vec_inside_cost += SLP_INSTANCE_INSIDE_OF_LOOP_COST (instance);
    }

  /* Calculate number of iterations required to make the vector version
     profitable, relative to the loop bodies only.  The following condition
     must hold true:
     SIC * niters + SOC > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC
     where
     SIC = scalar iteration cost, VIC = vector iteration cost,
     VOC = vector outside cost, VF = vectorization factor,
     PL_ITERS = prologue iterations, EP_ITERS = epilogue iterations,
     SOC = scalar outside cost for run time cost model check.  */
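  /* A worked instance of the condition above (illustrative numbers, not
     computed by this function): with SIC = 4, VIC = 6, VF = 4, VOC = 10,
     SOC = 0 and no peeling (PL_ITERS = EP_ITERS = 0), it becomes
     4 * niters > 6 * (niters/4) + 10, i.e. 2.5 * niters > 10, so the
     vector version wins once niters exceeds 4.  The code below computes
     exactly this break-even point, then rounds it up to at least VF and
     subtracts one because the guard generated is "if (niters <= th)".  */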
  if ((scalar_single_iter_cost * vf) > vec_inside_cost)
    {
      if (vec_outside_cost <= 0)
        min_profitable_iters = 1;
      else
        {
          min_profitable_iters = ((vec_outside_cost - scalar_outside_cost) * vf
                                  - vec_inside_cost * peel_iters_prologue
                                  - vec_inside_cost * peel_iters_epilogue)
                                 / ((scalar_single_iter_cost * vf)
                                    - vec_inside_cost);

          if ((scalar_single_iter_cost * vf * min_profitable_iters)
              <= ((vec_inside_cost * min_profitable_iters)
                  + ((vec_outside_cost - scalar_outside_cost) * vf)))
            min_profitable_iters++;
        }
    }
  /* vector version will never be profitable.  */
  else
    {
      if (vect_print_dump_info (REPORT_COST))
        fprintf (vect_dump, "cost model: vector iteration cost = %d "
                 "is divisible by scalar iteration cost = %d by a factor "
                 "greater than or equal to the vectorization factor = %d .",
                 vec_inside_cost, scalar_single_iter_cost, vf);
      return -1;
    }

  if (vect_print_dump_info (REPORT_COST))
    {
      fprintf (vect_dump, "Cost model analysis: \n");
      fprintf (vect_dump, "  Vector inside of loop cost: %d\n",
               vec_inside_cost);
      fprintf (vect_dump, "  Vector outside of loop cost: %d\n",
               vec_outside_cost);
      fprintf (vect_dump, "  Scalar iteration cost: %d\n",
               scalar_single_iter_cost);
      fprintf (vect_dump, "  Scalar outside cost: %d\n", scalar_outside_cost);
      fprintf (vect_dump, "  prologue iterations: %d\n",
               peel_iters_prologue);
      fprintf (vect_dump, "  epilogue iterations: %d\n",
               peel_iters_epilogue);
      fprintf (vect_dump, "  Calculated minimum iters for profitability: %d\n",
               min_profitable_iters);
    }

  min_profitable_iters =
        min_profitable_iters < vf ? vf : min_profitable_iters;

  /* Because the condition we create is:
     if (niters <= min_profitable_iters)
       then skip the vectorized loop.  */
  min_profitable_iters--;

  if (vect_print_dump_info (REPORT_COST))
    fprintf (vect_dump, "  Profitability threshold = %d\n",
             min_profitable_iters);

  return min_profitable_iters;
}
1819 /* TODO: Close dependency between vect_model_*_cost and vectorizable_*
1820 functions. Design better to avoid maintenance issues. */
1822 /* Function vect_model_reduction_cost.
1824 Models cost for a reduction operation, including the vector ops
1825 generated within the strip-mine loop, the initial definition before
1826 the loop, and the epilogue code that must be generated. */
1828 static bool
1829 vect_model_reduction_cost (stmt_vec_info stmt_info, enum tree_code reduc_code,
1830 int ncopies)
1832 int outer_cost = 0;
1833 enum tree_code code;
1834 optab optab;
1835 tree vectype;
1836 gimple stmt, orig_stmt;
1837 tree reduction_op;
1838 enum machine_mode mode;
1839 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1840 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1843 /* Cost of reduction op inside loop. */
1844 STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info) += ncopies * TARG_VEC_STMT_COST;
1846 stmt = STMT_VINFO_STMT (stmt_info);
1848 switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
1850 case GIMPLE_SINGLE_RHS:
1851 gcc_assert (TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt)) == ternary_op);
1852 reduction_op = TREE_OPERAND (gimple_assign_rhs1 (stmt), 2);
1853 break;
1854 case GIMPLE_UNARY_RHS:
1855 reduction_op = gimple_assign_rhs1 (stmt);
1856 break;
1857 case GIMPLE_BINARY_RHS:
1858 reduction_op = gimple_assign_rhs2 (stmt);
1859 break;
1860 default:
1861 gcc_unreachable ();
1864 vectype = get_vectype_for_scalar_type (TREE_TYPE (reduction_op));
1865 if (!vectype)
1867 if (vect_print_dump_info (REPORT_COST))
1869 fprintf (vect_dump, "unsupported data-type ");
1870 print_generic_expr (vect_dump, TREE_TYPE (reduction_op), TDF_SLIM);
1872 return false;
1875 mode = TYPE_MODE (vectype);
1876 orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
1878 if (!orig_stmt)
1879 orig_stmt = STMT_VINFO_STMT (stmt_info);
1881 code = gimple_assign_rhs_code (orig_stmt);
1883 /* Add in cost for initial definition. */
1884 outer_cost += TARG_SCALAR_TO_VEC_COST;
1886 /* Determine cost of epilogue code.
1888 We have a reduction operator that will reduce the vector in one statement.
1889 Also requires scalar extract. */
1891 if (!nested_in_vect_loop_p (loop, orig_stmt))
1893 if (reduc_code < NUM_TREE_CODES)
1894 outer_cost += TARG_VEC_STMT_COST + TARG_VEC_TO_SCALAR_COST;
1895 else
1897 int vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1);
1898 tree bitsize =
1899 TYPE_SIZE (TREE_TYPE (gimple_assign_lhs (orig_stmt)));
1900 int element_bitsize = tree_low_cst (bitsize, 1);
1901 int nelements = vec_size_in_bits / element_bitsize;
1903 optab = optab_for_tree_code (code, vectype, optab_default);
1905 /* We have a whole vector shift available. */
1906 if (VECTOR_MODE_P (mode)
1907 && optab_handler (optab, mode)->insn_code != CODE_FOR_nothing
1908 && optab_handler (vec_shr_optab, mode)->insn_code != CODE_FOR_nothing)
1909 /* Final reduction via vector shifts and the reduction operator. Also
1910 requires scalar extract. */
1911 outer_cost += ((exact_log2 (nelements) * 2) * TARG_VEC_STMT_COST
1912 + TARG_VEC_TO_SCALAR_COST);
1913 else
1914 /* Use extracts and reduction op for final reduction. For N elements,
1915 we have N extracts and N-1 reduction ops. */
1916 outer_cost += ((nelements + nelements - 1) * TARG_VEC_STMT_COST);
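/* For illustration, with a hypothetical V8HI vector (nelements = 8):
   the shift-based scheme above costs exact_log2 (8) * 2 = 6 vector
   stmts plus one vec_to_scalar extract, whereas this extract-based
   fallback costs 8 + 7 = 15 vector stmts.  */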
1920 STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info) = outer_cost;
1922 if (vect_print_dump_info (REPORT_COST))
1923 fprintf (vect_dump, "vect_model_reduction_cost: inside_cost = %d, "
1924 "outside_cost = %d .", STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info),
1925 STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info));
1927 return true;
1931 /* Function vect_model_induction_cost.
1933 Models cost for induction operations. */
1935 static void
1936 vect_model_induction_cost (stmt_vec_info stmt_info, int ncopies)
1938 /* loop cost for vec_loop. */
1939 STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info) = ncopies * TARG_VEC_STMT_COST;
1940 /* prologue cost for vec_init and vec_step. */
1941 STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info) = 2 * TARG_SCALAR_TO_VEC_COST;
1943 if (vect_print_dump_info (REPORT_COST))
1944 fprintf (vect_dump, "vect_model_induction_cost: inside_cost = %d, "
1945 "outside_cost = %d .", STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info),
1946 STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info));
1950 /* Function get_initial_def_for_induction
1952 Input:
1953 IV_PHI - the loop-header phi stmt that defines the induction
1954 variable; its initial value (on loop entry) is denoted X below.
1956 Output:
1957 Return a vector variable, initialized with the first VF values of
1958 the induction variable. E.g., for an iv with initial value X and
1959 evolution (step) S, for a vector of 4 units, we want to return:
1960 [X, X + S, X + 2*S, X + 3*S]. */
1962 static tree
1963 get_initial_def_for_induction (gimple iv_phi)
1965 stmt_vec_info stmt_vinfo = vinfo_for_stmt (iv_phi);
1966 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
1967 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1968 tree scalar_type = TREE_TYPE (gimple_phi_result (iv_phi));
1969 tree vectype;
1970 int nunits;
1971 edge pe = loop_preheader_edge (loop);
1972 struct loop *iv_loop;
1973 basic_block new_bb;
1974 tree vec, vec_init, vec_step, t;
1975 tree access_fn;
1976 tree new_var;
1977 tree new_name;
1978 gimple init_stmt, induction_phi, new_stmt;
1979 tree induc_def, vec_def, vec_dest;
1980 tree init_expr, step_expr;
1981 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1982 int i;
1983 bool ok;
1984 int ncopies;
1985 tree expr;
1986 stmt_vec_info phi_info = vinfo_for_stmt (iv_phi);
1987 bool nested_in_vect_loop = false;
1988 gimple_seq stmts = NULL;
1989 imm_use_iterator imm_iter;
1990 use_operand_p use_p;
1991 gimple exit_phi;
1992 edge latch_e;
1993 tree loop_arg;
1994 gimple_stmt_iterator si;
1995 basic_block bb = gimple_bb (iv_phi);
1997 vectype = get_vectype_for_scalar_type (scalar_type);
1998 gcc_assert (vectype);
1999 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2000 ncopies = vf / nunits;
2002 gcc_assert (phi_info);
2003 gcc_assert (ncopies >= 1);
2005 /* Find the first insertion point in the BB. */
2006 si = gsi_after_labels (bb);
2008 if (INTEGRAL_TYPE_P (scalar_type) || POINTER_TYPE_P (scalar_type))
2009 step_expr = build_int_cst (scalar_type, 0);
2010 else
2011 step_expr = build_real (scalar_type, dconst0);
2013 /* Is phi in an inner-loop, while vectorizing an enclosing outer-loop? */
2014 if (nested_in_vect_loop_p (loop, iv_phi))
2016 nested_in_vect_loop = true;
2017 iv_loop = loop->inner;
2019 else
2020 iv_loop = loop;
2021 gcc_assert (iv_loop == (gimple_bb (iv_phi))->loop_father);
2023 latch_e = loop_latch_edge (iv_loop);
2024 loop_arg = PHI_ARG_DEF_FROM_EDGE (iv_phi, latch_e);
2026 access_fn = analyze_scalar_evolution (iv_loop, PHI_RESULT (iv_phi));
2027 gcc_assert (access_fn);
2028 ok = vect_is_simple_iv_evolution (iv_loop->num, access_fn,
2029 &init_expr, &step_expr);
2030 gcc_assert (ok);
2031 pe = loop_preheader_edge (iv_loop);
2033 /* Create the vector that holds the initial_value of the induction. */
2034 if (nested_in_vect_loop)
2036 /* iv_loop is nested in the loop to be vectorized. init_expr has already
2037 been created during vectorization of previous stmts; we obtain it from
2038 the STMT_VINFO_VEC_STMT of the defining stmt. */
2039 tree iv_def = PHI_ARG_DEF_FROM_EDGE (iv_phi, loop_preheader_edge (iv_loop));
2040 vec_init = vect_get_vec_def_for_operand (iv_def, iv_phi, NULL);
2042 else
2044 /* iv_loop is the loop to be vectorized. Create:
2045 vec_init = [X, X+S, X+2*S, X+3*S] (S = step_expr, X = init_expr) */
2046 new_var = vect_get_new_vect_var (scalar_type, vect_scalar_var, "var_");
2047 add_referenced_var (new_var);
2049 new_name = force_gimple_operand (init_expr, &stmts, false, new_var);
2050 if (stmts)
2052 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
2053 gcc_assert (!new_bb);
2056 t = NULL_TREE;
2057 t = tree_cons (NULL_TREE, init_expr, t);
2058 for (i = 1; i < nunits; i++)
2060 /* Create: new_name_i = new_name + step_expr */
2061 enum tree_code code = POINTER_TYPE_P (scalar_type)
2062 ? POINTER_PLUS_EXPR : PLUS_EXPR;
2063 init_stmt = gimple_build_assign_with_ops (code, new_var,
2064 new_name, step_expr);
2065 new_name = make_ssa_name (new_var, init_stmt);
2066 gimple_assign_set_lhs (init_stmt, new_name);
2068 new_bb = gsi_insert_on_edge_immediate (pe, init_stmt);
2069 gcc_assert (!new_bb);
2071 if (vect_print_dump_info (REPORT_DETAILS))
2073 fprintf (vect_dump, "created new init_stmt: ");
2074 print_gimple_stmt (vect_dump, init_stmt, 0, TDF_SLIM);
2076 t = tree_cons (NULL_TREE, new_name, t);
2078 /* Create a vector from [new_name_0, new_name_1, ..., new_name_nunits-1] */
2079 vec = build_constructor_from_list (vectype, nreverse (t));
2080 vec_init = vect_init_vector (iv_phi, vec, vectype, NULL);
2084 /* Create the vector that holds the step of the induction. */
2085 if (nested_in_vect_loop)
2086 /* iv_loop is nested in the loop to be vectorized. Generate:
2087 vec_step = [S, S, S, S] */
2088 new_name = step_expr;
2089 else
2091 /* iv_loop is the loop to be vectorized. Generate:
2092 vec_step = [VF*S, VF*S, VF*S, VF*S] */
2093 expr = build_int_cst (scalar_type, vf);
2094 new_name = fold_build2 (MULT_EXPR, scalar_type, expr, step_expr);
2097 t = NULL_TREE;
2098 for (i = 0; i < nunits; i++)
2099 t = tree_cons (NULL_TREE, unshare_expr (new_name), t);
2100 gcc_assert (CONSTANT_CLASS_P (new_name));
2101 vec = build_vector (vectype, t);
2102 vec_step = vect_init_vector (iv_phi, vec, vectype, NULL);
2105 /* Create the following def-use cycle:
2106 loop prolog:
2107 vec_init = ...
2108 vec_step = ...
2109 loop:
2110 vec_iv = PHI <vec_init, vec_loop>
2112 STMT
2114 vec_loop = vec_iv + vec_step; */
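/* For example (a sketch with illustrative values X = 0, S = 1, VF = 4,
   nunits = 4, hence ncopies = 1):
   loop prolog:
     vec_init = {0, 1, 2, 3}
     vec_step = {4, 4, 4, 4}
   loop:
     vec_iv = PHI <vec_init, vec_loop>
     ...
     vec_loop = vec_iv + vec_step;
   so vec_iv holds {4j, 4j+1, 4j+2, 4j+3} in vector iteration j.  */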
2116 /* Create the induction-phi that defines the induction-operand. */
2117 vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_");
2118 add_referenced_var (vec_dest);
2119 induction_phi = create_phi_node (vec_dest, iv_loop->header);
2120 set_vinfo_for_stmt (induction_phi,
2121 new_stmt_vec_info (induction_phi, loop_vinfo));
2122 induc_def = PHI_RESULT (induction_phi);
2124 /* Create the iv update inside the loop */
2125 new_stmt = gimple_build_assign_with_ops (PLUS_EXPR, vec_dest,
2126 induc_def, vec_step);
2127 vec_def = make_ssa_name (vec_dest, new_stmt);
2128 gimple_assign_set_lhs (new_stmt, vec_def);
2129 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
2130 set_vinfo_for_stmt (new_stmt, new_stmt_vec_info (new_stmt, loop_vinfo));
2132 /* Set the arguments of the phi node: */
2133 add_phi_arg (induction_phi, vec_init, pe);
2134 add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop));
2137 /* In case the vectorization factor (VF) is bigger than the number
2138 of elements that we can fit in a vectype (nunits), we have to generate
2139 more than one vector stmt - i.e - we need to "unroll" the
2140 vector stmt by a factor VF/nunits. For more details see documentation
2141 in vectorizable_operation. */
2143 if (ncopies > 1)
2145 stmt_vec_info prev_stmt_vinfo;
2146 /* FORNOW. This restriction should be relaxed. */
2147 gcc_assert (!nested_in_vect_loop);
2149 /* Create the vector that holds the step of the induction. */
2150 expr = build_int_cst (scalar_type, nunits);
2151 new_name = fold_build2 (MULT_EXPR, scalar_type, expr, step_expr);
2152 t = NULL_TREE;
2153 for (i = 0; i < nunits; i++)
2154 t = tree_cons (NULL_TREE, unshare_expr (new_name), t);
2155 gcc_assert (CONSTANT_CLASS_P (new_name));
2156 vec = build_vector (vectype, t);
2157 vec_step = vect_init_vector (iv_phi, vec, vectype, NULL);
2159 vec_def = induc_def;
2160 prev_stmt_vinfo = vinfo_for_stmt (induction_phi);
2161 for (i = 1; i < ncopies; i++)
2163 /* vec_i = vec_prev + vec_step */
2164 new_stmt = gimple_build_assign_with_ops (PLUS_EXPR, vec_dest,
2165 vec_def, vec_step);
2166 vec_def = make_ssa_name (vec_dest, new_stmt);
2167 gimple_assign_set_lhs (new_stmt, vec_def);
2169 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
2170 set_vinfo_for_stmt (new_stmt,
2171 new_stmt_vec_info (new_stmt, loop_vinfo));
2172 STMT_VINFO_RELATED_STMT (prev_stmt_vinfo) = new_stmt;
2173 prev_stmt_vinfo = vinfo_for_stmt (new_stmt);
2177 if (nested_in_vect_loop)
2179 /* Find the loop-closed exit-phi of the induction, and record
2180 the final vector of induction results: */
2181 exit_phi = NULL;
2182 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
2184 if (!flow_bb_inside_loop_p (iv_loop, gimple_bb (USE_STMT (use_p))))
2186 exit_phi = USE_STMT (use_p);
2187 break;
2190 if (exit_phi)
2192 stmt_vec_info stmt_vinfo = vinfo_for_stmt (exit_phi);
2193 /* FORNOW. Currently not supporting the case that an inner-loop induction
2194 is not used in the outer-loop (i.e. only outside the outer-loop). */
2195 gcc_assert (STMT_VINFO_RELEVANT_P (stmt_vinfo)
2196 && !STMT_VINFO_LIVE_P (stmt_vinfo));
2198 STMT_VINFO_VEC_STMT (stmt_vinfo) = new_stmt;
2199 if (vect_print_dump_info (REPORT_DETAILS))
2201 fprintf (vect_dump, "vector of inductions after inner-loop:");
2202 print_gimple_stmt (vect_dump, new_stmt, 0, TDF_SLIM);
2208 if (vect_print_dump_info (REPORT_DETAILS))
2210 fprintf (vect_dump, "transform induction: created def-use cycle: ");
2211 print_gimple_stmt (vect_dump, induction_phi, 0, TDF_SLIM);
2212 fprintf (vect_dump, "\n");
2213 print_gimple_stmt (vect_dump, SSA_NAME_DEF_STMT (vec_def), 0, TDF_SLIM);
2216 STMT_VINFO_VEC_STMT (phi_info) = induction_phi;
2217 return induc_def;
2221 /* Function get_initial_def_for_reduction
2223 Input:
2224 STMT - a stmt that performs a reduction operation in the loop.
2225 INIT_VAL - the initial value of the reduction variable
2227 Output:
2228 ADJUSTMENT_DEF - a tree that holds a value to be added to the final result
2229 of the reduction (used for adjusting the epilog - see below).
2230 Return a vector variable, initialized according to the operation that STMT
2231 performs. This vector will be used as the initial value of the
2232 vector of partial results.
2234 Option1 (adjust in epilog): Initialize the vector as follows:
2235 add: [0,0,...,0,0]
2236 mult: [1,1,...,1,1]
2237 min/max: [init_val,init_val,..,init_val,init_val]
2238 bit and/or: [init_val,init_val,..,init_val,init_val]
2239 and when necessary (e.g. add/mult case) let the caller know
2240 that it needs to adjust the result by init_val.
2242 Option2: Initialize the vector as follows:
2243 add: [0,0,...,0,init_val]
2244 mult: [1,1,...,1,init_val]
2245 min/max: [init_val,init_val,...,init_val]
2246 bit and/or: [init_val,init_val,...,init_val]
2247 and no adjustments are needed.
2249 For example, for the following code:
2251 s = init_val;
2252 for (i=0;i<n;i++)
2253 s = s + a[i];
2255 STMT is 's = s + a[i]', and the reduction variable is 's'.
2256 For a vector of 4 units, we want to return either [0,0,0,init_val],
2257 or [0,0,0,0] and let the caller know that it needs to adjust
2258 the result at the end by 'init_val'.
2260 FORNOW, we are using the 'adjust in epilog' scheme, because this way the
2261 initialization vector is simpler (same element in all entries).
2262 A cost model should help decide between these two schemes. */
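/* For instance, under the 'adjust in epilog' scheme, for the summation
   example above with init_val = 5 (an illustrative value) and a vector
   of 4 units: the returned vector is [0,0,0,0], *ADJUSTMENT_DEF is set
   to 5, and the epilog (see vect_create_epilog_for_reduction) reduces
   the four partial sums and adds 5 back at the end.  */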
2264 tree
2265 get_initial_def_for_reduction (gimple stmt, tree init_val, tree *adjustment_def)
2267 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
2268 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
2269 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2270 tree scalar_type = TREE_TYPE (init_val);
2271 tree vectype = get_vectype_for_scalar_type (scalar_type);
2272 int nunits;
2273 enum tree_code code = gimple_assign_rhs_code (stmt);
2274 tree def_for_init;
2275 tree init_def;
2276 tree t = NULL_TREE;
2277 int i;
2278 bool nested_in_vect_loop = false;
2280 gcc_assert (vectype);
2281 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2283 gcc_assert (POINTER_TYPE_P (scalar_type) || INTEGRAL_TYPE_P (scalar_type)
2284 || SCALAR_FLOAT_TYPE_P (scalar_type));
2285 if (nested_in_vect_loop_p (loop, stmt))
2286 nested_in_vect_loop = true;
2287 else
2288 gcc_assert (loop == (gimple_bb (stmt))->loop_father);
2290 switch (code)
2292 case WIDEN_SUM_EXPR:
2293 case DOT_PROD_EXPR:
2294 case PLUS_EXPR:
2295 if (nested_in_vect_loop)
2296 *adjustment_def = vect_get_vec_def_for_operand (init_val, stmt, NULL);
2297 else
2298 *adjustment_def = init_val;
2299 /* Create a vector of zeros for init_def. */
2300 if (SCALAR_FLOAT_TYPE_P (scalar_type))
2301 def_for_init = build_real (scalar_type, dconst0);
2302 else
2303 def_for_init = build_int_cst (scalar_type, 0);
2305 for (i = nunits - 1; i >= 0; --i)
2306 t = tree_cons (NULL_TREE, def_for_init, t);
2307 init_def = build_vector (vectype, t);
2308 break;
2310 case MIN_EXPR:
2311 case MAX_EXPR:
2312 *adjustment_def = NULL_TREE;
2313 init_def = vect_get_vec_def_for_operand (init_val, stmt, NULL);
2314 break;
2316 default:
2317 gcc_unreachable ();
2320 return init_def;
2324 /* Function vect_create_epilog_for_reduction
2326 Create code at the loop-epilog to finalize the result of a reduction
2327 computation.
2329 VECT_DEF is a vector of partial results.
2330 REDUC_CODE is the tree-code for the epilog reduction.
2331 NCOPIES is > 1 in case the vectorization factor (VF) is bigger than the
2332 number of elements that we can fit in a vectype (nunits). In this case
2333 we have to generate more than one vector stmt - i.e - we need to "unroll"
2334 the vector stmt by a factor VF/nunits. For more details see documentation
2335 in vectorizable_operation.
2336 STMT is the scalar reduction stmt that is being vectorized.
2337 REDUCTION_PHI is the phi-node that carries the reduction computation.
2339 This function:
2340 1. Creates the reduction def-use cycle: sets the arguments for
2341 REDUCTION_PHI:
2342 The loop-entry argument is the vectorized initial-value of the reduction.
2343 The loop-latch argument is VECT_DEF - the vector of partial sums.
2344 2. "Reduces" the vector of partial results VECT_DEF into a single result,
2345 by applying the operation specified by REDUC_CODE if available, or by
2346 other means (whole-vector shifts or a scalar loop).
2347 The function also creates a new phi node at the loop exit to preserve
2348 loop-closed form, as illustrated below.
2350 The flow at the entry to this function:
2352 loop:
2353 vec_def = phi <null, null> # REDUCTION_PHI
2354 VECT_DEF = vector_stmt # vectorized form of STMT
2355 s_loop = scalar_stmt # (scalar) STMT
2356 loop_exit:
2357 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
2358 use <s_out0>
2359 use <s_out0>
2361 The above is transformed by this function into:
2363 loop:
2364 vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI
2365 VECT_DEF = vector_stmt # vectorized form of STMT
2366 s_loop = scalar_stmt # (scalar) STMT
2367 loop_exit:
2368 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
2369 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
2370 v_out2 = reduce <v_out1>
2371 s_out3 = extract_field <v_out2, 0>
2372 s_out4 = adjust_result <s_out3>
2373 use <s_out4>
2374 use <s_out4>
2377 static void
2378 vect_create_epilog_for_reduction (tree vect_def, gimple stmt,
2379 int ncopies,
2380 enum tree_code reduc_code,
2381 gimple reduction_phi)
2383 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2384 stmt_vec_info prev_phi_info;
2385 tree vectype;
2386 enum machine_mode mode;
2387 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2388 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2389 basic_block exit_bb;
2390 tree scalar_dest;
2391 tree scalar_type;
2392 gimple new_phi = NULL, phi;
2393 gimple_stmt_iterator exit_gsi;
2394 tree vec_dest;
2395 tree new_temp = NULL_TREE;
2396 tree new_name;
2397 gimple epilog_stmt = NULL;
2398 tree new_scalar_dest, new_dest;
2399 gimple exit_phi;
2400 tree bitsize, bitpos, bytesize;
2401 enum tree_code code = gimple_assign_rhs_code (stmt);
2402 tree adjustment_def;
2403 tree vec_initial_def, def;
2404 tree orig_name;
2405 imm_use_iterator imm_iter;
2406 use_operand_p use_p;
2407 bool extract_scalar_result = false;
2408 tree reduction_op, expr;
2409 gimple orig_stmt;
2410 gimple use_stmt;
2411 bool nested_in_vect_loop = false;
2412 VEC(gimple,heap) *phis = NULL;
2413 enum vect_def_type dt = vect_unknown_def_type;
2414 int j, i;
2416 if (nested_in_vect_loop_p (loop, stmt))
2418 loop = loop->inner;
2419 nested_in_vect_loop = true;
2422 switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
2424 case GIMPLE_SINGLE_RHS:
2425 gcc_assert (TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt)) == ternary_op);
2426 reduction_op = TREE_OPERAND (gimple_assign_rhs1 (stmt), 2);
2427 break;
2428 case GIMPLE_UNARY_RHS:
2429 reduction_op = gimple_assign_rhs1 (stmt);
2430 break;
2431 case GIMPLE_BINARY_RHS:
2432 reduction_op = gimple_assign_rhs2 (stmt);
2433 break;
2434 default:
2435 gcc_unreachable ();
2438 vectype = get_vectype_for_scalar_type (TREE_TYPE (reduction_op));
2439 gcc_assert (vectype);
2440 mode = TYPE_MODE (vectype);
2442 /*** 1. Create the reduction def-use cycle ***/
2444 /* For the case of reduction, vect_get_vec_def_for_operand returns
2445 the scalar def before the loop, that defines the initial value
2446 of the reduction variable. */
2447 vec_initial_def = vect_get_vec_def_for_operand (reduction_op, stmt,
2448 &adjustment_def);
2450 phi = reduction_phi;
2451 def = vect_def;
2452 for (j = 0; j < ncopies; j++)
2454 /* 1.1 set the loop-entry arg of the reduction-phi: */
2455 add_phi_arg (phi, vec_initial_def, loop_preheader_edge (loop));
2457 /* 1.2 set the loop-latch arg for the reduction-phi: */
2458 if (j > 0)
2459 def = vect_get_vec_def_for_stmt_copy (dt, def);
2460 add_phi_arg (phi, def, loop_latch_edge (loop));
2462 if (vect_print_dump_info (REPORT_DETAILS))
2464 fprintf (vect_dump, "transform reduction: created def-use cycle: ");
2465 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
2466 fprintf (vect_dump, "\n");
2467 print_gimple_stmt (vect_dump, SSA_NAME_DEF_STMT (def), 0, TDF_SLIM);
2470 phi = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi));
2473 /*** 2. Create epilog code
2474 The reduction epilog code operates across the elements of the vector
2475 of partial results computed by the vectorized loop.
2476 The reduction epilog code consists of:
2477 step 1: compute the scalar result in a vector (v_out2)
2478 step 2: extract the scalar result (s_out3) from the vector (v_out2)
2479 step 3: adjust the scalar result (s_out3) if needed.
2481 Step 1 can be accomplished using one of the following three schemes:
2482 (scheme 1) using reduc_code, if available.
2483 (scheme 2) using whole-vector shifts, if available.
2484 (scheme 3) using a scalar loop. In this case steps 1+2 above are
2485 combined.
2487 The overall epilog code looks like this:
2489 s_out0 = phi <s_loop> # original EXIT_PHI
2490 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
2491 v_out2 = reduce <v_out1> # step 1
2492 s_out3 = extract_field <v_out2, 0> # step 2
2493 s_out4 = adjust_result <s_out3> # step 3
2495 (step 3 is optional, and steps 1 and 2 may be combined).
2496 Lastly, the uses of s_out0 are replaced by s_out4.
2498 ***/
2500 /* 2.1 Create new loop-exit-phi to preserve loop-closed form:
2501 v_out1 = phi <v_loop> */
2503 exit_bb = single_exit (loop)->dest;
2504 def = vect_def;
2505 prev_phi_info = NULL;
2506 for (j = 0; j < ncopies; j++)
2508 phi = create_phi_node (SSA_NAME_VAR (vect_def), exit_bb);
2509 set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, loop_vinfo));
2510 if (j == 0)
2511 new_phi = phi;
2512 else
2514 def = vect_get_vec_def_for_stmt_copy (dt, def);
2515 STMT_VINFO_RELATED_STMT (prev_phi_info) = phi;
2517 SET_PHI_ARG_DEF (phi, single_exit (loop)->dest_idx, def);
2518 prev_phi_info = vinfo_for_stmt (phi);
2520 exit_gsi = gsi_after_labels (exit_bb);
2522 /* 2.2 Get the relevant tree-code to use in the epilog for schemes 2,3
2523 (i.e. when reduc_code is not available) and in the final adjustment
2524 code (if needed). Also get the original scalar reduction variable as
2525 defined in the loop. In case STMT is a "pattern-stmt" (i.e. - it
2526 represents a reduction pattern), the tree-code and scalar-def are
2527 taken from the original stmt that the pattern-stmt (STMT) replaces.
2528 Otherwise (it is a regular reduction) - the tree-code and scalar-def
2529 are taken from STMT. */
2531 orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
2532 if (!orig_stmt)
2534 /* Regular reduction */
2535 orig_stmt = stmt;
2537 else
2539 /* Reduction pattern */
2540 stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt);
2541 gcc_assert (STMT_VINFO_IN_PATTERN_P (stmt_vinfo));
2542 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo) == stmt);
2544 code = gimple_assign_rhs_code (orig_stmt);
2545 scalar_dest = gimple_assign_lhs (orig_stmt);
2546 scalar_type = TREE_TYPE (scalar_dest);
2547 new_scalar_dest = vect_create_destination_var (scalar_dest, NULL);
2548 bitsize = TYPE_SIZE (scalar_type);
2549 bytesize = TYPE_SIZE_UNIT (scalar_type);
2552 /* In case this is a reduction in an inner-loop while vectorizing an outer
2553 loop - we don't need to extract a single scalar result at the end of the
2554 inner-loop. The final vector of partial results will be used in the
2555 vectorized outer-loop, or reduced to a scalar result at the end of the
2556 outer-loop. */
2557 if (nested_in_vect_loop)
2558 goto vect_finalize_reduction;
2560 /* FORNOW */
2561 gcc_assert (ncopies == 1);
2563 /* 2.3 Create the reduction code, using one of the three schemes described
2564 above. */
2566 if (reduc_code < NUM_TREE_CODES)
2568 tree tmp;
2570 /*** Case 1: Create:
2571 v_out2 = reduc_expr <v_out1> */
2573 if (vect_print_dump_info (REPORT_DETAILS))
2574 fprintf (vect_dump, "Reduce using direct vector reduction.");
2576 vec_dest = vect_create_destination_var (scalar_dest, vectype);
2577 tmp = build1 (reduc_code, vectype, PHI_RESULT (new_phi));
2578 epilog_stmt = gimple_build_assign (vec_dest, tmp);
2579 new_temp = make_ssa_name (vec_dest, epilog_stmt);
2580 gimple_assign_set_lhs (epilog_stmt, new_temp);
2581 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
2583 extract_scalar_result = true;
2585 else
2587 enum tree_code shift_code = 0;
2588 bool have_whole_vector_shift = true;
2589 int bit_offset;
2590 int element_bitsize = tree_low_cst (bitsize, 1);
2591 int vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1);
2592 tree vec_temp;
2594 if (optab_handler (vec_shr_optab, mode)->insn_code != CODE_FOR_nothing)
2595 shift_code = VEC_RSHIFT_EXPR;
2596 else
2597 have_whole_vector_shift = false;
2599 /* Regardless of whether we have a whole vector shift, if we're
2600 emulating the operation via tree-vect-generic, we don't want
2601 to use it. Only the first round of the reduction is likely
2602 to still be profitable via emulation. */
2603 /* ??? It might be better to emit a reduction tree code here, so that
2604 tree-vect-generic can expand the first round via bit tricks. */
2605 if (!VECTOR_MODE_P (mode))
2606 have_whole_vector_shift = false;
2607 else
2609 optab optab = optab_for_tree_code (code, vectype, optab_default);
2610 if (optab_handler (optab, mode)->insn_code == CODE_FOR_nothing)
2611 have_whole_vector_shift = false;
2614 if (have_whole_vector_shift)
2616 /*** Case 2: Create:
2617 for (offset = VS/2; offset >= element_size; offset/=2)
2619 Create: va' = vec_shift <va, offset>
2620 Create: va = vop <va, va'>
2621 } */
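/* For example, summing a vector of 4 elements va = {a0, a1, a2, a3}
   (a sketch; which lane ends up holding the result depends on the
   target's lane ordering, cf. the BYTES_BIG_ENDIAN handling below):
     va' = vec_shift <va, VS/2>    va' = {a2, a3, _, _ }
     va  = va + va'                va  = {a0+a2, a1+a3, _, _ }
     va' = vec_shift <va, VS/4>    va' = {a1+a3, _, _, _ }
     va  = va + va'                va  = {a0+a1+a2+a3, _, _, _ }
   i.e. exact_log2 (4) = 2 shift/op rounds, matching the cost model in
   vect_model_reduction_cost.  */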
2623 if (vect_print_dump_info (REPORT_DETAILS))
2624 fprintf (vect_dump, "Reduce using vector shifts");
2626 vec_dest = vect_create_destination_var (scalar_dest, vectype);
2627 new_temp = PHI_RESULT (new_phi);
2629 for (bit_offset = vec_size_in_bits/2;
2630 bit_offset >= element_bitsize;
2631 bit_offset /= 2)
2633 tree bitpos = size_int (bit_offset);
2634 epilog_stmt = gimple_build_assign_with_ops (shift_code, vec_dest,
2635 new_temp, bitpos);
2636 new_name = make_ssa_name (vec_dest, epilog_stmt);
2637 gimple_assign_set_lhs (epilog_stmt, new_name);
2638 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
2640 epilog_stmt = gimple_build_assign_with_ops (code, vec_dest,
2641 new_name, new_temp);
2642 new_temp = make_ssa_name (vec_dest, epilog_stmt);
2643 gimple_assign_set_lhs (epilog_stmt, new_temp);
2644 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
2647 extract_scalar_result = true;
2649 else
2651 tree rhs;
2653 /*** Case 3: Create:
2654 s = extract_field <v_out2, 0>
2655 for (offset = element_size;
2656 offset < vector_size;
2657 offset += element_size)
2659 Create: s' = extract_field <v_out2, offset>
2660 Create: s = op <s, s'>
2661 } */
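/* For example, for a vector of 4 ints (assuming 32-bit elements for
   illustration), the generated epilog is along the lines of:
     s  = BIT_FIELD_REF <v_out2, 32, 0>;
     s1 = BIT_FIELD_REF <v_out2, 32, 32>;  s = s op s1;
     s2 = BIT_FIELD_REF <v_out2, 32, 64>;  s = s op s2;
     s3 = BIT_FIELD_REF <v_out2, 32, 96>;  s = s op s3;
   i.e. nelements extracts and nelements - 1 scalar ops, as accounted
   for in vect_model_reduction_cost.  */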
2663 if (vect_print_dump_info (REPORT_DETAILS))
2664 fprintf (vect_dump, "Reduce using scalar code. ");
2666 vec_temp = PHI_RESULT (new_phi);
2667 vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1);
2668 rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp, bitsize,
2669 bitsize_zero_node);
2670 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
2671 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
2672 gimple_assign_set_lhs (epilog_stmt, new_temp);
2673 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
2675 for (bit_offset = element_bitsize;
2676 bit_offset < vec_size_in_bits;
2677 bit_offset += element_bitsize)
2679 tree bitpos = bitsize_int (bit_offset);
2680 tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp, bitsize,
2681 bitpos);
2683 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
2684 new_name = make_ssa_name (new_scalar_dest, epilog_stmt);
2685 gimple_assign_set_lhs (epilog_stmt, new_name);
2686 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
2688 epilog_stmt = gimple_build_assign_with_ops (code,
2689 new_scalar_dest,
2690 new_name, new_temp);
2691 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
2692 gimple_assign_set_lhs (epilog_stmt, new_temp);
2693 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
2696 extract_scalar_result = false;
2700 /* 2.4 Extract the final scalar result. Create:
2701 s_out3 = extract_field <v_out2, bitpos> */
2703 if (extract_scalar_result)
2705 tree rhs;
2707 gcc_assert (!nested_in_vect_loop);
2708 if (vect_print_dump_info (REPORT_DETAILS))
2709 fprintf (vect_dump, "extract scalar result");
2711 if (BYTES_BIG_ENDIAN)
2712 bitpos = size_binop (MULT_EXPR,
2713 bitsize_int (TYPE_VECTOR_SUBPARTS (vectype) - 1),
2714 TYPE_SIZE (scalar_type));
2715 else
2716 bitpos = bitsize_zero_node;
2718 rhs = build3 (BIT_FIELD_REF, scalar_type, new_temp, bitsize, bitpos);
2719 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
2720 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
2721 gimple_assign_set_lhs (epilog_stmt, new_temp);
2722 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
2725 vect_finalize_reduction:
2727 /* 2.5 Adjust the final result by the initial value of the reduction
2728 variable. (When no such adjustment is needed, 'adjustment_def'
2729 is zero.) For example, if code is PLUS we create:
2730 new_temp = loop_exit_def + adjustment_def */
2732 if (adjustment_def)
2734 if (nested_in_vect_loop)
2736 gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) == VECTOR_TYPE);
2737 expr = build2 (code, vectype, PHI_RESULT (new_phi), adjustment_def);
2738 new_dest = vect_create_destination_var (scalar_dest, vectype);
2740 else
2742 gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) != VECTOR_TYPE);
2743 expr = build2 (code, scalar_type, new_temp, adjustment_def);
2744 new_dest = vect_create_destination_var (scalar_dest, scalar_type);
2746 epilog_stmt = gimple_build_assign (new_dest, expr);
2747 new_temp = make_ssa_name (new_dest, epilog_stmt);
2748 gimple_assign_set_lhs (epilog_stmt, new_temp);
2749 SSA_NAME_DEF_STMT (new_temp) = epilog_stmt;
2750 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
2754 /* 2.6 Handle the loop-exit phi */
2756 /* Replace uses of s_out0 with uses of s_out3:
2757 Find the loop-closed-use at the loop exit of the original scalar result.
2758 (The reduction result is expected to have two immediate uses - one at the
2759 latch block, and one at the loop exit). */
2760 phis = VEC_alloc (gimple, heap, 10);
2761 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
2763 if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p))))
2765 exit_phi = USE_STMT (use_p);
2766 VEC_quick_push (gimple, phis, exit_phi);
2769 /* We expect to have found an exit_phi because of loop-closed-ssa form. */
2770 gcc_assert (!VEC_empty (gimple, phis));
2772 for (i = 0; VEC_iterate (gimple, phis, i, exit_phi); i++)
2774 if (nested_in_vect_loop)
2776 stmt_vec_info stmt_vinfo = vinfo_for_stmt (exit_phi);
2778 /* FORNOW. Currently not supporting the case that an inner-loop
2779 reduction is not used in the outer-loop (but only outside the
2780 outer-loop). */
2781 gcc_assert (STMT_VINFO_RELEVANT_P (stmt_vinfo)
2782 && !STMT_VINFO_LIVE_P (stmt_vinfo));
2784 epilog_stmt = adjustment_def ? epilog_stmt : new_phi;
2785 STMT_VINFO_VEC_STMT (stmt_vinfo) = epilog_stmt;
2786 set_vinfo_for_stmt (epilog_stmt,
2787 new_stmt_vec_info (epilog_stmt, loop_vinfo));
2788 if (adjustment_def)
2789 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (epilog_stmt)) =
2790 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (new_phi));
2791 continue;
2794 /* Replace the uses: */
2795 orig_name = PHI_RESULT (exit_phi);
2796 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
2797 FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
2798 SET_USE (use_p, new_temp);
2800 VEC_free (gimple, heap, phis);
2804 /* Function vectorizable_reduction.
2806 Check if STMT performs a reduction operation that can be vectorized.
2807 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2808 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2809 Return FALSE if not a vectorizable STMT, TRUE otherwise.
2811 This function also handles reduction idioms (patterns) that have been
2812 recognized in advance during vect_pattern_recog. In this case, STMT may be
2813 of this form:
2814 X = pattern_expr (arg0, arg1, ..., X)
2815 and its STMT_VINFO_RELATED_STMT points to the last stmt in the original
2816 sequence that had been detected and replaced by the pattern-stmt (STMT).
2818 In some cases of reduction patterns, the type of the reduction variable X is
2819 different from the type of the other arguments of STMT.
2820 In such cases, the vectype that is used when transforming STMT into a vector
2821 stmt is different from the vectype that is used to determine the
2822 vectorization factor, because it consists of a different number of elements
2823 than the actual number of elements that are being operated upon in parallel.
2825 For example, consider an accumulation of shorts into an int accumulator.
2826 On some targets it's possible to vectorize this pattern operating on 8
2827 shorts at a time (hence, the vectype for purposes of determining the
2828 vectorization factor should be V8HI); on the other hand, the vectype that
2829 is used to create the vector form is actually V4SI (the type of the result).
2831 Upon entry to this function, STMT_VINFO_VECTYPE records the vectype that
2832 indicates what is the actual level of parallelism (V8HI in the example), so
2833 that the right vectorization factor would be derived. This vectype
2834 corresponds to the type of arguments to the reduction stmt, and should *NOT*
2835 be used to create the vectorized stmt. The right vectype for the vectorized
2836 stmt is obtained from the type of the result X:
2837 get_vectype_for_scalar_type (TREE_TYPE (X))
2839 This means that, contrary to "regular" reductions (or "regular" stmts in
2840 general), the following equation:
2841 STMT_VINFO_VECTYPE == get_vectype_for_scalar_type (TREE_TYPE (X))
2842 does *NOT* necessarily hold for reduction patterns. */
2844 bool
2845 vectorizable_reduction (gimple stmt, gimple_stmt_iterator *gsi,
2846 gimple *vec_stmt)
2848 tree vec_dest;
2849 tree scalar_dest;
2850 tree loop_vec_def0 = NULL_TREE, loop_vec_def1 = NULL_TREE;
2851 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2852 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2853 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2854 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2855 enum tree_code code, orig_code, epilog_reduc_code = 0;
2856 enum machine_mode vec_mode;
2857 int op_type;
2858 optab optab, reduc_optab;
2859 tree new_temp = NULL_TREE;
2860 tree def;
2861 gimple def_stmt;
2862 enum vect_def_type dt;
2863 gimple new_phi = NULL;
2864 tree scalar_type;
2865 bool is_simple_use;
2866 gimple orig_stmt;
2867 stmt_vec_info orig_stmt_info;
2868 tree expr = NULL_TREE;
2869 int i;
2870 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
2871 int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
2872 int epilog_copies;
2873 stmt_vec_info prev_stmt_info, prev_phi_info;
2874 gimple first_phi = NULL;
2875 bool single_defuse_cycle = false;
2876 tree reduc_def;
2877 gimple new_stmt = NULL;
2878 int j;
2879 tree ops[3];
2881 if (nested_in_vect_loop_p (loop, stmt))
2882 loop = loop->inner;
2884 gcc_assert (ncopies >= 1);
2886 /* FORNOW: SLP not supported. */
2887 if (STMT_SLP_TYPE (stmt_info))
2888 return false;
2890 /* 1. Is vectorizable reduction? */
2892 /* Not supportable if the reduction variable is used in the loop. */
2893 if (STMT_VINFO_RELEVANT (stmt_info) > vect_used_in_outer)
2894 return false;
2896 /* Reductions that are not used even in an enclosing outer-loop
2897 are expected to be "live" (used out of the loop). */
2898 if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_loop
2899 && !STMT_VINFO_LIVE_P (stmt_info))
2900 return false;
2902 /* Make sure it was already recognized as a reduction computation. */
2903 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_reduction_def)
2904 return false;
2906 /* 2. Has this been recognized as a reduction pattern?
2908 Check if STMT represents a pattern that has been recognized
2909 in earlier analysis stages. For stmts that represent a pattern,
2910 the STMT_VINFO_RELATED_STMT field records the last stmt in
2911 the original sequence that constitutes the pattern. */
2913 orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
2914 if (orig_stmt)
2916 orig_stmt_info = vinfo_for_stmt (orig_stmt);
2917 gcc_assert (STMT_VINFO_RELATED_STMT (orig_stmt_info) == stmt);
2918 gcc_assert (STMT_VINFO_IN_PATTERN_P (orig_stmt_info));
2919 gcc_assert (!STMT_VINFO_IN_PATTERN_P (stmt_info));
2922 /* 3. Check the operands of the operation. The first operands are defined
2923 inside the loop body. The last operand is the reduction variable,
2924 which is defined by the loop-header-phi. */
2926 gcc_assert (is_gimple_assign (stmt));
2928 /* Flatten RHS */
2929 switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
2931 case GIMPLE_SINGLE_RHS:
2932 op_type = TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt));
2933 if (op_type == ternary_op)
2935 tree rhs = gimple_assign_rhs1 (stmt);
2936 ops[0] = TREE_OPERAND (rhs, 0);
2937 ops[1] = TREE_OPERAND (rhs, 1);
2938 ops[2] = TREE_OPERAND (rhs, 2);
2939 code = TREE_CODE (rhs);
2941 else
2942 return false;
2943 break;
2945 case GIMPLE_BINARY_RHS:
2946 code = gimple_assign_rhs_code (stmt);
2947 op_type = TREE_CODE_LENGTH (code);
2948 gcc_assert (op_type == binary_op);
2949 ops[0] = gimple_assign_rhs1 (stmt);
2950 ops[1] = gimple_assign_rhs2 (stmt);
2951 break;
2953 case GIMPLE_UNARY_RHS:
2954 return false;
2956 default:
2957 gcc_unreachable ();
2960 scalar_dest = gimple_assign_lhs (stmt);
2961 scalar_type = TREE_TYPE (scalar_dest);
2962 if (!POINTER_TYPE_P (scalar_type) && !INTEGRAL_TYPE_P (scalar_type)
2963 && !SCALAR_FLOAT_TYPE_P (scalar_type))
2964 return false;
2966 /* All uses but the last are expected to be defined in the loop.
2967 The last use is the reduction variable. */
2968 for (i = 0; i < op_type-1; i++)
2970 is_simple_use = vect_is_simple_use (ops[i], loop_vinfo, &def_stmt,
2971 &def, &dt);
2972 gcc_assert (is_simple_use);
2973 if (dt != vect_loop_def
2974 && dt != vect_invariant_def
2975 && dt != vect_constant_def
2976 && dt != vect_induction_def)
2977 return false;
2980 is_simple_use = vect_is_simple_use (ops[i], loop_vinfo, &def_stmt, &def, &dt);
2981 gcc_assert (is_simple_use);
2982 gcc_assert (dt == vect_reduction_def);
2983 gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
2984 if (orig_stmt)
2985 gcc_assert (orig_stmt == vect_is_simple_reduction (loop_vinfo, def_stmt));
2986 else
2987 gcc_assert (stmt == vect_is_simple_reduction (loop_vinfo, def_stmt));
2989 if (STMT_VINFO_LIVE_P (vinfo_for_stmt (def_stmt)))
2990 return false;
2992 /* 4. Supportable by target? */
2994 /* 4.1. check support for the operation in the loop */
2995 optab = optab_for_tree_code (code, vectype, optab_default);
2996 if (!optab)
2998 if (vect_print_dump_info (REPORT_DETAILS))
2999 fprintf (vect_dump, "no optab.");
3000 return false;
3002 vec_mode = TYPE_MODE (vectype);
3003 if (optab_handler (optab, vec_mode)->insn_code == CODE_FOR_nothing)
3005 if (vect_print_dump_info (REPORT_DETAILS))
3006 fprintf (vect_dump, "op not supported by target.");
3007 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
3008 || LOOP_VINFO_VECT_FACTOR (loop_vinfo)
3009 < vect_min_worthwhile_factor (code))
3010 return false;
3011 if (vect_print_dump_info (REPORT_DETAILS))
3012 fprintf (vect_dump, "proceeding using word mode.");
3015 /* Worthwhile without SIMD support? */
3016 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
3017 && LOOP_VINFO_VECT_FACTOR (loop_vinfo)
3018 < vect_min_worthwhile_factor (code))
3020 if (vect_print_dump_info (REPORT_DETAILS))
3021 fprintf (vect_dump, "not worthwhile without SIMD support.");
3022 return false;
3025 /* 4.2. Check support for the epilog operation.
3027 If STMT represents a reduction pattern, then the type of the
3028 reduction variable may be different from the type of the rest
3029 of the arguments. For example, consider the case of accumulation
3030 of shorts into an int accumulator; The original code:
3031 S1: int_a = (int) short_a;
3032 orig_stmt-> S2: int_acc = plus <int_a ,int_acc>;
3034 was replaced with:
3035 STMT: int_acc = widen_sum <short_a, int_acc>
3037 This means that:
3038 1. The tree-code that is used to create the vector operation in the
3039 epilog code (that reduces the partial results) is not the
3040 tree-code of STMT, but is rather the tree-code of the original
3041 stmt from the pattern that STMT is replacing. I.e, in the example
3042 above we want to use 'widen_sum' in the loop, but 'plus' in the
3043 epilog.
3044 2. The type (mode) we use to check available target support
3045 for the vector operation to be created in the *epilog*, is
3046 determined by the type of the reduction variable (in the example
3047 above we'd check this: plus_optab[vect_int_mode]).
3048 However the type (mode) we use to check available target support
3049 for the vector operation to be created *inside the loop*, is
3050 determined by the type of the other arguments to STMT (in the
3051 example we'd check this: widen_sum_optab[vect_short_mode]).
3053 This is contrary to "regular" reductions, in which the types of all
3054 the arguments are the same as the type of the reduction variable.
3055 For "regular" reductions we can therefore use the same vector type
3056 (and also the same tree-code) when generating the epilog code and
3057 when generating the code inside the loop. */
3059 if (orig_stmt)
3061 /* This is a reduction pattern: get the vectype from the type of the
3062 reduction variable, and get the tree-code from orig_stmt. */
3063 orig_code = gimple_assign_rhs_code (orig_stmt);
3064 vectype = get_vectype_for_scalar_type (TREE_TYPE (def));
3065 if (!vectype)
3067 if (vect_print_dump_info (REPORT_DETAILS))
3069 fprintf (vect_dump, "unsupported data-type ");
3070 print_generic_expr (vect_dump, TREE_TYPE (def), TDF_SLIM);
3072 return false;
3075 vec_mode = TYPE_MODE (vectype);
3077 else
3079 /* Regular reduction: the same vectype and tree-code that are used for
3080 the vector code inside the loop can also be used for the epilog code. */
3081 orig_code = code;
3084 if (!reduction_code_for_scalar_code (orig_code, &epilog_reduc_code))
3085 return false;
3086 reduc_optab = optab_for_tree_code (epilog_reduc_code, vectype, optab_default);
3087 if (!reduc_optab)
3089 if (vect_print_dump_info (REPORT_DETAILS))
3090 fprintf (vect_dump, "no optab for reduction.");
3091 epilog_reduc_code = NUM_TREE_CODES;
3093 if (optab_handler (reduc_optab, vec_mode)->insn_code == CODE_FOR_nothing)
3095 if (vect_print_dump_info (REPORT_DETAILS))
3096 fprintf (vect_dump, "reduc op not supported by target.");
3097 epilog_reduc_code = NUM_TREE_CODES;
3100 if (!vec_stmt) /* transformation not required. */
3102 STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
3103 if (!vect_model_reduction_cost (stmt_info, epilog_reduc_code, ncopies))
3104 return false;
3105 return true;
3108 /** Transform. **/
3110 if (vect_print_dump_info (REPORT_DETAILS))
3111 fprintf (vect_dump, "transform reduction.");
3113 /* Create the destination vector */
3114 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3116 /* In case the vectorization factor (VF) is bigger than the number
3117 of elements that we can fit in a vectype (nunits), we have to generate
3118 more than one vector stmt - i.e - we need to "unroll" the
3119 vector stmt by a factor VF/nunits. For more details see documentation
3120 in vectorizable_operation. */
3122 /* If the reduction is used in an outer loop we need to generate
3123 VF intermediate results, like so (e.g. for ncopies=2):
3124 r0 = phi (init, r0)
3125 r1 = phi (init, r1)
3126 r0 = x0 + r0;
3127 r1 = x1 + r1;
3128 (i.e. we generate VF results in 2 registers).
3129 In this case we have a separate def-use cycle for each copy, and therefore
3130 for each copy we get the vector def for the reduction variable from the
3131 respective phi node created for this copy.
3133 Otherwise (the reduction is unused in the loop nest), we can combine
3134 together intermediate results, like so (e.g. for ncopies=2):
3135 r = phi (init, r)
3136 r = x0 + r;
3137 r = x1 + r;
3138 (i.e. we generate VF/2 results in a single register).
3139 In this case for each copy we get the vector def for the reduction variable
3140 from the vectorized reduction operation generated in the previous iteration.
3143 if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_loop)
3145 single_defuse_cycle = true;
3146 epilog_copies = 1;
3148 else
3149 epilog_copies = ncopies;
3151 prev_stmt_info = NULL;
3152 prev_phi_info = NULL;
3153 for (j = 0; j < ncopies; j++)
3155 if (j == 0 || !single_defuse_cycle)
3157 /* Create the reduction-phi that defines the reduction-operand. */
3158 new_phi = create_phi_node (vec_dest, loop->header);
3159 set_vinfo_for_stmt (new_phi, new_stmt_vec_info (new_phi, loop_vinfo));
3162 /* Handle uses. */
3163 if (j == 0)
3165 loop_vec_def0 = vect_get_vec_def_for_operand (ops[0], stmt, NULL);
3166 if (op_type == ternary_op)
3168 loop_vec_def1 = vect_get_vec_def_for_operand (ops[1], stmt, NULL);
3171 /* Get the vector def for the reduction variable from the phi node */
3172 reduc_def = PHI_RESULT (new_phi);
3173 first_phi = new_phi;
3175 else
3177 enum vect_def_type dt = vect_unknown_def_type; /* Dummy */
3178 loop_vec_def0 = vect_get_vec_def_for_stmt_copy (dt, loop_vec_def0);
3179 if (op_type == ternary_op)
3180 loop_vec_def1 = vect_get_vec_def_for_stmt_copy (dt, loop_vec_def1);
3182 if (single_defuse_cycle)
3183 reduc_def = gimple_assign_lhs (new_stmt);
3184 else
3185 reduc_def = PHI_RESULT (new_phi);
3187 STMT_VINFO_RELATED_STMT (prev_phi_info) = new_phi;
3190 /* Arguments are ready. create the new vector stmt. */
3191 if (op_type == binary_op)
3192 expr = build2 (code, vectype, loop_vec_def0, reduc_def);
3193 else
3194 expr = build3 (code, vectype, loop_vec_def0, loop_vec_def1,
3195 reduc_def);
3196 new_stmt = gimple_build_assign (vec_dest, expr);
3197 new_temp = make_ssa_name (vec_dest, new_stmt);
3198 gimple_assign_set_lhs (new_stmt, new_temp);
3199 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3201 if (j == 0)
3202 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3203 else
3204 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3205 prev_stmt_info = vinfo_for_stmt (new_stmt);
3206 prev_phi_info = vinfo_for_stmt (new_phi);
3209 /* Finalize the reduction-phi (set its arguments) and create the
3210 epilog reduction code. */
3211 if (!single_defuse_cycle)
3212 new_temp = gimple_assign_lhs (*vec_stmt);
3213 vect_create_epilog_for_reduction (new_temp, stmt, epilog_copies,
3214 epilog_reduc_code, first_phi);
3215 return true;
3218 /* Function vect_min_worthwhile_factor.
3220 For a loop where we could vectorize the operation indicated by CODE,
3221 return the minimum vectorization factor that makes it worthwhile
3222 to use generic vectors. */
3223 int
3224 vect_min_worthwhile_factor (enum tree_code code)
3226 switch (code)
3228 case PLUS_EXPR:
3229 case MINUS_EXPR:
3230 case NEGATE_EXPR:
3231 return 4;
3233 case BIT_AND_EXPR:
3234 case BIT_IOR_EXPR:
3235 case BIT_XOR_EXPR:
3236 case BIT_NOT_EXPR:
3237 return 2;
3239 default:
3240 return INT_MAX;
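/* E.g. for PLUS_EXPR this returns 4: as used by the checks in
   vectorizable_reduction (and the other vectorizable_* functions),
   emulating vector addition without real SIMD support is considered
   worthwhile only when at least 4 scalar operations are combined per
   vector operation.  */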
3245 /* Function vectorizable_induction
3247 Check if PHI performs an induction computation that can be vectorized.
3248 If VEC_STMT is also passed, vectorize the induction PHI: create a vectorized
3249 phi to replace it, put it in VEC_STMT, and add it to the same basic block.
3250 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3252 bool
3253 vectorizable_induction (gimple phi, gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
3254 gimple *vec_stmt)
3256 stmt_vec_info stmt_info = vinfo_for_stmt (phi);
3257 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3258 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3259 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
3260 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
3261 int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
3262 tree vec_def;
3264 gcc_assert (ncopies >= 1);
3265 /* FORNOW. This restriction should be relaxed. */
3266 if (nested_in_vect_loop_p (loop, phi) && ncopies > 1)
3268 if (vect_print_dump_info (REPORT_DETAILS))
3269 fprintf (vect_dump, "multiple types in nested loop.");
3270 return false;
3273 if (!STMT_VINFO_RELEVANT_P (stmt_info))
3274 return false;
3276 /* FORNOW: SLP not supported. */
3277 if (STMT_SLP_TYPE (stmt_info))
3278 return false;
3280 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def);
3282 if (gimple_code (phi) != GIMPLE_PHI)
3283 return false;
3285 if (!vec_stmt) /* transformation not required. */
3287 STMT_VINFO_TYPE (stmt_info) = induc_vec_info_type;
3288 if (vect_print_dump_info (REPORT_DETAILS))
3289 fprintf (vect_dump, "=== vectorizable_induction ===");
3290 vect_model_induction_cost (stmt_info, ncopies);
3291 return true;
3294 /** Transform. **/
3296 if (vect_print_dump_info (REPORT_DETAILS))
3297 fprintf (vect_dump, "transform induction phi.");
3299 vec_def = get_initial_def_for_induction (phi);
3300 *vec_stmt = SSA_NAME_DEF_STMT (vec_def);
3301 return true;
3304 /* Function vectorizable_live_operation.
3306 STMT computes a value that is used outside the loop. Check if
3307 it can be supported. */
3309 bool
3310 vectorizable_live_operation (gimple stmt,
3311 gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
3312 gimple *vec_stmt ATTRIBUTE_UNUSED)
3314 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3315 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3316 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
3317 int i;
3318 int op_type;
3319 tree op;
3320 tree def;
3321 gimple def_stmt;
3322 enum vect_def_type dt;
3323 enum tree_code code;
3324 enum gimple_rhs_class rhs_class;
3326 gcc_assert (STMT_VINFO_LIVE_P (stmt_info));
3328 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def)
3329 return false;
3331 if (!is_gimple_assign (stmt))
3332 return false;
3334 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
3335 return false;
3337 /* FORNOW. CHECKME. */
3338 if (nested_in_vect_loop_p (loop, stmt))
3339 return false;
3341 code = gimple_assign_rhs_code (stmt);
3342 op_type = TREE_CODE_LENGTH (code);
3343 rhs_class = get_gimple_rhs_class (code);
3344 gcc_assert (rhs_class != GIMPLE_UNARY_RHS || op_type == unary_op);
3345 gcc_assert (rhs_class != GIMPLE_BINARY_RHS || op_type == binary_op);
3347 /* FORNOW: support only if all uses are invariant. This means
3348 that the scalar operations can remain in place, unvectorized.
3349 The original last scalar value that they compute will be used. */
3351 for (i = 0; i < op_type; i++)
3353 if (rhs_class == GIMPLE_SINGLE_RHS)
3354 op = TREE_OPERAND (gimple_op (stmt, 1), i);
3355 else
3356 op = gimple_op (stmt, i + 1);
3357 if (op && !vect_is_simple_use (op, loop_vinfo, &def_stmt, &def, &dt))
3359 if (vect_print_dump_info (REPORT_DETAILS))
3360 fprintf (vect_dump, "use not simple.");
3361 return false;
3364 if (dt != vect_invariant_def && dt != vect_constant_def)
3365 return false;
3368 /* No transformation is required for the cases we currently support. */
3369 return true;
3372 /* Function vect_transform_loop.
3374 The analysis phase has determined that the loop is vectorizable.
3375 Vectorize the loop - create vectorized stmts to replace the scalar
3376 stmts in the loop, and update the loop exit condition. */
3378 void
3379 vect_transform_loop (loop_vec_info loop_vinfo)
3381 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
3382 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
3383 int nbbs = loop->num_nodes;
3384 gimple_stmt_iterator si;
3385 int i;
3386 tree ratio = NULL;
3387 int vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
3388 bool strided_store;
3389 bool slp_scheduled = false;
3390 unsigned int nunits;
3391 tree cond_expr = NULL_TREE;
3392 gimple_seq cond_expr_stmt_list = NULL;
3393 bool do_peeling_for_loop_bound;
3395 if (vect_print_dump_info (REPORT_DETAILS))
3396 fprintf (vect_dump, "=== vec_transform_loop ===");
3398 /* Peel the loop if there are data refs with unknown alignment.
3399 Only one data ref with unknown alignment is allowed. */
3401 if (LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo))
3402 vect_do_peeling_for_alignment (loop_vinfo);
3404 do_peeling_for_loop_bound
3405 = (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
3406 || (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
3407 && LOOP_VINFO_INT_NITERS (loop_vinfo) % vectorization_factor != 0));
3409 if (VEC_length (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo))
3410 || VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo)))
3411 vect_loop_versioning (loop_vinfo,
3412 !do_peeling_for_loop_bound,
3413 &cond_expr, &cond_expr_stmt_list);
3415 /* CHECKME: we wouldn't need this if we called update_ssa once
3416 for all loops. */
3417 bitmap_zero (vect_memsyms_to_rename);
3419 /* If the loop has a symbolic number of iterations 'n' (i.e. it's not a
3420 compile time constant), or it is a constant that doesn't divide by the
3421 vectorization factor, then an epilog loop needs to be created.
3422 We therefore duplicate the loop: the original loop will be vectorized,
3423 and will compute the first (n/VF) iterations. The second copy of the loop
3424 will remain scalar and will compute the remaining (n%VF) iterations.
3425 (VF is the vectorization factor). */
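/* E.g. with n = 103 and VF = 4 (illustrative numbers): the vectorized
   loop executes 103/4 = 25 iterations, covering the first 100 scalar
   iterations, and the scalar epilog loop executes the remaining 3.  */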
3427 if (do_peeling_for_loop_bound)
3428 vect_do_peeling_for_loop_bound (loop_vinfo, &ratio,
3429 cond_expr, cond_expr_stmt_list);
3430 else
3431 ratio = build_int_cst (TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo)),
3432 LOOP_VINFO_INT_NITERS (loop_vinfo) / vectorization_factor);
3434 /* 1) Make sure the loop header has exactly two entries
3435 2) Make sure we have a preheader basic block. */
3437 gcc_assert (EDGE_COUNT (loop->header->preds) == 2);
3439 split_edge (loop_preheader_edge (loop));
3441 /* FORNOW: the vectorizer supports only loops whose body consists
3442 of one basic block (header + empty latch). When the vectorizer
3443 supports more involved loop forms, the order in which the BBs are
3444 traversed will need to be reconsidered. */
3446 for (i = 0; i < nbbs; i++)
3448 basic_block bb = bbs[i];
3449 stmt_vec_info stmt_info;
3450 gimple phi;
3452 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
3454 phi = gsi_stmt (si);
3455 if (vect_print_dump_info (REPORT_DETAILS))
3457 fprintf (vect_dump, "------>vectorizing phi: ");
3458 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
3460 stmt_info = vinfo_for_stmt (phi);
3461 if (!stmt_info)
3462 continue;
3464 if (!STMT_VINFO_RELEVANT_P (stmt_info)
3465 && !STMT_VINFO_LIVE_P (stmt_info))
3466 continue;
3468 if ((TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info))
3469 != (unsigned HOST_WIDE_INT) vectorization_factor)
3470 && vect_print_dump_info (REPORT_DETAILS))
3471 fprintf (vect_dump, "multiple-types.");
3473 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def)
3475 if (vect_print_dump_info (REPORT_DETAILS))
3476 fprintf (vect_dump, "transform phi.");
3477 vect_transform_stmt (phi, NULL, NULL, NULL, NULL);
3481 for (si = gsi_start_bb (bb); !gsi_end_p (si);)
3483 gimple stmt = gsi_stmt (si);
3484 bool is_store;
3486 if (vect_print_dump_info (REPORT_DETAILS))
3488 fprintf (vect_dump, "------>vectorizing statement: ");
3489 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
3492 stmt_info = vinfo_for_stmt (stmt);
3494 /* vector stmts created in the outer-loop during vectorization of
3495 stmts in an inner-loop may not have a stmt_info, and do not
3496 need to be vectorized. */
3497 if (!stmt_info)
3499 gsi_next (&si);
3500 continue;
3503 if (!STMT_VINFO_RELEVANT_P (stmt_info)
3504 && !STMT_VINFO_LIVE_P (stmt_info))
3506 gsi_next (&si);
3507 continue;
3510 gcc_assert (STMT_VINFO_VECTYPE (stmt_info));
3511 nunits =
3512 (unsigned int) TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
3513 if (!STMT_SLP_TYPE (stmt_info)
3514 && nunits != (unsigned int) vectorization_factor
3515 && vect_print_dump_info (REPORT_DETAILS))
3516 /* For SLP, VF is set according to the unrolling factor, not to the
3517 vector size; hence for SLP this print is not valid. */
3518 fprintf (vect_dump, "multiple-types.");
3520 /* SLP. Schedule all the SLP instances when the first SLP stmt is
3521 reached. */
3522 if (STMT_SLP_TYPE (stmt_info))
3524 if (!slp_scheduled)
3526 slp_scheduled = true;
3528 if (vect_print_dump_info (REPORT_DETAILS))
3529 fprintf (vect_dump, "=== scheduling SLP instances ===");
3531 is_store = vect_schedule_slp (loop_vinfo);
3533 /* IS_STORE is true if STMT is a store. Stores cannot be of
3534 hybrid SLP type. They are removed in
3535 vect_schedule_slp_instance and their vinfo is destroyed. */
3536 if (is_store)
3538 gsi_next (&si);
3539 continue;
3543 /* Hybrid SLP stmts must be vectorized in addition to SLP. */
3544 if (PURE_SLP_STMT (stmt_info))
3546 gsi_next (&si);
3547 continue;
3551 /* -------- vectorize statement ------------ */
3552 if (vect_print_dump_info (REPORT_DETAILS))
3553 fprintf (vect_dump, "transform statement.");
3555 strided_store = false;
3556 is_store = vect_transform_stmt (stmt, &si, &strided_store, NULL, NULL);
3557 if (is_store)
3559 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
3561 /* Interleaving. If IS_STORE is TRUE, the vectorization of the
3562 interleaving chain was completed - free all the stores in
3563 the chain. */
3564 vect_remove_stores (DR_GROUP_FIRST_DR (stmt_info));
3565 gsi_remove (&si, true);
3566 continue;
3568 else
3570 /* Free the attached stmt_vec_info and remove the stmt. */
3571 free_stmt_vec_info (stmt);
3572 gsi_remove (&si, true);
3573 continue;
3576 gsi_next (&si);
3577 } /* stmts in BB */
3578 } /* BBs in loop */
3580 slpeel_make_loop_iterate_ntimes (loop, ratio);
3582 mark_set_for_renaming (vect_memsyms_to_rename);
3584 /* The memory tags and pointers in vectorized statements need to
3585 have their SSA forms updated. FIXME, why can't this be delayed
3586 until all the loops have been transformed? */
3587 update_ssa (TODO_update_ssa);
3589 if (vect_print_dump_info (REPORT_VECTORIZED_LOOPS))
3590 fprintf (vect_dump, "LOOP VECTORIZED.");
3591 if (loop->inner && vect_print_dump_info (REPORT_VECTORIZED_LOOPS))
3592 fprintf (vect_dump, "OUTER LOOP VECTORIZED.");