/* Loop Vectorization
   Copyright (C) 2003-2014 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com> and
   Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "dumpfile.h"
26 #include "tm.h"
27 #include "tree.h"
28 #include "stor-layout.h"
29 #include "basic-block.h"
30 #include "gimple-pretty-print.h"
31 #include "tree-ssa-alias.h"
32 #include "internal-fn.h"
33 #include "gimple-expr.h"
34 #include "is-a.h"
35 #include "gimple.h"
36 #include "gimplify.h"
37 #include "gimple-iterator.h"
38 #include "gimplify-me.h"
39 #include "gimple-ssa.h"
40 #include "tree-phinodes.h"
41 #include "ssa-iterators.h"
42 #include "stringpool.h"
43 #include "tree-ssanames.h"
44 #include "tree-ssa-loop-ivopts.h"
45 #include "tree-ssa-loop-manip.h"
46 #include "tree-ssa-loop-niter.h"
47 #include "tree-pass.h"
48 #include "cfgloop.h"
49 #include "expr.h"
50 #include "recog.h"
51 #include "optabs.h"
52 #include "params.h"
53 #include "diagnostic-core.h"
54 #include "tree-chrec.h"
55 #include "tree-scalar-evolution.h"
56 #include "tree-vectorizer.h"
57 #include "target.h"
/* Loop Vectorization Pass.

   This pass tries to vectorize loops.

   For example, the vectorizer transforms the following simple loop:

        short a[N]; short b[N]; short c[N]; int i;

        for (i=0; i<N; i++){
          a[i] = b[i] + c[i];
        }

   as if it was manually vectorized by rewriting the source code into:

        typedef int __attribute__((mode(V8HI))) v8hi;
        short a[N]; short b[N]; short c[N]; int i;
        v8hi *pa = (v8hi*)a, *pb = (v8hi*)b, *pc = (v8hi*)c;
        v8hi va, vb, vc;

        for (i=0; i<N/8; i++){
          vb = pb[i];
          vc = pc[i];
          va = vb + vc;
          pa[i] = va;
        }

   The main entry to this pass is vectorize_loops(), in which
   the vectorizer applies a set of analyses on a given set of loops,
   followed by the actual vectorization transformation for the loops that
   had successfully passed the analysis phase.
   Throughout this pass we make a distinction between two types of
   data: scalars (which are represented by SSA_NAMES), and memory references
   ("data-refs").  These two types of data require different handling both
   during analysis and transformation.  The types of data-refs that the
   vectorizer currently supports are ARRAY_REFS whose base is an array DECL
   (not a pointer), and INDIRECT_REFS through pointers; both array and pointer
   accesses are required to have a simple (consecutive) access pattern.

   Analysis phase:
   ===============
   The driver for the analysis phase is vect_analyze_loop().
   It applies a set of analyses, some of which rely on the scalar evolution
   analyzer (scev) developed by Sebastian Pop.

   During the analysis phase the vectorizer records some information
   per stmt in a "stmt_vec_info" struct which is attached to each stmt in the
   loop, as well as general information about the loop as a whole, which is
   recorded in a "loop_vec_info" struct attached to each loop.

   Transformation phase:
   =====================
   The loop transformation phase scans all the stmts in the loop, and
   creates a vector stmt (or a sequence of stmts) for each scalar stmt S in
   the loop that needs to be vectorized.  It inserts the vector code sequence
   just before the scalar stmt S, and records a pointer to the vector code
   in STMT_VINFO_VEC_STMT (stmt_info) (stmt_info is the stmt_vec_info struct
   attached to S).  This pointer will be used for the vectorization of
   following stmts which use the def of stmt S.  Stmt S is removed if it
   writes to memory; otherwise, we rely on dead code elimination for
   removing it.

   For example, say stmt S1 was vectorized into stmt VS1:

   VS1: vb = px[i];
   S1:  b = x[i];    STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
   S2:  a = b;

   To vectorize stmt S2, the vectorizer first finds the stmt that defines
   the operand 'b' (S1), and gets the relevant vector def 'vb' from the
   vector stmt VS1 pointed to by STMT_VINFO_VEC_STMT (stmt_info (S1)).  The
   resulting sequence would be:

   VS1: vb = px[i];
   S1:  b = x[i];       STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
   VS2: va = vb;
   S2:  a = b;          STMT_VINFO_VEC_STMT (stmt_info (S2)) = VS2

   Operands that are not SSA_NAMEs, are data-refs that appear in
   load/store operations (like 'x[i]' in S1), and are handled differently.

   Target modeling:
   =================
   Currently the only target specific information that is used is the
   size of the vector (in bytes) - "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD".
   Targets that can support different sizes of vectors will, for now, need
   to specify one value for "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD".  More
   flexibility will be added in the future.

   Since we only vectorize operations whose vector form can be
   expressed using existing tree codes, to verify that an operation is
   supported, the vectorizer checks the relevant optab at the relevant
   machine_mode (e.g., optab_handler (add_optab, V8HImode)).  If
   the value found is CODE_FOR_nothing, then there's no target support, and
   we can't vectorize the stmt.

   For additional information on this project see:
   http://gcc.gnu.org/projects/tree-ssa/vectorization.html
*/
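
/* For illustration, a minimal sketch of the optab check described above.
   The helper name is hypothetical; the real checks are spread across the
   vectorizable_* routines:

     static bool
     example_target_supports_v8hi_add (void)
     {
       return optab_handler (add_optab, V8HImode) != CODE_FOR_nothing;
     }

   That is, if the target provides no instruction implementing the operation
   in the chosen vector mode, the stmt cannot be vectorized.  */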

static void vect_estimate_min_profitable_iters (loop_vec_info, int *, int *);
/* Function vect_determine_vectorization_factor

   Determine the vectorization factor (VF).  VF is the number of data elements
   that are operated upon in parallel in a single iteration of the vectorized
   loop.  For example, when vectorizing a loop that operates on 4-byte
   elements, on a target with vector size (VS) of 16 bytes, the VF is set to
   4, since 4 elements can fit in a single vector register.

   We currently support vectorization of loops in which all types operated
   upon are of the same size.  Therefore this function currently sets VF
   according to the size of the types operated upon, and fails if there are
   multiple sizes in the loop.

   VF is also the factor by which the loop iterations are strip-mined, e.g.:
   original loop:
        for (i=0; i<N; i++){
          a[i] = b[i] + c[i];
        }

   vectorized loop:
        for (i=0; i<N; i+=VF){
          a[i:VF] = b[i:VF] + c[i:VF];
        }
*/
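
/* For instance (illustrative only), on a target with 16-byte vectors a loop
   operating on 'short' (2-byte) elements gets VF = 16/2 = 8, while one
   operating on 'int' (4-byte) elements gets VF = 16/4 = 4.  A single loop
   mixing both element sizes is currently rejected by this function.  */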

static bool
vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  unsigned int vectorization_factor = 0;
  tree scalar_type;
  gimple phi;
  tree vectype;
  unsigned int nunits;
  stmt_vec_info stmt_info;
  int i;
  HOST_WIDE_INT dummy;
  gimple stmt, pattern_stmt = NULL;
  gimple_seq pattern_def_seq = NULL;
  gimple_stmt_iterator pattern_def_si = gsi_none ();
  bool analyze_pattern_stmt = false;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "=== vect_determine_vectorization_factor ===\n");

  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];

      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        {
          phi = gsi_stmt (si);
          stmt_info = vinfo_for_stmt (phi);
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location, "==> examining phi: ");
              dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
              dump_printf (MSG_NOTE, "\n");
            }

          gcc_assert (stmt_info);

          if (STMT_VINFO_RELEVANT_P (stmt_info))
            {
              gcc_assert (!STMT_VINFO_VECTYPE (stmt_info));
              scalar_type = TREE_TYPE (PHI_RESULT (phi));

              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_NOTE, vect_location,
                                   "get vectype for scalar type: ");
                  dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
                  dump_printf (MSG_NOTE, "\n");
                }

              vectype = get_vectype_for_scalar_type (scalar_type);
              if (!vectype)
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "not vectorized: unsupported "
                                       "data-type ");
                      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                         scalar_type);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  return false;
                }
              STMT_VINFO_VECTYPE (stmt_info) = vectype;

              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
                  dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
                  dump_printf (MSG_NOTE, "\n");
                }

              nunits = TYPE_VECTOR_SUBPARTS (vectype);
              if (dump_enabled_p ())
                dump_printf_loc (MSG_NOTE, vect_location, "nunits = %d\n",
                                 nunits);

              if (!vectorization_factor
                  || (nunits > vectorization_factor))
                vectorization_factor = nunits;
            }
        }

      for (si = gsi_start_bb (bb); !gsi_end_p (si) || analyze_pattern_stmt;)
        {
          tree vf_vectype;

          if (analyze_pattern_stmt)
            stmt = pattern_stmt;
          else
            stmt = gsi_stmt (si);

          stmt_info = vinfo_for_stmt (stmt);

          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location,
                               "==> examining statement: ");
              dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
              dump_printf (MSG_NOTE, "\n");
            }

          gcc_assert (stmt_info);

          /* Skip stmts which do not need to be vectorized.  */
          if ((!STMT_VINFO_RELEVANT_P (stmt_info)
               && !STMT_VINFO_LIVE_P (stmt_info))
              || gimple_clobber_p (stmt))
            {
              if (STMT_VINFO_IN_PATTERN_P (stmt_info)
                  && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info))
                  && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
                      || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
                {
                  stmt = pattern_stmt;
                  stmt_info = vinfo_for_stmt (pattern_stmt);
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_NOTE, vect_location,
                                       "==> examining pattern statement: ");
                      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
                      dump_printf (MSG_NOTE, "\n");
                    }
                }
              else
                {
                  if (dump_enabled_p ())
                    dump_printf_loc (MSG_NOTE, vect_location, "skip.\n");
                  gsi_next (&si);
                  continue;
                }
            }
          else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
                   && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info))
                   && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
                       || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
            analyze_pattern_stmt = true;

          /* If a pattern statement has def stmts, analyze them too.  */
          if (is_pattern_stmt_p (stmt_info))
            {
              if (pattern_def_seq == NULL)
                {
                  pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
                  pattern_def_si = gsi_start (pattern_def_seq);
                }
              else if (!gsi_end_p (pattern_def_si))
                gsi_next (&pattern_def_si);
              if (pattern_def_seq != NULL)
                {
                  gimple pattern_def_stmt = NULL;
                  stmt_vec_info pattern_def_stmt_info = NULL;

                  while (!gsi_end_p (pattern_def_si))
                    {
                      pattern_def_stmt = gsi_stmt (pattern_def_si);
                      pattern_def_stmt_info
                        = vinfo_for_stmt (pattern_def_stmt);
                      if (STMT_VINFO_RELEVANT_P (pattern_def_stmt_info)
                          || STMT_VINFO_LIVE_P (pattern_def_stmt_info))
                        break;
                      gsi_next (&pattern_def_si);
                    }

                  if (!gsi_end_p (pattern_def_si))
                    {
                      if (dump_enabled_p ())
                        {
                          dump_printf_loc (MSG_NOTE, vect_location,
                                           "==> examining pattern def stmt: ");
                          dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
                                            pattern_def_stmt, 0);
                          dump_printf (MSG_NOTE, "\n");
                        }

                      stmt = pattern_def_stmt;
                      stmt_info = pattern_def_stmt_info;
                    }
                  else
                    {
                      pattern_def_si = gsi_none ();
                      analyze_pattern_stmt = false;
                    }
                }
              else
                analyze_pattern_stmt = false;
            }

          if (gimple_get_lhs (stmt) == NULL_TREE
              /* MASK_STORE has no lhs, but is ok.  */
              && (!is_gimple_call (stmt)
                  || !gimple_call_internal_p (stmt)
                  || gimple_call_internal_fn (stmt) != IFN_MASK_STORE))
            {
              if (is_gimple_call (stmt))
                {
                  /* Ignore calls with no lhs.  These must be calls to
                     #pragma omp simd functions, and what vectorization factor
                     it really needs can't be determined until
                     vectorizable_simd_clone_call.  */
                  if (!analyze_pattern_stmt && gsi_end_p (pattern_def_si))
                    {
                      pattern_def_seq = NULL;
                      gsi_next (&si);
                    }
                  continue;
                }
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "not vectorized: irregular stmt.");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt,
                                    0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              return false;
            }

          if (VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "not vectorized: vector stmt in loop:");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              return false;
            }

          if (STMT_VINFO_VECTYPE (stmt_info))
            {
              /* The only case when a vectype had been already set is for stmts
                 that contain a dataref, or for "pattern-stmts" (stmts
                 generated by the vectorizer to represent/replace a certain
                 idiom).  */
              gcc_assert (STMT_VINFO_DATA_REF (stmt_info)
                          || is_pattern_stmt_p (stmt_info)
                          || !gsi_end_p (pattern_def_si));
              vectype = STMT_VINFO_VECTYPE (stmt_info);
            }
          else
            {
              gcc_assert (!STMT_VINFO_DATA_REF (stmt_info));
              if (is_gimple_call (stmt)
                  && gimple_call_internal_p (stmt)
                  && gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
                scalar_type = TREE_TYPE (gimple_call_arg (stmt, 3));
              else
                scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_NOTE, vect_location,
                                   "get vectype for scalar type: ");
                  dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
                  dump_printf (MSG_NOTE, "\n");
                }
              vectype = get_vectype_for_scalar_type (scalar_type);
              if (!vectype)
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "not vectorized: unsupported "
                                       "data-type ");
                      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                         scalar_type);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  return false;
                }

              STMT_VINFO_VECTYPE (stmt_info) = vectype;

              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
                  dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
                  dump_printf (MSG_NOTE, "\n");
                }
            }

          /* The vectorization factor is according to the smallest
             scalar type (or the largest vector size, but we only
             support one vector size per loop).  */
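          /* Illustrative example: for a widening stmt such as
             int_x = (int) short_y, the smallest scalar type is 'short', so
             on a 16-byte vector target the stmt contributes nunits = 8
             rather than 4 to the VF computation.  */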
          scalar_type = vect_get_smallest_scalar_type (stmt, &dummy,
                                                       &dummy);
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location,
                               "get vectype for scalar type: ");
              dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
              dump_printf (MSG_NOTE, "\n");
            }
          vf_vectype = get_vectype_for_scalar_type (scalar_type);
          if (!vf_vectype)
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "not vectorized: unsupported data-type ");
                  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                     scalar_type);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              return false;
            }

          if ((GET_MODE_SIZE (TYPE_MODE (vectype))
               != GET_MODE_SIZE (TYPE_MODE (vf_vectype))))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "not vectorized: different sized vector "
                                   "types in statement, ");
                  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                     vectype);
                  dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
                  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                     vf_vectype);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              return false;
            }

          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
              dump_generic_expr (MSG_NOTE, TDF_SLIM, vf_vectype);
              dump_printf (MSG_NOTE, "\n");
            }

          nunits = TYPE_VECTOR_SUBPARTS (vf_vectype);
          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location, "nunits = %d\n", nunits);
          if (!vectorization_factor
              || (nunits > vectorization_factor))
            vectorization_factor = nunits;

          if (!analyze_pattern_stmt && gsi_end_p (pattern_def_si))
            {
              pattern_def_seq = NULL;
              gsi_next (&si);
            }
        }
    }

  /* TODO: Analyze cost.  Decide if worth while to vectorize.  */
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "vectorization factor = %d\n",
                     vectorization_factor);
  if (vectorization_factor <= 1)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: unsupported data-type\n");
      return false;
    }
  LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;

  return true;
}

/* Function vect_is_simple_iv_evolution.

   FORNOW: A simple evolution of an induction variable in the loop is
   considered a polynomial evolution.  */
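
/* For example (illustrative): for the classic induction variable 'i' in
   for (i = 0; i < N; i++), the access function computed by scev is the
   chrec {0, +, 1}_1, giving *INIT == 0 and *STEP == 1.  */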

static bool
vect_is_simple_iv_evolution (unsigned loop_nb, tree access_fn, tree * init,
                             tree * step)
{
  tree init_expr;
  tree step_expr;
  tree evolution_part = evolution_part_in_loop_num (access_fn, loop_nb);
  basic_block bb;

  /* When there is no evolution in this loop, the evolution function
     is not "simple".  */
  if (evolution_part == NULL_TREE)
    return false;

  /* When the evolution is a polynomial of degree >= 2
     the evolution function is not "simple".  */
  if (tree_is_chrec (evolution_part))
    return false;

  step_expr = evolution_part;
  init_expr = unshare_expr (initial_condition_in_loop_num (access_fn, loop_nb));

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "step: ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, step_expr);
      dump_printf (MSG_NOTE, ", init: ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, init_expr);
      dump_printf (MSG_NOTE, "\n");
    }

  *init = init_expr;
  *step = step_expr;
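
  /* The step must be loop-invariant: an INTEGER_CST; an SSA_NAME defined
     outside the loop, of integral type or (with -fassociative-math) of
     floating-point type; or a REAL_CST when -fassociative-math permits
     reassociation.  */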
  if (TREE_CODE (step_expr) != INTEGER_CST
      && (TREE_CODE (step_expr) != SSA_NAME
          || ((bb = gimple_bb (SSA_NAME_DEF_STMT (step_expr)))
              && flow_bb_inside_loop_p (get_loop (cfun, loop_nb), bb))
          || (!INTEGRAL_TYPE_P (TREE_TYPE (step_expr))
              && (!SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr))
                  || !flag_associative_math)))
      && (TREE_CODE (step_expr) != REAL_CST
          || !flag_associative_math))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "step unknown.\n");
      return false;
    }

  return true;
}

/* Function vect_analyze_scalar_cycles_1.

   Examine the cross iteration def-use cycles of scalar variables
   in LOOP.  LOOP_VINFO represents the loop that is now being
   considered for vectorization (can be LOOP, or an outer-loop
   enclosing LOOP).  */

static void
vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
{
  basic_block bb = loop->header;
  tree init, step;
  auto_vec<gimple, 64> worklist;
  gimple_stmt_iterator gsi;
  bool double_reduc;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "=== vect_analyze_scalar_cycles ===\n");

  /* First - identify all inductions.  Reduction detection assumes that all the
     inductions have been identified, therefore, this order must not be
     changed.  */
  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);
      tree access_fn = NULL;
      tree def = PHI_RESULT (phi);
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);

      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: ");
          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
          dump_printf (MSG_NOTE, "\n");
        }

      /* Skip virtual phi's.  The data dependences that are associated with
         virtual defs/uses (i.e., memory accesses) are analyzed elsewhere.  */
      if (virtual_operand_p (def))
        continue;

      STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_unknown_def_type;

      /* Analyze the evolution function.  */
      access_fn = analyze_scalar_evolution (loop, def);
      if (access_fn)
        {
          STRIP_NOPS (access_fn);
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location,
                               "Access function of PHI: ");
              dump_generic_expr (MSG_NOTE, TDF_SLIM, access_fn);
              dump_printf (MSG_NOTE, "\n");
            }
          STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo)
            = evolution_part_in_loop_num (access_fn, loop->num);
        }

      if (!access_fn
          || !vect_is_simple_iv_evolution (loop->num, access_fn, &init, &step)
          || (LOOP_VINFO_LOOP (loop_vinfo) != loop
              && TREE_CODE (step) != INTEGER_CST))
        {
          worklist.safe_push (phi);
          continue;
        }

      gcc_assert (STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo) != NULL_TREE);

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location, "Detected induction.\n");
      STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_induction_def;
    }


  /* Second - identify all reductions and nested cycles.  */
  while (worklist.length () > 0)
    {
      gimple phi = worklist.pop ();
      tree def = PHI_RESULT (phi);
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);
      gimple reduc_stmt;
      bool nested_cycle;

      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: ");
          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
          dump_printf (MSG_NOTE, "\n");
        }

      gcc_assert (!virtual_operand_p (def)
                  && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_unknown_def_type);

      nested_cycle = (loop != LOOP_VINFO_LOOP (loop_vinfo));
      reduc_stmt = vect_force_simple_reduction (loop_vinfo, phi, !nested_cycle,
                                                &double_reduc);
      if (reduc_stmt)
        {
          if (double_reduc)
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_NOTE, vect_location,
                                 "Detected double reduction.\n");

              STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_double_reduction_def;
              STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
                                                    vect_double_reduction_def;
            }
          else
            {
              if (nested_cycle)
                {
                  if (dump_enabled_p ())
                    dump_printf_loc (MSG_NOTE, vect_location,
                                     "Detected vectorizable nested cycle.\n");

                  STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_nested_cycle;
                  STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
                                                             vect_nested_cycle;
                }
              else
                {
                  if (dump_enabled_p ())
                    dump_printf_loc (MSG_NOTE, vect_location,
                                     "Detected reduction.\n");

                  STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_reduction_def;
                  STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
                                                           vect_reduction_def;
                  /* Store the reduction cycles for possible vectorization in
                     loop-aware SLP.  */
                  LOOP_VINFO_REDUCTIONS (loop_vinfo).safe_push (reduc_stmt);
                }
            }
        }
      else
        if (dump_enabled_p ())
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "Unknown def-use cycle pattern.\n");
    }
}

/* Function vect_analyze_scalar_cycles.

   Examine the cross iteration def-use cycles of scalar variables, by
   analyzing the loop-header PHIs of scalar variables.  Classify each
   cycle as one of the following: invariant, induction, reduction, unknown.
   We do that for the loop represented by LOOP_VINFO, and also for its
   inner-loop, if it exists.

   Examples for scalar cycles:

   Example1: reduction:

              loop1:
              for (i=0; i<N; i++)
                 sum += a[i];

   Example2: induction:

              loop2:
              for (i=0; i<N; i++)
                 a[i] = i;  */

static void
vect_analyze_scalar_cycles (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);

  vect_analyze_scalar_cycles_1 (loop_vinfo, loop);

  /* When vectorizing an outer-loop, the inner-loop is executed sequentially.
     Reductions in such inner-loop therefore have different properties than
     the reductions in the nest that gets vectorized:
     1. When vectorized, they are executed in the same order as in the original
        scalar loop, so we can't change the order of computation when
        vectorizing them.
     2. FIXME: Inner-loop reductions can be used in the inner-loop, so the
        current checks are too strict.  */

  if (loop->inner)
    vect_analyze_scalar_cycles_1 (loop_vinfo, loop->inner);
}

/* Function vect_get_loop_niters.

   Determine the number of iterations the loop executes and place it
   in NUMBER_OF_ITERATIONS.  Place the number of latch iterations
   in NUMBER_OF_ITERATIONSM1.

   Return the loop exit condition.  */
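
/* For example (illustrative): for
     for (i = 0; i < n; i++) ...
   with n >= 1, the latch runs n - 1 times, so *NUMBER_OF_ITERATIONSM1 is
   n - 1 and *NUMBER_OF_ITERATIONS (the header execution count) is n.  */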

static gimple
vect_get_loop_niters (struct loop *loop, tree *number_of_iterations,
                      tree *number_of_iterationsm1)
{
  tree niters;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "=== get_loop_niters ===\n");

  niters = number_of_latch_executions (loop);
  *number_of_iterationsm1 = niters;

  /* We want the number of loop header executions which is the number
     of latch executions plus one.
     ???  For UINT_MAX latch executions this number overflows to zero
     for loops like do { n++; } while (n != 0);  */
  if (niters && !chrec_contains_undetermined (niters))
    niters = fold_build2 (PLUS_EXPR, TREE_TYPE (niters), unshare_expr (niters),
                          build_int_cst (TREE_TYPE (niters), 1));
  *number_of_iterations = niters;

  return get_loop_exit_condition (loop);
}

/* Function bb_in_loop_p

   Used as predicate for dfs order traversal of the loop bbs.  */

static bool
bb_in_loop_p (const_basic_block bb, const void *data)
{
  const struct loop *const loop = (const struct loop *)data;
  if (flow_bb_inside_loop_p (loop, bb))
    return true;
  return false;
}

/* Function new_loop_vec_info.

   Create and initialize a new loop_vec_info struct for LOOP, as well as
   stmt_vec_info structs for all the stmts in LOOP.  */

static loop_vec_info
new_loop_vec_info (struct loop *loop)
{
  loop_vec_info res;
  basic_block *bbs;
  gimple_stmt_iterator si;
  unsigned int i, nbbs;

  res = (loop_vec_info) xcalloc (1, sizeof (struct _loop_vec_info));
  LOOP_VINFO_LOOP (res) = loop;

  bbs = get_loop_body (loop);

  /* Create/Update stmt_info for all stmts in the loop.  */
  for (i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = bbs[i];

      /* BBs in a nested inner-loop will have been already processed (because
         we will have called vect_analyze_loop_form for any nested inner-loop).
         Therefore, for stmts in an inner-loop we just want to update the
         STMT_VINFO_LOOP_VINFO field of their stmt_info to point to the new
         loop_info of the outer-loop we are currently considering to vectorize
         (instead of the loop_info of the inner-loop).
         For stmts in other BBs we need to create a stmt_info from scratch.  */
      if (bb->loop_father != loop)
        {
          /* Inner-loop bb.  */
          gcc_assert (loop->inner && bb->loop_father == loop->inner);
          for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple phi = gsi_stmt (si);
              stmt_vec_info stmt_info = vinfo_for_stmt (phi);
              loop_vec_info inner_loop_vinfo =
                STMT_VINFO_LOOP_VINFO (stmt_info);
              gcc_assert (loop->inner == LOOP_VINFO_LOOP (inner_loop_vinfo));
              STMT_VINFO_LOOP_VINFO (stmt_info) = res;
            }
          for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple stmt = gsi_stmt (si);
              stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
              loop_vec_info inner_loop_vinfo =
                STMT_VINFO_LOOP_VINFO (stmt_info);
              gcc_assert (loop->inner == LOOP_VINFO_LOOP (inner_loop_vinfo));
              STMT_VINFO_LOOP_VINFO (stmt_info) = res;
            }
        }
      else
        {
          /* bb in current nest.  */
          for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple phi = gsi_stmt (si);
              gimple_set_uid (phi, 0);
              set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, res, NULL));
            }

          for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple stmt = gsi_stmt (si);
              gimple_set_uid (stmt, 0);
              set_vinfo_for_stmt (stmt, new_stmt_vec_info (stmt, res, NULL));
            }
        }
    }

  /* CHECKME: We want to visit all BBs before their successors (except for
     latch blocks, for which this assertion wouldn't hold).  In the simple
     case of the loop forms we allow, a dfs order of the BBs would be the same
     as reversed postorder traversal, so we are safe.  */

  free (bbs);
  bbs = XCNEWVEC (basic_block, loop->num_nodes);
  nbbs = dfs_enumerate_from (loop->header, 0, bb_in_loop_p,
                             bbs, loop->num_nodes, loop);
  gcc_assert (nbbs == loop->num_nodes);

  LOOP_VINFO_BBS (res) = bbs;
  LOOP_VINFO_NITERSM1 (res) = NULL;
  LOOP_VINFO_NITERS (res) = NULL;
  LOOP_VINFO_NITERS_UNCHANGED (res) = NULL;
  LOOP_VINFO_COST_MODEL_MIN_ITERS (res) = 0;
  LOOP_VINFO_COST_MODEL_THRESHOLD (res) = 0;
  LOOP_VINFO_VECTORIZABLE_P (res) = 0;
  LOOP_VINFO_PEELING_FOR_ALIGNMENT (res) = 0;
  LOOP_VINFO_VECT_FACTOR (res) = 0;
  LOOP_VINFO_LOOP_NEST (res).create (3);
  LOOP_VINFO_DATAREFS (res).create (10);
  LOOP_VINFO_DDRS (res).create (10 * 10);
  LOOP_VINFO_UNALIGNED_DR (res) = NULL;
  LOOP_VINFO_MAY_MISALIGN_STMTS (res).create (
             PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIGNMENT_CHECKS));
  LOOP_VINFO_MAY_ALIAS_DDRS (res).create (
             PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS));
  LOOP_VINFO_GROUPED_STORES (res).create (10);
  LOOP_VINFO_REDUCTIONS (res).create (10);
  LOOP_VINFO_REDUCTION_CHAINS (res).create (10);
  LOOP_VINFO_SLP_INSTANCES (res).create (10);
  LOOP_VINFO_SLP_UNROLLING_FACTOR (res) = 1;
  LOOP_VINFO_TARGET_COST_DATA (res) = init_cost (loop);
  LOOP_VINFO_PEELING_FOR_GAPS (res) = false;
  LOOP_VINFO_PEELING_FOR_NITER (res) = false;
  LOOP_VINFO_OPERANDS_SWAPPED (res) = false;

  return res;
}

/* Function destroy_loop_vec_info.

   Free LOOP_VINFO struct, as well as all the stmt_vec_info structs of all the
   stmts in the loop.  */

void
destroy_loop_vec_info (loop_vec_info loop_vinfo, bool clean_stmts)
{
  struct loop *loop;
  basic_block *bbs;
  int nbbs;
  gimple_stmt_iterator si;
  int j;
  vec<slp_instance> slp_instances;
  slp_instance instance;
  bool swapped;

  if (!loop_vinfo)
    return;

  loop = LOOP_VINFO_LOOP (loop_vinfo);

  bbs = LOOP_VINFO_BBS (loop_vinfo);
  nbbs = clean_stmts ? loop->num_nodes : 0;
  swapped = LOOP_VINFO_OPERANDS_SWAPPED (loop_vinfo);

  for (j = 0; j < nbbs; j++)
    {
      basic_block bb = bbs[j];
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        free_stmt_vec_info (gsi_stmt (si));

      for (si = gsi_start_bb (bb); !gsi_end_p (si); )
        {
          gimple stmt = gsi_stmt (si);

          /* We may have broken canonical form by moving a constant
             into RHS1 of a commutative op.  Fix such occurrences.  */
          if (swapped && is_gimple_assign (stmt))
            {
              enum tree_code code = gimple_assign_rhs_code (stmt);

              if ((code == PLUS_EXPR
                   || code == POINTER_PLUS_EXPR
                   || code == MULT_EXPR)
                  && CONSTANT_CLASS_P (gimple_assign_rhs1 (stmt)))
                swap_ssa_operands (stmt,
                                   gimple_assign_rhs1_ptr (stmt),
                                   gimple_assign_rhs2_ptr (stmt));
            }

          /* Free stmt_vec_info.  */
          free_stmt_vec_info (stmt);
          gsi_next (&si);
        }
    }

  free (LOOP_VINFO_BBS (loop_vinfo));
  vect_destroy_datarefs (loop_vinfo, NULL);
  free_dependence_relations (LOOP_VINFO_DDRS (loop_vinfo));
  LOOP_VINFO_LOOP_NEST (loop_vinfo).release ();
  LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).release ();
  LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo).release ();
  slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
  FOR_EACH_VEC_ELT (slp_instances, j, instance)
    vect_free_slp_instance (instance);

  LOOP_VINFO_SLP_INSTANCES (loop_vinfo).release ();
  LOOP_VINFO_GROUPED_STORES (loop_vinfo).release ();
  LOOP_VINFO_REDUCTIONS (loop_vinfo).release ();
  LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo).release ();

  if (LOOP_VINFO_PEELING_HTAB (loop_vinfo).is_created ())
    LOOP_VINFO_PEELING_HTAB (loop_vinfo).dispose ();

  destroy_cost_data (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo));

  free (loop_vinfo);
  loop->aux = NULL;
}

/* Function vect_analyze_loop_1.

   Apply a set of analyses on LOOP, and create a loop_vec_info struct
   for it.  The different analyses will record information in the
   loop_vec_info struct.  This is a subset of the analyses applied in
   vect_analyze_loop, to be applied on an inner-loop nested in the loop
   that is now considered for (outer-loop) vectorization.  */

static loop_vec_info
vect_analyze_loop_1 (struct loop *loop)
{
  loop_vec_info loop_vinfo;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "===== analyze_loop_nest_1 =====\n");

  /* Check the CFG characteristics of the loop (nesting, entry/exit, etc.)  */

  loop_vinfo = vect_analyze_loop_form (loop);
  if (!loop_vinfo)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "bad inner-loop form.\n");
      return NULL;
    }

  return loop_vinfo;
}

/* Function vect_analyze_loop_form.

   Verify that certain CFG restrictions hold, including:
   - the loop has a pre-header
   - the loop has a single entry and exit
   - the loop exit condition is simple enough, and the number of iterations
     can be analyzed (a countable loop).  */

loop_vec_info
vect_analyze_loop_form (struct loop *loop)
{
  loop_vec_info loop_vinfo;
  gimple loop_cond;
  tree number_of_iterations = NULL, number_of_iterationsm1 = NULL;
  loop_vec_info inner_loop_vinfo = NULL;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "=== vect_analyze_loop_form ===\n");

  /* Different restrictions apply when we are considering an inner-most loop,
     vs. an outer (nested) loop.
     (FORNOW. May want to relax some of these restrictions in the future).  */

  if (!loop->inner)
    {
      /* Inner-most loop.  We currently require that the number of BBs is
         exactly 2 (the header and latch).  Vectorizable inner-most loops
         look like this:

                        (pre-header)
                           |
                          header <--------+
                           | |            |
                           | +--> latch --+
                           |
                        (exit-bb)  */

      if (loop->num_nodes != 2)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "not vectorized: control flow in loop.\n");
          return NULL;
        }

      if (empty_block_p (loop->header))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "not vectorized: empty loop.\n");
          return NULL;
        }
    }
  else
    {
      struct loop *innerloop = loop->inner;
      edge entryedge;

      /* Nested loop.  We currently require that the loop is doubly-nested,
         contains a single inner loop, and the number of BBs is exactly 5.
         Vectorizable outer-loops look like this:

                        (pre-header)
                           |
                          header <---+
                           |         |
                          inner-loop |
                           |         |
                          tail ------+
                           |
                        (exit-bb)

         The inner-loop has the properties expected of inner-most loops
         as described above.  */

      if ((loop->inner)->inner || (loop->inner)->next)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "not vectorized: multiple nested loops.\n");
          return NULL;
        }

      /* Analyze the inner-loop.  */
      inner_loop_vinfo = vect_analyze_loop_1 (loop->inner);
      if (!inner_loop_vinfo)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "not vectorized: Bad inner loop.\n");
          return NULL;
        }

      if (!expr_invariant_in_loop_p (loop,
                                     LOOP_VINFO_NITERS (inner_loop_vinfo)))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "not vectorized: inner-loop count not"
                             " invariant.\n");
          destroy_loop_vec_info (inner_loop_vinfo, true);
          return NULL;
        }

      if (loop->num_nodes != 5)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "not vectorized: control flow in loop.\n");
          destroy_loop_vec_info (inner_loop_vinfo, true);
          return NULL;
        }

      gcc_assert (EDGE_COUNT (innerloop->header->preds) == 2);
      entryedge = EDGE_PRED (innerloop->header, 0);
      if (EDGE_PRED (innerloop->header, 0)->src == innerloop->latch)
        entryedge = EDGE_PRED (innerloop->header, 1);

      if (entryedge->src != loop->header
          || !single_exit (innerloop)
          || single_exit (innerloop)->dest != EDGE_PRED (loop->latch, 0)->src)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "not vectorized: unsupported outerloop form.\n");
          destroy_loop_vec_info (inner_loop_vinfo, true);
          return NULL;
        }

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "Considering outer-loop vectorization.\n");
    }

  if (!single_exit (loop)
      || EDGE_COUNT (loop->header->preds) != 2)
    {
      if (dump_enabled_p ())
        {
          if (!single_exit (loop))
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "not vectorized: multiple exits.\n");
          else if (EDGE_COUNT (loop->header->preds) != 2)
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "not vectorized: too many incoming edges.\n");
        }
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  /* We assume that the loop exit condition is at the end of the loop, i.e.,
     that the loop is represented as a do-while (with a proper if-guard
     before the loop if needed), where the loop header contains all the
     executable statements, and the latch is empty.  */
  if (!empty_block_p (loop->latch)
      || !gimple_seq_empty_p (phi_nodes (loop->latch)))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: latch block not empty.\n");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  /* Make sure there exists a single-predecessor exit bb:  */
  if (!single_pred_p (single_exit (loop)->dest))
    {
      edge e = single_exit (loop);
      if (!(e->flags & EDGE_ABNORMAL))
        {
          split_loop_exit_edge (e);
          if (dump_enabled_p ())
            dump_printf (MSG_NOTE, "split exit edge.\n");
        }
      else
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "not vectorized: abnormal loop exit edge.\n");
          if (inner_loop_vinfo)
            destroy_loop_vec_info (inner_loop_vinfo, true);
          return NULL;
        }
    }

  loop_cond = vect_get_loop_niters (loop, &number_of_iterations,
                                    &number_of_iterationsm1);
  if (!loop_cond)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: complicated exit condition.\n");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  if (!number_of_iterations
      || chrec_contains_undetermined (number_of_iterations))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: number of iterations cannot be "
                         "computed.\n");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  if (integer_zerop (number_of_iterations))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: number of iterations = 0.\n");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  loop_vinfo = new_loop_vec_info (loop);
  LOOP_VINFO_NITERSM1 (loop_vinfo) = number_of_iterationsm1;
  LOOP_VINFO_NITERS (loop_vinfo) = number_of_iterations;
  LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = number_of_iterations;

  if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location,
                           "Symbolic number of iterations is ");
          dump_generic_expr (MSG_NOTE, TDF_DETAILS, number_of_iterations);
          dump_printf (MSG_NOTE, "\n");
        }
    }

  STMT_VINFO_TYPE (vinfo_for_stmt (loop_cond)) = loop_exit_ctrl_vec_info_type;

  /* CHECKME: May want to keep it around in the future.  */
  if (inner_loop_vinfo)
    destroy_loop_vec_info (inner_loop_vinfo, false);

  gcc_assert (!loop->aux);
  loop->aux = loop_vinfo;
  return loop_vinfo;
}

/* Function vect_analyze_loop_operations.

   Scan the loop stmts and make sure they are all vectorizable.  */

static bool
vect_analyze_loop_operations (loop_vec_info loop_vinfo, bool slp)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  unsigned int vectorization_factor = 0;
  int i;
  gimple phi;
  stmt_vec_info stmt_info;
  bool need_to_vectorize = false;
  int min_profitable_iters;
  int min_scalar_loop_bound;
  unsigned int th;
  bool only_slp_in_loop = true, ok;
  HOST_WIDE_INT max_niter;
  HOST_WIDE_INT estimated_niter;
  int min_profitable_estimate;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "=== vect_analyze_loop_operations ===\n");

  gcc_assert (LOOP_VINFO_VECT_FACTOR (loop_vinfo));
  vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  if (slp)
    {
      /* If all the stmts in the loop can be SLPed, we perform only SLP, and
         the vectorization factor of the loop is the unrolling factor required
         by the SLP instances.  If that unrolling factor is 1, we say that we
         perform pure SLP on the loop; cross-iteration parallelism is not
         exploited.  */
      for (i = 0; i < nbbs; i++)
        {
          basic_block bb = bbs[i];
          for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple stmt = gsi_stmt (si);
              stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
              gcc_assert (stmt_info);
              if ((STMT_VINFO_RELEVANT_P (stmt_info)
                   || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
                  && !PURE_SLP_STMT (stmt_info))
                /* STMT needs both SLP and loop-based vectorization.  */
                only_slp_in_loop = false;
            }
        }

      if (only_slp_in_loop)
        vectorization_factor = LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo);
      else
        vectorization_factor = least_common_multiple (vectorization_factor,
                                LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo));

      LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "Updating vectorization factor to %d\n",
                         vectorization_factor);
    }

  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];

      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        {
          phi = gsi_stmt (si);
          ok = true;

          stmt_info = vinfo_for_stmt (phi);
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location, "examining phi: ");
              dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
              dump_printf (MSG_NOTE, "\n");
            }

          /* Inner-loop loop-closed exit phi in outer-loop vectorization
             (i.e., a phi in the tail of the outer-loop).  */
          if (! is_loop_header_bb_p (bb))
            {
              /* FORNOW: we currently don't support the case that these phis
                 are not used in the outerloop (unless it is double reduction,
                 i.e., this phi is vect_reduction_def), because this case
                 requires us to actually do something here.  */
              if ((!STMT_VINFO_RELEVANT_P (stmt_info)
                   || STMT_VINFO_LIVE_P (stmt_info))
                  && STMT_VINFO_DEF_TYPE (stmt_info)
                     != vect_double_reduction_def)
                {
                  if (dump_enabled_p ())
                    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                     "Unsupported loop-closed phi in "
                                     "outer-loop.\n");
                  return false;
                }

              /* If PHI is used in the outer loop, we check that its operand
                 is defined in the inner loop.  */
              if (STMT_VINFO_RELEVANT_P (stmt_info))
                {
                  tree phi_op;
                  gimple op_def_stmt;

                  if (gimple_phi_num_args (phi) != 1)
                    return false;

                  phi_op = PHI_ARG_DEF (phi, 0);
                  if (TREE_CODE (phi_op) != SSA_NAME)
                    return false;

                  op_def_stmt = SSA_NAME_DEF_STMT (phi_op);
                  if (gimple_nop_p (op_def_stmt)
                      || !flow_bb_inside_loop_p (loop, gimple_bb (op_def_stmt))
                      || !vinfo_for_stmt (op_def_stmt))
                    return false;

                  if (STMT_VINFO_RELEVANT (vinfo_for_stmt (op_def_stmt))
                        != vect_used_in_outer
                      && STMT_VINFO_RELEVANT (vinfo_for_stmt (op_def_stmt))
                           != vect_used_in_outer_by_reduction)
                    return false;
                }

              continue;
            }

          gcc_assert (stmt_info);

          if (STMT_VINFO_LIVE_P (stmt_info))
            {
              /* FORNOW: not yet supported.  */
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "not vectorized: value used after loop.\n");
              return false;
            }

          if (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope
              && STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def)
            {
              /* A scalar-dependence cycle that we don't support.  */
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "not vectorized: scalar dependence cycle.\n");
              return false;
            }

          if (STMT_VINFO_RELEVANT_P (stmt_info))
            {
              need_to_vectorize = true;
              if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def)
                ok = vectorizable_induction (phi, NULL, NULL);
            }

          if (!ok)
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "not vectorized: relevant phi not "
                                   "supported: ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, phi, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              return false;
            }
        }

      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
        {
          gimple stmt = gsi_stmt (si);
          if (!gimple_clobber_p (stmt)
              && !vect_analyze_stmt (stmt, &need_to_vectorize, NULL))
            return false;
        }
    } /* bbs */

  /* All operations in the loop are either irrelevant (deal with loop
     control, or dead), or only used outside the loop and can be moved
     out of the loop (e.g. invariants, inductions).  The loop can be
     optimized away by scalar optimizations.  We're better off not
     touching this loop.  */
  if (!need_to_vectorize)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "All the computation can be taken out of the loop.\n");
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: redundant loop. no profit to "
                         "vectorize.\n");
      return false;
    }

  if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) && dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vectorization_factor = %d, niters = "
                     HOST_WIDE_INT_PRINT_DEC "\n", vectorization_factor,
                     LOOP_VINFO_INT_NITERS (loop_vinfo));

  if ((LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
       && (LOOP_VINFO_INT_NITERS (loop_vinfo) < vectorization_factor))
      || ((max_niter = max_stmt_executions_int (loop)) != -1
          && (unsigned HOST_WIDE_INT) max_niter < vectorization_factor))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: iteration count too small.\n");
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: iteration count smaller than "
                         "vectorization factor.\n");
      return false;
    }

  /* Analyze cost.  Decide if worth while to vectorize.  */

  /* Once VF is set, SLP costs should be updated since the number of created
     vector stmts depends on VF.  */
  vect_update_slp_costs_according_to_vf (loop_vinfo);

  vect_estimate_min_profitable_iters (loop_vinfo, &min_profitable_iters,
                                      &min_profitable_estimate);
  LOOP_VINFO_COST_MODEL_MIN_ITERS (loop_vinfo) = min_profitable_iters;

  if (min_profitable_iters < 0)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: vectorization not profitable.\n");
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: vector version will never be "
                         "profitable.\n");
      return false;
    }

  min_scalar_loop_bound = ((PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND)
                            * vectorization_factor) - 1);

  /* Use the cost model only if it is more conservative than the user
     specified threshold.  */

  th = (unsigned) min_scalar_loop_bound;
  if (min_profitable_iters
      && (!min_scalar_loop_bound
          || min_profitable_iters > min_scalar_loop_bound))
    th = (unsigned) min_profitable_iters;

  LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo) = th;
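
  /* Worked example (illustrative): with --param min-vect-loop-bound=2 and
     VF = 4, min_scalar_loop_bound is 2 * 4 - 1 = 7; if the cost model
     computes min_profitable_iters = 10, the more conservative value 10
     becomes the threshold TH.  */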
  if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      && LOOP_VINFO_INT_NITERS (loop_vinfo) <= th)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: vectorization not profitable.\n");
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "not vectorized: iteration count smaller than user "
                         "specified loop bound parameter or minimum profitable "
                         "iterations (whichever is more conservative).\n");
      return false;
    }

  if ((estimated_niter = estimated_stmt_executions_int (loop)) != -1
      && ((unsigned HOST_WIDE_INT) estimated_niter
          <= MAX (th, (unsigned)min_profitable_estimate)))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: estimated iteration count too "
                         "small.\n");
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "not vectorized: estimated iteration count smaller "
                         "than specified loop bound parameter or minimum "
                         "profitable iterations (whichever is more "
                         "conservative).\n");
      return false;
    }

  return true;
}

/* Function vect_analyze_loop_2.

   Apply a set of analyses on LOOP, and create a loop_vec_info struct
   for it.  The different analyses will record information in the
   loop_vec_info struct.  */

static bool
vect_analyze_loop_2 (loop_vec_info loop_vinfo)
{
  bool ok, slp = false;
  int max_vf = MAX_VECTORIZATION_FACTOR;
  int min_vf = 2;
  unsigned int th;
  unsigned int n_stmts = 0;

  /* Find all data references in the loop (which correspond to vdefs/vuses)
     and analyze their evolution in the loop.  Also adjust the minimal
     vectorization factor according to the loads and stores.

     FORNOW: Handle only simple, array references, whose
     alignment can be forced, and aligned pointer-references.  */

  ok = vect_analyze_data_refs (loop_vinfo, NULL, &min_vf, &n_stmts);
  if (!ok)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "bad data references.\n");
      return false;
    }

  /* Classify all cross-iteration scalar data-flow cycles.
     Cross-iteration cycles caused by virtual phis are analyzed separately.  */

  vect_analyze_scalar_cycles (loop_vinfo);

  vect_pattern_recog (loop_vinfo, NULL);

  /* Analyze the access patterns of the data-refs in the loop (consecutive,
     complex, etc.).  FORNOW: Only handle consecutive access pattern.  */

  ok = vect_analyze_data_ref_accesses (loop_vinfo, NULL);
  if (!ok)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "bad data access.\n");
      return false;
    }

  /* Data-flow analysis to detect stmts that do not need to be vectorized.  */

  ok = vect_mark_stmts_to_be_vectorized (loop_vinfo);
  if (!ok)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "unexpected pattern.\n");
      return false;
    }

  /* Analyze data dependences between the data-refs in the loop
     and adjust the maximum vectorization factor according to
     the dependences.
     FORNOW: fail at the first data dependence that we encounter.  */

  ok = vect_analyze_data_ref_dependences (loop_vinfo, &max_vf);
  if (!ok
      || max_vf < min_vf)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "bad data dependence.\n");
      return false;
    }

  ok = vect_determine_vectorization_factor (loop_vinfo);
  if (!ok)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "can't determine vectorization factor.\n");
      return false;
    }
  if (max_vf < LOOP_VINFO_VECT_FACTOR (loop_vinfo))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "bad data dependence.\n");
      return false;
    }

  /* Analyze the alignment of the data-refs in the loop.
     Fail if a data reference is found that cannot be vectorized.  */

  ok = vect_analyze_data_refs_alignment (loop_vinfo, NULL);
  if (!ok)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "bad data alignment.\n");
      return false;
    }

  /* Prune the list of ddrs to be tested at run-time by versioning for alias.
     It is important to call pruning after vect_analyze_data_ref_accesses,
     since we use grouping information gathered by interleaving analysis.  */
  ok = vect_prune_runtime_alias_test_list (loop_vinfo);
  if (!ok)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "number of versioning for alias "
                         "run-time tests exceeds %d "
                         "(--param vect-max-version-for-alias-checks)\n",
                         PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS));
      return false;
    }

  /* This pass will decide on using loop versioning and/or loop peeling in
     order to enhance the alignment of data references in the loop.  */

  ok = vect_enhance_data_refs_alignment (loop_vinfo);
  if (!ok)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "bad data alignment.\n");
      return false;
    }

  /* Check the SLP opportunities in the loop, analyze and build SLP trees.  */
  ok = vect_analyze_slp (loop_vinfo, NULL, n_stmts);
  if (ok)
    {
      /* Decide which possible SLP instances to SLP.  */
      slp = vect_make_slp_decision (loop_vinfo);

      /* Find stmts that need to be both vectorized and SLPed.  */
      vect_detect_hybrid_slp (loop_vinfo);
    }
  else
    return false;

  /* Scan all the operations in the loop and make sure they are
     vectorizable.  */

  ok = vect_analyze_loop_operations (loop_vinfo, slp);
  if (!ok)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "bad operation or unsupported loop bound.\n");
      return false;
    }

  /* Decide whether we need to create an epilogue loop to handle
     remaining scalar iterations.  */
  th = ((LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo) + 1)
        / LOOP_VINFO_VECT_FACTOR (loop_vinfo))
       * LOOP_VINFO_VECT_FACTOR (loop_vinfo);
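
  /* Worked example (illustrative): with a cost-model threshold of 10 and
     VF = 4, th becomes ((10 + 1) / 4) * 4 = 8, i.e. 11 rounded down to a
     multiple of the vectorization factor.  */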
  if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      && LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) > 0)
    {
      if (ctz_hwi (LOOP_VINFO_INT_NITERS (loop_vinfo)
                   - LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo))
          < exact_log2 (LOOP_VINFO_VECT_FACTOR (loop_vinfo)))
        LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = true;
    }
  else if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo)
           || (tree_ctz (LOOP_VINFO_NITERS (loop_vinfo))
               < (unsigned)exact_log2 (LOOP_VINFO_VECT_FACTOR (loop_vinfo))
               /* In case of versioning, check if the maximum number of
                  iterations is greater than th.  If they are identical,
                  the epilogue is unnecessary.  */
               && ((!LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo)
                    && !LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
                   || (unsigned HOST_WIDE_INT)max_stmt_executions_int
                        (LOOP_VINFO_LOOP (loop_vinfo)) > th)))
    LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = true;

  /* If an epilogue loop is required make sure we can create one.  */
  if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
      || LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location, "epilog loop required\n");
      if (!vect_can_advance_ivs_p (loop_vinfo)
          || !slpeel_can_duplicate_loop_p (LOOP_VINFO_LOOP (loop_vinfo),
                                           single_exit (LOOP_VINFO_LOOP
                                                         (loop_vinfo))))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "not vectorized: can't create required "
                             "epilog loop\n");
          return false;
        }
    }

  return true;
}

/* Function vect_analyze_loop.

   Apply a set of analyses on LOOP, and create a loop_vec_info struct
   for it.  The different analyses will record information in the
   loop_vec_info struct.  */
1828 loop_vec_info
1829 vect_analyze_loop (struct loop *loop)
1831 loop_vec_info loop_vinfo;
1832 unsigned int vector_sizes;
1834 /* Autodetect first vector size we try. */
1835 current_vector_size = 0;
1836 vector_sizes = targetm.vectorize.autovectorize_vector_sizes ();
1838 if (dump_enabled_p ())
1839 dump_printf_loc (MSG_NOTE, vect_location,
1840 "===== analyze_loop_nest =====\n");
1842 if (loop_outer (loop)
1843 && loop_vec_info_for_loop (loop_outer (loop))
1844 && LOOP_VINFO_VECTORIZABLE_P (loop_vec_info_for_loop (loop_outer (loop))))
1846 if (dump_enabled_p ())
1847 dump_printf_loc (MSG_NOTE, vect_location,
1848 "outer-loop already vectorized.\n");
1849 return NULL;
1852 while (1)
1854 /* Check the CFG characteristics of the loop (nesting, entry/exit). */
1855 loop_vinfo = vect_analyze_loop_form (loop);
1856 if (!loop_vinfo)
1858 if (dump_enabled_p ())
1859 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1860 "bad loop form.\n");
1861 return NULL;
1864 if (vect_analyze_loop_2 (loop_vinfo))
1866 LOOP_VINFO_VECTORIZABLE_P (loop_vinfo) = 1;
1868 return loop_vinfo;
1871 destroy_loop_vec_info (loop_vinfo, true);
1873 vector_sizes &= ~current_vector_size;
1874 if (vector_sizes == 0
1875 || current_vector_size == 0)
1876 return NULL;
1878 /* Try the next biggest vector size. */
1879 current_vector_size = 1 << floor_log2 (vector_sizes);
1880 if (dump_enabled_p ())
1881 dump_printf_loc (MSG_NOTE, vect_location,
1882 "***** Re-trying analysis with "
1883 "vector size %d\n", current_vector_size);
1888 /* Function reduction_code_for_scalar_code
1890 Input:
1891 CODE - tree_code of a reduction operations.
1893 Output:
1894 REDUC_CODE - the corresponding tree-code to be used to reduce the
1895 vector of partial results into a single scalar result (which
1896 will also reside in a vector) or ERROR_MARK if the operation is
1897 a supported reduction operation but has no such tree-code.
1899 Return FALSE if CODE currently cannot be vectorized as a reduction. */
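/* Illustration (not from the original source): for "s = MAX (s, a[i])"
   the caller receives REDUC_MAX_EXPR and can reduce the vector of
   partial maxima with a single target operation, whereas for
   "s *= a[i]" REDUC_CODE is ERROR_MARK and the epilogue must combine
   the vector elements with shifts and scalar extracts instead.  */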
1901 static bool
1902 reduction_code_for_scalar_code (enum tree_code code,
1903 enum tree_code *reduc_code)
1905 switch (code)
1907 case MAX_EXPR:
1908 *reduc_code = REDUC_MAX_EXPR;
1909 return true;
1911 case MIN_EXPR:
1912 *reduc_code = REDUC_MIN_EXPR;
1913 return true;
1915 case PLUS_EXPR:
1916 *reduc_code = REDUC_PLUS_EXPR;
1917 return true;
1919 case MULT_EXPR:
1920 case MINUS_EXPR:
1921 case BIT_IOR_EXPR:
1922 case BIT_XOR_EXPR:
1923 case BIT_AND_EXPR:
1924 *reduc_code = ERROR_MARK;
1925 return true;
1927 default:
1928 return false;
1933 /* Error reporting helper for vect_is_simple_reduction below. GIMPLE statement
1934 STMT is printed with a message MSG. */
1936 static void
1937 report_vect_op (int msg_type, gimple stmt, const char *msg)
1939 dump_printf_loc (msg_type, vect_location, "%s", msg);
1940 dump_gimple_stmt (msg_type, TDF_SLIM, stmt, 0);
1941 dump_printf (msg_type, "\n");
1945 /* Detect SLP reduction of the form:
1947 #a1 = phi <a5, a0>
1948 a2 = operation (a1)
1949 a3 = operation (a2)
1950 a4 = operation (a3)
1951 a5 = operation (a4)
1953 #a = phi <a5>
1955 PHI is the reduction phi node (#a1 = phi <a5, a0> above)
1956 FIRST_STMT is the first reduction stmt in the chain
1957 (a2 = operation (a1)).
1959 Return TRUE if a reduction chain was detected. */
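/* An illustrative source-level loop that gives rise to such a chain
   (a sketch, not taken from the original sources):

     int s = 0;
     for (int i = 0; i < n; i += 4)
       {
         s = s + a[i];
         s = s + a[i + 1];
         s = s + a[i + 2];
         s = s + a[i + 3];
       }

   Each "s = s + ..." statement becomes one link of the chain rooted at
   the reduction PHI for s.  */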
1961 static bool
1962 vect_is_slp_reduction (loop_vec_info loop_info, gimple phi, gimple first_stmt)
1964 struct loop *loop = (gimple_bb (phi))->loop_father;
1965 struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
1966 enum tree_code code;
1967 gimple current_stmt = NULL, loop_use_stmt = NULL, first, next_stmt;
1968 stmt_vec_info use_stmt_info, current_stmt_info;
1969 tree lhs;
1970 imm_use_iterator imm_iter;
1971 use_operand_p use_p;
1972 int nloop_uses, size = 0, n_out_of_loop_uses;
1973 bool found = false;
1975 if (loop != vect_loop)
1976 return false;
1978 lhs = PHI_RESULT (phi);
1979 code = gimple_assign_rhs_code (first_stmt);
1980 while (1)
1982 nloop_uses = 0;
1983 n_out_of_loop_uses = 0;
1984 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
1986 gimple use_stmt = USE_STMT (use_p);
1987 if (is_gimple_debug (use_stmt))
1988 continue;
1990 /* Check if we got back to the reduction phi. */
1991 if (use_stmt == phi)
1993 loop_use_stmt = use_stmt;
1994 found = true;
1995 break;
1998 if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
2000 if (vinfo_for_stmt (use_stmt)
2001 && !STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (use_stmt)))
2003 loop_use_stmt = use_stmt;
2004 nloop_uses++;
2007 else
2008 n_out_of_loop_uses++;
2010 /* There can be either a single use in the loop or two uses in
2011 phi nodes. */
2012 if (nloop_uses > 1 || (n_out_of_loop_uses && nloop_uses))
2013 return false;
2016 if (found)
2017 break;
2019 /* We reached a statement with no loop uses. */
2020 if (nloop_uses == 0)
2021 return false;
2023 /* This is a loop exit phi, and we haven't reached the reduction phi. */
2024 if (gimple_code (loop_use_stmt) == GIMPLE_PHI)
2025 return false;
2027 if (!is_gimple_assign (loop_use_stmt)
2028 || code != gimple_assign_rhs_code (loop_use_stmt)
2029 || !flow_bb_inside_loop_p (loop, gimple_bb (loop_use_stmt)))
2030 return false;
2032 /* Insert USE_STMT into reduction chain. */
2033 use_stmt_info = vinfo_for_stmt (loop_use_stmt);
2034 if (current_stmt)
2036 current_stmt_info = vinfo_for_stmt (current_stmt);
2037 GROUP_NEXT_ELEMENT (current_stmt_info) = loop_use_stmt;
2038 GROUP_FIRST_ELEMENT (use_stmt_info)
2039 = GROUP_FIRST_ELEMENT (current_stmt_info);
2041 else
2042 GROUP_FIRST_ELEMENT (use_stmt_info) = loop_use_stmt;
2044 lhs = gimple_assign_lhs (loop_use_stmt);
2045 current_stmt = loop_use_stmt;
2046 size++;
2049 if (!found || loop_use_stmt != phi || size < 2)
2050 return false;
2052 /* Swap the operands, if needed, to make the reduction operand be the second
2053 operand. */
2054 lhs = PHI_RESULT (phi);
2055 next_stmt = GROUP_FIRST_ELEMENT (vinfo_for_stmt (current_stmt));
2056 while (next_stmt)
2058 if (gimple_assign_rhs2 (next_stmt) == lhs)
2060 tree op = gimple_assign_rhs1 (next_stmt);
2061 gimple def_stmt = NULL;
2063 if (TREE_CODE (op) == SSA_NAME)
2064 def_stmt = SSA_NAME_DEF_STMT (op);
2066 /* Check that the other def is either defined in the loop
2067 ("vect_internal_def"), or it's an induction (defined by a
2068 loop-header phi-node). */
2069 if (def_stmt
2070 && gimple_bb (def_stmt)
2071 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
2072 && (is_gimple_assign (def_stmt)
2073 || is_gimple_call (def_stmt)
2074 || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
2075 == vect_induction_def
2076 || (gimple_code (def_stmt) == GIMPLE_PHI
2077 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
2078 == vect_internal_def
2079 && !is_loop_header_bb_p (gimple_bb (def_stmt)))))
2081 lhs = gimple_assign_lhs (next_stmt);
2082 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
2083 continue;
2086 return false;
2088 else
2090 tree op = gimple_assign_rhs2 (next_stmt);
2091 gimple def_stmt = NULL;
2093 if (TREE_CODE (op) == SSA_NAME)
2094 def_stmt = SSA_NAME_DEF_STMT (op);
2096 /* Check that the other def is either defined in the loop
2097 ("vect_internal_def"), or it's an induction (defined by a
2098 loop-header phi-node). */
2099 if (def_stmt
2100 && gimple_bb (def_stmt)
2101 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
2102 && (is_gimple_assign (def_stmt)
2103 || is_gimple_call (def_stmt)
2104 || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
2105 == vect_induction_def
2106 || (gimple_code (def_stmt) == GIMPLE_PHI
2107 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
2108 == vect_internal_def
2109 && !is_loop_header_bb_p (gimple_bb (def_stmt)))))
2111 if (dump_enabled_p ())
2113 dump_printf_loc (MSG_NOTE, vect_location, "swapping oprnds: ");
2114 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, next_stmt, 0);
2115 dump_printf (MSG_NOTE, "\n");
2118 swap_ssa_operands (next_stmt,
2119 gimple_assign_rhs1_ptr (next_stmt),
2120 gimple_assign_rhs2_ptr (next_stmt));
2121 update_stmt (next_stmt);
2123 if (CONSTANT_CLASS_P (gimple_assign_rhs1 (next_stmt)))
2124 LOOP_VINFO_OPERANDS_SWAPPED (loop_info) = true;
2126 else
2127 return false;
2130 lhs = gimple_assign_lhs (next_stmt);
2131 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
2134 /* Save the chain for further analysis in SLP detection. */
2135 first = GROUP_FIRST_ELEMENT (vinfo_for_stmt (current_stmt));
2136 LOOP_VINFO_REDUCTION_CHAINS (loop_info).safe_push (first);
2137 GROUP_SIZE (vinfo_for_stmt (first)) = size;
2139 return true;
2143 /* Function vect_is_simple_reduction_1
2145 (1) Detect a cross-iteration def-use cycle that represents a simple
2146 reduction computation. We look for the following pattern:
2148 loop_header:
2149 a1 = phi < a0, a2 >
2150 a3 = ...
2151 a2 = operation (a3, a1)
2153 or
2155 a3 = ...
2156 loop_header:
2157 a1 = phi < a0, a2 >
2158 a2 = operation (a3, a1)
2160 such that:
2161 1. operation is commutative and associative and it is safe to
2162 change the order of the computation (if CHECK_REDUCTION is true)
2163 2. no uses for a2 in the loop (a2 is used out of the loop)
2164 3. no uses of a1 in the loop besides the reduction operation
2165 4. no uses of a1 outside the loop.
2167 Conditions 1,4 are tested here.
2168 Conditions 2,3 are tested in vect_mark_stmts_to_be_vectorized.
2170 (2) Detect a cross-iteration def-use cycle in nested loops, i.e.,
2171 nested cycles, if CHECK_REDUCTION is false.
2173 (3) Detect cycles of phi nodes in outer-loop vectorization, i.e., double
2174 reductions:
2176 a1 = phi < a0, a2 >
2177 inner loop (def of a3)
2178 a2 = phi < a3 >
2180 If MODIFY is true it tries also to rework the code in-place to enable
2181 detection of more reduction patterns. For the time being we rewrite
2182 "res -= RHS" into "rhs += -RHS" when it seems worthwhile.
2185 static gimple
2186 vect_is_simple_reduction_1 (loop_vec_info loop_info, gimple phi,
2187 bool check_reduction, bool *double_reduc,
2188 bool modify)
2190 struct loop *loop = (gimple_bb (phi))->loop_father;
2191 struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
2192 edge latch_e = loop_latch_edge (loop);
2193 tree loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
2194 gimple def_stmt, def1 = NULL, def2 = NULL;
2195 enum tree_code orig_code, code;
2196 tree op1, op2, op3 = NULL_TREE, op4 = NULL_TREE;
2197 tree type;
2198 int nloop_uses;
2199 tree name;
2200 imm_use_iterator imm_iter;
2201 use_operand_p use_p;
2202 bool phi_def;
2204 *double_reduc = false;
2206 /* If CHECK_REDUCTION is true, we assume inner-most loop vectorization,
2207 otherwise, we assume outer loop vectorization. */
2208 gcc_assert ((check_reduction && loop == vect_loop)
2209 || (!check_reduction && flow_loop_nested_p (vect_loop, loop)));
2211 name = PHI_RESULT (phi);
2212 /* ??? If there are no uses of the PHI result the inner loop reduction
2213 won't be detected as possibly double-reduction by vectorizable_reduction
2214 because that tries to walk the PHI arg from the preheader edge which
2215 can be constant. See PR60382. */
2216 if (has_zero_uses (name))
2217 return NULL;
2218 nloop_uses = 0;
2219 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
2221 gimple use_stmt = USE_STMT (use_p);
2222 if (is_gimple_debug (use_stmt))
2223 continue;
2225 if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
2227 if (dump_enabled_p ())
2228 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2229 "intermediate value used outside loop.\n");
2231 return NULL;
2234 if (vinfo_for_stmt (use_stmt)
2235 && !is_pattern_stmt_p (vinfo_for_stmt (use_stmt)))
2236 nloop_uses++;
2237 if (nloop_uses > 1)
2239 if (dump_enabled_p ())
2240 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2241 "reduction used in loop.\n");
2242 return NULL;
2246 if (TREE_CODE (loop_arg) != SSA_NAME)
2248 if (dump_enabled_p ())
2250 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2251 "reduction: not ssa_name: ");
2252 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, loop_arg);
2253 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
2255 return NULL;
2258 def_stmt = SSA_NAME_DEF_STMT (loop_arg);
2259 if (!def_stmt)
2261 if (dump_enabled_p ())
2262 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2263 "reduction: no def_stmt.\n");
2264 return NULL;
2267 if (!is_gimple_assign (def_stmt) && gimple_code (def_stmt) != GIMPLE_PHI)
2269 if (dump_enabled_p ())
2271 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
2272 dump_printf (MSG_NOTE, "\n");
2274 return NULL;
2277 if (is_gimple_assign (def_stmt))
2279 name = gimple_assign_lhs (def_stmt);
2280 phi_def = false;
2282 else
2284 name = PHI_RESULT (def_stmt);
2285 phi_def = true;
2288 nloop_uses = 0;
2289 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
2291 gimple use_stmt = USE_STMT (use_p);
2292 if (is_gimple_debug (use_stmt))
2293 continue;
2294 if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))
2295 && vinfo_for_stmt (use_stmt)
2296 && !is_pattern_stmt_p (vinfo_for_stmt (use_stmt)))
2297 nloop_uses++;
2298 if (nloop_uses > 1)
2300 if (dump_enabled_p ())
2301 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2302 "reduction used in loop.\n");
2303 return NULL;
2307 /* If DEF_STMT is a phi node itself, we expect it to have a single argument
2308 defined in the inner loop. */
2309 if (phi_def)
2311 op1 = PHI_ARG_DEF (def_stmt, 0);
2313 if (gimple_phi_num_args (def_stmt) != 1
2314 || TREE_CODE (op1) != SSA_NAME)
2316 if (dump_enabled_p ())
2317 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2318 "unsupported phi node definition.\n");
2320 return NULL;
2323 def1 = SSA_NAME_DEF_STMT (op1);
2324 if (gimple_bb (def1)
2325 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
2326 && loop->inner
2327 && flow_bb_inside_loop_p (loop->inner, gimple_bb (def1))
2328 && is_gimple_assign (def1))
2330 if (dump_enabled_p ())
2331 report_vect_op (MSG_NOTE, def_stmt,
2332 "detected double reduction: ");
2334 *double_reduc = true;
2335 return def_stmt;
2338 return NULL;
2341 code = orig_code = gimple_assign_rhs_code (def_stmt);
2343 /* We can handle "res -= x[i]", which is non-associative by
2344 simply rewriting this into "res += -x[i]". Avoid changing
2345 gimple instruction for the first simple tests and only do this
2346 if we're allowed to change code at all. */
2347 if (code == MINUS_EXPR
2348 && modify
2349 && (op1 = gimple_assign_rhs1 (def_stmt))
2350 && TREE_CODE (op1) == SSA_NAME
2351 && SSA_NAME_DEF_STMT (op1) == phi)
2352 code = PLUS_EXPR;
2354 if (check_reduction
2355 && (!commutative_tree_code (code) || !associative_tree_code (code)))
2357 if (dump_enabled_p ())
2358 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
2359 "reduction: not commutative/associative: ");
2360 return NULL;
2363 if (get_gimple_rhs_class (code) != GIMPLE_BINARY_RHS)
2365 if (code != COND_EXPR)
2367 if (dump_enabled_p ())
2368 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
2369 "reduction: not binary operation: ");
2371 return NULL;
2374 op3 = gimple_assign_rhs1 (def_stmt);
2375 if (COMPARISON_CLASS_P (op3))
2377 op4 = TREE_OPERAND (op3, 1);
2378 op3 = TREE_OPERAND (op3, 0);
2381 op1 = gimple_assign_rhs2 (def_stmt);
2382 op2 = gimple_assign_rhs3 (def_stmt);
2384 if (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op2) != SSA_NAME)
2386 if (dump_enabled_p ())
2387 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
2388 "reduction: uses not ssa_names: ");
2390 return NULL;
2393 else
2395 op1 = gimple_assign_rhs1 (def_stmt);
2396 op2 = gimple_assign_rhs2 (def_stmt);
2398 if (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op2) != SSA_NAME)
2400 if (dump_enabled_p ())
2401 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
2402 "reduction: uses not ssa_names: ");
2404 return NULL;
2408 type = TREE_TYPE (gimple_assign_lhs (def_stmt));
2409 if ((TREE_CODE (op1) == SSA_NAME
2410 && !types_compatible_p (type,TREE_TYPE (op1)))
2411 || (TREE_CODE (op2) == SSA_NAME
2412 && !types_compatible_p (type, TREE_TYPE (op2)))
2413 || (op3 && TREE_CODE (op3) == SSA_NAME
2414 && !types_compatible_p (type, TREE_TYPE (op3)))
2415 || (op4 && TREE_CODE (op4) == SSA_NAME
2416 && !types_compatible_p (type, TREE_TYPE (op4))))
2418 if (dump_enabled_p ())
2420 dump_printf_loc (MSG_NOTE, vect_location,
2421 "reduction: multiple types: operation type: ");
2422 dump_generic_expr (MSG_NOTE, TDF_SLIM, type);
2423 dump_printf (MSG_NOTE, ", operands types: ");
2424 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2425 TREE_TYPE (op1));
2426 dump_printf (MSG_NOTE, ",");
2427 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2428 TREE_TYPE (op2));
2429 if (op3)
2431 dump_printf (MSG_NOTE, ",");
2432 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2433 TREE_TYPE (op3));
2436 if (op4)
2438 dump_printf (MSG_NOTE, ",");
2439 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2440 TREE_TYPE (op4));
2442 dump_printf (MSG_NOTE, "\n");
2445 return NULL;
2448 /* Check that it's ok to change the order of the computation.
2449 Generally, when vectorizing a reduction we change the order of the
2450 computation. This may change the behavior of the program in some
2451 cases, so we need to check that this is ok. One exception is when
2452 vectorizing an outer-loop: the inner-loop is executed sequentially,
2453 and therefore vectorizing reductions in the inner-loop during
2454 outer-loop vectorization is safe. */
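/* Illustration of why reassociation is unsafe for IEEE float without
   -fassociative-math: (1e20f + -1e20f) + 3.0f evaluates to 3.0f, but
   1e20f + (-1e20f + 3.0f) evaluates to 0.0f, because 3.0f is absorbed
   by the large-magnitude operand.  */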
2456 /* CHECKME: check for !flag_finite_math_only too? */
2457 if (SCALAR_FLOAT_TYPE_P (type) && !flag_associative_math
2458 && check_reduction)
2460 /* Changing the order of operations changes the semantics. */
2461 if (dump_enabled_p ())
2462 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
2463 "reduction: unsafe fp math optimization: ");
2464 return NULL;
2466 else if (INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type)
2467 && check_reduction)
2469 /* Changing the order of operations changes the semantics. */
2470 if (dump_enabled_p ())
2471 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
2472 "reduction: unsafe int math optimization: ");
2473 return NULL;
2475 else if (SAT_FIXED_POINT_TYPE_P (type) && check_reduction)
2477 /* Changing the order of operations changes the semantics. */
2478 if (dump_enabled_p ())
2479 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
2480 "reduction: unsafe fixed-point math optimization: ");
2481 return NULL;
2484 /* If we detected "res -= x[i]" earlier, rewrite it into
2485 "res += -x[i]" now. If this turns out to be useless reassoc
2486 will clean it up again. */
2487 if (orig_code == MINUS_EXPR)
2489 tree rhs = gimple_assign_rhs2 (def_stmt);
2490 tree negrhs = make_ssa_name (TREE_TYPE (rhs), NULL);
2491 gimple negate_stmt = gimple_build_assign_with_ops (NEGATE_EXPR, negrhs,
2492 rhs, NULL);
2493 gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
2494 set_vinfo_for_stmt (negate_stmt, new_stmt_vec_info (negate_stmt,
2495 loop_info, NULL));
2496 gsi_insert_before (&gsi, negate_stmt, GSI_NEW_STMT);
2497 gimple_assign_set_rhs2 (def_stmt, negrhs);
2498 gimple_assign_set_rhs_code (def_stmt, PLUS_EXPR);
2499 update_stmt (def_stmt);
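/* Before/after sketch of the rewrite above (illustrative GIMPLE):

     res_2 = res_1 - x_3;    becomes    neg_4 = -x_3;
                                        res_2 = res_1 + neg_4;  */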
2502 /* Reduction is safe. We're dealing with one of the following:
2503 1) integer arithmetic and no trapv
2504 2) floating point arithmetic, and special flags permit this optimization
2505 3) nested cycle (i.e., outer loop vectorization). */
2506 if (TREE_CODE (op1) == SSA_NAME)
2507 def1 = SSA_NAME_DEF_STMT (op1);
2509 if (TREE_CODE (op2) == SSA_NAME)
2510 def2 = SSA_NAME_DEF_STMT (op2);
2512 if (code != COND_EXPR
2513 && ((!def1 || gimple_nop_p (def1)) && (!def2 || gimple_nop_p (def2))))
2515 if (dump_enabled_p ())
2516 report_vect_op (MSG_NOTE, def_stmt, "reduction: no defs for operands: ");
2517 return NULL;
2520 /* Check that one def is the reduction def, defined by PHI,
2521 the other def is either defined in the loop ("vect_internal_def"),
2522 or it's an induction (defined by a loop-header phi-node). */
2524 if (def2 && def2 == phi
2525 && (code == COND_EXPR
2526 || !def1 || gimple_nop_p (def1)
2527 || !flow_bb_inside_loop_p (loop, gimple_bb (def1))
2528 || (def1 && flow_bb_inside_loop_p (loop, gimple_bb (def1))
2529 && (is_gimple_assign (def1)
2530 || is_gimple_call (def1)
2531 || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1))
2532 == vect_induction_def
2533 || (gimple_code (def1) == GIMPLE_PHI
2534 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1))
2535 == vect_internal_def
2536 && !is_loop_header_bb_p (gimple_bb (def1)))))))
2538 if (dump_enabled_p ())
2539 report_vect_op (MSG_NOTE, def_stmt, "detected reduction: ");
2540 return def_stmt;
2543 if (def1 && def1 == phi
2544 && (code == COND_EXPR
2545 || !def2 || gimple_nop_p (def2)
2546 || !flow_bb_inside_loop_p (loop, gimple_bb (def2))
2547 || (def2 && flow_bb_inside_loop_p (loop, gimple_bb (def2))
2548 && (is_gimple_assign (def2)
2549 || is_gimple_call (def2)
2550 || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2))
2551 == vect_induction_def
2552 || (gimple_code (def2) == GIMPLE_PHI
2553 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2))
2554 == vect_internal_def
2555 && !is_loop_header_bb_p (gimple_bb (def2)))))))
2557 if (check_reduction)
2559 /* Swap operands (just for simplicity - so that the rest of the code
2560 can assume that the reduction variable is always the last (second)
2561 argument). */
2562 if (dump_enabled_p ())
2563 report_vect_op (MSG_NOTE, def_stmt,
2564 "detected reduction: need to swap operands: ");
2566 swap_ssa_operands (def_stmt, gimple_assign_rhs1_ptr (def_stmt),
2567 gimple_assign_rhs2_ptr (def_stmt));
2569 if (CONSTANT_CLASS_P (gimple_assign_rhs1 (def_stmt)))
2570 LOOP_VINFO_OPERANDS_SWAPPED (loop_info) = true;
2572 else
2574 if (dump_enabled_p ())
2575 report_vect_op (MSG_NOTE, def_stmt, "detected reduction: ");
2578 return def_stmt;
2581 /* Try to find SLP reduction chain. */
2582 if (check_reduction && vect_is_slp_reduction (loop_info, phi, def_stmt))
2584 if (dump_enabled_p ())
2585 report_vect_op (MSG_NOTE, def_stmt,
2586 "reduction: detected reduction chain: ");
2588 return def_stmt;
2591 if (dump_enabled_p ())
2592 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
2593 "reduction: unknown pattern: ");
2595 return NULL;
2598 /* Wrapper around vect_is_simple_reduction_1, which does not modify code
2599 in-place. Arguments are as for that function. */
2601 static gimple
2602 vect_is_simple_reduction (loop_vec_info loop_info, gimple phi,
2603 bool check_reduction, bool *double_reduc)
2605 return vect_is_simple_reduction_1 (loop_info, phi, check_reduction,
2606 double_reduc, false);
2609 /* Wrapper around vect_is_simple_reduction_1, which may modify code
2610 in-place if that enables detection of more reductions. Arguments
2611 are as for that function. */
2613 gimple
2614 vect_force_simple_reduction (loop_vec_info loop_info, gimple phi,
2615 bool check_reduction, bool *double_reduc)
2617 return vect_is_simple_reduction_1 (loop_info, phi, check_reduction,
2618 double_reduc, true);
2621 /* Calculate the cost of one scalar iteration of the loop. */
2622 static int
2623 vect_get_single_scalar_iteration_cost (loop_vec_info loop_vinfo)
2625 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2626 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
2627 int nbbs = loop->num_nodes, factor, scalar_single_iter_cost = 0;
2628 int innerloop_iters, i, stmt_cost;
2630 /* Count statements in scalar loop. Using this as scalar cost for a single
2631 iteration for now.
2633 TODO: Add outer loop support.
2635 TODO: Consider assigning different costs to different scalar
2636 statements. */
2638 /* FORNOW. */
2639 innerloop_iters = 1;
2640 if (loop->inner)
2641 innerloop_iters = 50; /* FIXME */
2643 for (i = 0; i < nbbs; i++)
2645 gimple_stmt_iterator si;
2646 basic_block bb = bbs[i];
2648 if (bb->loop_father == loop->inner)
2649 factor = innerloop_iters;
2650 else
2651 factor = 1;
2653 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
2655 gimple stmt = gsi_stmt (si);
2656 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2658 if (!is_gimple_assign (stmt) && !is_gimple_call (stmt))
2659 continue;
2661 /* Skip stmts that are not vectorized inside the loop. */
2662 if (stmt_info
2663 && !STMT_VINFO_RELEVANT_P (stmt_info)
2664 && (!STMT_VINFO_LIVE_P (stmt_info)
2665 || !VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
2666 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
2667 continue;
2669 if (STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt)))
2671 if (DR_IS_READ (STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt))))
2672 stmt_cost = vect_get_stmt_cost (scalar_load);
2673 else
2674 stmt_cost = vect_get_stmt_cost (scalar_store);
2676 else
2677 stmt_cost = vect_get_stmt_cost (scalar_stmt);
2679 scalar_single_iter_cost += stmt_cost * factor;
2682 return scalar_single_iter_cost;
2685 /* Calculate cost of peeling the loop PEEL_ITERS_PROLOGUE times. */
2686 int
2687 vect_get_known_peeling_cost (loop_vec_info loop_vinfo, int peel_iters_prologue,
2688 int *peel_iters_epilogue,
2689 int scalar_single_iter_cost,
2690 stmt_vector_for_cost *prologue_cost_vec,
2691 stmt_vector_for_cost *epilogue_cost_vec)
2693 int retval = 0;
2694 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2696 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
2698 *peel_iters_epilogue = vf/2;
2699 if (dump_enabled_p ())
2700 dump_printf_loc (MSG_NOTE, vect_location,
2701 "cost model: epilogue peel iters set to vf/2 "
2702 "because loop iterations are unknown .\n");
2704 /* If peeled iterations are known but number of scalar loop
2705 iterations are unknown, count a taken branch per peeled loop. */
2706 retval = record_stmt_cost (prologue_cost_vec, 2, cond_branch_taken,
2707 NULL, 0, vect_prologue);
2709 else
2711 int niters = LOOP_VINFO_INT_NITERS (loop_vinfo);
2712 peel_iters_prologue = niters < peel_iters_prologue ?
2713 niters : peel_iters_prologue;
2714 *peel_iters_epilogue = (niters - peel_iters_prologue) % vf;
2715 /* If we need to peel for gaps, but no peeling is required, we have to
2716 peel VF iterations. */
2717 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) && !*peel_iters_epilogue)
2718 *peel_iters_epilogue = vf;
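/* Worked example (illustrative numbers): niters = 100,
   peel_iters_prologue = 3 and vf = 8 give *peel_iters_epilogue
   = (100 - 3) % 8 = 1; had the remainder been 0 while peeling for
   gaps, it would have been bumped to vf = 8.  */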
2721 if (peel_iters_prologue)
2722 retval += record_stmt_cost (prologue_cost_vec,
2723 peel_iters_prologue * scalar_single_iter_cost,
2724 scalar_stmt, NULL, 0, vect_prologue);
2725 if (*peel_iters_epilogue)
2726 retval += record_stmt_cost (epilogue_cost_vec,
2727 *peel_iters_epilogue * scalar_single_iter_cost,
2728 scalar_stmt, NULL, 0, vect_epilogue);
2729 return retval;
2732 /* Function vect_estimate_min_profitable_iters
2734 Compute the number of iterations required for the vector version of the
2735 loop to be profitable relative to the scalar version, returning it via
2736 RET_MIN_PROFITABLE_NITERS and RET_MIN_PROFITABLE_ESTIMATE. */
2738 static void
2739 vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo,
2740 int *ret_min_profitable_niters,
2741 int *ret_min_profitable_estimate)
2743 int min_profitable_iters;
2744 int min_profitable_estimate;
2745 int peel_iters_prologue;
2746 int peel_iters_epilogue;
2747 unsigned vec_inside_cost = 0;
2748 int vec_outside_cost = 0;
2749 unsigned vec_prologue_cost = 0;
2750 unsigned vec_epilogue_cost = 0;
2751 int scalar_single_iter_cost = 0;
2752 int scalar_outside_cost = 0;
2753 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2754 int npeel = LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
2755 void *target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
2757 /* Cost model disabled. */
2758 if (unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
2760 dump_printf_loc (MSG_NOTE, vect_location, "cost model disabled.\n");
2761 *ret_min_profitable_niters = 0;
2762 *ret_min_profitable_estimate = 0;
2763 return;
2766 /* Requires loop versioning tests to handle misalignment. */
2767 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
2769 /* FIXME: Make cost depend on complexity of individual check. */
2770 unsigned len = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).length ();
2771 (void) add_stmt_cost (target_cost_data, len, vector_stmt, NULL, 0,
2772 vect_prologue);
2773 dump_printf (MSG_NOTE,
2774 "cost model: Adding cost of checks for loop "
2775 "versioning to treat misalignment.\n");
2778 /* Requires loop versioning with alias checks. */
2779 if (LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
2781 /* FIXME: Make cost depend on complexity of individual check. */
2782 unsigned len = LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo).length ();
2783 (void) add_stmt_cost (target_cost_data, len, vector_stmt, NULL, 0,
2784 vect_prologue);
2785 dump_printf (MSG_NOTE,
2786 "cost model: Adding cost of checks for loop "
2787 "versioning aliasing.\n");
2790 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
2791 || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
2792 (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken, NULL, 0,
2793 vect_prologue);
2795 /* Count statements in scalar loop. Using this as scalar cost for a single
2796 iteration for now.
2798 TODO: Add outer loop support.
2800 TODO: Consider assigning different costs to different scalar
2801 statements. */
2803 scalar_single_iter_cost = vect_get_single_scalar_iteration_cost (loop_vinfo);
2804 /* ??? Below we use this cost as number of stmts with scalar_stmt cost,
2805 thus divide by that. This introduces rounding errors, thus better
2806 introduce a new cost kind (raw_cost? scalar_iter_cost?). */
2807 int scalar_single_iter_stmts
2808 = scalar_single_iter_cost / vect_get_stmt_cost (scalar_stmt);
2810 /* Add additional cost for the peeled instructions in prologue and epilogue
2811 loop.
2813 FORNOW: If we don't know the value of peel_iters for prologue or epilogue
2814 at compile-time - we assume it's vf/2 (the worst would be vf-1).
2816 TODO: Build an expression that represents peel_iters for prologue and
2817 epilogue to be used in a run-time test. */
2819 if (npeel < 0)
2821 peel_iters_prologue = vf/2;
2822 dump_printf (MSG_NOTE, "cost model: "
2823 "prologue peel iters set to vf/2.\n");
2825 /* If peeling for alignment is unknown, the loop bound of the main loop
2826 becomes unknown. */
2827 peel_iters_epilogue = vf/2;
2828 dump_printf (MSG_NOTE, "cost model: "
2829 "epilogue peel iters set to vf/2 because "
2830 "peeling for alignment is unknown.\n");
2832 /* If peeled iterations are unknown, count a taken branch and a not taken
2833 branch per peeled loop. Even if scalar loop iterations are known,
2834 vector iterations are not known since peeled prologue iterations are
2835 not known. Hence guards remain the same. */
2836 (void) add_stmt_cost (target_cost_data, 2, cond_branch_taken,
2837 NULL, 0, vect_prologue);
2838 (void) add_stmt_cost (target_cost_data, 2, cond_branch_not_taken,
2839 NULL, 0, vect_prologue);
2840 /* FORNOW: Don't attempt to pass individual scalar instructions to
2841 the model; just assume linear cost for scalar iterations. */
2842 (void) add_stmt_cost (target_cost_data,
2843 peel_iters_prologue * scalar_single_iter_stmts,
2844 scalar_stmt, NULL, 0, vect_prologue);
2845 (void) add_stmt_cost (target_cost_data,
2846 peel_iters_epilogue * scalar_single_iter_stmts,
2847 scalar_stmt, NULL, 0, vect_epilogue);
2849 else
2851 stmt_vector_for_cost prologue_cost_vec, epilogue_cost_vec;
2852 stmt_info_for_cost *si;
2853 int j;
2854 void *data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
2856 prologue_cost_vec.create (2);
2857 epilogue_cost_vec.create (2);
2858 peel_iters_prologue = npeel;
2860 (void) vect_get_known_peeling_cost (loop_vinfo, peel_iters_prologue,
2861 &peel_iters_epilogue,
2862 scalar_single_iter_stmts,
2863 &prologue_cost_vec,
2864 &epilogue_cost_vec);
2866 FOR_EACH_VEC_ELT (prologue_cost_vec, j, si)
2868 struct _stmt_vec_info *stmt_info
2869 = si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
2870 (void) add_stmt_cost (data, si->count, si->kind, stmt_info,
2871 si->misalign, vect_prologue);
2874 FOR_EACH_VEC_ELT (epilogue_cost_vec, j, si)
2876 struct _stmt_vec_info *stmt_info
2877 = si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
2878 (void) add_stmt_cost (data, si->count, si->kind, stmt_info,
2879 si->misalign, vect_epilogue);
2882 prologue_cost_vec.release ();
2883 epilogue_cost_vec.release ();
2886 /* FORNOW: The scalar outside cost is incremented in one of the
2887 following ways:
2889 1. The vectorizer checks for alignment and aliasing and generates
2890 a condition that allows dynamic vectorization. A cost model
2891 check is ANDED with the versioning condition. Hence scalar code
2892 path now has the added cost of the versioning check.
2894 if (cost > th & versioning_check)
2895 jmp to vector code
2897 Hence run-time scalar is incremented by not-taken branch cost.
2899 2. The vectorizer then checks if a prologue is required. If the
2900 cost model check was not done before during versioning, it has to
2901 be done before the prologue check.
2903 if (cost <= th)
2904 prologue = scalar_iters
2905 if (prologue == 0)
2906 jmp to vector code
2907 else
2908 execute prologue
2909 if (prologue == num_iters)
2910 go to exit
2912 Hence the run-time scalar cost is incremented by a taken branch,
2913 plus a not-taken branch, plus a taken branch cost.
2915 3. The vectorizer then checks if an epilogue is required. If the
2916 cost model check was not done before during prologue check, it
2917 has to be done with the epilogue check.
2919 if (prologue == 0)
2920 jmp to vector code
2921 else
2922 execute prologue
2923 if (prologue == num_iters)
2924 go to exit
2925 vector code:
2926 if ((cost <= th) | (scalar_iters-prologue-epilogue == 0))
2927 jmp to epilogue
2929 Hence the run-time scalar cost should be incremented by 2 taken
2930 branches.
2932 TODO: The back end may reorder the BBs differently and reverse
2933 conditions/branch directions. Change the estimates below to
2934 something more reasonable. */
2936 /* If the number of iterations is known and we do not do versioning, we can
2937 decide whether to vectorize at compile time. Hence the scalar version
2938 does not carry cost model guard costs. */
2939 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
2940 || LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
2941 || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
2943 /* Cost model check occurs at versioning. */
2944 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
2945 || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
2946 scalar_outside_cost += vect_get_stmt_cost (cond_branch_not_taken);
2947 else
2949 /* Cost model check occurs at prologue generation. */
2950 if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0)
2951 scalar_outside_cost += 2 * vect_get_stmt_cost (cond_branch_taken)
2952 + vect_get_stmt_cost (cond_branch_not_taken);
2953 /* Cost model check occurs at epilogue generation. */
2954 else
2955 scalar_outside_cost += 2 * vect_get_stmt_cost (cond_branch_taken);
2959 /* Complete the target-specific cost calculations. */
2960 finish_cost (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo), &vec_prologue_cost,
2961 &vec_inside_cost, &vec_epilogue_cost);
2963 vec_outside_cost = (int)(vec_prologue_cost + vec_epilogue_cost);
2965 /* Calculate number of iterations required to make the vector version
2966 profitable, relative to the loop bodies only. The following condition
2967 must hold true:
2968 SIC * niters + SOC > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC
2969 where
2970 SIC = scalar iteration cost, VIC = vector iteration cost,
2971 VOC = vector outside cost, VF = vectorization factor,
2972 PL_ITERS = prologue iterations, EP_ITERS= epilogue iterations
2973 SOC = scalar outside cost for run time cost model check. */
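/* Numeric sketch (illustrative): SIC = 4, VIC = 6, VOC = 20, SOC = 0,
   VF = 4 and no peeling give
   min_profitable_iters = (20 * 4) / (4 * 4 - 6) = 8, and since
   4 * 4 * 8 <= 6 * 8 + 20 * 4 the increment below bumps it to 9.  */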
2975 if ((scalar_single_iter_cost * vf) > (int) vec_inside_cost)
2977 if (vec_outside_cost <= 0)
2978 min_profitable_iters = 1;
2979 else
2981 min_profitable_iters = ((vec_outside_cost - scalar_outside_cost) * vf
2982 - vec_inside_cost * peel_iters_prologue
2983 - vec_inside_cost * peel_iters_epilogue)
2984 / ((scalar_single_iter_cost * vf)
2985 - vec_inside_cost);
2987 if ((scalar_single_iter_cost * vf * min_profitable_iters)
2988 <= (((int) vec_inside_cost * min_profitable_iters)
2989 + (((int) vec_outside_cost - scalar_outside_cost) * vf)))
2990 min_profitable_iters++;
2993 /* vector version will never be profitable. */
2994 else
2996 if (LOOP_VINFO_LOOP (loop_vinfo)->force_vect)
2997 warning_at (vect_location, OPT_Wopenmp_simd, "vectorization "
2998 "did not happen for a simd loop");
3000 if (dump_enabled_p ())
3001 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3002 "cost model: the vector iteration cost = %d "
3003 "divided by the scalar iteration cost = %d "
3004 "is greater or equal to the vectorization factor = %d"
3005 ".\n",
3006 vec_inside_cost, scalar_single_iter_cost, vf);
3007 *ret_min_profitable_niters = -1;
3008 *ret_min_profitable_estimate = -1;
3009 return;
3012 if (dump_enabled_p ())
3014 dump_printf_loc (MSG_NOTE, vect_location, "Cost model analysis: \n");
3015 dump_printf (MSG_NOTE, " Vector inside of loop cost: %d\n",
3016 vec_inside_cost);
3017 dump_printf (MSG_NOTE, " Vector prologue cost: %d\n",
3018 vec_prologue_cost);
3019 dump_printf (MSG_NOTE, " Vector epilogue cost: %d\n",
3020 vec_epilogue_cost);
3021 dump_printf (MSG_NOTE, " Scalar iteration cost: %d\n",
3022 scalar_single_iter_cost);
3023 dump_printf (MSG_NOTE, " Scalar outside cost: %d\n",
3024 scalar_outside_cost);
3025 dump_printf (MSG_NOTE, " Vector outside cost: %d\n",
3026 vec_outside_cost);
3027 dump_printf (MSG_NOTE, " prologue iterations: %d\n",
3028 peel_iters_prologue);
3029 dump_printf (MSG_NOTE, " epilogue iterations: %d\n",
3030 peel_iters_epilogue);
3031 dump_printf (MSG_NOTE,
3032 " Calculated minimum iters for profitability: %d\n",
3033 min_profitable_iters);
3034 dump_printf (MSG_NOTE, "\n");
3037 min_profitable_iters =
3038 min_profitable_iters < vf ? vf : min_profitable_iters;
3040 /* Because the condition we create is:
3041 if (niters <= min_profitable_iters)
3042 then skip the vectorized loop. */
3043 min_profitable_iters--;
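/* Continuing the sketch above (illustrative): with the value 9 and
   vf = 4, the clamp keeps 9 and the decrement yields 8, so the runtime
   guard effectively reads "if (niters <= 8) use the scalar loop".  */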
3045 if (dump_enabled_p ())
3046 dump_printf_loc (MSG_NOTE, vect_location,
3047 " Runtime profitability threshold = %d\n",
3048 min_profitable_iters);
3050 *ret_min_profitable_niters = min_profitable_iters;
3052 /* Calculate number of iterations required to make the vector version
3053 profitable, relative to the loop bodies only.
3055 Non-vectorized variant is SIC * niters and it must win over vector
3056 variant on the expected loop trip count. The following condition must hold true:
3057 SIC * niters > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC + SOC */
3059 if (vec_outside_cost <= 0)
3060 min_profitable_estimate = 1;
3061 else
3063 min_profitable_estimate = ((vec_outside_cost + scalar_outside_cost) * vf
3064 - vec_inside_cost * peel_iters_prologue
3065 - vec_inside_cost * peel_iters_epilogue)
3066 / ((scalar_single_iter_cost * vf)
3067 - vec_inside_cost);
3069 min_profitable_estimate --;
3070 min_profitable_estimate = MAX (min_profitable_estimate, min_profitable_iters);
3071 if (dump_enabled_p ())
3072 dump_printf_loc (MSG_NOTE, vect_location,
3073 " Static estimate profitability threshold = %d\n",
3074 min_profitable_estimate);
3076 *ret_min_profitable_estimate = min_profitable_estimate;
3080 /* TODO: Close dependency between vect_model_*_cost and vectorizable_*
3081 functions. Design better to avoid maintenance issues. */
3083 /* Function vect_model_reduction_cost.
3085 Models cost for a reduction operation, including the vector ops
3086 generated within the strip-mine loop, the initial definition before
3087 the loop, and the epilogue code that must be generated. */
3089 static bool
3090 vect_model_reduction_cost (stmt_vec_info stmt_info, enum tree_code reduc_code,
3091 int ncopies)
3093 int prologue_cost = 0, epilogue_cost = 0;
3094 enum tree_code code;
3095 optab optab;
3096 tree vectype;
3097 gimple stmt, orig_stmt;
3098 tree reduction_op;
3099 enum machine_mode mode;
3100 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3101 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
3102 void *target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
3104 /* Cost of reduction op inside loop. */
3105 unsigned inside_cost = add_stmt_cost (target_cost_data, ncopies, vector_stmt,
3106 stmt_info, 0, vect_body);
3107 stmt = STMT_VINFO_STMT (stmt_info);
3109 switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
3111 case GIMPLE_SINGLE_RHS:
3112 gcc_assert (TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt)) == ternary_op);
3113 reduction_op = TREE_OPERAND (gimple_assign_rhs1 (stmt), 2);
3114 break;
3115 case GIMPLE_UNARY_RHS:
3116 reduction_op = gimple_assign_rhs1 (stmt);
3117 break;
3118 case GIMPLE_BINARY_RHS:
3119 reduction_op = gimple_assign_rhs2 (stmt);
3120 break;
3121 case GIMPLE_TERNARY_RHS:
3122 reduction_op = gimple_assign_rhs3 (stmt);
3123 break;
3124 default:
3125 gcc_unreachable ();
3128 vectype = get_vectype_for_scalar_type (TREE_TYPE (reduction_op));
3129 if (!vectype)
3131 if (dump_enabled_p ())
3133 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3134 "unsupported data-type ");
3135 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
3136 TREE_TYPE (reduction_op));
3137 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3139 return false;
3142 mode = TYPE_MODE (vectype);
3143 orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
3145 if (!orig_stmt)
3146 orig_stmt = STMT_VINFO_STMT (stmt_info);
3148 code = gimple_assign_rhs_code (orig_stmt);
3150 /* Add in cost for initial definition. */
3151 prologue_cost += add_stmt_cost (target_cost_data, 1, scalar_to_vec,
3152 stmt_info, 0, vect_prologue);
3154 /* Determine cost of epilogue code.
3156 We have a reduction operator that will reduce the vector in one statement.
3157 Also requires scalar extract. */
3159 if (!nested_in_vect_loop_p (loop, orig_stmt))
3161 if (reduc_code != ERROR_MARK)
3163 epilogue_cost += add_stmt_cost (target_cost_data, 1, vector_stmt,
3164 stmt_info, 0, vect_epilogue);
3165 epilogue_cost += add_stmt_cost (target_cost_data, 1, vec_to_scalar,
3166 stmt_info, 0, vect_epilogue);
3168 else
3170 int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype));
3171 tree bitsize =
3172 TYPE_SIZE (TREE_TYPE (gimple_assign_lhs (orig_stmt)));
3173 int element_bitsize = tree_to_uhwi (bitsize);
3174 int nelements = vec_size_in_bits / element_bitsize;
3176 optab = optab_for_tree_code (code, vectype, optab_default);
3178 /* We have a whole vector shift available. */
3179 if (VECTOR_MODE_P (mode)
3180 && optab_handler (optab, mode) != CODE_FOR_nothing
3181 && optab_handler (vec_shr_optab, mode) != CODE_FOR_nothing)
3183 /* Final reduction via vector shifts and the reduction operator.
3184 Also requires scalar extract. */
3185 epilogue_cost += add_stmt_cost (target_cost_data,
3186 exact_log2 (nelements) * 2,
3187 vector_stmt, stmt_info, 0,
3188 vect_epilogue);
3189 epilogue_cost += add_stmt_cost (target_cost_data, 1,
3190 vec_to_scalar, stmt_info, 0,
3191 vect_epilogue);
3193 else
3194 /* Use extracts and reduction op for final reduction. For N
3195 elements, we have N extracts and N-1 reduction ops. */
3196 epilogue_cost += add_stmt_cost (target_cost_data,
3197 nelements + nelements - 1,
3198 vector_stmt, stmt_info, 0,
3199 vect_epilogue);
3203 if (dump_enabled_p ())
3204 dump_printf (MSG_NOTE,
3205 "vect_model_reduction_cost: inside_cost = %d, "
3206 "prologue_cost = %d, epilogue_cost = %d .\n", inside_cost,
3207 prologue_cost, epilogue_cost);
3209 return true;
3213 /* Function vect_model_induction_cost.
3215 Models cost for induction operations. */
3217 static void
3218 vect_model_induction_cost (stmt_vec_info stmt_info, int ncopies)
3220 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3221 void *target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
3222 unsigned inside_cost, prologue_cost;
3224 /* loop cost for vec_loop. */
3225 inside_cost = add_stmt_cost (target_cost_data, ncopies, vector_stmt,
3226 stmt_info, 0, vect_body);
3228 /* prologue cost for vec_init and vec_step. */
3229 prologue_cost = add_stmt_cost (target_cost_data, 2, scalar_to_vec,
3230 stmt_info, 0, vect_prologue);
3232 if (dump_enabled_p ())
3233 dump_printf_loc (MSG_NOTE, vect_location,
3234 "vect_model_induction_cost: inside_cost = %d, "
3235 "prologue_cost = %d .\n", inside_cost, prologue_cost);
3239 /* Function get_initial_def_for_induction
3241 Input:
3242 STMT - a stmt that performs an induction operation in the loop.
3243 IV_PHI - the initial value of the induction variable
3245 Output:
3246 Return a vector variable, initialized with the first VF values of
3247 the induction variable. E.g., for an iv with IV_PHI='X' and
3248 evolution S, for a vector of 4 units, we want to return:
3249 [X, X + S, X + 2*S, X + 3*S]. */
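/* Concretely (illustrative): for an iv "for (i = 7; ...; i += 3)" and
   a 4-unit vector, the returned vector is {7, 10, 13, 16}; the step
   vector built below for the loop update is {12, 12, 12, 12}
   (VF * S).  */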
3251 static tree
3252 get_initial_def_for_induction (gimple iv_phi)
3254 stmt_vec_info stmt_vinfo = vinfo_for_stmt (iv_phi);
3255 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
3256 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
3257 tree vectype;
3258 int nunits;
3259 edge pe = loop_preheader_edge (loop);
3260 struct loop *iv_loop;
3261 basic_block new_bb;
3262 tree new_vec, vec_init, vec_step, t;
3263 tree new_var;
3264 tree new_name;
3265 gimple init_stmt, induction_phi, new_stmt;
3266 tree induc_def, vec_def, vec_dest;
3267 tree init_expr, step_expr;
3268 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
3269 int i;
3270 int ncopies;
3271 tree expr;
3272 stmt_vec_info phi_info = vinfo_for_stmt (iv_phi);
3273 bool nested_in_vect_loop = false;
3274 gimple_seq stmts = NULL;
3275 imm_use_iterator imm_iter;
3276 use_operand_p use_p;
3277 gimple exit_phi;
3278 edge latch_e;
3279 tree loop_arg;
3280 gimple_stmt_iterator si;
3281 basic_block bb = gimple_bb (iv_phi);
3282 tree stepvectype;
3283 tree resvectype;
3285 /* Is phi in an inner-loop, while vectorizing an enclosing outer-loop? */
3286 if (nested_in_vect_loop_p (loop, iv_phi))
3288 nested_in_vect_loop = true;
3289 iv_loop = loop->inner;
3291 else
3292 iv_loop = loop;
3293 gcc_assert (iv_loop == (gimple_bb (iv_phi))->loop_father);
3295 latch_e = loop_latch_edge (iv_loop);
3296 loop_arg = PHI_ARG_DEF_FROM_EDGE (iv_phi, latch_e);
3298 step_expr = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (phi_info);
3299 gcc_assert (step_expr != NULL_TREE);
3301 pe = loop_preheader_edge (iv_loop);
3302 init_expr = PHI_ARG_DEF_FROM_EDGE (iv_phi,
3303 loop_preheader_edge (iv_loop));
3305 vectype = get_vectype_for_scalar_type (TREE_TYPE (init_expr));
3306 resvectype = get_vectype_for_scalar_type (TREE_TYPE (PHI_RESULT (iv_phi)));
3307 gcc_assert (vectype);
3308 nunits = TYPE_VECTOR_SUBPARTS (vectype);
3309 ncopies = vf / nunits;
3311 gcc_assert (phi_info);
3312 gcc_assert (ncopies >= 1);
3314 /* Convert the step to the desired type. */
3315 step_expr = force_gimple_operand (fold_convert (TREE_TYPE (vectype),
3316 step_expr),
3317 &stmts, true, NULL_TREE);
3318 if (stmts)
3320 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
3321 gcc_assert (!new_bb);
3324 /* Find the first insertion point in the BB. */
3325 si = gsi_after_labels (bb);
3327 /* Create the vector that holds the initial_value of the induction. */
3328 if (nested_in_vect_loop)
3330 /* iv_loop is nested in the loop to be vectorized. init_expr had already
3331 been created during vectorization of previous stmts. We obtain it
3332 from the STMT_VINFO_VEC_STMT of the defining stmt. */
3333 vec_init = vect_get_vec_def_for_operand (init_expr, iv_phi, NULL);
3334 /* If the initial value is not of proper type, convert it. */
3335 if (!useless_type_conversion_p (vectype, TREE_TYPE (vec_init)))
3337 new_stmt = gimple_build_assign_with_ops
3338 (VIEW_CONVERT_EXPR,
3339 vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_"),
3340 build1 (VIEW_CONVERT_EXPR, vectype, vec_init), NULL_TREE);
3341 vec_init = make_ssa_name (gimple_assign_lhs (new_stmt), new_stmt);
3342 gimple_assign_set_lhs (new_stmt, vec_init);
3343 new_bb = gsi_insert_on_edge_immediate (loop_preheader_edge (iv_loop),
3344 new_stmt);
3345 gcc_assert (!new_bb);
3346 set_vinfo_for_stmt (new_stmt,
3347 new_stmt_vec_info (new_stmt, loop_vinfo, NULL));
3350 else
3352 vec<constructor_elt, va_gc> *v;
3354 /* iv_loop is the loop to be vectorized. Create:
3355 vec_init = [X, X+S, X+2*S, X+3*S] (S = step_expr, X = init_expr) */
3356 new_var = vect_get_new_vect_var (TREE_TYPE (vectype),
3357 vect_scalar_var, "var_");
3358 new_name = force_gimple_operand (fold_convert (TREE_TYPE (vectype),
3359 init_expr),
3360 &stmts, false, new_var);
3361 if (stmts)
3363 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
3364 gcc_assert (!new_bb);
3367 vec_alloc (v, nunits);
3368 bool constant_p = is_gimple_min_invariant (new_name);
3369 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, new_name);
3370 for (i = 1; i < nunits; i++)
3372 /* Create: new_name_i = new_name + step_expr */
3373 new_name = fold_build2 (PLUS_EXPR, TREE_TYPE (new_name),
3374 new_name, step_expr);
3375 if (!is_gimple_min_invariant (new_name))
3377 init_stmt = gimple_build_assign (new_var, new_name);
3378 new_name = make_ssa_name (new_var, init_stmt);
3379 gimple_assign_set_lhs (init_stmt, new_name);
3380 new_bb = gsi_insert_on_edge_immediate (pe, init_stmt);
3381 gcc_assert (!new_bb);
3382 if (dump_enabled_p ())
3384 dump_printf_loc (MSG_NOTE, vect_location,
3385 "created new init_stmt: ");
3386 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, init_stmt, 0);
3387 dump_printf (MSG_NOTE, "\n");
3389 constant_p = false;
3391 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, new_name);
3393 /* Create a vector from [new_name_0, new_name_1, ..., new_name_nunits-1] */
3394 if (constant_p)
3395 new_vec = build_vector_from_ctor (vectype, v);
3396 else
3397 new_vec = build_constructor (vectype, v);
3398 vec_init = vect_init_vector (iv_phi, new_vec, vectype, NULL);
3402 /* Create the vector that holds the step of the induction. */
3403 if (nested_in_vect_loop)
3404 /* iv_loop is nested in the loop to be vectorized. Generate:
3405 vec_step = [S, S, S, S] */
3406 new_name = step_expr;
3407 else
3409 /* iv_loop is the loop to be vectorized. Generate:
3410 vec_step = [VF*S, VF*S, VF*S, VF*S] */
3411 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
3413 expr = build_int_cst (integer_type_node, vf);
3414 expr = fold_convert (TREE_TYPE (step_expr), expr);
3416 else
3417 expr = build_int_cst (TREE_TYPE (step_expr), vf);
3418 new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
3419 expr, step_expr);
3420 if (TREE_CODE (step_expr) == SSA_NAME)
3421 new_name = vect_init_vector (iv_phi, new_name,
3422 TREE_TYPE (step_expr), NULL);
3425 t = unshare_expr (new_name);
3426 gcc_assert (CONSTANT_CLASS_P (new_name)
3427 || TREE_CODE (new_name) == SSA_NAME);
3428 stepvectype = get_vectype_for_scalar_type (TREE_TYPE (new_name));
3429 gcc_assert (stepvectype);
3430 new_vec = build_vector_from_val (stepvectype, t);
3431 vec_step = vect_init_vector (iv_phi, new_vec, stepvectype, NULL);
3434 /* Create the following def-use cycle:
3435 loop prolog:
3436 vec_init = ...
3437 vec_step = ...
3438 loop:
3439 vec_iv = PHI <vec_init, vec_loop>
3441 STMT
3443 vec_loop = vec_iv + vec_step; */
3445 /* Create the induction-phi that defines the induction-operand. */
3446 vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_");
3447 induction_phi = create_phi_node (vec_dest, iv_loop->header);
3448 set_vinfo_for_stmt (induction_phi,
3449 new_stmt_vec_info (induction_phi, loop_vinfo, NULL));
3450 induc_def = PHI_RESULT (induction_phi);
3452 /* Create the iv update inside the loop */
3453 new_stmt = gimple_build_assign_with_ops (PLUS_EXPR, vec_dest,
3454 induc_def, vec_step);
3455 vec_def = make_ssa_name (vec_dest, new_stmt);
3456 gimple_assign_set_lhs (new_stmt, vec_def);
3457 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
3458 set_vinfo_for_stmt (new_stmt, new_stmt_vec_info (new_stmt, loop_vinfo,
3459 NULL));
3461 /* Set the arguments of the phi node: */
3462 add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION);
3463 add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
3464 UNKNOWN_LOCATION);
3467 /* In case the vectorization factor (VF) is bigger than the number
3468 of elements that we can fit in a vectype (nunits), we have to generate
3469 more than one vector stmt - i.e - we need to "unroll" the
3470 vector stmt by a factor VF/nunits. For more details see documentation
3471 in vectorizable_operation. */
3473 if (ncopies > 1)
3475 stmt_vec_info prev_stmt_vinfo;
3476 /* FORNOW. This restriction should be relaxed. */
3477 gcc_assert (!nested_in_vect_loop);
3479 /* Create the vector that holds the step of the induction. */
3480 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
3482 expr = build_int_cst (integer_type_node, nunits);
3483 expr = fold_convert (TREE_TYPE (step_expr), expr);
3485 else
3486 expr = build_int_cst (TREE_TYPE (step_expr), nunits);
3487 new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
3488 expr, step_expr);
3489 if (TREE_CODE (step_expr) == SSA_NAME)
3490 new_name = vect_init_vector (iv_phi, new_name,
3491 TREE_TYPE (step_expr), NULL);
3492 t = unshare_expr (new_name);
3493 gcc_assert (CONSTANT_CLASS_P (new_name)
3494 || TREE_CODE (new_name) == SSA_NAME);
3495 new_vec = build_vector_from_val (stepvectype, t);
3496 vec_step = vect_init_vector (iv_phi, new_vec, stepvectype, NULL);
3498 vec_def = induc_def;
3499 prev_stmt_vinfo = vinfo_for_stmt (induction_phi);
3500 for (i = 1; i < ncopies; i++)
3502 /* vec_i = vec_prev + vec_step */
3503 new_stmt = gimple_build_assign_with_ops (PLUS_EXPR, vec_dest,
3504 vec_def, vec_step);
3505 vec_def = make_ssa_name (vec_dest, new_stmt);
3506 gimple_assign_set_lhs (new_stmt, vec_def);
3508 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
3509 if (!useless_type_conversion_p (resvectype, vectype))
3511 new_stmt = gimple_build_assign_with_ops
3512 (VIEW_CONVERT_EXPR,
3513 vect_get_new_vect_var (resvectype, vect_simple_var,
3514 "vec_iv_"),
3515 build1 (VIEW_CONVERT_EXPR, resvectype,
3516 gimple_assign_lhs (new_stmt)), NULL_TREE);
3517 gimple_assign_set_lhs (new_stmt,
3518 make_ssa_name
3519 (gimple_assign_lhs (new_stmt), new_stmt));
3520 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
3522 set_vinfo_for_stmt (new_stmt,
3523 new_stmt_vec_info (new_stmt, loop_vinfo, NULL));
3524 STMT_VINFO_RELATED_STMT (prev_stmt_vinfo) = new_stmt;
3525 prev_stmt_vinfo = vinfo_for_stmt (new_stmt);
3529 if (nested_in_vect_loop)
3531 /* Find the loop-closed exit-phi of the induction, and record
3532 the final vector of induction results: */
3533 exit_phi = NULL;
3534 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
3536 gimple use_stmt = USE_STMT (use_p);
3537 if (is_gimple_debug (use_stmt))
3538 continue;
3540 if (!flow_bb_inside_loop_p (iv_loop, gimple_bb (use_stmt)))
3542 exit_phi = use_stmt;
3543 break;
3546 if (exit_phi)
3548 stmt_vec_info stmt_vinfo = vinfo_for_stmt (exit_phi);
3549 /* FORNOW. Currently not supporting the case that an inner-loop induction
3550 is not used in the outer-loop (i.e. only outside the outer-loop). */
3551 gcc_assert (STMT_VINFO_RELEVANT_P (stmt_vinfo)
3552 && !STMT_VINFO_LIVE_P (stmt_vinfo));
3554 STMT_VINFO_VEC_STMT (stmt_vinfo) = new_stmt;
3555 if (dump_enabled_p ())
3557 dump_printf_loc (MSG_NOTE, vect_location,
3558 "vector of inductions after inner-loop:");
3559 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
3560 dump_printf (MSG_NOTE, "\n");
3566 if (dump_enabled_p ())
3568 dump_printf_loc (MSG_NOTE, vect_location,
3569 "transform induction: created def-use cycle: ");
3570 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, induction_phi, 0);
3571 dump_printf (MSG_NOTE, "\n");
3572 dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
3573 SSA_NAME_DEF_STMT (vec_def), 0);
3574 dump_printf (MSG_NOTE, "\n");
3577 STMT_VINFO_VEC_STMT (phi_info) = induction_phi;
3578 if (!useless_type_conversion_p (resvectype, vectype))
3580 new_stmt = gimple_build_assign_with_ops
3581 (VIEW_CONVERT_EXPR,
3582 vect_get_new_vect_var (resvectype, vect_simple_var, "vec_iv_"),
3583 build1 (VIEW_CONVERT_EXPR, resvectype, induc_def), NULL_TREE);
3584 induc_def = make_ssa_name (gimple_assign_lhs (new_stmt), new_stmt);
3585 gimple_assign_set_lhs (new_stmt, induc_def);
3586 si = gsi_after_labels (bb);
3587 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
3588 set_vinfo_for_stmt (new_stmt,
3589 new_stmt_vec_info (new_stmt, loop_vinfo, NULL));
3590 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (new_stmt))
3591 = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (induction_phi));
3594 return induc_def;
3598 /* Function get_initial_def_for_reduction
3600 Input:
3601 STMT - a stmt that performs a reduction operation in the loop.
3602 INIT_VAL - the initial value of the reduction variable
3604 Output:
3605 ADJUSTMENT_DEF - a tree that holds a value to be added to the final result
3606 of the reduction (used for adjusting the epilog - see below).
3607 Return a vector variable, initialized according to the operation that STMT
3608 performs. This vector will be used as the initial value of the
3609 vector of partial results.
3611 Option1 (adjust in epilog): Initialize the vector as follows:
3612 add/bit or/xor: [0,0,...,0,0]
3613 mult/bit and: [1,1,...,1,1]
3614 min/max/cond_expr: [init_val,init_val,..,init_val,init_val]
3615 and when necessary (e.g. add/mult case) let the caller know
3616 that it needs to adjust the result by init_val.
3618 Option2: Initialize the vector as follows:
3619 add/bit or/xor: [init_val,0,0,...,0]
3620 mult/bit and: [init_val,1,1,...,1]
3621 min/max/cond_expr: [init_val,init_val,...,init_val]
3622 and no adjustments are needed.
3624 For example, for the following code:
3626 s = init_val;
3627 for (i=0;i<n;i++)
3628 s = s + a[i];
3630 STMT is 's = s + a[i]', and the reduction variable is 's'.
3631 For a vector of 4 units, we want to return either [0,0,0,init_val],
3632 or [0,0,0,0] and let the caller know that it needs to adjust
3633 the result at the end by 'init_val'.
3635 FORNOW, we use the 'adjust in epilog' scheme (Option1) when ADJUSTMENT_DEF
3636 is not NULL, because its initialization vector is simpler (the same
3637 element in all entries); otherwise we use Option2.
3639 A cost model should help decide between these two schemes. */
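/* To make the two options concrete, here is an illustrative sketch
   (added for exposition, not part of the original sources) of a PLUS
   reduction with init_val = 5 and four lanes; both options compute
   the same scalar result (n is assumed to be a multiple of 4):

     int option1 (int *a, int n)        // Option1: adjust in epilog
     {
       int lane[4] = {0, 0, 0, 0};      // [0,0,0,0]
       for (int i = 0; i < n; i += 4)
         for (int k = 0; k < 4; k++)
           lane[k] += a[i + k];
       int s = lane[0] + lane[1] + lane[2] + lane[3];
       return s + 5;                    // adjust by init_val
     }

     int option2 (int *a, int n)        // Option2: no adjustment
     {
       int lane[4] = {5, 0, 0, 0};      // [init_val,0,0,0]
       for (int i = 0; i < n; i += 4)
         for (int k = 0; k < 4; k++)
           lane[k] += a[i + k];
       return lane[0] + lane[1] + lane[2] + lane[3];
     }  */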
3641 tree
3642 get_initial_def_for_reduction (gimple stmt, tree init_val,
3643 tree *adjustment_def)
3645 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
3646 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
3647 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
3648 tree scalar_type = TREE_TYPE (init_val);
3649 tree vectype = get_vectype_for_scalar_type (scalar_type);
3650 int nunits;
3651 enum tree_code code = gimple_assign_rhs_code (stmt);
3652 tree def_for_init;
3653 tree init_def;
3654 tree *elts;
3655 int i;
3656 bool nested_in_vect_loop = false;
3657 tree init_value;
3658 REAL_VALUE_TYPE real_init_val = dconst0;
3659 int int_init_val = 0;
3660 gimple def_stmt = NULL;
3662 gcc_assert (vectype);
3663 nunits = TYPE_VECTOR_SUBPARTS (vectype);
3665 gcc_assert (POINTER_TYPE_P (scalar_type) || INTEGRAL_TYPE_P (scalar_type)
3666 || SCALAR_FLOAT_TYPE_P (scalar_type));
3668 if (nested_in_vect_loop_p (loop, stmt))
3669 nested_in_vect_loop = true;
3670 else
3671 gcc_assert (loop == (gimple_bb (stmt))->loop_father);
3673 /* In case of double reduction we only create a vector variable to be put
3674 in the reduction phi node. The actual statement creation is done in
3675 vect_create_epilog_for_reduction. */
3676 if (adjustment_def && nested_in_vect_loop
3677 && TREE_CODE (init_val) == SSA_NAME
3678 && (def_stmt = SSA_NAME_DEF_STMT (init_val))
3679 && gimple_code (def_stmt) == GIMPLE_PHI
3680 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
3681 && vinfo_for_stmt (def_stmt)
3682 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
3683 == vect_double_reduction_def)
3685 *adjustment_def = NULL;
3686 return vect_create_destination_var (init_val, vectype);
3689 if (TREE_CONSTANT (init_val))
3691 if (SCALAR_FLOAT_TYPE_P (scalar_type))
3692 init_value = build_real (scalar_type, TREE_REAL_CST (init_val));
3693 else
3694 init_value = build_int_cst (scalar_type, TREE_INT_CST_LOW (init_val));
3696 else
3697 init_value = init_val;
3699 switch (code)
3701 case WIDEN_SUM_EXPR:
3702 case DOT_PROD_EXPR:
3703 case PLUS_EXPR:
3704 case MINUS_EXPR:
3705 case BIT_IOR_EXPR:
3706 case BIT_XOR_EXPR:
3707 case MULT_EXPR:
3708 case BIT_AND_EXPR:
3709 /* ADJUSTMENT_DEF is NULL when called from
3710 vect_create_epilog_for_reduction to vectorize double reduction. */
3711 if (adjustment_def)
3713 if (nested_in_vect_loop)
3714 *adjustment_def = vect_get_vec_def_for_operand (init_val, stmt,
3715 NULL);
3716 else
3717 *adjustment_def = init_val;
3720 if (code == MULT_EXPR)
3722 real_init_val = dconst1;
3723 int_init_val = 1;
3726 if (code == BIT_AND_EXPR)
3727 int_init_val = -1;
3729 if (SCALAR_FLOAT_TYPE_P (scalar_type))
3730 def_for_init = build_real (scalar_type, real_init_val);
3731 else
3732 def_for_init = build_int_cst (scalar_type, int_init_val);
3734 /* Fill all elements of the vector with '0' or '1', except the first element (set below). */
3735 elts = XALLOCAVEC (tree, nunits);
3736 for (i = nunits - 2; i >= 0; --i)
3737 elts[i + 1] = def_for_init;
3739 /* Option1: the first element is '0' or '1' as well. */
3740 if (adjustment_def)
3742 elts[0] = def_for_init;
3743 init_def = build_vector (vectype, elts);
3744 break;
3747 /* Option2: the first element is INIT_VAL. */
3748 elts[0] = init_val;
3749 if (TREE_CONSTANT (init_val))
3750 init_def = build_vector (vectype, elts);
3751 else
3753 vec<constructor_elt, va_gc> *v;
3754 vec_alloc (v, nunits);
3755 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, init_val);
3756 for (i = 1; i < nunits; ++i)
3757 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, elts[i]);
3758 init_def = build_constructor (vectype, v);
3761 break;
3763 case MIN_EXPR:
3764 case MAX_EXPR:
3765 case COND_EXPR:
3766 if (adjustment_def)
3768 *adjustment_def = NULL_TREE;
3769 init_def = vect_get_vec_def_for_operand (init_val, stmt, NULL);
3770 break;
3773 init_def = build_vector_from_val (vectype, init_value);
3774 break;
3776 default:
3777 gcc_unreachable ();
3780 return init_def;
3784 /* Function vect_create_epilog_for_reduction
3786 Create code at the loop-epilog to finalize the result of a reduction
3787 computation.
3789 VECT_DEFS is list of vector of partial results, i.e., the lhs's of vector
3790 reduction statements.
3791 STMT is the scalar reduction stmt that is being vectorized.
3792 NCOPIES is > 1 in case the vectorization factor (VF) is bigger than the
3793 number of elements that we can fit in a vectype (nunits). In this case
3794 we have to generate more than one vector stmt - i.e - we need to "unroll"
3795 the vector stmt by a factor VF/nunits. For more details see documentation
3796 in vectorizable_operation.
3797 REDUC_CODE is the tree-code for the epilog reduction.
3798 REDUCTION_PHIS is a list of the phi-nodes that carry the reduction
3799 computation.
3800 REDUC_INDEX is the index of the operand in the right hand side of the
3801 statement that is defined by REDUCTION_PHI.
3802 DOUBLE_REDUC is TRUE if double reduction phi nodes should be handled.
3803 SLP_NODE is an SLP node containing a group of reduction statements. The
3804 first one in this group is STMT.
3806 This function:
3807 1. Creates the reduction def-use cycles: sets the arguments for
3808 REDUCTION_PHIS:
3809 The loop-entry argument is the vectorized initial-value of the reduction.
3810 The loop-latch argument is taken from VECT_DEFS - the vector of partial
3811 sums.
3812 2. "Reduces" each vector of partial results VECT_DEFS into a single result,
3813 by applying the operation specified by REDUC_CODE if available, or by
3814 other means (whole-vector shifts or a scalar loop).
3815 The function also creates a new phi node at the loop exit to preserve
3816 loop-closed form, as illustrated below.
3818 The flow at the entry to this function:
3820 loop:
3821 vec_def = phi <null, null> # REDUCTION_PHI
3822 VECT_DEF = vector_stmt # vectorized form of STMT
3823 s_loop = scalar_stmt # (scalar) STMT
3824 loop_exit:
3825 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
3826 use <s_out0>
3827 use <s_out0>
3829 The above is transformed by this function into:
3831 loop:
3832 vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI
3833 VECT_DEF = vector_stmt # vectorized form of STMT
3834 s_loop = scalar_stmt # (scalar) STMT
3835 loop_exit:
3836 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
3837 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
3838 v_out2 = reduce <v_out1>
3839 s_out3 = extract_field <v_out2, 0>
3840 s_out4 = adjust_result <s_out3>
3841 use <s_out4>
3842 use <s_out4>
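/* In scalar C terms, the overall effect for s = init; s += a[i] with
   four lanes corresponds to the following sketch (added for
   exposition; v4si uses the GCC vector extension):

     typedef int v4si __attribute__ ((vector_size (16)));

     v4si vec_def = {0, 0, 0, 0};              // vec_init
     for (int i = 0; i < n; i += 4)
       vec_def += *(v4si *) &a[i];             // VECT_DEF
     v4si v_out1 = vec_def;                    // NEW_EXIT_PHI
     int s_out3 = v_out1[0] + v_out1[1]
                  + v_out1[2] + v_out1[3];     // reduce + extract
     int s_out4 = s_out3 + init;               // adjust_result  */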
3845 static void
3846 vect_create_epilog_for_reduction (vec<tree> vect_defs, gimple stmt,
3847 int ncopies, enum tree_code reduc_code,
3848 vec<gimple> reduction_phis,
3849 int reduc_index, bool double_reduc,
3850 slp_tree slp_node)
3852 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3853 stmt_vec_info prev_phi_info;
3854 tree vectype;
3855 enum machine_mode mode;
3856 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3857 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo), *outer_loop = NULL;
3858 basic_block exit_bb;
3859 tree scalar_dest;
3860 tree scalar_type;
3861 gimple new_phi = NULL, phi;
3862 gimple_stmt_iterator exit_gsi;
3863 tree vec_dest;
3864 tree new_temp = NULL_TREE, new_dest, new_name, new_scalar_dest;
3865 gimple epilog_stmt = NULL;
3866 enum tree_code code = gimple_assign_rhs_code (stmt);
3867 gimple exit_phi;
3868 tree bitsize, bitpos;
3869 tree adjustment_def = NULL;
3870 tree vec_initial_def = NULL;
3871 tree reduction_op, expr, def;
3872 tree orig_name, scalar_result;
3873 imm_use_iterator imm_iter, phi_imm_iter;
3874 use_operand_p use_p, phi_use_p;
3875 bool extract_scalar_result = false;
3876 gimple use_stmt, orig_stmt, reduction_phi = NULL;
3877 bool nested_in_vect_loop = false;
3878 auto_vec<gimple> new_phis;
3879 auto_vec<gimple> inner_phis;
3880 enum vect_def_type dt = vect_unknown_def_type;
3881 int j, i;
3882 auto_vec<tree> scalar_results;
3883 unsigned int group_size = 1, k, ratio;
3884 auto_vec<tree> vec_initial_defs;
3885 auto_vec<gimple> phis;
3886 bool slp_reduc = false;
3887 tree new_phi_result;
3888 gimple inner_phi = NULL;
3890 if (slp_node)
3891 group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();
3893 if (nested_in_vect_loop_p (loop, stmt))
3895 outer_loop = loop;
3896 loop = loop->inner;
3897 nested_in_vect_loop = true;
3898 gcc_assert (!slp_node);
3901 switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
3903 case GIMPLE_SINGLE_RHS:
3904 gcc_assert (TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt))
3905 == ternary_op);
3906 reduction_op = TREE_OPERAND (gimple_assign_rhs1 (stmt), reduc_index);
3907 break;
3908 case GIMPLE_UNARY_RHS:
3909 reduction_op = gimple_assign_rhs1 (stmt);
3910 break;
3911 case GIMPLE_BINARY_RHS:
3912 reduction_op = reduc_index ?
3913 gimple_assign_rhs2 (stmt) : gimple_assign_rhs1 (stmt);
3914 break;
3915 case GIMPLE_TERNARY_RHS:
3916 reduction_op = gimple_op (stmt, reduc_index + 1);
3917 break;
3918 default:
3919 gcc_unreachable ();
3922 vectype = get_vectype_for_scalar_type (TREE_TYPE (reduction_op));
3923 gcc_assert (vectype);
3924 mode = TYPE_MODE (vectype);
3926 /* 1. Create the reduction def-use cycle:
3927 Set the arguments of REDUCTION_PHIS, i.e., transform
3929 loop:
3930 vec_def = phi <null, null> # REDUCTION_PHI
3931 VECT_DEF = vector_stmt # vectorized form of STMT
3934 into:
3936 loop:
3937 vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI
3938 VECT_DEF = vector_stmt # vectorized form of STMT
3941 (in case of SLP, do it for all the phis). */
3943 /* Get the loop-entry arguments. */
3944 if (slp_node)
3945 vect_get_vec_defs (reduction_op, NULL_TREE, stmt, &vec_initial_defs,
3946 NULL, slp_node, reduc_index);
3947 else
3949 vec_initial_defs.create (1);
3950 /* For the case of reduction, vect_get_vec_def_for_operand returns
3951 the scalar def before the loop, which defines the initial value
3952 of the reduction variable. */
3953 vec_initial_def = vect_get_vec_def_for_operand (reduction_op, stmt,
3954 &adjustment_def);
3955 vec_initial_defs.quick_push (vec_initial_def);
3958 /* Set phi nodes arguments. */
3959 FOR_EACH_VEC_ELT (reduction_phis, i, phi)
3961 tree vec_init_def, def;
3962 gimple_seq stmts;
3963 vec_init_def = force_gimple_operand (vec_initial_defs[i], &stmts,
3964 true, NULL_TREE);
3965 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
3966 def = vect_defs[i];
3967 for (j = 0; j < ncopies; j++)
3969 /* Set the loop-entry arg of the reduction-phi. */
3970 add_phi_arg (phi, vec_init_def, loop_preheader_edge (loop),
3971 UNKNOWN_LOCATION);
3973 /* Set the loop-latch arg for the reduction-phi. */
3974 if (j > 0)
3975 def = vect_get_vec_def_for_stmt_copy (vect_unknown_def_type, def);
3977 add_phi_arg (phi, def, loop_latch_edge (loop), UNKNOWN_LOCATION);
3979 if (dump_enabled_p ())
3981 dump_printf_loc (MSG_NOTE, vect_location,
3982 "transform reduction: created def-use cycle: ");
3983 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
3984 dump_printf (MSG_NOTE, "\n");
3985 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, SSA_NAME_DEF_STMT (def), 0);
3986 dump_printf (MSG_NOTE, "\n");
3989 phi = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi));
3993 /* 2. Create epilog code.
3994 The reduction epilog code operates across the elements of the vector
3995 of partial results computed by the vectorized loop.
3996 The reduction epilog code consists of:
3998 step 1: compute the scalar result in a vector (v_out2)
3999 step 2: extract the scalar result (s_out3) from the vector (v_out2)
4000 step 3: adjust the scalar result (s_out3) if needed.
4002 Step 1 can be accomplished using one of the following three schemes:
4003 (scheme 1) using reduc_code, if available.
4004 (scheme 2) using whole-vector shifts, if available.
4005 (scheme 3) using a scalar loop. In this case steps 1+2 above are
4006 combined.
4008 The overall epilog code looks like this:
4010 s_out0 = phi <s_loop> # original EXIT_PHI
4011 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
4012 v_out2 = reduce <v_out1> # step 1
4013 s_out3 = extract_field <v_out2, 0> # step 2
4014 s_out4 = adjust_result <s_out3> # step 3
4016 (step 3 is optional, and steps 1 and 2 may be combined).
4017 Lastly, the uses of s_out0 are replaced by s_out4. */
4020 /* 2.1 Create new loop-exit-phis to preserve loop-closed form:
4021 v_out1 = phi <VECT_DEF>
4022 Store them in NEW_PHIS. */
4024 exit_bb = single_exit (loop)->dest;
4025 prev_phi_info = NULL;
4026 new_phis.create (vect_defs.length ());
4027 FOR_EACH_VEC_ELT (vect_defs, i, def)
4029 for (j = 0; j < ncopies; j++)
4031 tree new_def = copy_ssa_name (def, NULL);
4032 phi = create_phi_node (new_def, exit_bb);
4033 set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, loop_vinfo, NULL));
4034 if (j == 0)
4035 new_phis.quick_push (phi);
4036 else
4038 def = vect_get_vec_def_for_stmt_copy (dt, def);
4039 STMT_VINFO_RELATED_STMT (prev_phi_info) = phi;
4042 SET_PHI_ARG_DEF (phi, single_exit (loop)->dest_idx, def);
4043 prev_phi_info = vinfo_for_stmt (phi);
4047 /* The epilogue is created for the outer-loop, i.e., for the loop being
4048 vectorized. Create exit phis for the outer loop. */
4049 if (double_reduc)
4051 loop = outer_loop;
4052 exit_bb = single_exit (loop)->dest;
4053 inner_phis.create (vect_defs.length ());
4054 FOR_EACH_VEC_ELT (new_phis, i, phi)
4056 tree new_result = copy_ssa_name (PHI_RESULT (phi), NULL);
4057 gimple outer_phi = create_phi_node (new_result, exit_bb);
4058 SET_PHI_ARG_DEF (outer_phi, single_exit (loop)->dest_idx,
4059 PHI_RESULT (phi));
4060 set_vinfo_for_stmt (outer_phi, new_stmt_vec_info (outer_phi,
4061 loop_vinfo, NULL));
4062 inner_phis.quick_push (phi);
4063 new_phis[i] = outer_phi;
4064 prev_phi_info = vinfo_for_stmt (outer_phi);
4065 while (STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi)))
4067 phi = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi));
4068 new_result = copy_ssa_name (PHI_RESULT (phi), NULL);
4069 outer_phi = create_phi_node (new_result, exit_bb);
4070 SET_PHI_ARG_DEF (outer_phi, single_exit (loop)->dest_idx,
4071 PHI_RESULT (phi));
4072 set_vinfo_for_stmt (outer_phi, new_stmt_vec_info (outer_phi,
4073 loop_vinfo, NULL));
4074 STMT_VINFO_RELATED_STMT (prev_phi_info) = outer_phi;
4075 prev_phi_info = vinfo_for_stmt (outer_phi);
4080 exit_gsi = gsi_after_labels (exit_bb);
4082 /* 2.2 Get the relevant tree-code to use in the epilog for schemes 2,3
4083 (i.e. when reduc_code is not available) and in the final adjustment
4084 code (if needed). Also get the original scalar reduction variable as
4085 defined in the loop. In case STMT is a "pattern-stmt" (i.e. - it
4086 represents a reduction pattern), the tree-code and scalar-def are
4087 taken from the original stmt that the pattern-stmt (STMT) replaces.
4088 Otherwise (it is a regular reduction) - the tree-code and scalar-def
4089 are taken from STMT. */
4091 orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
4092 if (!orig_stmt)
4094 /* Regular reduction */
4095 orig_stmt = stmt;
4097 else
4099 /* Reduction pattern */
4100 stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt);
4101 gcc_assert (STMT_VINFO_IN_PATTERN_P (stmt_vinfo));
4102 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo) == stmt);
4105 code = gimple_assign_rhs_code (orig_stmt);
4106 /* For MINUS_EXPR the initial vector is [init_val,0,...,0], therefore,
4107 partial results are added and not subtracted. */
4108 if (code == MINUS_EXPR)
4109 code = PLUS_EXPR;
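/* For illustration (added sketch): with s = init; s -= a[i] and four
   lanes starting from [init,0,0,0], each lane ends up holding
   init-or-0 minus a partial sum of a, so summing the lanes with PLUS
   yields init - (a[0] + a[1] + ... + a[n-1]), the desired result;
   subtracting them would be wrong. */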
4111 scalar_dest = gimple_assign_lhs (orig_stmt);
4112 scalar_type = TREE_TYPE (scalar_dest);
4113 scalar_results.create (group_size);
4114 new_scalar_dest = vect_create_destination_var (scalar_dest, NULL);
4115 bitsize = TYPE_SIZE (scalar_type);
4117 /* In case this is a reduction in an inner-loop while vectorizing an outer
4118 loop - we don't need to extract a single scalar result at the end of the
4119 inner-loop (unless it is a double reduction, i.e., the use of the reduction is
4120 outside the outer-loop). The final vector of partial results will be used
4121 in the vectorized outer-loop, or reduced to a scalar result at the end of
4122 the outer-loop. */
4123 if (nested_in_vect_loop && !double_reduc)
4124 goto vect_finalize_reduction;
4126 /* SLP reduction without reduction chain, e.g.,
4127 # a1 = phi <a2, a0>
4128 # b1 = phi <b2, b0>
4129 a2 = operation (a1)
4130 b2 = operation (b1) */
4131 slp_reduc = (slp_node && !GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)));
4133 /* In case of reduction chain, e.g.,
4134 # a1 = phi <a3, a0>
4135 a2 = operation (a1)
4136 a3 = operation (a2),
4138 we may end up with more than one vector result. Here we reduce them to
4139 one vector. */
4140 if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
4142 tree first_vect = PHI_RESULT (new_phis[0]);
4143 tree tmp;
4144 gimple new_vec_stmt = NULL;
4146 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4147 for (k = 1; k < new_phis.length (); k++)
4149 gimple next_phi = new_phis[k];
4150 tree second_vect = PHI_RESULT (next_phi);
4152 tmp = build2 (code, vectype, first_vect, second_vect);
4153 new_vec_stmt = gimple_build_assign (vec_dest, tmp);
4154 first_vect = make_ssa_name (vec_dest, new_vec_stmt);
4155 gimple_assign_set_lhs (new_vec_stmt, first_vect);
4156 gsi_insert_before (&exit_gsi, new_vec_stmt, GSI_SAME_STMT);
4159 new_phi_result = first_vect;
4160 if (new_vec_stmt)
4162 new_phis.truncate (0);
4163 new_phis.safe_push (new_vec_stmt);
4166 else
4167 new_phi_result = PHI_RESULT (new_phis[0]);
4169 /* 2.3 Create the reduction code, using one of the three schemes described
4170 above. In SLP we simply need to extract all the elements from the
4171 vector (without reducing them), so we use scalar shifts. */
4172 if (reduc_code != ERROR_MARK && !slp_reduc)
4174 tree tmp;
4176 /*** Case 1: Create:
4177 v_out2 = reduc_expr <v_out1> */
4179 if (dump_enabled_p ())
4180 dump_printf_loc (MSG_NOTE, vect_location,
4181 "Reduce using direct vector reduction.\n");
4183 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4184 tmp = build1 (reduc_code, vectype, new_phi_result);
4185 epilog_stmt = gimple_build_assign (vec_dest, tmp);
4186 new_temp = make_ssa_name (vec_dest, epilog_stmt);
4187 gimple_assign_set_lhs (epilog_stmt, new_temp);
4188 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4190 extract_scalar_result = true;
4192 else
4194 enum tree_code shift_code = ERROR_MARK;
4195 bool have_whole_vector_shift = true;
4196 int bit_offset;
4197 int element_bitsize = tree_to_uhwi (bitsize);
4198 int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype));
4199 tree vec_temp;
4201 if (optab_handler (vec_shr_optab, mode) != CODE_FOR_nothing)
4202 shift_code = VEC_RSHIFT_EXPR;
4203 else
4204 have_whole_vector_shift = false;
4206 /* Regardless of whether we have a whole vector shift, if we're
4207 emulating the operation via tree-vect-generic, we don't want
4208 to use it. Only the first round of the reduction is likely
4209 to still be profitable via emulation. */
4210 /* ??? It might be better to emit a reduction tree code here, so that
4211 tree-vect-generic can expand the first round via bit tricks. */
4212 if (!VECTOR_MODE_P (mode))
4213 have_whole_vector_shift = false;
4214 else
4216 optab optab = optab_for_tree_code (code, vectype, optab_default);
4217 if (optab_handler (optab, mode) == CODE_FOR_nothing)
4218 have_whole_vector_shift = false;
4221 if (have_whole_vector_shift && !slp_reduc)
4223 /*** Case 2: Create:
4224 for (offset = VS/2; offset >= element_size; offset/=2)
4226 Create: va' = vec_shift <va, offset>
4227 Create: va = vop <va, va'>
4228 } */
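/* Concretely (an illustrative sketch): reducing va = {a, b, c, d}
   with 128-bit vectors and 32-bit elements takes two rounds:

     va' = vec_shift <va, 64>   // {c, d, 0, 0}
     va  = va + va'             // {a+c, b+d, c, d}
     va' = vec_shift <va, 32>   // {b+d, c, d, 0}
     va  = va + va'             // {a+b+c+d, ...}

   after which the low element holds the full sum and step 2
   extracts it. */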
4230 if (dump_enabled_p ())
4231 dump_printf_loc (MSG_NOTE, vect_location,
4232 "Reduce using vector shifts\n");
4234 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4235 new_temp = new_phi_result;
4236 for (bit_offset = vec_size_in_bits/2;
4237 bit_offset >= element_bitsize;
4238 bit_offset /= 2)
4240 tree bitpos = size_int (bit_offset);
4242 epilog_stmt = gimple_build_assign_with_ops (shift_code,
4243 vec_dest, new_temp, bitpos);
4244 new_name = make_ssa_name (vec_dest, epilog_stmt);
4245 gimple_assign_set_lhs (epilog_stmt, new_name);
4246 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4248 epilog_stmt = gimple_build_assign_with_ops (code, vec_dest,
4249 new_name, new_temp);
4250 new_temp = make_ssa_name (vec_dest, epilog_stmt);
4251 gimple_assign_set_lhs (epilog_stmt, new_temp);
4252 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4255 extract_scalar_result = true;
4257 else
4259 tree rhs;
4261 /*** Case 3: Create:
4262 s = extract_field <v_out2, 0>
4263 for (offset = element_size;
4264 offset < vector_size;
4265 offset += element_size;)
4267 Create: s' = extract_field <v_out2, offset>
4268 Create: s = op <s, s'> // For non SLP cases
4269 } */
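/* Illustrative sketch of this case for v_out2 = {a, b, c, d}:

     s  = v_out2[0];            // extract_field <v_out2, 0>
     s' = v_out2[1]; s = s + s';
     s' = v_out2[2]; s = s + s';
     s' = v_out2[3]; s = s + s';

   For SLP the additions are skipped and each extracted element is
   pushed into SCALAR_RESULTS instead. */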
4271 if (dump_enabled_p ())
4272 dump_printf_loc (MSG_NOTE, vect_location,
4273 "Reduce using scalar code.\n");
4275 vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype));
4276 FOR_EACH_VEC_ELT (new_phis, i, new_phi)
4278 if (gimple_code (new_phi) == GIMPLE_PHI)
4279 vec_temp = PHI_RESULT (new_phi);
4280 else
4281 vec_temp = gimple_assign_lhs (new_phi);
4282 rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp, bitsize,
4283 bitsize_zero_node);
4284 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
4285 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
4286 gimple_assign_set_lhs (epilog_stmt, new_temp);
4287 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4289 /* In SLP we don't need to apply the reduction operation, so we just
4290 collect s' values in SCALAR_RESULTS. */
4291 if (slp_reduc)
4292 scalar_results.safe_push (new_temp);
4294 for (bit_offset = element_bitsize;
4295 bit_offset < vec_size_in_bits;
4296 bit_offset += element_bitsize)
4298 tree bitpos = bitsize_int (bit_offset);
4299 tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp,
4300 bitsize, bitpos);
4302 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
4303 new_name = make_ssa_name (new_scalar_dest, epilog_stmt);
4304 gimple_assign_set_lhs (epilog_stmt, new_name);
4305 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4307 if (slp_reduc)
4309 /* In SLP we don't need to apply the reduction operation, so
4310 we just collect s' values in SCALAR_RESULTS. */
4311 new_temp = new_name;
4312 scalar_results.safe_push (new_name);
4314 else
4316 epilog_stmt = gimple_build_assign_with_ops (code,
4317 new_scalar_dest, new_name, new_temp);
4318 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
4319 gimple_assign_set_lhs (epilog_stmt, new_temp);
4320 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4325 /* The only case where we need to reduce scalar results in SLP is
4326 unrolling. If the size of SCALAR_RESULTS is greater than
4327 GROUP_SIZE, we reduce them by combining elements modulo
4328 GROUP_SIZE. */
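/* Example (added for illustration): with GROUP_SIZE = 2 and four
   scalar results {r0, r1, r2, r3} coming from two unrolled copies,
   the loop below combines r0 with r2 and r1 with r3, leaving one
   result per SLP group member. */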
4329 if (slp_reduc)
4331 tree res, first_res, new_res;
4332 gimple new_stmt;
4334 /* Reduce multiple scalar results in case of SLP unrolling. */
4335 for (j = group_size; scalar_results.iterate (j, &res);
4336 j++)
4338 first_res = scalar_results[j % group_size];
4339 new_stmt = gimple_build_assign_with_ops (code,
4340 new_scalar_dest, first_res, res);
4341 new_res = make_ssa_name (new_scalar_dest, new_stmt);
4342 gimple_assign_set_lhs (new_stmt, new_res);
4343 gsi_insert_before (&exit_gsi, new_stmt, GSI_SAME_STMT);
4344 scalar_results[j % group_size] = new_res;
4347 else
4348 /* Not SLP - we have one scalar to keep in SCALAR_RESULTS. */
4349 scalar_results.safe_push (new_temp);
4351 extract_scalar_result = false;
4355 /* 2.4 Extract the final scalar result. Create:
4356 s_out3 = extract_field <v_out2, bitpos> */
4358 if (extract_scalar_result)
4360 tree rhs;
4362 if (dump_enabled_p ())
4363 dump_printf_loc (MSG_NOTE, vect_location,
4364 "extract scalar result\n");
4366 if (BYTES_BIG_ENDIAN)
4367 bitpos = size_binop (MULT_EXPR,
4368 bitsize_int (TYPE_VECTOR_SUBPARTS (vectype) - 1),
4369 TYPE_SIZE (scalar_type));
4370 else
4371 bitpos = bitsize_zero_node;
4373 rhs = build3 (BIT_FIELD_REF, scalar_type, new_temp, bitsize, bitpos);
4374 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
4375 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
4376 gimple_assign_set_lhs (epilog_stmt, new_temp);
4377 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4378 scalar_results.safe_push (new_temp);
4381 vect_finalize_reduction:
4383 if (double_reduc)
4384 loop = loop->inner;
4386 /* 2.5 Adjust the final result by the initial value of the reduction
4387 variable. (When such adjustment is not needed, then
4388 'adjustment_def' is zero). For example, if code is PLUS we create:
4389 new_temp = loop_exit_def + adjustment_def */
4391 if (adjustment_def)
4393 gcc_assert (!slp_reduc);
4394 if (nested_in_vect_loop)
4396 new_phi = new_phis[0];
4397 gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) == VECTOR_TYPE);
4398 expr = build2 (code, vectype, PHI_RESULT (new_phi), adjustment_def);
4399 new_dest = vect_create_destination_var (scalar_dest, vectype);
4401 else
4403 new_temp = scalar_results[0];
4404 gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) != VECTOR_TYPE);
4405 expr = build2 (code, scalar_type, new_temp, adjustment_def);
4406 new_dest = vect_create_destination_var (scalar_dest, scalar_type);
4409 epilog_stmt = gimple_build_assign (new_dest, expr);
4410 new_temp = make_ssa_name (new_dest, epilog_stmt);
4411 gimple_assign_set_lhs (epilog_stmt, new_temp);
4412 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4413 if (nested_in_vect_loop)
4415 set_vinfo_for_stmt (epilog_stmt,
4416 new_stmt_vec_info (epilog_stmt, loop_vinfo,
4417 NULL));
4418 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (epilog_stmt)) =
4419 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (new_phi));
4421 if (!double_reduc)
4422 scalar_results.quick_push (new_temp);
4423 else
4424 scalar_results[0] = new_temp;
4426 else
4427 scalar_results[0] = new_temp;
4429 new_phis[0] = epilog_stmt;
4432 /* 2.6 Handle the loop-exit phis. Replace the uses of scalar loop-exit
4433 phis with new adjusted scalar results, i.e., replace use <s_out0>
4434 with use <s_out4>.
4436 Transform:
4437 loop_exit:
4438 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
4439 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
4440 v_out2 = reduce <v_out1>
4441 s_out3 = extract_field <v_out2, 0>
4442 s_out4 = adjust_result <s_out3>
4443 use <s_out0>
4444 use <s_out0>
4446 into:
4448 loop_exit:
4449 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
4450 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
4451 v_out2 = reduce <v_out1>
4452 s_out3 = extract_field <v_out2, 0>
4453 s_out4 = adjust_result <s_out3>
4454 use <s_out4>
4455 use <s_out4> */
4458 /* In an SLP reduction chain we reduce vector results into one vector if
4459 necessary, hence we set GROUP_SIZE to 1 here. SCALAR_DEST is the LHS of
4460 the last stmt in the reduction chain, since we are looking for the loop
4461 exit phi node. */
4462 if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
4464 scalar_dest = gimple_assign_lhs (
4465 SLP_TREE_SCALAR_STMTS (slp_node)[group_size - 1]);
4466 group_size = 1;
4469 /* In SLP we may have several statements in NEW_PHIS and REDUCTION_PHIS (in
4470 case GROUP_SIZE is greater than the vectorization factor). Therefore, we
4471 need to match SCALAR_RESULTS with the corresponding statements. The first
4472 (GROUP_SIZE / number of new vector stmts) scalar results correspond to
4473 the first vector stmt, etc.
4474 (RATIO is equal to (GROUP_SIZE / number of new vector stmts)). */
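/* E.g. (an added illustration): GROUP_SIZE = 4 with two stmts in
   NEW_PHIS gives RATIO = 2, so scalar results 0-1 map to the first
   vector stmt and results 2-3 to the second (selected by k / ratio
   in the loop below). */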
4475 if (group_size > new_phis.length ())
4477 ratio = group_size / new_phis.length ();
4478 gcc_assert (!(group_size % new_phis.length ()));
4480 else
4481 ratio = 1;
4483 for (k = 0; k < group_size; k++)
4485 if (k % ratio == 0)
4487 epilog_stmt = new_phis[k / ratio];
4488 reduction_phi = reduction_phis[k / ratio];
4489 if (double_reduc)
4490 inner_phi = inner_phis[k / ratio];
4493 if (slp_reduc)
4495 gimple current_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[k];
4497 orig_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (current_stmt));
4498 /* SLP statements can't participate in patterns. */
4499 gcc_assert (!orig_stmt);
4500 scalar_dest = gimple_assign_lhs (current_stmt);
4503 phis.create (3);
4504 /* Find the loop-closed-use at the loop exit of the original scalar
4505 result. (The reduction result is expected to have two immediate uses -
4506 one at the latch block, and one at the loop exit). */
4507 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
4508 if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p)))
4509 && !is_gimple_debug (USE_STMT (use_p)))
4510 phis.safe_push (USE_STMT (use_p));
4512 /* While we expect to have found an exit_phi because of loop-closed-ssa
4513 form, we can end up without one if the scalar cycle is dead. */
4515 FOR_EACH_VEC_ELT (phis, i, exit_phi)
4517 if (outer_loop)
4519 stmt_vec_info exit_phi_vinfo = vinfo_for_stmt (exit_phi);
4520 gimple vect_phi;
4522 /* FORNOW. Currently not supporting the case that an inner-loop
4523 reduction is not used in the outer-loop (but only outside the
4524 outer-loop), unless it is double reduction. */
4525 gcc_assert ((STMT_VINFO_RELEVANT_P (exit_phi_vinfo)
4526 && !STMT_VINFO_LIVE_P (exit_phi_vinfo))
4527 || double_reduc);
4529 if (double_reduc)
4530 STMT_VINFO_VEC_STMT (exit_phi_vinfo) = inner_phi;
4531 else
4532 STMT_VINFO_VEC_STMT (exit_phi_vinfo) = epilog_stmt;
4533 if (!double_reduc
4534 || STMT_VINFO_DEF_TYPE (exit_phi_vinfo)
4535 != vect_double_reduction_def)
4536 continue;
4538 /* Handle double reduction:
4540 stmt1: s1 = phi <s0, s2> - double reduction phi (outer loop)
4541 stmt2: s3 = phi <s1, s4> - (regular) reduc phi (inner loop)
4542 stmt3: s4 = use (s3) - (regular) reduc stmt (inner loop)
4543 stmt4: s2 = phi <s4> - double reduction stmt (outer loop)
4545 At that point the regular reduction (stmt2 and stmt3) is
4546 already vectorized, as well as the exit phi node, stmt4.
4547 Here we vectorize the phi node of double reduction, stmt1, and
4548 update all relevant statements. */
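/* In source terms (an added illustration), a double reduction is an
   inner-loop reduction carried around the outer loop, e.g.:

     int s = s0;
     for (i = 0; i < n; i++)      // stmt1: s1 = phi <s0, s2>
       for (j = 0; j < m; j++)    // stmt2: s3 = phi <s1, s4>
         s += a[i][j];            // stmt3: s4 = s3 + a[i][j]
                                  // stmt4: s2 = phi <s4>  */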
4550 /* Go through all the uses of s2 to find double reduction phi
4551 node, i.e., stmt1 above. */
4552 orig_name = PHI_RESULT (exit_phi);
4553 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
4555 stmt_vec_info use_stmt_vinfo;
4556 stmt_vec_info new_phi_vinfo;
4557 tree vect_phi_init, preheader_arg, vect_phi_res, init_def;
4558 basic_block bb = gimple_bb (use_stmt);
4559 gimple use;
4561 /* Check that USE_STMT is really a double reduction phi
4562 node. */
4563 if (gimple_code (use_stmt) != GIMPLE_PHI
4564 || gimple_phi_num_args (use_stmt) != 2
4565 || bb->loop_father != outer_loop)
4566 continue;
4567 use_stmt_vinfo = vinfo_for_stmt (use_stmt);
4568 if (!use_stmt_vinfo
4569 || STMT_VINFO_DEF_TYPE (use_stmt_vinfo)
4570 != vect_double_reduction_def)
4571 continue;
4573 /* Create vector phi node for double reduction:
4574 vs1 = phi <vs0, vs2>
4575 vs1 was created previously in this function by a call to
4576 vect_get_vec_def_for_operand and is stored in
4577 vec_initial_def;
4578 vs2 is defined by INNER_PHI, the vectorized EXIT_PHI;
4579 vs0 is created here. */
4581 /* Create vector phi node. */
4582 vect_phi = create_phi_node (vec_initial_def, bb);
4583 new_phi_vinfo = new_stmt_vec_info (vect_phi,
4584 loop_vec_info_for_loop (outer_loop), NULL);
4585 set_vinfo_for_stmt (vect_phi, new_phi_vinfo);
4587 /* Create vs0 - initial def of the double reduction phi. */
4588 preheader_arg = PHI_ARG_DEF_FROM_EDGE (use_stmt,
4589 loop_preheader_edge (outer_loop));
4590 init_def = get_initial_def_for_reduction (stmt,
4591 preheader_arg, NULL);
4592 vect_phi_init = vect_init_vector (use_stmt, init_def,
4593 vectype, NULL);
4595 /* Update phi node arguments with vs0 and vs2. */
4596 add_phi_arg (vect_phi, vect_phi_init,
4597 loop_preheader_edge (outer_loop),
4598 UNKNOWN_LOCATION);
4599 add_phi_arg (vect_phi, PHI_RESULT (inner_phi),
4600 loop_latch_edge (outer_loop), UNKNOWN_LOCATION);
4601 if (dump_enabled_p ())
4603 dump_printf_loc (MSG_NOTE, vect_location,
4604 "created double reduction phi node: ");
4605 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vect_phi, 0);
4606 dump_printf (MSG_NOTE, "\n");
4609 vect_phi_res = PHI_RESULT (vect_phi);
4611 /* Replace the use, i.e., set the correct vs1 in the regular
4612 reduction phi node. FORNOW, NCOPIES is always 1, so the
4613 loop is redundant. */
4614 use = reduction_phi;
4615 for (j = 0; j < ncopies; j++)
4617 edge pr_edge = loop_preheader_edge (loop);
4618 SET_PHI_ARG_DEF (use, pr_edge->dest_idx, vect_phi_res);
4619 use = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (use));
4625 phis.release ();
4626 if (nested_in_vect_loop)
4628 if (double_reduc)
4629 loop = outer_loop;
4630 else
4631 continue;
4634 phis.create (3);
4635 /* Find the loop-closed-use at the loop exit of the original scalar
4636 result. (The reduction result is expected to have two immediate uses,
4637 one at the latch block, and one at the loop exit). For double
4638 reductions we are looking for exit phis of the outer loop. */
4639 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
4641 if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p))))
4643 if (!is_gimple_debug (USE_STMT (use_p)))
4644 phis.safe_push (USE_STMT (use_p));
4646 else
4648 if (double_reduc && gimple_code (USE_STMT (use_p)) == GIMPLE_PHI)
4650 tree phi_res = PHI_RESULT (USE_STMT (use_p));
4652 FOR_EACH_IMM_USE_FAST (phi_use_p, phi_imm_iter, phi_res)
4654 if (!flow_bb_inside_loop_p (loop,
4655 gimple_bb (USE_STMT (phi_use_p)))
4656 && !is_gimple_debug (USE_STMT (phi_use_p)))
4657 phis.safe_push (USE_STMT (phi_use_p));
4663 FOR_EACH_VEC_ELT (phis, i, exit_phi)
4665 /* Replace the uses: */
4666 orig_name = PHI_RESULT (exit_phi);
4667 scalar_result = scalar_results[k];
4668 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
4669 FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
4670 SET_USE (use_p, scalar_result);
4673 phis.release ();
4678 /* Function vectorizable_reduction.
4680 Check if STMT performs a reduction operation that can be vectorized.
4681 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4682 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
4683 Return FALSE if not a vectorizable STMT, TRUE otherwise.
4685 This function also handles reduction idioms (patterns) that have been
4686 recognized in advance during vect_pattern_recog. In this case, STMT may be
4687 of this form:
4688 X = pattern_expr (arg0, arg1, ..., X)
4689 and its STMT_VINFO_RELATED_STMT points to the last stmt in the original
4690 sequence that had been detected and replaced by the pattern-stmt (STMT).
4692 In some cases of reduction patterns, the type of the reduction variable X is
4693 different than the type of the other arguments of STMT.
4694 In such cases, the vectype that is used when transforming STMT into a vector
4695 stmt is different than the vectype that is used to determine the
4696 vectorization factor, because it consists of a different number of elements
4697 than the actual number of elements that are being operated upon in parallel.
4699 For example, consider an accumulation of shorts into an int accumulator.
4700 On some targets it's possible to vectorize this pattern operating on 8
4701 shorts at a time (hence, the vectype for purposes of determining the
4702 vectorization factor should be V8HI); on the other hand, the vectype that
4703 is used to create the vector form is actually V4SI (the type of the result).
4705 Upon entry to this function, STMT_VINFO_VECTYPE records the vectype that
4706 indicates the actual level of parallelism (V8HI in the example), so
4707 that the right vectorization factor is derived. This vectype
4708 corresponds to the type of arguments to the reduction stmt, and should *NOT*
4709 be used to create the vectorized stmt. The right vectype for the vectorized
4710 stmt is obtained from the type of the result X:
4711 get_vectype_for_scalar_type (TREE_TYPE (X))
4713 This means that, contrary to "regular" reductions (or "regular" stmts in
4714 general), the following equation:
4715 STMT_VINFO_VECTYPE == get_vectype_for_scalar_type (TREE_TYPE (X))
4716 does *NOT* necessarily hold for reduction patterns. */
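/* For instance (an added illustration of the short-into-int case
   described above):

     short a[N];
     int acc = 0;
     for (i = 0; i < N; i++)
       acc += a[i];               // recognized as a widen_sum pattern

   STMT_VINFO_VECTYPE is V8HI (eight shorts are consumed per vector
   stmt, which fixes the vectorization factor), while the vectorized
   stmt itself is created with V4SI = get_vectype_for_scalar_type
   (int). */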
4718 bool
4719 vectorizable_reduction (gimple stmt, gimple_stmt_iterator *gsi,
4720 gimple *vec_stmt, slp_tree slp_node)
4722 tree vec_dest;
4723 tree scalar_dest;
4724 tree loop_vec_def0 = NULL_TREE, loop_vec_def1 = NULL_TREE;
4725 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4726 tree vectype_out = STMT_VINFO_VECTYPE (stmt_info);
4727 tree vectype_in = NULL_TREE;
4728 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4729 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
4730 enum tree_code code, orig_code, epilog_reduc_code;
4731 enum machine_mode vec_mode;
4732 int op_type;
4733 optab optab, reduc_optab;
4734 tree new_temp = NULL_TREE;
4735 tree def;
4736 gimple def_stmt;
4737 enum vect_def_type dt;
4738 gimple new_phi = NULL;
4739 tree scalar_type;
4740 bool is_simple_use;
4741 gimple orig_stmt;
4742 stmt_vec_info orig_stmt_info;
4743 tree expr = NULL_TREE;
4744 int i;
4745 int ncopies;
4746 int epilog_copies;
4747 stmt_vec_info prev_stmt_info, prev_phi_info;
4748 bool single_defuse_cycle = false;
4749 tree reduc_def = NULL_TREE;
4750 gimple new_stmt = NULL;
4751 int j;
4752 tree ops[3];
4753 bool nested_cycle = false, found_nested_cycle_def = false;
4754 gimple reduc_def_stmt = NULL;
4755 /* The default is that the reduction variable is the last operand in the statement. */
4756 int reduc_index = 2;
4757 bool double_reduc = false, dummy;
4758 basic_block def_bb;
4759 struct loop * def_stmt_loop, *outer_loop = NULL;
4760 tree def_arg;
4761 gimple def_arg_stmt;
4762 auto_vec<tree> vec_oprnds0;
4763 auto_vec<tree> vec_oprnds1;
4764 auto_vec<tree> vect_defs;
4765 auto_vec<gimple> phis;
4766 int vec_num;
4767 tree def0, def1, tem, op0, op1 = NULL_TREE;
4769 /* In case of reduction chain we switch to the first stmt in the chain, but
4770 we don't update STMT_INFO, since only the last stmt is marked as reduction
4771 and has reduction properties. */
4772 if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
4773 stmt = GROUP_FIRST_ELEMENT (stmt_info);
4775 if (nested_in_vect_loop_p (loop, stmt))
4777 outer_loop = loop;
4778 loop = loop->inner;
4779 nested_cycle = true;
4782 /* 1. Is vectorizable reduction? */
4783 /* Not supportable if the reduction variable is used in the loop, unless
4784 it's a reduction chain. */
4785 if (STMT_VINFO_RELEVANT (stmt_info) > vect_used_in_outer
4786 && !GROUP_FIRST_ELEMENT (stmt_info))
4787 return false;
4789 /* Reductions that are not used even in an enclosing outer-loop
4790 are expected to be "live" (used out of the loop). */
4791 if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_scope
4792 && !STMT_VINFO_LIVE_P (stmt_info))
4793 return false;
4795 /* Make sure it was already recognized as a reduction computation. */
4796 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_reduction_def
4797 && STMT_VINFO_DEF_TYPE (stmt_info) != vect_nested_cycle)
4798 return false;
4800 /* 2. Has this been recognized as a reduction pattern?
4802 Check if STMT represents a pattern that has been recognized
4803 in earlier analysis stages. For stmts that represent a pattern,
4804 the STMT_VINFO_RELATED_STMT field records the last stmt in
4805 the original sequence that constitutes the pattern. */
4807 orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
4808 if (orig_stmt)
4810 orig_stmt_info = vinfo_for_stmt (orig_stmt);
4811 gcc_assert (STMT_VINFO_IN_PATTERN_P (orig_stmt_info));
4812 gcc_assert (!STMT_VINFO_IN_PATTERN_P (stmt_info));
4815 /* 3. Check the operands of the operation. The first operands are defined
4816 inside the loop body. The last operand is the reduction variable,
4817 which is defined by the loop-header-phi. */
4819 gcc_assert (is_gimple_assign (stmt));
4821 /* Flatten RHS. */
4822 switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
4824 case GIMPLE_SINGLE_RHS:
4825 op_type = TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt));
4826 if (op_type == ternary_op)
4828 tree rhs = gimple_assign_rhs1 (stmt);
4829 ops[0] = TREE_OPERAND (rhs, 0);
4830 ops[1] = TREE_OPERAND (rhs, 1);
4831 ops[2] = TREE_OPERAND (rhs, 2);
4832 code = TREE_CODE (rhs);
4834 else
4835 return false;
4836 break;
4838 case GIMPLE_BINARY_RHS:
4839 code = gimple_assign_rhs_code (stmt);
4840 op_type = TREE_CODE_LENGTH (code);
4841 gcc_assert (op_type == binary_op);
4842 ops[0] = gimple_assign_rhs1 (stmt);
4843 ops[1] = gimple_assign_rhs2 (stmt);
4844 break;
4846 case GIMPLE_TERNARY_RHS:
4847 code = gimple_assign_rhs_code (stmt);
4848 op_type = TREE_CODE_LENGTH (code);
4849 gcc_assert (op_type == ternary_op);
4850 ops[0] = gimple_assign_rhs1 (stmt);
4851 ops[1] = gimple_assign_rhs2 (stmt);
4852 ops[2] = gimple_assign_rhs3 (stmt);
4853 break;
4855 case GIMPLE_UNARY_RHS:
4856 return false;
4858 default:
4859 gcc_unreachable ();
4862 if (code == COND_EXPR && slp_node)
4863 return false;
4865 scalar_dest = gimple_assign_lhs (stmt);
4866 scalar_type = TREE_TYPE (scalar_dest);
4867 if (!POINTER_TYPE_P (scalar_type) && !INTEGRAL_TYPE_P (scalar_type)
4868 && !SCALAR_FLOAT_TYPE_P (scalar_type))
4869 return false;
4871 /* Do not try to vectorize bit-precision reductions. */
4872 if ((TYPE_PRECISION (scalar_type)
4873 != GET_MODE_PRECISION (TYPE_MODE (scalar_type))))
4874 return false;
4876 /* All uses but the last are expected to be defined in the loop.
4877 The last use is the reduction variable. In case of a nested cycle this
4878 assumption is not true: we use reduc_index to record the index of the
4879 reduction variable. */
4880 for (i = 0; i < op_type - 1; i++)
4882 /* The condition of COND_EXPR is checked in vectorizable_condition(). */
4883 if (i == 0 && code == COND_EXPR)
4884 continue;
4886 is_simple_use = vect_is_simple_use_1 (ops[i], stmt, loop_vinfo, NULL,
4887 &def_stmt, &def, &dt, &tem);
4888 if (!vectype_in)
4889 vectype_in = tem;
4890 gcc_assert (is_simple_use);
4892 if (dt != vect_internal_def
4893 && dt != vect_external_def
4894 && dt != vect_constant_def
4895 && dt != vect_induction_def
4896 && !(dt == vect_nested_cycle && nested_cycle))
4897 return false;
4899 if (dt == vect_nested_cycle)
4901 found_nested_cycle_def = true;
4902 reduc_def_stmt = def_stmt;
4903 reduc_index = i;
4907 is_simple_use = vect_is_simple_use_1 (ops[i], stmt, loop_vinfo, NULL,
4908 &def_stmt, &def, &dt, &tem);
4909 if (!vectype_in)
4910 vectype_in = tem;
4911 gcc_assert (is_simple_use);
4912 if (!found_nested_cycle_def)
4913 reduc_def_stmt = def_stmt;
4915 if (reduc_def_stmt && gimple_code (reduc_def_stmt) != GIMPLE_PHI)
4916 return false;
4918 if (!(dt == vect_reduction_def
4919 || dt == vect_nested_cycle
4920 || ((dt == vect_internal_def || dt == vect_external_def
4921 || dt == vect_constant_def || dt == vect_induction_def)
4922 && nested_cycle && found_nested_cycle_def)))
4924 /* For pattern recognized stmts, orig_stmt might be a reduction,
4925 but some helper statements for the pattern might not, or
4926 might be COND_EXPRs with reduction uses in the condition. */
4927 gcc_assert (orig_stmt);
4928 return false;
4931 if (orig_stmt)
4932 gcc_assert (orig_stmt == vect_is_simple_reduction (loop_vinfo,
4933 reduc_def_stmt,
4934 !nested_cycle,
4935 &dummy));
4936 else
4938 gimple tmp = vect_is_simple_reduction (loop_vinfo, reduc_def_stmt,
4939 !nested_cycle, &dummy);
4940 /* We changed STMT to be the first stmt in the reduction chain, hence we
4941 check that in this case the first element in the chain is STMT. */
4942 gcc_assert (stmt == tmp
4943 || GROUP_FIRST_ELEMENT (vinfo_for_stmt (tmp)) == stmt);
4946 if (STMT_VINFO_LIVE_P (vinfo_for_stmt (reduc_def_stmt)))
4947 return false;
4949 if (slp_node || PURE_SLP_STMT (stmt_info))
4950 ncopies = 1;
4951 else
4952 ncopies = (LOOP_VINFO_VECT_FACTOR (loop_vinfo)
4953 / TYPE_VECTOR_SUBPARTS (vectype_in));
4955 gcc_assert (ncopies >= 1);
4957 vec_mode = TYPE_MODE (vectype_in);
4959 if (code == COND_EXPR)
4961 if (!vectorizable_condition (stmt, gsi, NULL, ops[reduc_index], 0, NULL))
4963 if (dump_enabled_p ())
4964 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4965 "unsupported condition in reduction\n");
4967 return false;
4970 else
4972 /* 4. Supportable by target? */
4974 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR
4975 || code == LROTATE_EXPR || code == RROTATE_EXPR)
4977 /* Shifts and rotates are only supported by vectorizable_shifts,
4978 not vectorizable_reduction. */
4979 if (dump_enabled_p ())
4980 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4981 "unsupported shift or rotation.\n");
4982 return false;
4985 /* 4.1. check support for the operation in the loop */
4986 optab = optab_for_tree_code (code, vectype_in, optab_default);
4987 if (!optab)
4989 if (dump_enabled_p ())
4990 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4991 "no optab.\n");
4993 return false;
4996 if (optab_handler (optab, vec_mode) == CODE_FOR_nothing)
4998 if (dump_enabled_p ())
4999 dump_printf (MSG_NOTE, "op not supported by target.\n");
5001 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
5002 || LOOP_VINFO_VECT_FACTOR (loop_vinfo)
5003 < vect_min_worthwhile_factor (code))
5004 return false;
5006 if (dump_enabled_p ())
5007 dump_printf (MSG_NOTE, "proceeding using word mode.\n");
5010 /* Worthwhile without SIMD support? */
5011 if (!VECTOR_MODE_P (TYPE_MODE (vectype_in))
5012 && LOOP_VINFO_VECT_FACTOR (loop_vinfo)
5013 < vect_min_worthwhile_factor (code))
5015 if (dump_enabled_p ())
5016 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5017 "not worthwhile without SIMD support.\n");
5019 return false;
5023 /* 4.2. Check support for the epilog operation.
5025 If STMT represents a reduction pattern, then the type of the
5026 reduction variable may be different than the type of the rest
5027 of the arguments. For example, consider the case of accumulation
5028 of shorts into an int accumulator; The original code:
5029 S1: int_a = (int) short_a;
5030 orig_stmt-> S2: int_acc = plus <int_a ,int_acc>;
5032 was replaced with:
5033 STMT: int_acc = widen_sum <short_a, int_acc>
5035 This means that:
5036 1. The tree-code that is used to create the vector operation in the
5037 epilog code (that reduces the partial results) is not the
5038 tree-code of STMT, but is rather the tree-code of the original
5039 stmt from the pattern that STMT is replacing. I.e, in the example
5040 above we want to use 'widen_sum' in the loop, but 'plus' in the
5041 epilog.
5042 2. The type (mode) we use to check available target support
5043 for the vector operation to be created in the *epilog*, is
5044 determined by the type of the reduction variable (in the example
5045 above we'd check this: optab_handler (plus_optab, vect_int_mode])).
5046 However the type (mode) we use to check available target support
5047 for the vector operation to be created *inside the loop*, is
5048 determined by the type of the other arguments to STMT (in the
5049 example we'd check this: optab_handler (widen_sum_optab,
5050 vect_short_mode)).
5052 This is contrary to "regular" reductions, in which the types of all
5053 the arguments are the same as the type of the reduction variable.
5054 For "regular" reductions we can therefore use the same vector type
5055 (and also the same tree-code) when generating the epilog code and
5056 when generating the code inside the loop. */
5058 if (orig_stmt)
5060 /* This is a reduction pattern: get the vectype from the type of the
5061 reduction variable, and get the tree-code from orig_stmt. */
5062 orig_code = gimple_assign_rhs_code (orig_stmt);
5063 gcc_assert (vectype_out);
5064 vec_mode = TYPE_MODE (vectype_out);
5066 else
5068 /* Regular reduction: the same vectype and tree-code as used for
5069 the vector code inside the loop can be used for the epilog code. */
5070 orig_code = code;
5073 if (nested_cycle)
5075 def_bb = gimple_bb (reduc_def_stmt);
5076 def_stmt_loop = def_bb->loop_father;
5077 def_arg = PHI_ARG_DEF_FROM_EDGE (reduc_def_stmt,
5078 loop_preheader_edge (def_stmt_loop));
5079 if (TREE_CODE (def_arg) == SSA_NAME
5080 && (def_arg_stmt = SSA_NAME_DEF_STMT (def_arg))
5081 && gimple_code (def_arg_stmt) == GIMPLE_PHI
5082 && flow_bb_inside_loop_p (outer_loop, gimple_bb (def_arg_stmt))
5083 && vinfo_for_stmt (def_arg_stmt)
5084 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_arg_stmt))
5085 == vect_double_reduction_def)
5086 double_reduc = true;
5089 epilog_reduc_code = ERROR_MARK;
5090 if (reduction_code_for_scalar_code (orig_code, &epilog_reduc_code))
5092 reduc_optab = optab_for_tree_code (epilog_reduc_code, vectype_out,
5093 optab_default);
5094 if (!reduc_optab)
5096 if (dump_enabled_p ())
5097 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5098 "no optab for reduction.\n");
5100 epilog_reduc_code = ERROR_MARK;
5103 if (reduc_optab
5104 && optab_handler (reduc_optab, vec_mode) == CODE_FOR_nothing)
5106 if (dump_enabled_p ())
5107 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5108 "reduc op not supported by target.\n");
5110 epilog_reduc_code = ERROR_MARK;
5113 else
5115 if (!nested_cycle || double_reduc)
5117 if (dump_enabled_p ())
5118 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5119 "no reduc code for scalar code.\n");
5121 return false;
5125 if (double_reduc && ncopies > 1)
5127 if (dump_enabled_p ())
5128 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5129 "multiple types in double reduction\n");
5131 return false;
5134 /* In case of widening multiplication by a constant, we update the type
5135 of the constant to be the type of the other operand. We check that the
5136 constant fits the type in the pattern recognition pass. */
5137 if (code == DOT_PROD_EXPR
5138 && !types_compatible_p (TREE_TYPE (ops[0]), TREE_TYPE (ops[1])))
5140 if (TREE_CODE (ops[0]) == INTEGER_CST)
5141 ops[0] = fold_convert (TREE_TYPE (ops[1]), ops[0]);
5142 else if (TREE_CODE (ops[1]) == INTEGER_CST)
5143 ops[1] = fold_convert (TREE_TYPE (ops[0]), ops[1]);
5144 else
5146 if (dump_enabled_p ())
5147 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5148 "invalid types in dot-prod\n");
5150 return false;
5154 if (!vec_stmt) /* transformation not required. */
5156 if (!vect_model_reduction_cost (stmt_info, epilog_reduc_code, ncopies))
5157 return false;
5158 STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
5159 return true;
5162 /** Transform. **/
5164 if (dump_enabled_p ())
5165 dump_printf_loc (MSG_NOTE, vect_location, "transform reduction.\n");
5167 /* FORNOW: Multiple types are not supported for condition. */
5168 if (code == COND_EXPR)
5169 gcc_assert (ncopies == 1);
5171 /* Create the destination vector */
5172 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
5174 /* In case the vectorization factor (VF) is bigger than the number
5175 of elements that we can fit in a vectype (nunits), we have to generate
5176 more than one vector stmt - i.e - we need to "unroll" the
5177 vector stmt by a factor VF/nunits. For more details see documentation
5178 in vectorizable_operation. */
5180 /* If the reduction is used in an outer loop we need to generate
5181 VF intermediate results, like so (e.g. for ncopies=2):
5182 r0 = phi (init, r0)
5183 r1 = phi (init, r1)
5184 r0 = x0 + r0;
5185 r1 = x1 + r1;
5186 (i.e. we generate VF results in 2 registers).
5187 In this case we have a separate def-use cycle for each copy, and therefore
5188 for each copy we get the vector def for the reduction variable from the
5189 respective phi node created for this copy.
5191 Otherwise (the reduction is unused in the loop nest), we can combine
5192 together intermediate results, like so (e.g. for ncopies=2):
5193 r = phi (init, r)
5194 r = x0 + r;
5195 r = x1 + r;
5196 (i.e. we generate VF/2 results in a single register).
5197 In this case for each copy we get the vector def for the reduction variable
5198 from the vectorized reduction operation generated in the previous iteration.
5201 if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_scope)
5203 single_defuse_cycle = true;
5204 epilog_copies = 1;
5206 else
5207 epilog_copies = ncopies;
5209 prev_stmt_info = NULL;
5210 prev_phi_info = NULL;
5211 if (slp_node)
5213 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
5214 gcc_assert (TYPE_VECTOR_SUBPARTS (vectype_out)
5215 == TYPE_VECTOR_SUBPARTS (vectype_in));
5217 else
5219 vec_num = 1;
5220 vec_oprnds0.create (1);
5221 if (op_type == ternary_op)
5222 vec_oprnds1.create (1);
5225 phis.create (vec_num);
5226 vect_defs.create (vec_num);
5227 if (!slp_node)
5228 vect_defs.quick_push (NULL_TREE);
5230 for (j = 0; j < ncopies; j++)
5232 if (j == 0 || !single_defuse_cycle)
5234 for (i = 0; i < vec_num; i++)
5236 /* Create the reduction-phi that defines the reduction
5237 operand. */
5238 new_phi = create_phi_node (vec_dest, loop->header);
5239 set_vinfo_for_stmt (new_phi,
5240 new_stmt_vec_info (new_phi, loop_vinfo,
5241 NULL));
5242 if (j == 0 || slp_node)
5243 phis.quick_push (new_phi);
5247 if (code == COND_EXPR)
5249 gcc_assert (!slp_node);
5250 vectorizable_condition (stmt, gsi, vec_stmt,
5251 PHI_RESULT (phis[0]),
5252 reduc_index, NULL);
5253 /* Multiple types are not supported for condition. */
5254 break;
5257 /* Handle uses. */
5258 if (j == 0)
5260 op0 = ops[!reduc_index];
5261 if (op_type == ternary_op)
5263 if (reduc_index == 0)
5264 op1 = ops[2];
5265 else
5266 op1 = ops[1];
5269 if (slp_node)
5270 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
5271 slp_node, -1);
5272 else
5274 loop_vec_def0 = vect_get_vec_def_for_operand (ops[!reduc_index],
5275 stmt, NULL);
5276 vec_oprnds0.quick_push (loop_vec_def0);
5277 if (op_type == ternary_op)
5279 loop_vec_def1 = vect_get_vec_def_for_operand (op1, stmt,
5280 NULL);
5281 vec_oprnds1.quick_push (loop_vec_def1);
5285 else
5287 if (!slp_node)
5289 enum vect_def_type dt;
5290 gimple dummy_stmt;
5291 tree dummy;
5293 vect_is_simple_use (ops[!reduc_index], stmt, loop_vinfo, NULL,
5294 &dummy_stmt, &dummy, &dt);
5295 loop_vec_def0 = vect_get_vec_def_for_stmt_copy (dt,
5296 loop_vec_def0);
5297 vec_oprnds0[0] = loop_vec_def0;
5298 if (op_type == ternary_op)
5300 vect_is_simple_use (op1, stmt, loop_vinfo, NULL, &dummy_stmt,
5301 &dummy, &dt);
5302 loop_vec_def1 = vect_get_vec_def_for_stmt_copy (dt,
5303 loop_vec_def1);
5304 vec_oprnds1[0] = loop_vec_def1;
5308 if (single_defuse_cycle)
5309 reduc_def = gimple_assign_lhs (new_stmt);
5311 STMT_VINFO_RELATED_STMT (prev_phi_info) = new_phi;
5314 FOR_EACH_VEC_ELT (vec_oprnds0, i, def0)
5316 if (slp_node)
5317 reduc_def = PHI_RESULT (phis[i]);
5318 else
5320 if (!single_defuse_cycle || j == 0)
5321 reduc_def = PHI_RESULT (new_phi);
5324 def1 = ((op_type == ternary_op)
5325 ? vec_oprnds1[i] : NULL);
5326 if (op_type == binary_op)
5328 if (reduc_index == 0)
5329 expr = build2 (code, vectype_out, reduc_def, def0);
5330 else
5331 expr = build2 (code, vectype_out, def0, reduc_def);
5333 else
5335 if (reduc_index == 0)
5336 expr = build3 (code, vectype_out, reduc_def, def0, def1);
5337 else
5339 if (reduc_index == 1)
5340 expr = build3 (code, vectype_out, def0, reduc_def, def1);
5341 else
5342 expr = build3 (code, vectype_out, def0, def1, reduc_def);
5346 new_stmt = gimple_build_assign (vec_dest, expr);
5347 new_temp = make_ssa_name (vec_dest, new_stmt);
5348 gimple_assign_set_lhs (new_stmt, new_temp);
5349 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5351 if (slp_node)
5353 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
5354 vect_defs.quick_push (new_temp);
5356 else
5357 vect_defs[0] = new_temp;
5360 if (slp_node)
5361 continue;
5363 if (j == 0)
5364 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
5365 else
5366 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
5368 prev_stmt_info = vinfo_for_stmt (new_stmt);
5369 prev_phi_info = vinfo_for_stmt (new_phi);
5372 /* Finalize the reduction-phi (set its arguments) and create the
5373 epilog reduction code. */
5374 if ((!single_defuse_cycle || code == COND_EXPR) && !slp_node)
5376 new_temp = gimple_assign_lhs (*vec_stmt);
5377 vect_defs[0] = new_temp;
5380 vect_create_epilog_for_reduction (vect_defs, stmt, epilog_copies,
5381 epilog_reduc_code, phis, reduc_index,
5382 double_reduc, slp_node);
5384 return true;
5387 /* Function vect_min_worthwhile_factor.
5389 For a loop where we could vectorize the operation indicated by CODE,
5390 return the minimum vectorization factor that makes it worthwhile
5391 to use generic vectors. */
5392 int
5393 vect_min_worthwhile_factor (enum tree_code code)
5395 switch (code)
5397 case PLUS_EXPR:
5398 case MINUS_EXPR:
5399 case NEGATE_EXPR:
5400 return 4;
5402 case BIT_AND_EXPR:
5403 case BIT_IOR_EXPR:
5404 case BIT_XOR_EXPR:
5405 case BIT_NOT_EXPR:
5406 return 2;
5408 default:
5409 return INT_MAX;
5414 /* Function vectorizable_induction
5416 Check if PHI performs an induction computation that can be vectorized.
5417 If VEC_STMT is also passed, vectorize the induction PHI: create a vectorized
5418 phi to replace it, put it in VEC_STMT, and add it to the same basic block.
5419 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
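/* As an illustration (not from the sources): for

     for (i = 0; i < n; i++)
       a[i] = i;                  // PHI is the induction variable 'i'

   with four lanes the vectorized phi starts at {0, 1, 2, 3} and the
   loop-carried update adds the step vector {4, 4, 4, 4}; building
   these is the job of get_initial_def_for_induction, called in the
   transform phase below. */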
5421 bool
5422 vectorizable_induction (gimple phi, gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
5423 gimple *vec_stmt)
5425 stmt_vec_info stmt_info = vinfo_for_stmt (phi);
5426 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
5427 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5428 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
5429 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
5430 int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
5431 tree vec_def;
5433 gcc_assert (ncopies >= 1);
5434 /* FORNOW. These restrictions should be relaxed. */
5435 if (nested_in_vect_loop_p (loop, phi))
5437 imm_use_iterator imm_iter;
5438 use_operand_p use_p;
5439 gimple exit_phi;
5440 edge latch_e;
5441 tree loop_arg;
5443 if (ncopies > 1)
5445 if (dump_enabled_p ())
5446 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5447 "multiple types in nested loop.\n");
5448 return false;
5451 exit_phi = NULL;
5452 latch_e = loop_latch_edge (loop->inner);
5453 loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
5454 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
5456 gimple use_stmt = USE_STMT (use_p);
5457 if (is_gimple_debug (use_stmt))
5458 continue;
5460 if (!flow_bb_inside_loop_p (loop->inner, gimple_bb (use_stmt)))
5462 exit_phi = use_stmt;
5463 break;
5466 if (exit_phi)
5468 stmt_vec_info exit_phi_vinfo = vinfo_for_stmt (exit_phi);
5469 if (!(STMT_VINFO_RELEVANT_P (exit_phi_vinfo)
5470 && !STMT_VINFO_LIVE_P (exit_phi_vinfo)))
5472 if (dump_enabled_p ())
5473 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5474 "inner-loop induction only used outside "
5475 "of the outer vectorized loop.\n");
5476 return false;
5481 if (!STMT_VINFO_RELEVANT_P (stmt_info))
5482 return false;
5484 /* FORNOW: SLP not supported. */
5485 if (STMT_SLP_TYPE (stmt_info))
5486 return false;
5488 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def);
5490 if (gimple_code (phi) != GIMPLE_PHI)
5491 return false;
5493 if (!vec_stmt) /* transformation not required. */
5495 STMT_VINFO_TYPE (stmt_info) = induc_vec_info_type;
5496 if (dump_enabled_p ())
5497 dump_printf_loc (MSG_NOTE, vect_location,
5498 "=== vectorizable_induction ===\n");
5499 vect_model_induction_cost (stmt_info, ncopies);
5500 return true;
5503 /** Transform. **/
5505 if (dump_enabled_p ())
5506 dump_printf_loc (MSG_NOTE, vect_location, "transform induction phi.\n");
5508 vec_def = get_initial_def_for_induction (phi);
5509 *vec_stmt = SSA_NAME_DEF_STMT (vec_def);
5510 return true;
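
/* For example (illustrative only), with VF = 4 the scalar induction

     i_1 = PHI <0, i_2>
     i_2 = i_1 + 1

   is replaced by a vector phi whose preheader argument is the initial
   vector {0, 1, 2, 3} and whose loop-carried update adds the step
   multiplied by VF, i.e. {4, 4, 4, 4}, once per vector iteration; both
   are built by get_initial_def_for_induction.  */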

/* Function vectorizable_live_operation.

   STMT computes a value that is used outside the loop.  Check if
   it can be supported.  */

bool
vectorizable_live_operation (gimple stmt,
                             gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
                             gimple *vec_stmt)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  int i;
  int op_type;
  tree op;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt;
  enum tree_code code;
  enum gimple_rhs_class rhs_class;

  gcc_assert (STMT_VINFO_LIVE_P (stmt_info));

  if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def)
    return false;

  if (!is_gimple_assign (stmt))
    {
      if (gimple_call_internal_p (stmt)
          && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE
          && gimple_call_lhs (stmt)
          && loop->simduid
          && TREE_CODE (gimple_call_arg (stmt, 0)) == SSA_NAME
          && loop->simduid
             == SSA_NAME_VAR (gimple_call_arg (stmt, 0)))
        {
          edge e = single_exit (loop);
          basic_block merge_bb = e->dest;
          imm_use_iterator imm_iter;
          use_operand_p use_p;
          tree lhs = gimple_call_lhs (stmt);

          FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
            {
              gimple use_stmt = USE_STMT (use_p);
              if (gimple_code (use_stmt) == GIMPLE_PHI
                  && gimple_bb (use_stmt) == merge_bb)
                {
                  if (vec_stmt)
                    {
                      tree vfm1
                        = build_int_cst (unsigned_type_node,
                                         loop_vinfo->vectorization_factor - 1);
                      SET_PHI_ARG_DEF (use_stmt, e->dest_idx, vfm1);
                    }
                  return true;
                }
            }
        }

      return false;
    }

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  /* FORNOW. CHECKME.  */
  if (nested_in_vect_loop_p (loop, stmt))
    return false;

  code = gimple_assign_rhs_code (stmt);
  op_type = TREE_CODE_LENGTH (code);
  rhs_class = get_gimple_rhs_class (code);
  gcc_assert (rhs_class != GIMPLE_UNARY_RHS || op_type == unary_op);
  gcc_assert (rhs_class != GIMPLE_BINARY_RHS || op_type == binary_op);

  /* FORNOW: support only if all uses are invariant.  This means
     that the scalar operations can remain in place, unvectorized.
     The original last scalar value that they compute will be used.  */

  for (i = 0; i < op_type; i++)
    {
      if (rhs_class == GIMPLE_SINGLE_RHS)
        op = TREE_OPERAND (gimple_op (stmt, 1), i);
      else
        op = gimple_op (stmt, i + 1);
      if (op
          && !vect_is_simple_use (op, stmt, loop_vinfo, NULL, &def_stmt, &def,
                                  &dt))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "use not simple.\n");
          return false;
        }

      if (dt != vect_external_def && dt != vect_constant_def)
        return false;
    }

  /* No transformation is required for the cases we currently support.  */
  return true;
}
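
/* An example of the currently supported case (illustrative only):

     loop:
       ...
       x_1 = a_5 + b_6;    <-- a_5 and b_6 defined before the loop
       ...
     after the loop:
       ... = x_1;

   Because every operand is loop-invariant, x_1 is the same in all
   iterations; the scalar statement can stay in place unvectorized and
   its (last) value serves the use outside the loop.  */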

/* Kill any debug uses outside LOOP of SSA names defined in STMT.  */

static void
vect_loop_kill_debug_uses (struct loop *loop, gimple stmt)
{
  ssa_op_iter op_iter;
  imm_use_iterator imm_iter;
  def_operand_p def_p;
  gimple ustmt;

  FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
    {
      FOR_EACH_IMM_USE_STMT (ustmt, imm_iter, DEF_FROM_PTR (def_p))
        {
          basic_block bb;

          if (!is_gimple_debug (ustmt))
            continue;

          bb = gimple_bb (ustmt);

          if (!flow_bb_inside_loop_p (loop, bb))
            {
              if (gimple_debug_bind_p (ustmt))
                {
                  if (dump_enabled_p ())
                    dump_printf_loc (MSG_NOTE, vect_location,
                                     "killing debug use\n");

                  gimple_debug_bind_reset_value (ustmt);
                  update_stmt (ustmt);
                }
              else
                gcc_unreachable ();
            }
        }
    }
}
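
/* E.g. (illustrative only): if i_1 is an SSA name defined in LOOP and a
   debug bind

     # DEBUG i => i_1

   appears after the loop, the bind's value is reset so the variable
   shows as optimized out there, instead of referencing a scalar value
   the transformed loop no longer computes.  */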

/* This function builds ni_name = number of iterations.  Statements
   are emitted on the loop preheader edge.  */

static tree
vect_build_loop_niters (loop_vec_info loop_vinfo)
{
  tree ni = unshare_expr (LOOP_VINFO_NITERS (loop_vinfo));
  if (TREE_CODE (ni) == INTEGER_CST)
    return ni;
  else
    {
      tree ni_name, var;
      gimple_seq stmts = NULL;
      edge pe = loop_preheader_edge (LOOP_VINFO_LOOP (loop_vinfo));

      var = create_tmp_var (TREE_TYPE (ni), "niters");
      ni_name = force_gimple_operand (ni, &stmts, false, var);
      if (stmts)
        gsi_insert_seq_on_edge_immediate (pe, stmts);

      return ni_name;
    }
}
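
/* For instance (illustrative only), for a loop with symbolic trip count
   n the gimplification above might emit something like

     niters.3_8 = (unsigned int) n_5(D);

   on the preheader edge and return niters.3_8, whereas a compile-time
   constant count is returned directly as an INTEGER_CST.  */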

/* This function generates the following statements:

   ni_name = number of iterations the loop executes
   ratio = ni_name / vf
   ratio_mult_vf_name = ratio * vf

   and places them on the loop preheader edge.  */

static void
vect_generate_tmps_on_preheader (loop_vec_info loop_vinfo,
                                 tree ni_name,
                                 tree *ratio_mult_vf_name_ptr,
                                 tree *ratio_name_ptr)
{
  tree ni_minus_gap_name;
  tree var;
  tree ratio_name;
  tree ratio_mult_vf_name;
  int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  edge pe = loop_preheader_edge (LOOP_VINFO_LOOP (loop_vinfo));
  tree log_vf;

  log_vf = build_int_cst (TREE_TYPE (ni_name), exact_log2 (vf));

  /* If an epilogue loop is required because of data accesses with gaps, we
     subtract one iteration from the total number of iterations here for
     correct calculation of RATIO.  */
  if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo))
    {
      ni_minus_gap_name = fold_build2 (MINUS_EXPR, TREE_TYPE (ni_name),
                                       ni_name,
                                       build_one_cst (TREE_TYPE (ni_name)));
      if (!is_gimple_val (ni_minus_gap_name))
        {
          var = create_tmp_var (TREE_TYPE (ni_name), "ni_gap");
          gimple stmts = NULL;
          ni_minus_gap_name = force_gimple_operand (ni_minus_gap_name, &stmts,
                                                    true, var);
          gsi_insert_seq_on_edge_immediate (pe, stmts);
        }
    }
  else
    ni_minus_gap_name = ni_name;

  /* Create: ratio = ni >> log2 (vf).  */
  /* ??? As we have ni == number of latch executions + 1, ni could
     have overflowed to zero.  So avoid computing ratio based on ni
     but compute it using the fact that we know ratio will be at least
     one, thus via (ni - vf) >> log2 (vf) + 1.  */
  ratio_name
    = fold_build2 (PLUS_EXPR, TREE_TYPE (ni_name),
                   fold_build2 (RSHIFT_EXPR, TREE_TYPE (ni_name),
                                fold_build2 (MINUS_EXPR, TREE_TYPE (ni_name),
                                             ni_minus_gap_name,
                                             build_int_cst
                                               (TREE_TYPE (ni_name), vf)),
                                log_vf),
                   build_int_cst (TREE_TYPE (ni_name), 1));
  if (!is_gimple_val (ratio_name))
    {
      var = create_tmp_var (TREE_TYPE (ni_name), "bnd");
      gimple stmts = NULL;
      ratio_name = force_gimple_operand (ratio_name, &stmts, true, var);
      gsi_insert_seq_on_edge_immediate (pe, stmts);
    }
  *ratio_name_ptr = ratio_name;

  /* Create: ratio_mult_vf = ratio << log2 (vf).  */

  if (ratio_mult_vf_name_ptr)
    {
      ratio_mult_vf_name = fold_build2 (LSHIFT_EXPR, TREE_TYPE (ratio_name),
                                        ratio_name, log_vf);
      if (!is_gimple_val (ratio_mult_vf_name))
        {
          var = create_tmp_var (TREE_TYPE (ni_name), "ratio_mult_vf");
          gimple stmts = NULL;
          ratio_mult_vf_name = force_gimple_operand (ratio_mult_vf_name,
                                                     &stmts, true, var);
          gsi_insert_seq_on_edge_immediate (pe, stmts);
        }
      *ratio_mult_vf_name_ptr = ratio_mult_vf_name;
    }

  return;
}
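
/* A worked example of the overflow-safe RATIO computation above
   (illustrative only): with an 8-bit unsigned niters type and vf = 4,
   a loop whose latch executes 255 times has ni = 256, which wraps to 0.
   Computing ni >> log2 (vf) directly would yield 0, whereas

     ((ni - vf) >> log2 (vf)) + 1 = ((0 - 4) >> 2) + 1
                                  = (252 >> 2) + 1 = 64,

   the correct 256 / 4.  RATIO_MULT_VF = 64 << 2 then wraps back to
   0 == ni, i.e. the vector loop covers all iterations.  */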

/* Function vect_transform_loop.

   The analysis phase has determined that the loop is vectorizable.
   Vectorize the loop - create vectorized stmts to replace the scalar
   stmts in the loop, and update the loop exit condition.  */

void
vect_transform_loop (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  int i;
  tree ratio = NULL;
  int vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  bool grouped_store;
  bool slp_scheduled = false;
  gimple stmt, pattern_stmt;
  gimple_seq pattern_def_seq = NULL;
  gimple_stmt_iterator pattern_def_si = gsi_none ();
  bool transform_pattern_stmt = false;
  bool check_profitability = false;
  int th;
  /* Record the number of iterations before we started tampering with
     the profile.  */
  gcov_type expected_iterations = expected_loop_iterations_unbounded (loop);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "=== vec_transform_loop ===\n");

  /* If the profile is imprecise, we have a chance to fix it up.  */
  if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
    expected_iterations = LOOP_VINFO_INT_NITERS (loop_vinfo);

  /* Use the more conservative vectorization threshold.  If the number
     of iterations is constant, assume the cost check has been performed
     by our caller.  If the threshold makes all loops profitable that
     run at least the vectorization factor number of times, checking
     is pointless, too.  */
  th = LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo);
  if (th >= LOOP_VINFO_VECT_FACTOR (loop_vinfo) - 1
      && !LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "Profitability threshold is %d loop iterations.\n",
                         th);
      check_profitability = true;
    }

  /* Version the loop first, if required, so the profitability check
     comes first.  */

  if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
      || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
    {
      vect_loop_versioning (loop_vinfo, th, check_profitability);
      check_profitability = false;
    }

  tree ni_name = vect_build_loop_niters (loop_vinfo);
  LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = ni_name;

  /* Peel the loop if there are data refs with unknown alignment.
     Only one data ref with unknown alignment is allowed.  */

  if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo))
    {
      vect_do_peeling_for_alignment (loop_vinfo, ni_name,
                                     th, check_profitability);
      check_profitability = false;
      /* The above adjusts LOOP_VINFO_NITERS, so cause ni_name to
         be re-computed.  */
      ni_name = NULL_TREE;
    }

  /* If the loop has a symbolic number of iterations 'n' (i.e. it's not a
     compile time constant), or it is a constant that doesn't divide by the
     vectorization factor, then an epilog loop needs to be created.
     We therefore duplicate the loop: the original loop will be vectorized,
     and will compute the first (n/VF) iterations.  The second copy of the
     loop will remain scalar and will compute the remaining (n%VF) iterations.
     (VF is the vectorization factor).  */
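
/* For example (illustrative only): with n = 10 and VF = 4 the vectorized
   loop runs ratio = 10 / 4 = 2 iterations, covering ratio_mult_vf = 8
   scalar iterations, and the scalar epilog loop runs the remaining
   10 % 4 = 2.  */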

  if (LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo)
      || LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo))
    {
      tree ratio_mult_vf;
      if (!ni_name)
        ni_name = vect_build_loop_niters (loop_vinfo);
      vect_generate_tmps_on_preheader (loop_vinfo, ni_name, &ratio_mult_vf,
                                       &ratio);
      vect_do_peeling_for_loop_bound (loop_vinfo, ni_name, ratio_mult_vf,
                                      th, check_profitability);
    }
  else if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
    ratio = build_int_cst (TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo)),
                           LOOP_VINFO_INT_NITERS (loop_vinfo)
                           / vectorization_factor);
  else
    {
      if (!ni_name)
        ni_name = vect_build_loop_niters (loop_vinfo);
      vect_generate_tmps_on_preheader (loop_vinfo, ni_name, NULL, &ratio);
    }

  /* 1) Make sure the loop header has exactly two entries
     2) Make sure we have a preheader basic block.  */

  gcc_assert (EDGE_COUNT (loop->header->preds) == 2);

  split_edge (loop_preheader_edge (loop));

  /* FORNOW: the vectorizer supports only loops whose body consists of one
     basic block (header + empty latch).  When the vectorizer supports more
     involved loop forms, the order in which the BBs are traversed needs
     to be reconsidered.  */

  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];
      stmt_vec_info stmt_info;
      gimple phi;

      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        {
          phi = gsi_stmt (si);
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location,
                               "------>vectorizing phi: ");
              dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
              dump_printf (MSG_NOTE, "\n");
            }
          stmt_info = vinfo_for_stmt (phi);
          if (!stmt_info)
            continue;

          if (MAY_HAVE_DEBUG_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
            vect_loop_kill_debug_uses (loop, phi);

          if (!STMT_VINFO_RELEVANT_P (stmt_info)
              && !STMT_VINFO_LIVE_P (stmt_info))
            continue;

          if (STMT_VINFO_VECTYPE (stmt_info)
              && (TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info))
                  != (unsigned HOST_WIDE_INT) vectorization_factor)
              && dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location, "multiple-types.\n");

          if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def)
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_NOTE, vect_location, "transform phi.\n");
              vect_transform_stmt (phi, NULL, NULL, NULL, NULL);
            }
        }

      pattern_stmt = NULL;
      for (si = gsi_start_bb (bb); !gsi_end_p (si) || transform_pattern_stmt;)
        {
          bool is_store;

          if (transform_pattern_stmt)
            stmt = pattern_stmt;
          else
            {
              stmt = gsi_stmt (si);
              /* During vectorization remove existing clobber stmts.  */
              if (gimple_clobber_p (stmt))
                {
                  unlink_stmt_vdef (stmt);
                  gsi_remove (&si, true);
                  release_defs (stmt);
                  continue;
                }
            }

          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location,
                               "------>vectorizing statement: ");
              dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
              dump_printf (MSG_NOTE, "\n");
            }

          stmt_info = vinfo_for_stmt (stmt);

          /* vector stmts created in the outer-loop during vectorization of
             stmts in an inner-loop may not have a stmt_info, and do not
             need to be vectorized.  */
          if (!stmt_info)
            {
              gsi_next (&si);
              continue;
            }

          if (MAY_HAVE_DEBUG_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
            vect_loop_kill_debug_uses (loop, stmt);

          if (!STMT_VINFO_RELEVANT_P (stmt_info)
              && !STMT_VINFO_LIVE_P (stmt_info))
            {
              if (STMT_VINFO_IN_PATTERN_P (stmt_info)
                  && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info))
                  && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
                      || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
                {
                  stmt = pattern_stmt;
                  stmt_info = vinfo_for_stmt (stmt);
                }
              else
                {
                  gsi_next (&si);
                  continue;
                }
            }
          else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
                   && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info))
                   && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
                       || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
            transform_pattern_stmt = true;

          /* If pattern statement has def stmts, vectorize them too.  */
          if (is_pattern_stmt_p (stmt_info))
            {
              if (pattern_def_seq == NULL)
                {
                  pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
                  pattern_def_si = gsi_start (pattern_def_seq);
                }
              else if (!gsi_end_p (pattern_def_si))
                gsi_next (&pattern_def_si);
              if (pattern_def_seq != NULL)
                {
                  gimple pattern_def_stmt = NULL;
                  stmt_vec_info pattern_def_stmt_info = NULL;

                  while (!gsi_end_p (pattern_def_si))
                    {
                      pattern_def_stmt = gsi_stmt (pattern_def_si);
                      pattern_def_stmt_info
                        = vinfo_for_stmt (pattern_def_stmt);
                      if (STMT_VINFO_RELEVANT_P (pattern_def_stmt_info)
                          || STMT_VINFO_LIVE_P (pattern_def_stmt_info))
                        break;
                      gsi_next (&pattern_def_si);
                    }

                  if (!gsi_end_p (pattern_def_si))
                    {
                      if (dump_enabled_p ())
                        {
                          dump_printf_loc (MSG_NOTE, vect_location,
                                           "==> vectorizing pattern def "
                                           "stmt: ");
                          dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
                                            pattern_def_stmt, 0);
                          dump_printf (MSG_NOTE, "\n");
                        }

                      stmt = pattern_def_stmt;
                      stmt_info = pattern_def_stmt_info;
                    }
                  else
                    {
                      pattern_def_si = gsi_none ();
                      transform_pattern_stmt = false;
                    }
                }
              else
                transform_pattern_stmt = false;
            }

          if (STMT_VINFO_VECTYPE (stmt_info))
            {
              unsigned int nunits
                = (unsigned int)
                  TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
              if (!STMT_SLP_TYPE (stmt_info)
                  && nunits != (unsigned int) vectorization_factor
                  && dump_enabled_p ())
                /* For SLP, VF is set according to the unrolling factor, and
                   not to the vector size, hence for SLP this print is not
                   valid.  */
                dump_printf_loc (MSG_NOTE, vect_location,
                                 "multiple-types.\n");
            }

          /* SLP.  Schedule all the SLP instances when the first SLP stmt is
             reached.  */
          if (STMT_SLP_TYPE (stmt_info))
            {
              if (!slp_scheduled)
                {
                  slp_scheduled = true;

                  if (dump_enabled_p ())
                    dump_printf_loc (MSG_NOTE, vect_location,
                                     "=== scheduling SLP instances ===\n");

                  vect_schedule_slp (loop_vinfo, NULL);
                }

              /* Hybrid SLP stmts must be vectorized in addition to SLP.  */
              if (!vinfo_for_stmt (stmt) || PURE_SLP_STMT (stmt_info))
                {
                  if (!transform_pattern_stmt && gsi_end_p (pattern_def_si))
                    {
                      pattern_def_seq = NULL;
                      gsi_next (&si);
                    }
                  continue;
                }
            }

          /* -------- vectorize statement ------------ */
          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location, "transform statement.\n");

          grouped_store = false;
          is_store = vect_transform_stmt (stmt, &si, &grouped_store, NULL, NULL);
          if (is_store)
            {
              if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
                {
                  /* Interleaving.  If IS_STORE is TRUE, the vectorization of
                     the interleaving chain was completed - free all the
                     stores in the chain.  */
                  gsi_next (&si);
                  vect_remove_stores (GROUP_FIRST_ELEMENT (stmt_info));
                }
              else
                {
                  /* Free the attached stmt_vec_info and remove the stmt.  */
                  gimple store = gsi_stmt (si);
                  free_stmt_vec_info (store);
                  unlink_stmt_vdef (store);
                  gsi_remove (&si, true);
                  release_defs (store);
                }

              /* Stores can only appear at the end of pattern statements.  */
              gcc_assert (!transform_pattern_stmt);
              pattern_def_seq = NULL;
            }
          else if (!transform_pattern_stmt && gsi_end_p (pattern_def_si))
            {
              pattern_def_seq = NULL;
              gsi_next (&si);
            }
        }                       /* stmts in BB */
    }                           /* BBs in loop */

  slpeel_make_loop_iterate_ntimes (loop, ratio);

  /* Reduce loop iterations by the vectorization factor.  */
  scale_loop_profile (loop, GCOV_COMPUTE_SCALE (1, vectorization_factor),
                      expected_iterations / vectorization_factor);
  loop->nb_iterations_upper_bound
    = loop->nb_iterations_upper_bound.udiv (double_int::from_uhwi
                                              (vectorization_factor),
                                            FLOOR_DIV_EXPR);
  if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
      && loop->nb_iterations_upper_bound != double_int_zero)
    loop->nb_iterations_upper_bound
      = loop->nb_iterations_upper_bound - double_int_one;
  if (loop->any_estimate)
    {
      loop->nb_iterations_estimate
        = loop->nb_iterations_estimate.udiv (double_int::from_uhwi
                                               (vectorization_factor),
                                             FLOOR_DIV_EXPR);
      if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
          && loop->nb_iterations_estimate != double_int_zero)
        loop->nb_iterations_estimate
          = loop->nb_iterations_estimate - double_int_one;
    }
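
  /* E.g. (illustrative only): an upper bound of 103 scalar latch
     executions with VF = 4 becomes floor (103 / 4) = 25 for the vector
     loop, and one more iteration is retired into the epilog when
     peeling for gaps, leaving 24.  */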

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
                       "LOOP VECTORIZED\n");
      if (loop->inner)
        dump_printf_loc (MSG_NOTE, vect_location,
                         "OUTER LOOP VECTORIZED\n");
      dump_printf (MSG_NOTE, "\n");