[official-gcc.git] / gcc / tree-vect-loop.c
1 /* Loop Vectorization
2 Copyright (C) 2003-2016 Free Software Foundation, Inc.
3 Contributed by Dorit Naishlos <dorit@il.ibm.com> and
4 Ira Rosen <irar@il.ibm.com>
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "backend.h"
26 #include "target.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "gimple.h"
30 #include "cfghooks.h"
31 #include "tree-pass.h"
32 #include "ssa.h"
33 #include "optabs-tree.h"
34 #include "diagnostic-core.h"
35 #include "fold-const.h"
36 #include "stor-layout.h"
37 #include "cfganal.h"
38 #include "gimplify.h"
39 #include "gimple-iterator.h"
40 #include "gimplify-me.h"
41 #include "tree-ssa-loop-ivopts.h"
42 #include "tree-ssa-loop-manip.h"
43 #include "tree-ssa-loop-niter.h"
44 #include "cfgloop.h"
45 #include "params.h"
46 #include "tree-scalar-evolution.h"
47 #include "tree-vectorizer.h"
48 #include "gimple-fold.h"
49 #include "cgraph.h"
50 #include "tree-cfg.h"
52 /* Loop Vectorization Pass.
54 This pass tries to vectorize loops.
56 For example, the vectorizer transforms the following simple loop:
58 short a[N]; short b[N]; short c[N]; int i;
60 for (i=0; i<N; i++){
61 a[i] = b[i] + c[i];
64 as if it had been manually vectorized by rewriting the source code into:
66 typedef int __attribute__((mode(V8HI))) v8hi;
67 short a[N]; short b[N]; short c[N]; int i;
68 v8hi *pa = (v8hi*)a, *pb = (v8hi*)b, *pc = (v8hi*)c;
69 v8hi va, vb, vc;
71 for (i=0; i<N/8; i++){
72 vb = pb[i];
73 vc = pc[i];
74 va = vb + vc;
75 pa[i] = va;
78 The main entry to this pass is vectorize_loops(), in which
79 the vectorizer applies a set of analyses on a given set of loops,
80 followed by the actual vectorization transformation for the loops that
81 had successfully passed the analysis phase.
82 Throughout this pass we make a distinction between two types of
83 data: scalars (which are represented by SSA_NAMES), and memory references
84 ("data-refs"). These two types of data require different handling both
85 during analysis and transformation. The types of data-refs that the
86 vectorizer currently supports are ARRAY_REFs whose base is an array DECL
87 (not a pointer), and INDIRECT_REFs through pointers; both array and pointer
88 accesses are required to have a simple (consecutive) access pattern.
90 Analysis phase:
91 ===============
92 The driver for the analysis phase is vect_analyze_loop().
93 It applies a set of analyses, some of which rely on the scalar evolution
94 analyzer (scev) developed by Sebastian Pop.
96 During the analysis phase the vectorizer records some information
97 per stmt in a "stmt_vec_info" struct which is attached to each stmt in the
98 loop, as well as general information about the loop as a whole, which is
99 recorded in a "loop_vec_info" struct attached to each loop.
101 Transformation phase:
102 =====================
103 The loop transformation phase scans all the stmts in the loop, and
104 creates a vector stmt (or a sequence of stmts) for each scalar stmt S in
105 the loop that needs to be vectorized. It inserts the vector code sequence
106 just before the scalar stmt S, and records a pointer to the vector code
107 in STMT_VINFO_VEC_STMT (stmt_info) (stmt_info is the stmt_vec_info struct
108 attached to S). This pointer will be used for the vectorization of following
109 stmts which use the def of stmt S. Stmt S is removed if it writes to memory;
110 otherwise, we rely on dead code elimination for removing it.
112 For example, say stmt S1 was vectorized into stmt VS1:
114 VS1: vb = px[i];
115 S1: b = x[i]; STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
116 S2: a = b;
118 To vectorize stmt S2, the vectorizer first finds the stmt that defines
119 the operand 'b' (S1), and gets the relevant vector def 'vb' from the
120 vector stmt VS1 pointed to by STMT_VINFO_VEC_STMT (stmt_info (S1)). The
121 resulting sequence would be:
123 VS1: vb = px[i];
124 S1: b = x[i]; STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
125 VS2: va = vb;
126 S2: a = b; STMT_VINFO_VEC_STMT (stmt_info (S2)) = VS2
128 Operands that are not SSA_NAMEs are data-refs that appear in
129 load/store operations (like 'x[i]' in S1), and are handled differently.
131 Target modeling:
132 =================
133 Currently the only target specific information that is used is the
134 size of the vector (in bytes) - "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD".
135 Targets that can support different vector sizes will, for now, need
136 to specify one value for "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD". More
137 flexibility will be added in the future.
139 Since we only vectorize operations whose vector form can be
140 expressed using existing tree codes, to verify that an operation is
141 supported, the vectorizer checks the relevant optab at the relevant
142 machine_mode (e.g., optab_handler (add_optab, V8HImode)). If
143 the value found is CODE_FOR_nothing, then there's no target support, and
144 we can't vectorize the stmt.
146 For additional information on this project see:
147 http://gcc.gnu.org/projects/tree-ssa/vectorization.html
150 static void vect_estimate_min_profitable_iters (loop_vec_info, int *, int *);
152 /* Function vect_determine_vectorization_factor
154 Determine the vectorization factor (VF). VF is the number of data elements
155 that are operated upon in parallel in a single iteration of the vectorized
156 loop. For example, when vectorizing a loop that operates on 4-byte elements,
157 on a target with a vector size (VS) of 16 bytes, the VF is set to 4, since 4
158 elements can fit in a single vector register.
160 We currently support vectorization of loops in which all types operated upon
161 are of the same size. Therefore this function currently sets VF according to
162 the size of the types operated upon, and fails if there are multiple sizes
163 in the loop.
165 VF is also the factor by which the loop iterations are strip-mined, e.g.:
166 original loop:
167 for (i=0; i<N; i++){
168 a[i] = b[i] + c[i];
171 vectorized loop:
172 for (i=0; i<N; i+=VF){
173 a[i:VF] = b[i:VF] + c[i:VF];
177 static bool
178 vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
180 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
181 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
182 unsigned nbbs = loop->num_nodes;
183 unsigned int vectorization_factor = 0;
184 tree scalar_type;
185 gphi *phi;
186 tree vectype;
187 unsigned int nunits;
188 stmt_vec_info stmt_info;
189 unsigned i;
190 HOST_WIDE_INT dummy;
191 gimple *stmt, *pattern_stmt = NULL;
192 gimple_seq pattern_def_seq = NULL;
193 gimple_stmt_iterator pattern_def_si = gsi_none ();
194 bool analyze_pattern_stmt = false;
195 bool bool_result;
196 auto_vec<stmt_vec_info> mask_producers;
198 if (dump_enabled_p ())
199 dump_printf_loc (MSG_NOTE, vect_location,
200 "=== vect_determine_vectorization_factor ===\n");
202 for (i = 0; i < nbbs; i++)
204 basic_block bb = bbs[i];
206 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
207 gsi_next (&si))
209 phi = si.phi ();
210 stmt_info = vinfo_for_stmt (phi);
211 if (dump_enabled_p ())
213 dump_printf_loc (MSG_NOTE, vect_location, "==> examining phi: ");
214 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
217 gcc_assert (stmt_info);
219 if (STMT_VINFO_RELEVANT_P (stmt_info)
220 || STMT_VINFO_LIVE_P (stmt_info))
222 gcc_assert (!STMT_VINFO_VECTYPE (stmt_info));
223 scalar_type = TREE_TYPE (PHI_RESULT (phi));
225 if (dump_enabled_p ())
227 dump_printf_loc (MSG_NOTE, vect_location,
228 "get vectype for scalar type: ");
229 dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
230 dump_printf (MSG_NOTE, "\n");
233 vectype = get_vectype_for_scalar_type (scalar_type);
234 if (!vectype)
236 if (dump_enabled_p ())
238 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
239 "not vectorized: unsupported "
240 "data-type ");
241 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
242 scalar_type);
243 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
245 return false;
247 STMT_VINFO_VECTYPE (stmt_info) = vectype;
249 if (dump_enabled_p ())
251 dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
252 dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
253 dump_printf (MSG_NOTE, "\n");
256 nunits = TYPE_VECTOR_SUBPARTS (vectype);
257 if (dump_enabled_p ())
258 dump_printf_loc (MSG_NOTE, vect_location, "nunits = %d\n",
259 nunits);
261 if (!vectorization_factor
262 || (nunits > vectorization_factor))
263 vectorization_factor = nunits;
267 for (gimple_stmt_iterator si = gsi_start_bb (bb);
268 !gsi_end_p (si) || analyze_pattern_stmt;)
270 tree vf_vectype;
272 if (analyze_pattern_stmt)
273 stmt = pattern_stmt;
274 else
275 stmt = gsi_stmt (si);
277 stmt_info = vinfo_for_stmt (stmt);
279 if (dump_enabled_p ())
281 dump_printf_loc (MSG_NOTE, vect_location,
282 "==> examining statement: ");
283 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
286 gcc_assert (stmt_info);
288 /* Skip stmts which do not need to be vectorized. */
289 if ((!STMT_VINFO_RELEVANT_P (stmt_info)
290 && !STMT_VINFO_LIVE_P (stmt_info))
291 || gimple_clobber_p (stmt))
293 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
294 && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info))
295 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
296 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
298 stmt = pattern_stmt;
299 stmt_info = vinfo_for_stmt (pattern_stmt);
300 if (dump_enabled_p ())
302 dump_printf_loc (MSG_NOTE, vect_location,
303 "==> examining pattern statement: ");
304 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
307 else
309 if (dump_enabled_p ())
310 dump_printf_loc (MSG_NOTE, vect_location, "skip.\n");
311 gsi_next (&si);
312 continue;
315 else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
316 && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info))
317 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
318 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
319 analyze_pattern_stmt = true;
321 /* If a pattern statement has def stmts, analyze them too. */
322 if (is_pattern_stmt_p (stmt_info))
324 if (pattern_def_seq == NULL)
326 pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
327 pattern_def_si = gsi_start (pattern_def_seq);
329 else if (!gsi_end_p (pattern_def_si))
330 gsi_next (&pattern_def_si);
331 if (pattern_def_seq != NULL)
333 gimple *pattern_def_stmt = NULL;
334 stmt_vec_info pattern_def_stmt_info = NULL;
336 while (!gsi_end_p (pattern_def_si))
338 pattern_def_stmt = gsi_stmt (pattern_def_si);
339 pattern_def_stmt_info
340 = vinfo_for_stmt (pattern_def_stmt);
341 if (STMT_VINFO_RELEVANT_P (pattern_def_stmt_info)
342 || STMT_VINFO_LIVE_P (pattern_def_stmt_info))
343 break;
344 gsi_next (&pattern_def_si);
347 if (!gsi_end_p (pattern_def_si))
349 if (dump_enabled_p ())
351 dump_printf_loc (MSG_NOTE, vect_location,
352 "==> examining pattern def stmt: ");
353 dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
354 pattern_def_stmt, 0);
357 stmt = pattern_def_stmt;
358 stmt_info = pattern_def_stmt_info;
360 else
362 pattern_def_si = gsi_none ();
363 analyze_pattern_stmt = false;
366 else
367 analyze_pattern_stmt = false;
370 if (gimple_get_lhs (stmt) == NULL_TREE
371 /* MASK_STORE has no lhs, but is ok. */
372 && (!is_gimple_call (stmt)
373 || !gimple_call_internal_p (stmt)
374 || gimple_call_internal_fn (stmt) != IFN_MASK_STORE))
376 if (is_gimple_call (stmt))
378 /* Ignore calls with no lhs. These must be calls to
379 #pragma omp simd functions, and the vectorization factor
380 they really need can't be determined until
381 vectorizable_simd_clone_call. */
381 vectorizable_simd_clone_call. */
382 if (!analyze_pattern_stmt && gsi_end_p (pattern_def_si))
384 pattern_def_seq = NULL;
385 gsi_next (&si);
387 continue;
389 if (dump_enabled_p ())
391 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
392 "not vectorized: irregular stmt.");
393 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt,
396 return false;
399 if (VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))))
401 if (dump_enabled_p ())
403 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
404 "not vectorized: vector stmt in loop:");
405 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
407 return false;
410 bool_result = false;
412 if (STMT_VINFO_VECTYPE (stmt_info))
414 /* The only cases in which a vectype has already been set are stmts
415 that contain a data reference, or "pattern stmts" (stmts
416 generated by the vectorizer to represent/replace a certain
417 idiom). */
418 gcc_assert (STMT_VINFO_DATA_REF (stmt_info)
419 || is_pattern_stmt_p (stmt_info)
420 || !gsi_end_p (pattern_def_si));
421 vectype = STMT_VINFO_VECTYPE (stmt_info);
423 else
425 gcc_assert (!STMT_VINFO_DATA_REF (stmt_info));
426 if (is_gimple_call (stmt)
427 && gimple_call_internal_p (stmt)
428 && gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
429 scalar_type = TREE_TYPE (gimple_call_arg (stmt, 3));
430 else
431 scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
433 /* Bool ops don't participate in the vectorization factor
434 computation. For comparisons, use the compared types to
435 compute a factor. */
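/* Illustrative example (added for clarity, not part of the original source):
   for a comparison such as

     flag_1 = a[i_2] < b[i_3];

   where a and b are arrays of int, the boolean result does not drive the
   vectorization factor; the compared type (int) does, and the statement is
   queued in mask_producers so that a matching mask vectype can be chosen
   once the factor is known. */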
436 if (TREE_CODE (scalar_type) == BOOLEAN_TYPE
437 && is_gimple_assign (stmt)
438 && gimple_assign_rhs_code (stmt) != COND_EXPR)
440 if (STMT_VINFO_RELEVANT_P (stmt_info)
441 || STMT_VINFO_LIVE_P (stmt_info))
442 mask_producers.safe_push (stmt_info);
443 bool_result = true;
445 if (gimple_code (stmt) == GIMPLE_ASSIGN
446 && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt))
447 == tcc_comparison
448 && TREE_CODE (TREE_TYPE (gimple_assign_rhs1 (stmt)))
449 != BOOLEAN_TYPE)
450 scalar_type = TREE_TYPE (gimple_assign_rhs1 (stmt));
451 else
453 if (!analyze_pattern_stmt && gsi_end_p (pattern_def_si))
455 pattern_def_seq = NULL;
456 gsi_next (&si);
458 continue;
462 if (dump_enabled_p ())
464 dump_printf_loc (MSG_NOTE, vect_location,
465 "get vectype for scalar type: ");
466 dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
467 dump_printf (MSG_NOTE, "\n");
469 vectype = get_vectype_for_scalar_type (scalar_type);
470 if (!vectype)
472 if (dump_enabled_p ())
474 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
475 "not vectorized: unsupported "
476 "data-type ");
477 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
478 scalar_type);
479 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
481 return false;
484 if (!bool_result)
485 STMT_VINFO_VECTYPE (stmt_info) = vectype;
487 if (dump_enabled_p ())
489 dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
490 dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
491 dump_printf (MSG_NOTE, "\n");
495 /* Don't try to compute the VF out of scalar types if the stmt
496 produces a boolean vector. Use the result vectype instead. */
497 if (VECTOR_BOOLEAN_TYPE_P (vectype))
498 vf_vectype = vectype;
499 else
501 /* The vectorization factor is according to the smallest
502 scalar type (or the largest vector size, but we only
503 support one vector size per loop). */
504 if (!bool_result)
505 scalar_type = vect_get_smallest_scalar_type (stmt, &dummy,
506 &dummy);
507 if (dump_enabled_p ())
509 dump_printf_loc (MSG_NOTE, vect_location,
510 "get vectype for scalar type: ");
511 dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
512 dump_printf (MSG_NOTE, "\n");
514 vf_vectype = get_vectype_for_scalar_type (scalar_type);
516 if (!vf_vectype)
518 if (dump_enabled_p ())
520 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
521 "not vectorized: unsupported data-type ");
522 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
523 scalar_type);
524 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
526 return false;
529 if ((GET_MODE_SIZE (TYPE_MODE (vectype))
530 != GET_MODE_SIZE (TYPE_MODE (vf_vectype))))
532 if (dump_enabled_p ())
534 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
535 "not vectorized: different sized vector "
536 "types in statement, ");
537 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
538 vectype);
539 dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
540 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
541 vf_vectype);
542 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
544 return false;
547 if (dump_enabled_p ())
549 dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
550 dump_generic_expr (MSG_NOTE, TDF_SLIM, vf_vectype);
551 dump_printf (MSG_NOTE, "\n");
554 nunits = TYPE_VECTOR_SUBPARTS (vf_vectype);
555 if (dump_enabled_p ())
556 dump_printf_loc (MSG_NOTE, vect_location, "nunits = %d\n", nunits);
557 if (!vectorization_factor
558 || (nunits > vectorization_factor))
559 vectorization_factor = nunits;
561 if (!analyze_pattern_stmt && gsi_end_p (pattern_def_si))
563 pattern_def_seq = NULL;
564 gsi_next (&si);
569 /* TODO: Analyze cost. Decide if it is worthwhile to vectorize. */
570 if (dump_enabled_p ())
571 dump_printf_loc (MSG_NOTE, vect_location, "vectorization factor = %d\n",
572 vectorization_factor);
573 if (vectorization_factor <= 1)
575 if (dump_enabled_p ())
576 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
577 "not vectorized: unsupported data-type\n");
578 return false;
580 LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;
582 for (i = 0; i < mask_producers.length (); i++)
584 tree mask_type = NULL;
586 stmt = STMT_VINFO_STMT (mask_producers[i]);
588 if (gimple_code (stmt) == GIMPLE_ASSIGN
589 && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison
590 && TREE_CODE (TREE_TYPE (gimple_assign_rhs1 (stmt))) != BOOLEAN_TYPE)
592 scalar_type = TREE_TYPE (gimple_assign_rhs1 (stmt));
593 mask_type = get_mask_type_for_scalar_type (scalar_type);
595 if (!mask_type)
597 if (dump_enabled_p ())
598 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
599 "not vectorized: unsupported mask\n");
600 return false;
603 else
605 tree rhs;
606 ssa_op_iter iter;
607 gimple *def_stmt;
608 enum vect_def_type dt;
610 FOR_EACH_SSA_TREE_OPERAND (rhs, stmt, iter, SSA_OP_USE)
612 if (!vect_is_simple_use (rhs, mask_producers[i]->vinfo,
613 &def_stmt, &dt, &vectype))
615 if (dump_enabled_p ())
617 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
618 "not vectorized: can't compute mask type "
619 "for statement, ");
620 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt,
623 return false;
626 /* No vectype probably means an external definition.
627 Allow it in case there is another operand which
628 allows us to determine the mask type. */
629 if (!vectype)
630 continue;
632 if (!mask_type)
633 mask_type = vectype;
634 else if (TYPE_VECTOR_SUBPARTS (mask_type)
635 != TYPE_VECTOR_SUBPARTS (vectype))
637 if (dump_enabled_p ())
639 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
640 "not vectorized: different sized mask "
641 "types in statement, ");
642 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
643 mask_type);
644 dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
645 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
646 vectype);
647 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
649 return false;
651 else if (VECTOR_BOOLEAN_TYPE_P (mask_type)
652 != VECTOR_BOOLEAN_TYPE_P (vectype))
654 if (dump_enabled_p ())
656 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
657 "not vectorized: mixed mask and "
658 "nonmask vector types in statement, ");
659 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
660 mask_type);
661 dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
662 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
663 vectype);
664 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
666 return false;
670 /* We may compare a boolean value loaded as a vector of integers.
671 Fix mask_type in such a case. */
672 if (mask_type
673 && !VECTOR_BOOLEAN_TYPE_P (mask_type)
674 && gimple_code (stmt) == GIMPLE_ASSIGN
675 && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
676 mask_type = build_same_sized_truth_vector_type (mask_type);
679 /* No mask_type should mean a loop-invariant predicate.
680 This is probably a subject for optimization in
681 if-conversion. */
682 if (!mask_type)
684 if (dump_enabled_p ())
686 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
687 "not vectorized: can't compute mask type "
688 "for statement, ");
689 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt,
692 return false;
695 STMT_VINFO_VECTYPE (mask_producers[i]) = mask_type;
698 return true;
702 /* Function vect_is_simple_iv_evolution.
704 FORNOW: A simple evolution of an induction variable in the loop is
705 considered a polynomial evolution. */
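/* Illustrative example (added for clarity; {base, +, step}_k is the scalar
   evolution analyzer's chrec notation): for

     for (i = 0; i < n; i++)
       a[i] = 0;

   the evolution of i in this loop is {0, +, 1}_k, where k is the loop
   number; initial_condition_in_loop_num returns the base 0 and
   evolution_part_in_loop_num returns the step 1, which is the kind of
   evolution this function accepts as "simple". */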
707 static bool
708 vect_is_simple_iv_evolution (unsigned loop_nb, tree access_fn, tree * init,
709 tree * step)
711 tree init_expr;
712 tree step_expr;
713 tree evolution_part = evolution_part_in_loop_num (access_fn, loop_nb);
714 basic_block bb;
716 /* When there is no evolution in this loop, the evolution function
717 is not "simple". */
718 if (evolution_part == NULL_TREE)
719 return false;
721 /* When the evolution is a polynomial of degree >= 2
722 the evolution function is not "simple". */
723 if (tree_is_chrec (evolution_part))
724 return false;
726 step_expr = evolution_part;
727 init_expr = unshare_expr (initial_condition_in_loop_num (access_fn, loop_nb));
729 if (dump_enabled_p ())
731 dump_printf_loc (MSG_NOTE, vect_location, "step: ");
732 dump_generic_expr (MSG_NOTE, TDF_SLIM, step_expr);
733 dump_printf (MSG_NOTE, ", init: ");
734 dump_generic_expr (MSG_NOTE, TDF_SLIM, init_expr);
735 dump_printf (MSG_NOTE, "\n");
738 *init = init_expr;
739 *step = step_expr;
741 if (TREE_CODE (step_expr) != INTEGER_CST
742 && (TREE_CODE (step_expr) != SSA_NAME
743 || ((bb = gimple_bb (SSA_NAME_DEF_STMT (step_expr)))
744 && flow_bb_inside_loop_p (get_loop (cfun, loop_nb), bb))
745 || (!INTEGRAL_TYPE_P (TREE_TYPE (step_expr))
746 && (!SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr))
747 || !flag_associative_math)))
748 && (TREE_CODE (step_expr) != REAL_CST
749 || !flag_associative_math))
751 if (dump_enabled_p ())
752 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
753 "step unknown.\n");
754 return false;
757 return true;
760 /* Function vect_analyze_scalar_cycles_1.
762 Examine the cross iteration def-use cycles of scalar variables
763 in LOOP. LOOP_VINFO represents the loop that is now being
764 considered for vectorization (can be LOOP, or an outer-loop
765 enclosing LOOP). */
767 static void
768 vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
770 basic_block bb = loop->header;
771 tree init, step;
772 auto_vec<gimple *, 64> worklist;
773 gphi_iterator gsi;
774 bool double_reduc;
776 if (dump_enabled_p ())
777 dump_printf_loc (MSG_NOTE, vect_location,
778 "=== vect_analyze_scalar_cycles ===\n");
780 /* First - identify all inductions. Reduction detection assumes that all the
781 inductions have been identified, therefore, this order must not be
782 changed. */
783 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
785 gphi *phi = gsi.phi ();
786 tree access_fn = NULL;
787 tree def = PHI_RESULT (phi);
788 stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);
790 if (dump_enabled_p ())
792 dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: ");
793 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
796 /* Skip virtual phi's. The data dependences that are associated with
797 virtual defs/uses (i.e., memory accesses) are analyzed elsewhere. */
798 if (virtual_operand_p (def))
799 continue;
801 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_unknown_def_type;
803 /* Analyze the evolution function. */
804 access_fn = analyze_scalar_evolution (loop, def);
805 if (access_fn)
807 STRIP_NOPS (access_fn);
808 if (dump_enabled_p ())
810 dump_printf_loc (MSG_NOTE, vect_location,
811 "Access function of PHI: ");
812 dump_generic_expr (MSG_NOTE, TDF_SLIM, access_fn);
813 dump_printf (MSG_NOTE, "\n");
815 STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo)
816 = initial_condition_in_loop_num (access_fn, loop->num);
817 STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo)
818 = evolution_part_in_loop_num (access_fn, loop->num);
821 if (!access_fn
822 || !vect_is_simple_iv_evolution (loop->num, access_fn, &init, &step)
823 || (LOOP_VINFO_LOOP (loop_vinfo) != loop
824 && TREE_CODE (step) != INTEGER_CST))
826 worklist.safe_push (phi);
827 continue;
830 gcc_assert (STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo)
831 != NULL_TREE);
832 gcc_assert (STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo) != NULL_TREE);
834 if (dump_enabled_p ())
835 dump_printf_loc (MSG_NOTE, vect_location, "Detected induction.\n");
836 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_induction_def;
840 /* Second - identify all reductions and nested cycles. */
841 while (worklist.length () > 0)
843 gimple *phi = worklist.pop ();
844 tree def = PHI_RESULT (phi);
845 stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);
846 gimple *reduc_stmt;
847 bool nested_cycle;
849 if (dump_enabled_p ())
851 dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: ");
852 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
855 gcc_assert (!virtual_operand_p (def)
856 && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_unknown_def_type);
858 nested_cycle = (loop != LOOP_VINFO_LOOP (loop_vinfo));
859 reduc_stmt = vect_force_simple_reduction (loop_vinfo, phi, !nested_cycle,
860 &double_reduc, false);
861 if (reduc_stmt)
863 if (double_reduc)
865 if (dump_enabled_p ())
866 dump_printf_loc (MSG_NOTE, vect_location,
867 "Detected double reduction.\n");
869 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_double_reduction_def;
870 STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
871 vect_double_reduction_def;
873 else
875 if (nested_cycle)
877 if (dump_enabled_p ())
878 dump_printf_loc (MSG_NOTE, vect_location,
879 "Detected vectorizable nested cycle.\n");
881 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_nested_cycle;
882 STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
883 vect_nested_cycle;
885 else
887 if (dump_enabled_p ())
888 dump_printf_loc (MSG_NOTE, vect_location,
889 "Detected reduction.\n");
891 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_reduction_def;
892 STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
893 vect_reduction_def;
894 /* Store the reduction cycles for possible vectorization in
895 loop-aware SLP. */
896 LOOP_VINFO_REDUCTIONS (loop_vinfo).safe_push (reduc_stmt);
900 else
901 if (dump_enabled_p ())
902 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
903 "Unknown def-use cycle pattern.\n");
908 /* Function vect_analyze_scalar_cycles.
910 Examine the cross iteration def-use cycles of scalar variables, by
911 analyzing the loop-header PHIs of scalar variables. Classify each
912 cycle as one of the following: invariant, induction, reduction, unknown.
913 We do that for the loop represented by LOOP_VINFO, and also for its
914 inner-loop, if it exists.
915 Examples for scalar cycles:
917 Example1: reduction:
919 loop1:
920 for (i=0; i<N; i++)
921 sum += a[i];
923 Example2: induction:
925 loop2:
926 for (i=0; i<N; i++)
927 a[i] = i; */
929 static void
930 vect_analyze_scalar_cycles (loop_vec_info loop_vinfo)
932 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
934 vect_analyze_scalar_cycles_1 (loop_vinfo, loop);
936 /* When vectorizing an outer-loop, the inner-loop is executed sequentially.
937 Reductions in such an inner-loop therefore have different properties than
938 the reductions in the nest that gets vectorized:
939 1. When vectorized, they are executed in the same order as in the original
940 scalar loop, so we can't change the order of computation when
941 vectorizing them.
942 2. FIXME: Inner-loop reductions can be used in the inner-loop, so the
943 current checks are too strict. */
945 if (loop->inner)
946 vect_analyze_scalar_cycles_1 (loop_vinfo, loop->inner);
949 /* Transfer group and reduction information from STMT to its pattern stmt. */
951 static void
952 vect_fixup_reduc_chain (gimple *stmt)
954 gimple *firstp = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt));
955 gimple *stmtp;
956 gcc_assert (!GROUP_FIRST_ELEMENT (vinfo_for_stmt (firstp))
957 && GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)));
958 GROUP_SIZE (vinfo_for_stmt (firstp)) = GROUP_SIZE (vinfo_for_stmt (stmt));
961 stmtp = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt));
962 GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmtp)) = firstp;
963 stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt));
964 if (stmt)
965 GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmtp))
966 = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt));
968 while (stmt);
969 STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmtp)) = vect_reduction_def;
972 /* Fixup scalar cycles that now have their stmts detected as patterns. */
974 static void
975 vect_fixup_scalar_cycles_with_patterns (loop_vec_info loop_vinfo)
977 gimple *first;
978 unsigned i;
980 FOR_EACH_VEC_ELT (LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo), i, first)
981 if (STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (first)))
983 gimple *next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (first));
984 while (next)
986 if (! STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (next)))
987 break;
988 next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
990 /* If not all stmts in the chain are patterns, try to handle
991 the chain without patterns. */
992 if (! next)
994 vect_fixup_reduc_chain (first);
995 LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo)[i]
996 = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (first));
1001 /* Function vect_get_loop_niters.
1003 Determine how many iterations the loop executes and place it
1004 in NUMBER_OF_ITERATIONS. Place the number of latch iterations
1005 in NUMBER_OF_ITERATIONSM1.
1007 Return the loop exit condition. */
1010 static gcond *
1011 vect_get_loop_niters (struct loop *loop, tree *number_of_iterations,
1012 tree *number_of_iterationsm1)
1014 tree niters;
1016 if (dump_enabled_p ())
1017 dump_printf_loc (MSG_NOTE, vect_location,
1018 "=== get_loop_niters ===\n");
1020 niters = number_of_latch_executions (loop);
1021 *number_of_iterationsm1 = niters;
1023 /* We want the number of loop header executions which is the number
1024 of latch executions plus one.
1025 ??? For UINT_MAX latch executions this number overflows to zero
1026 for loops like do { n++; } while (n != 0); */
1027 if (niters && !chrec_contains_undetermined (niters))
1028 niters = fold_build2 (PLUS_EXPR, TREE_TYPE (niters), unshare_expr (niters),
1029 build_int_cst (TREE_TYPE (niters), 1));
1030 *number_of_iterations = niters;
1032 return get_loop_exit_condition (loop);
1036 /* Function bb_in_loop_p
1038 Used as predicate for dfs order traversal of the loop bbs. */
1040 static bool
1041 bb_in_loop_p (const_basic_block bb, const void *data)
1043 const struct loop *const loop = (const struct loop *)data;
1044 if (flow_bb_inside_loop_p (loop, bb))
1045 return true;
1046 return false;
1050 /* Function new_loop_vec_info.
1052 Create and initialize a new loop_vec_info struct for LOOP, as well as
1053 stmt_vec_info structs for all the stmts in LOOP. */
1055 static loop_vec_info
1056 new_loop_vec_info (struct loop *loop)
1058 loop_vec_info res;
1059 basic_block *bbs;
1060 gimple_stmt_iterator si;
1061 unsigned int i, nbbs;
1063 res = (loop_vec_info) xcalloc (1, sizeof (struct _loop_vec_info));
1064 res->kind = vec_info::loop;
1065 LOOP_VINFO_LOOP (res) = loop;
1067 bbs = get_loop_body (loop);
1069 /* Create/Update stmt_info for all stmts in the loop. */
1070 for (i = 0; i < loop->num_nodes; i++)
1072 basic_block bb = bbs[i];
1074 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
1076 gimple *phi = gsi_stmt (si);
1077 gimple_set_uid (phi, 0);
1078 set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, res));
1081 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
1083 gimple *stmt = gsi_stmt (si);
1084 gimple_set_uid (stmt, 0);
1085 set_vinfo_for_stmt (stmt, new_stmt_vec_info (stmt, res));
1089 /* CHECKME: We want to visit all BBs before their successors (except for
1090 latch blocks, for which this assertion wouldn't hold). In the simple
1091 case of the loop forms we allow, a dfs order of the BBs would be the same
1092 as a reverse postorder traversal, so we are safe. */
1094 free (bbs);
1095 bbs = XCNEWVEC (basic_block, loop->num_nodes);
1096 nbbs = dfs_enumerate_from (loop->header, 0, bb_in_loop_p,
1097 bbs, loop->num_nodes, loop);
1098 gcc_assert (nbbs == loop->num_nodes);
1100 LOOP_VINFO_BBS (res) = bbs;
1101 LOOP_VINFO_NITERSM1 (res) = NULL;
1102 LOOP_VINFO_NITERS (res) = NULL;
1103 LOOP_VINFO_NITERS_UNCHANGED (res) = NULL;
1104 LOOP_VINFO_COST_MODEL_THRESHOLD (res) = 0;
1105 LOOP_VINFO_VECTORIZABLE_P (res) = 0;
1106 LOOP_VINFO_PEELING_FOR_ALIGNMENT (res) = 0;
1107 LOOP_VINFO_VECT_FACTOR (res) = 0;
1108 LOOP_VINFO_LOOP_NEST (res) = vNULL;
1109 LOOP_VINFO_DATAREFS (res) = vNULL;
1110 LOOP_VINFO_DDRS (res) = vNULL;
1111 LOOP_VINFO_UNALIGNED_DR (res) = NULL;
1112 LOOP_VINFO_MAY_MISALIGN_STMTS (res) = vNULL;
1113 LOOP_VINFO_MAY_ALIAS_DDRS (res) = vNULL;
1114 LOOP_VINFO_GROUPED_STORES (res) = vNULL;
1115 LOOP_VINFO_REDUCTIONS (res) = vNULL;
1116 LOOP_VINFO_REDUCTION_CHAINS (res) = vNULL;
1117 LOOP_VINFO_SLP_INSTANCES (res) = vNULL;
1118 LOOP_VINFO_SLP_UNROLLING_FACTOR (res) = 1;
1119 LOOP_VINFO_TARGET_COST_DATA (res) = init_cost (loop);
1120 LOOP_VINFO_PEELING_FOR_GAPS (res) = false;
1121 LOOP_VINFO_PEELING_FOR_NITER (res) = false;
1122 LOOP_VINFO_OPERANDS_SWAPPED (res) = false;
1124 return res;
1128 /* Function destroy_loop_vec_info.
1130 Free LOOP_VINFO struct, as well as all the stmt_vec_info structs of all the
1131 stmts in the loop. */
1133 void
1134 destroy_loop_vec_info (loop_vec_info loop_vinfo, bool clean_stmts)
1136 struct loop *loop;
1137 basic_block *bbs;
1138 int nbbs;
1139 gimple_stmt_iterator si;
1140 int j;
1141 vec<slp_instance> slp_instances;
1142 slp_instance instance;
1143 bool swapped;
1145 if (!loop_vinfo)
1146 return;
1148 loop = LOOP_VINFO_LOOP (loop_vinfo);
1150 bbs = LOOP_VINFO_BBS (loop_vinfo);
1151 nbbs = clean_stmts ? loop->num_nodes : 0;
1152 swapped = LOOP_VINFO_OPERANDS_SWAPPED (loop_vinfo);
1154 for (j = 0; j < nbbs; j++)
1156 basic_block bb = bbs[j];
1157 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
1158 free_stmt_vec_info (gsi_stmt (si));
1160 for (si = gsi_start_bb (bb); !gsi_end_p (si); )
1162 gimple *stmt = gsi_stmt (si);
1164 /* We may have broken canonical form by moving a constant
1165 into RHS1 of a commutative op. Fix such occurrences. */
1166 if (swapped && is_gimple_assign (stmt))
1168 enum tree_code code = gimple_assign_rhs_code (stmt);
1170 if ((code == PLUS_EXPR
1171 || code == POINTER_PLUS_EXPR
1172 || code == MULT_EXPR)
1173 && CONSTANT_CLASS_P (gimple_assign_rhs1 (stmt)))
1174 swap_ssa_operands (stmt,
1175 gimple_assign_rhs1_ptr (stmt),
1176 gimple_assign_rhs2_ptr (stmt));
1179 /* Free stmt_vec_info. */
1180 free_stmt_vec_info (stmt);
1181 gsi_next (&si);
1185 free (LOOP_VINFO_BBS (loop_vinfo));
1186 vect_destroy_datarefs (loop_vinfo);
1187 free_dependence_relations (LOOP_VINFO_DDRS (loop_vinfo));
1188 LOOP_VINFO_LOOP_NEST (loop_vinfo).release ();
1189 LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).release ();
1190 LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo).release ();
1191 LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo).release ();
1192 slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
1193 FOR_EACH_VEC_ELT (slp_instances, j, instance)
1194 vect_free_slp_instance (instance);
1196 LOOP_VINFO_SLP_INSTANCES (loop_vinfo).release ();
1197 LOOP_VINFO_GROUPED_STORES (loop_vinfo).release ();
1198 LOOP_VINFO_REDUCTIONS (loop_vinfo).release ();
1199 LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo).release ();
1201 destroy_cost_data (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo));
1202 loop_vinfo->scalar_cost_vec.release ();
1204 free (loop_vinfo);
1205 loop->aux = NULL;
1209 /* Calculate the cost of one scalar iteration of the loop. */
1210 static void
1211 vect_compute_single_scalar_iteration_cost (loop_vec_info loop_vinfo)
1213 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1214 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
1215 int nbbs = loop->num_nodes, factor, scalar_single_iter_cost = 0;
1216 int innerloop_iters, i;
1218 /* Count statements in scalar loop. Using this as scalar cost for a single
1219 iteration for now.
1221 TODO: Add outer loop support.
1223 TODO: Consider assigning different costs to different scalar
1224 statements. */
1226 /* FORNOW. */
1227 innerloop_iters = 1;
1228 if (loop->inner)
1229 innerloop_iters = 50; /* FIXME */
1231 for (i = 0; i < nbbs; i++)
1233 gimple_stmt_iterator si;
1234 basic_block bb = bbs[i];
1236 if (bb->loop_father == loop->inner)
1237 factor = innerloop_iters;
1238 else
1239 factor = 1;
1241 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
1243 gimple *stmt = gsi_stmt (si);
1244 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1246 if (!is_gimple_assign (stmt) && !is_gimple_call (stmt))
1247 continue;
1249 /* Skip stmts that are not vectorized inside the loop. */
1250 if (stmt_info
1251 && !STMT_VINFO_RELEVANT_P (stmt_info)
1252 && (!STMT_VINFO_LIVE_P (stmt_info)
1253 || !VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
1254 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
1255 continue;
1257 vect_cost_for_stmt kind;
1258 if (STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt)))
1260 if (DR_IS_READ (STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt))))
1261 kind = scalar_load;
1262 else
1263 kind = scalar_store;
1265 else
1266 kind = scalar_stmt;
1268 scalar_single_iter_cost
1269 += record_stmt_cost (&LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
1270 factor, kind, NULL, 0, vect_prologue);
1273 LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST (loop_vinfo)
1274 = scalar_single_iter_cost;
1278 /* Function vect_analyze_loop_form_1.
1280 Verify that certain CFG restrictions hold, including:
1281 - the loop has a pre-header
1282 - the loop has a single entry and exit
1283 - the loop exit condition is simple enough, and the number of iterations
1284 can be analyzed (a countable loop). */
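/* Illustrative example (added for clarity, not part of the original source):
   a counted loop such as

     for (i = 0; i < n; i++)
       a[i] = b[i];

   has a single exit whose iteration count the niter analysis can express
   (n iterations), so it passes these checks; a search loop like

     while (*p != 0)
       p++;

   exits on a data-dependent condition, its iteration count cannot be
   computed, and it is rejected as not countable. */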
1286 bool
1287 vect_analyze_loop_form_1 (struct loop *loop, gcond **loop_cond,
1288 tree *number_of_iterationsm1,
1289 tree *number_of_iterations, gcond **inner_loop_cond)
1291 if (dump_enabled_p ())
1292 dump_printf_loc (MSG_NOTE, vect_location,
1293 "=== vect_analyze_loop_form ===\n");
1295 /* Different restrictions apply when we are considering an inner-most loop,
1296 vs. an outer (nested) loop.
1297 (FORNOW. May want to relax some of these restrictions in the future). */
1299 if (!loop->inner)
1301 /* Inner-most loop. We currently require that the number of BBs is
1302 exactly 2 (the header and latch). Vectorizable inner-most loops
1303 look like this:
1305 (pre-header)
1307 header <--------+
1308 | | |
1309 | +--> latch --+
1311 (exit-bb) */
1313 if (loop->num_nodes != 2)
1315 if (dump_enabled_p ())
1316 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1317 "not vectorized: control flow in loop.\n");
1318 return false;
1321 if (empty_block_p (loop->header))
1323 if (dump_enabled_p ())
1324 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1325 "not vectorized: empty loop.\n");
1326 return false;
1329 else
1331 struct loop *innerloop = loop->inner;
1332 edge entryedge;
1334 /* Nested loop. We currently require that the loop is doubly-nested,
1335 contains a single inner loop, and the number of BBs is exactly 5.
1336 Vectorizable outer-loops look like this:
1338 (pre-header)
1340 header <---+
1342 inner-loop |
1344 tail ------+
1346 (exit-bb)
1348 The inner-loop has the properties expected of inner-most loops
1349 as described above. */
1351 if ((loop->inner)->inner || (loop->inner)->next)
1353 if (dump_enabled_p ())
1354 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1355 "not vectorized: multiple nested loops.\n");
1356 return false;
1359 if (loop->num_nodes != 5)
1361 if (dump_enabled_p ())
1362 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1363 "not vectorized: control flow in loop.\n");
1364 return false;
1367 entryedge = loop_preheader_edge (innerloop);
1368 if (entryedge->src != loop->header
1369 || !single_exit (innerloop)
1370 || single_exit (innerloop)->dest != EDGE_PRED (loop->latch, 0)->src)
1372 if (dump_enabled_p ())
1373 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1374 "not vectorized: unsupported outerloop form.\n");
1375 return false;
1378 /* Analyze the inner-loop. */
1379 tree inner_niterm1, inner_niter;
1380 if (! vect_analyze_loop_form_1 (loop->inner, inner_loop_cond,
1381 &inner_niterm1, &inner_niter, NULL))
1383 if (dump_enabled_p ())
1384 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1385 "not vectorized: Bad inner loop.\n");
1386 return false;
1389 if (!expr_invariant_in_loop_p (loop, inner_niter))
1391 if (dump_enabled_p ())
1392 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1393 "not vectorized: inner-loop count not"
1394 " invariant.\n");
1395 return false;
1398 if (dump_enabled_p ())
1399 dump_printf_loc (MSG_NOTE, vect_location,
1400 "Considering outer-loop vectorization.\n");
1403 if (!single_exit (loop)
1404 || EDGE_COUNT (loop->header->preds) != 2)
1406 if (dump_enabled_p ())
1408 if (!single_exit (loop))
1409 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1410 "not vectorized: multiple exits.\n");
1411 else if (EDGE_COUNT (loop->header->preds) != 2)
1412 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1413 "not vectorized: too many incoming edges.\n");
1415 return false;
1418 /* We assume that the loop exit condition is at the end of the loop, i.e.,
1419 that the loop is represented as a do-while (with a proper if-guard
1420 before the loop if needed), where the loop header contains all the
1421 executable statements, and the latch is empty. */
1422 if (!empty_block_p (loop->latch)
1423 || !gimple_seq_empty_p (phi_nodes (loop->latch)))
1425 if (dump_enabled_p ())
1426 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1427 "not vectorized: latch block not empty.\n");
1428 return false;
1431 /* Make sure there exists a single-predecessor exit bb: */
1432 if (!single_pred_p (single_exit (loop)->dest))
1434 edge e = single_exit (loop);
1435 if (!(e->flags & EDGE_ABNORMAL))
1437 split_loop_exit_edge (e);
1438 if (dump_enabled_p ())
1439 dump_printf (MSG_NOTE, "split exit edge.\n");
1441 else
1443 if (dump_enabled_p ())
1444 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1445 "not vectorized: abnormal loop exit edge.\n");
1446 return false;
1450 *loop_cond = vect_get_loop_niters (loop, number_of_iterations,
1451 number_of_iterationsm1);
1452 if (!*loop_cond)
1454 if (dump_enabled_p ())
1455 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1456 "not vectorized: complicated exit condition.\n");
1457 return false;
1460 if (!*number_of_iterations
1461 || chrec_contains_undetermined (*number_of_iterations))
1463 if (dump_enabled_p ())
1464 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1465 "not vectorized: number of iterations cannot be "
1466 "computed.\n");
1467 return false;
1470 if (integer_zerop (*number_of_iterations))
1472 if (dump_enabled_p ())
1473 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1474 "not vectorized: number of iterations = 0.\n");
1475 return false;
1478 return true;
1481 /* Analyze LOOP form and return a loop_vec_info if it is of suitable form. */
1483 loop_vec_info
1484 vect_analyze_loop_form (struct loop *loop)
1486 tree number_of_iterations, number_of_iterationsm1;
1487 gcond *loop_cond, *inner_loop_cond = NULL;
1489 if (! vect_analyze_loop_form_1 (loop, &loop_cond, &number_of_iterationsm1,
1490 &number_of_iterations, &inner_loop_cond))
1491 return NULL;
1493 loop_vec_info loop_vinfo = new_loop_vec_info (loop);
1494 LOOP_VINFO_NITERSM1 (loop_vinfo) = number_of_iterationsm1;
1495 LOOP_VINFO_NITERS (loop_vinfo) = number_of_iterations;
1496 LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = number_of_iterations;
1498 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
1500 if (dump_enabled_p ())
1502 dump_printf_loc (MSG_NOTE, vect_location,
1503 "Symbolic number of iterations is ");
1504 dump_generic_expr (MSG_NOTE, TDF_DETAILS, number_of_iterations);
1505 dump_printf (MSG_NOTE, "\n");
1509 STMT_VINFO_TYPE (vinfo_for_stmt (loop_cond)) = loop_exit_ctrl_vec_info_type;
1510 if (inner_loop_cond)
1511 STMT_VINFO_TYPE (vinfo_for_stmt (inner_loop_cond))
1512 = loop_exit_ctrl_vec_info_type;
1514 gcc_assert (!loop->aux);
1515 loop->aux = loop_vinfo;
1516 return loop_vinfo;
1521 /* Scan the loop stmts and, depending on whether there are any (non-)SLP
1522 statements, update the vectorization factor. */
1524 static void
1525 vect_update_vf_for_slp (loop_vec_info loop_vinfo)
1527 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1528 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
1529 int nbbs = loop->num_nodes;
1530 unsigned int vectorization_factor;
1531 int i;
1533 if (dump_enabled_p ())
1534 dump_printf_loc (MSG_NOTE, vect_location,
1535 "=== vect_update_vf_for_slp ===\n");
1537 vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1538 gcc_assert (vectorization_factor != 0);
1540 /* If all the stmts in the loop can be SLPed, we perform only SLP, and
1541 the vectorization factor of the loop is the unrolling factor required by
1542 the SLP instances. If that unrolling factor is 1, we say that we
1543 perform pure SLP on the loop - cross-iteration parallelism is not
1544 exploited. */
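/* Worked example (illustrative numbers only): when not all stmts are pure
   SLP, the combined factor computed below is the least common multiple of
   the two, e.g. least_common_multiple (4, 2) = 4 for a loop VF of 4 and an
   SLP unrolling factor of 2, while an SLP unrolling factor of 3 would
   raise it to 12. */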
1545 bool only_slp_in_loop = true;
1546 for (i = 0; i < nbbs; i++)
1548 basic_block bb = bbs[i];
1549 for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
1550 gsi_next (&si))
1552 gimple *stmt = gsi_stmt (si);
1553 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1554 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
1555 && STMT_VINFO_RELATED_STMT (stmt_info))
1557 stmt = STMT_VINFO_RELATED_STMT (stmt_info);
1558 stmt_info = vinfo_for_stmt (stmt);
1560 if ((STMT_VINFO_RELEVANT_P (stmt_info)
1561 || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
1562 && !PURE_SLP_STMT (stmt_info))
1563 /* STMT needs both SLP and loop-based vectorization. */
1564 only_slp_in_loop = false;
1568 if (only_slp_in_loop)
1569 vectorization_factor = LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo);
1570 else
1571 vectorization_factor
1572 = least_common_multiple (vectorization_factor,
1573 LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo));
1575 LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;
1576 if (dump_enabled_p ())
1577 dump_printf_loc (MSG_NOTE, vect_location,
1578 "Updating vectorization factor to %d\n",
1579 vectorization_factor);
1582 /* Function vect_analyze_loop_operations.
1584 Scan the loop stmts and make sure they are all vectorizable. */
1586 static bool
1587 vect_analyze_loop_operations (loop_vec_info loop_vinfo)
1589 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1590 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
1591 int nbbs = loop->num_nodes;
1592 int i;
1593 stmt_vec_info stmt_info;
1594 bool need_to_vectorize = false;
1595 bool ok;
1597 if (dump_enabled_p ())
1598 dump_printf_loc (MSG_NOTE, vect_location,
1599 "=== vect_analyze_loop_operations ===\n");
1601 for (i = 0; i < nbbs; i++)
1603 basic_block bb = bbs[i];
1605 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
1606 gsi_next (&si))
1608 gphi *phi = si.phi ();
1609 ok = true;
1611 stmt_info = vinfo_for_stmt (phi);
1612 if (dump_enabled_p ())
1614 dump_printf_loc (MSG_NOTE, vect_location, "examining phi: ");
1615 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
1617 if (virtual_operand_p (gimple_phi_result (phi)))
1618 continue;
1620 /* Inner-loop loop-closed exit phi in outer-loop vectorization
1621 (i.e., a phi in the tail of the outer-loop). */
1622 if (! is_loop_header_bb_p (bb))
1624 /* FORNOW: we currently don't support the case that these phis
1625 are not used in the outer-loop (unless it is a double reduction,
1626 i.e., this phi is vect_reduction_def), because this case
1627 requires us to actually do something here. */
1628 if ((!STMT_VINFO_RELEVANT_P (stmt_info)
1629 || STMT_VINFO_LIVE_P (stmt_info))
1630 && STMT_VINFO_DEF_TYPE (stmt_info)
1631 != vect_double_reduction_def)
1633 if (dump_enabled_p ())
1634 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1635 "Unsupported loop-closed phi in "
1636 "outer-loop.\n");
1637 return false;
1640 /* If PHI is used in the outer loop, we check that its operand
1641 is defined in the inner loop. */
1642 if (STMT_VINFO_RELEVANT_P (stmt_info))
1644 tree phi_op;
1645 gimple *op_def_stmt;
1647 if (gimple_phi_num_args (phi) != 1)
1648 return false;
1650 phi_op = PHI_ARG_DEF (phi, 0);
1651 if (TREE_CODE (phi_op) != SSA_NAME)
1652 return false;
1654 op_def_stmt = SSA_NAME_DEF_STMT (phi_op);
1655 if (gimple_nop_p (op_def_stmt)
1656 || !flow_bb_inside_loop_p (loop, gimple_bb (op_def_stmt))
1657 || !vinfo_for_stmt (op_def_stmt))
1658 return false;
1660 if (STMT_VINFO_RELEVANT (vinfo_for_stmt (op_def_stmt))
1661 != vect_used_in_outer
1662 && STMT_VINFO_RELEVANT (vinfo_for_stmt (op_def_stmt))
1663 != vect_used_in_outer_by_reduction)
1664 return false;
1667 continue;
1670 gcc_assert (stmt_info);
1672 if ((STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope
1673 || STMT_VINFO_LIVE_P (stmt_info))
1674 && STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def)
1676 /* A scalar-dependence cycle that we don't support. */
1677 if (dump_enabled_p ())
1678 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1679 "not vectorized: scalar dependence cycle.\n");
1680 return false;
1683 if (STMT_VINFO_RELEVANT_P (stmt_info))
1685 need_to_vectorize = true;
1686 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def)
1687 ok = vectorizable_induction (phi, NULL, NULL);
1690 if (ok && STMT_VINFO_LIVE_P (stmt_info))
1691 ok = vectorizable_live_operation (phi, NULL, NULL, -1, NULL);
1693 if (!ok)
1695 if (dump_enabled_p ())
1697 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1698 "not vectorized: relevant phi not "
1699 "supported: ");
1700 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, phi, 0);
1702 return false;
1706 for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
1707 gsi_next (&si))
1709 gimple *stmt = gsi_stmt (si);
1710 if (!gimple_clobber_p (stmt)
1711 && !vect_analyze_stmt (stmt, &need_to_vectorize, NULL))
1712 return false;
1714 } /* bbs */
1716 /* All operations in the loop are either irrelevant (deal with loop
1717 control, or dead), or only used outside the loop and can be moved
1718 out of the loop (e.g. invariants, inductions). The loop can be
1719 optimized away by scalar optimizations. We're better off not
1720 touching this loop. */
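/* Illustrative example (added for clarity): a loop whose body only computes
   loop-invariant values, or only advances an induction whose final value
   can be computed directly after the loop, leaves nothing relevant to
   vectorize and is rejected below with the "redundant loop" message. */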
1721 if (!need_to_vectorize)
1723 if (dump_enabled_p ())
1724 dump_printf_loc (MSG_NOTE, vect_location,
1725 "All the computation can be taken out of the loop.\n");
1726 if (dump_enabled_p ())
1727 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1728 "not vectorized: redundant loop. no profit to "
1729 "vectorize.\n");
1730 return false;
1733 return true;
1737 /* Function vect_analyze_loop_2.
1739 Apply a set of analyses on LOOP, and create a loop_vec_info struct
1740 for it. The different analyses will record information in the
1741 loop_vec_info struct. */
1742 static bool
1743 vect_analyze_loop_2 (loop_vec_info loop_vinfo, bool &fatal)
1745 bool ok;
1746 int max_vf = MAX_VECTORIZATION_FACTOR;
1747 int min_vf = 2;
1748 unsigned int n_stmts = 0;
1750 /* The first group of checks is independent of the vector size. */
1751 fatal = true;
1753 /* Find all data references in the loop (which correspond to vdefs/vuses)
1754 and analyze their evolution in the loop. */
1756 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
1758 loop_p loop = LOOP_VINFO_LOOP (loop_vinfo);
1759 if (!find_loop_nest (loop, &LOOP_VINFO_LOOP_NEST (loop_vinfo)))
1761 if (dump_enabled_p ())
1762 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1763 "not vectorized: loop nest containing two "
1764 "or more consecutive inner loops cannot be "
1765 "vectorized\n");
1766 return false;
1769 for (unsigned i = 0; i < loop->num_nodes; i++)
1770 for (gimple_stmt_iterator gsi = gsi_start_bb (bbs[i]);
1771 !gsi_end_p (gsi); gsi_next (&gsi))
1773 gimple *stmt = gsi_stmt (gsi);
1774 if (is_gimple_debug (stmt))
1775 continue;
1776 ++n_stmts;
1777 if (!find_data_references_in_stmt (loop, stmt,
1778 &LOOP_VINFO_DATAREFS (loop_vinfo)))
1780 if (is_gimple_call (stmt) && loop->safelen)
1782 tree fndecl = gimple_call_fndecl (stmt), op;
1783 if (fndecl != NULL_TREE)
1785 cgraph_node *node = cgraph_node::get (fndecl);
1786 if (node != NULL && node->simd_clones != NULL)
1788 unsigned int j, n = gimple_call_num_args (stmt);
1789 for (j = 0; j < n; j++)
1791 op = gimple_call_arg (stmt, j);
1792 if (DECL_P (op)
1793 || (REFERENCE_CLASS_P (op)
1794 && get_base_address (op)))
1795 break;
1797 op = gimple_call_lhs (stmt);
1798 /* Ignore #pragma omp declare simd functions
1799 if they don't have data references in the
1800 call stmt itself. */
1801 if (j == n
1802 && !(op
1803 && (DECL_P (op)
1804 || (REFERENCE_CLASS_P (op)
1805 && get_base_address (op)))))
1806 continue;
1810 if (dump_enabled_p ())
1811 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1812 "not vectorized: loop contains function "
1813 "calls or data references that cannot "
1814 "be analyzed\n");
1815 return false;
1819 /* Analyze the data references and also adjust the minimal
1820 vectorization factor according to the loads and stores. */
1822 ok = vect_analyze_data_refs (loop_vinfo, &min_vf);
1823 if (!ok)
1825 if (dump_enabled_p ())
1826 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1827 "bad data references.\n");
1828 return false;
1831 /* Classify all cross-iteration scalar data-flow cycles.
1832 Cross-iteration cycles caused by virtual phis are analyzed separately. */
1833 vect_analyze_scalar_cycles (loop_vinfo);
1835 vect_pattern_recog (loop_vinfo);
1837 vect_fixup_scalar_cycles_with_patterns (loop_vinfo);
1839 /* Analyze the access patterns of the data-refs in the loop (consecutive,
1840 complex, etc.). FORNOW: Only handle consecutive access pattern. */
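/* Illustrative example (added for clarity): in a loop computing
   a[i] = b[i] + c[i] with i advancing by 1, all three references move
   forward by one element per iteration - the consecutive pattern referred
   to above - whereas an access such as a[3*i] advances by three elements
   per iteration and is not consecutive. */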
1842 ok = vect_analyze_data_ref_accesses (loop_vinfo);
1843 if (!ok)
1845 if (dump_enabled_p ())
1846 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1847 "bad data access.\n");
1848 return false;
1851 /* Data-flow analysis to detect stmts that do not need to be vectorized. */
1853 ok = vect_mark_stmts_to_be_vectorized (loop_vinfo);
1854 if (!ok)
1856 if (dump_enabled_p ())
1857 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1858 "unexpected pattern.\n");
1859 return false;
1862 /* The rest of the analysis below depends on the vector size in some way, so a failure past this point is no longer considered fatal. */
1863 fatal = false;
1865 /* Analyze data dependences between the data-refs in the loop
1866 and adjust the maximum vectorization factor according to
1867 the dependences.
1868 FORNOW: fail at the first data dependence that we encounter. */
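/* Illustrative example (added for clarity, not part of the original source):
   a loop containing

     a[i + 3] = a[i] + 1;

   carries a dependence of distance 3 between the read and the write, so
   only vectorization factors of at most 3 preserve the scalar semantics;
   constraints of this kind are what lower max_vf here. */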
1870 ok = vect_analyze_data_ref_dependences (loop_vinfo, &max_vf);
1871 if (!ok
1872 || max_vf < min_vf)
1874 if (dump_enabled_p ())
1875 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1876 "bad data dependence.\n");
1877 return false;
1880 ok = vect_determine_vectorization_factor (loop_vinfo);
1881 if (!ok)
1883 if (dump_enabled_p ())
1884 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1885 "can't determine vectorization factor.\n");
1886 return false;
1888 if (max_vf < LOOP_VINFO_VECT_FACTOR (loop_vinfo))
1890 if (dump_enabled_p ())
1891 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1892 "bad data dependence.\n");
1893 return false;
1896 /* Compute the scalar iteration cost. */
1897 vect_compute_single_scalar_iteration_cost (loop_vinfo);
1899 int saved_vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1900 HOST_WIDE_INT estimated_niter;
1901 unsigned th;
1902 int min_scalar_loop_bound;
1904 /* Check the SLP opportunities in the loop, analyze and build SLP trees. */
1905 ok = vect_analyze_slp (loop_vinfo, n_stmts);
1906 if (!ok)
1907 return false;
1909 /* If there are any SLP instances mark them as pure_slp. */
1910 bool slp = vect_make_slp_decision (loop_vinfo);
1911 if (slp)
1913 /* Find stmts that need to be both vectorized and SLPed. */
1914 vect_detect_hybrid_slp (loop_vinfo);
1916 /* Update the vectorization factor based on the SLP decision. */
1917 vect_update_vf_for_slp (loop_vinfo);
1920 /* This is the point where we can re-start analysis with SLP forced off. */
1921 start_over:
1923 /* Now the vectorization factor is final. */
1924 unsigned vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1925 gcc_assert (vectorization_factor != 0);
1927 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) && dump_enabled_p ())
1928 dump_printf_loc (MSG_NOTE, vect_location,
1929 "vectorization_factor = %d, niters = "
1930 HOST_WIDE_INT_PRINT_DEC "\n", vectorization_factor,
1931 LOOP_VINFO_INT_NITERS (loop_vinfo));
1933 HOST_WIDE_INT max_niter
1934 = likely_max_stmt_executions_int (LOOP_VINFO_LOOP (loop_vinfo));
1935 if ((LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
1936 && (LOOP_VINFO_INT_NITERS (loop_vinfo) < vectorization_factor))
1937 || (max_niter != -1
1938 && (unsigned HOST_WIDE_INT) max_niter < vectorization_factor))
1940 if (dump_enabled_p ())
1941 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1942 "not vectorized: iteration count smaller than "
1943 "vectorization factor.\n");
1944 return false;
1947 /* Analyze the alignment of the data-refs in the loop.
1948 Fail if a data reference is found that cannot be vectorized. */
1950 ok = vect_analyze_data_refs_alignment (loop_vinfo);
1951 if (!ok)
1953 if (dump_enabled_p ())
1954 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1955 "bad data alignment.\n");
1956 return false;
1959 /* Prune the list of ddrs to be tested at run-time by versioning for alias.
1960 It is important to call pruning after vect_analyze_data_ref_accesses,
1961 since we use grouping information gathered by interleaving analysis. */
1962 ok = vect_prune_runtime_alias_test_list (loop_vinfo);
1963 if (!ok)
1965 if (dump_enabled_p ())
1966 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1967 "number of versioning for alias "
1968 "run-time tests exceeds %d "
1969 "(--param vect-max-version-for-alias-checks)\n",
1970 PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS));
1971 return false;
1974 /* This pass will decide on using loop versioning and/or loop peeling in
1975 order to enhance the alignment of data references in the loop. */
1976 ok = vect_enhance_data_refs_alignment (loop_vinfo);
1977 if (!ok)
1979 if (dump_enabled_p ())
1980 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1981 "bad data alignment.\n");
1982 return false;
1985 if (slp)
1987 /* Analyze operations in the SLP instances. Note this may
1988 remove unsupported SLP instances which makes the above
1989 SLP kind detection invalid. */
1990 unsigned old_size = LOOP_VINFO_SLP_INSTANCES (loop_vinfo).length ();
1991 vect_slp_analyze_operations (LOOP_VINFO_SLP_INSTANCES (loop_vinfo),
1992 LOOP_VINFO_TARGET_COST_DATA (loop_vinfo));
1993 if (LOOP_VINFO_SLP_INSTANCES (loop_vinfo).length () != old_size)
1994 goto again;
1997 /* Scan all the remaining operations in the loop that are not subject
1998 to SLP and make sure they are vectorizable. */
1999 ok = vect_analyze_loop_operations (loop_vinfo);
2000 if (!ok)
2002 if (dump_enabled_p ())
2003 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2004 "bad operation or unsupported loop bound.\n");
2005 return false;
2008 /* Analyze cost. Decide if worth while to vectorize. */
2009 int min_profitable_estimate, min_profitable_iters;
2010 vect_estimate_min_profitable_iters (loop_vinfo, &min_profitable_iters,
2011 &min_profitable_estimate);
2013 if (min_profitable_iters < 0)
2015 if (dump_enabled_p ())
2016 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2017 "not vectorized: vectorization not profitable.\n");
2018 if (dump_enabled_p ())
2019 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2020 "not vectorized: vector version will never be "
2021 "profitable.\n");
2022 goto again;
2025 min_scalar_loop_bound = ((PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND)
2026 * vectorization_factor) - 1);
2028 /* Use the cost model only if it is more conservative than user specified
2029 threshold. */
2030 th = (unsigned) min_scalar_loop_bound;
2031 if (min_profitable_iters
2032 && (!min_scalar_loop_bound
2033 || min_profitable_iters > min_scalar_loop_bound))
2034 th = (unsigned) min_profitable_iters;
2036 LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo) = th;
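 /* Example with assumed values: --param min-vect-loop-bound=4 and a
    vectorization factor of 4 give min_scalar_loop_bound = 4 * 4 - 1 = 15;
    a cost-model result of min_profitable_iters = 7 is less conservative
    and is ignored (th stays 15), while a result of 20 raises th to 20.
    With a parameter value of 0 (the usual default) the cost-model result
    is used whenever it is non-zero.  */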
2038 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
2039 && LOOP_VINFO_INT_NITERS (loop_vinfo) <= th)
2041 if (dump_enabled_p ())
2042 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2043 "not vectorized: vectorization not profitable.\n");
2044 if (dump_enabled_p ())
2045 dump_printf_loc (MSG_NOTE, vect_location,
2046 "not vectorized: iteration count smaller than user "
2047 "specified loop bound parameter or minimum profitable "
2048 "iterations (whichever is more conservative).\n");
2049 goto again;
2052 estimated_niter
2053 = estimated_stmt_executions_int (LOOP_VINFO_LOOP (loop_vinfo));
2054 if (estimated_niter == -1)
2055 estimated_niter = max_niter;
2056 if (estimated_niter != -1
2057 && ((unsigned HOST_WIDE_INT) estimated_niter
2058 <= MAX (th, (unsigned)min_profitable_estimate)))
2060 if (dump_enabled_p ())
2061 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2062 "not vectorized: estimated iteration count too "
2063 "small.\n");
2064 if (dump_enabled_p ())
2065 dump_printf_loc (MSG_NOTE, vect_location,
2066 "not vectorized: estimated iteration count smaller "
2067 "than specified loop bound parameter or minimum "
2068 "profitable iterations (whichever is more "
2069 "conservative).\n");
2070 goto again;
2073 /* Decide whether we need to create an epilogue loop to handle
2074 remaining scalar iterations. */
2075 th = ((LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo) + 1)
2076 / LOOP_VINFO_VECT_FACTOR (loop_vinfo))
2077 * LOOP_VINFO_VECT_FACTOR (loop_vinfo);
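 /* E.g. (example values): a cost-model threshold of 15 with a
    vectorization factor of 4 gives th = ((15 + 1) / 4) * 4 = 16, while a
    threshold of 14 gives th = ((14 + 1) / 4) * 4 = 12; i.e. th becomes
    (threshold + 1) truncated down to a multiple of the vectorization
    factor.  */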
2079 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
2080 && LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) > 0)
2082 if (ctz_hwi (LOOP_VINFO_INT_NITERS (loop_vinfo)
2083 - LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo))
2084 < exact_log2 (LOOP_VINFO_VECT_FACTOR (loop_vinfo)))
2085 LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = true;
2087 else if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo)
2088 || (tree_ctz (LOOP_VINFO_NITERS (loop_vinfo))
2089 < (unsigned)exact_log2 (LOOP_VINFO_VECT_FACTOR (loop_vinfo))
2090 /* In case of versioning, check if the maximum number of
2091 iterations is greater than th. If they are identical,
2092 the epilogue is unnecessary. */
2093 && ((!LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo)
2094 && !LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
2095 || (unsigned HOST_WIDE_INT) max_niter > th)))
2096 LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = true;
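 /* Worked example with assumed values: with a known iteration count of 17,
    one iteration peeled for alignment and a vectorization factor of 4, the
    remaining 16 iterations have ctz_hwi (16) = 4, which is not smaller than
    exact_log2 (4) = 2, so 16 is a multiple of 4 and no epilogue is needed;
    with 18 iterations the remaining 17 have ctz_hwi (17) = 0 < 2, so
    PEELING_FOR_NITER is set.  */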
2098 /* If an epilogue loop is required make sure we can create one. */
2099 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
2100 || LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo))
2102 if (dump_enabled_p ())
2103 dump_printf_loc (MSG_NOTE, vect_location, "epilog loop required\n");
2104 if (!vect_can_advance_ivs_p (loop_vinfo)
2105 || !slpeel_can_duplicate_loop_p (LOOP_VINFO_LOOP (loop_vinfo),
2106 single_exit (LOOP_VINFO_LOOP
2107 (loop_vinfo))))
2109 if (dump_enabled_p ())
2110 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2111 "not vectorized: can't create required "
2112 "epilog loop\n");
2113 goto again;
2117 gcc_assert (vectorization_factor
2118 == (unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo));
2120 /* Ok to vectorize! */
2121 return true;
2123 again:
2124 /* Try again with SLP forced off but if we didn't do any SLP there is
2125 no point in re-trying. */
2126 if (!slp)
2127 return false;
2129 /* If there are reduction chains re-trying will fail anyway. */
2130 if (! LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo).is_empty ())
2131 return false;
2133 /* Likewise if the grouped loads or stores in the SLP cannot be handled
2134 via interleaving or lane instructions. */
2135 slp_instance instance;
2136 slp_tree node;
2137 unsigned i, j;
2138 FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), i, instance)
2140 stmt_vec_info vinfo;
2141 vinfo = vinfo_for_stmt
2142 (SLP_TREE_SCALAR_STMTS (SLP_INSTANCE_TREE (instance))[0]);
2143 if (! STMT_VINFO_GROUPED_ACCESS (vinfo))
2144 continue;
2145 vinfo = vinfo_for_stmt (STMT_VINFO_GROUP_FIRST_ELEMENT (vinfo));
2146 unsigned int size = STMT_VINFO_GROUP_SIZE (vinfo);
2147 tree vectype = STMT_VINFO_VECTYPE (vinfo);
2148 if (! vect_store_lanes_supported (vectype, size)
2149 && ! vect_grouped_store_supported (vectype, size))
2150 return false;
2151 FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (instance), j, node)
2153 vinfo = vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (node)[0]);
2154 vinfo = vinfo_for_stmt (STMT_VINFO_GROUP_FIRST_ELEMENT (vinfo));
2155 size = STMT_VINFO_GROUP_SIZE (vinfo);
2156 vectype = STMT_VINFO_VECTYPE (vinfo);
2157 if (! vect_load_lanes_supported (vectype, size)
2158 && ! vect_grouped_load_supported (vectype, size))
2159 return false;
2163 if (dump_enabled_p ())
2164 dump_printf_loc (MSG_NOTE, vect_location,
2165 "re-trying with SLP disabled\n");
2167 /* Roll back state appropriately. No SLP this time. */
2168 slp = false;
2169 /* Restore the vectorization factor to what it was without SLP. */
2170 LOOP_VINFO_VECT_FACTOR (loop_vinfo) = saved_vectorization_factor;
2171 /* Free the SLP instances. */
2172 FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), j, instance)
2173 vect_free_slp_instance (instance);
2174 LOOP_VINFO_SLP_INSTANCES (loop_vinfo).release ();
2175 /* Reset SLP type to loop_vect on all stmts. */
2176 for (i = 0; i < LOOP_VINFO_LOOP (loop_vinfo)->num_nodes; ++i)
2178 basic_block bb = LOOP_VINFO_BBS (loop_vinfo)[i];
2179 for (gimple_stmt_iterator si = gsi_start_bb (bb);
2180 !gsi_end_p (si); gsi_next (&si))
2182 stmt_vec_info stmt_info = vinfo_for_stmt (gsi_stmt (si));
2183 STMT_SLP_TYPE (stmt_info) = loop_vect;
2184 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
2186 stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
2187 STMT_SLP_TYPE (stmt_info) = loop_vect;
2188 for (gimple_stmt_iterator pi
2189 = gsi_start (STMT_VINFO_PATTERN_DEF_SEQ (stmt_info));
2190 !gsi_end_p (pi); gsi_next (&pi))
2192 gimple *pstmt = gsi_stmt (pi);
2193 STMT_SLP_TYPE (vinfo_for_stmt (pstmt)) = loop_vect;
2198 /* Free optimized alias test DDRS. */
2199 LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo).release ();
2200 /* Reset target cost data. */
2201 destroy_cost_data (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo));
2202 LOOP_VINFO_TARGET_COST_DATA (loop_vinfo)
2203 = init_cost (LOOP_VINFO_LOOP (loop_vinfo));
2204 /* Reset assorted flags. */
2205 LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = false;
2206 LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = false;
2207 LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo) = 0;
2209 goto start_over;
2212 /* Function vect_analyze_loop.
2214 Apply a set of analyses on LOOP, and create a loop_vec_info struct
2215 for it. The different analyses will record information in the
2216 loop_vec_info struct. */
2217 loop_vec_info
2218 vect_analyze_loop (struct loop *loop)
2220 loop_vec_info loop_vinfo;
2221 unsigned int vector_sizes;
2223 /* Autodetect first vector size we try. */
2224 current_vector_size = 0;
2225 vector_sizes = targetm.vectorize.autovectorize_vector_sizes ();
2227 if (dump_enabled_p ())
2228 dump_printf_loc (MSG_NOTE, vect_location,
2229 "===== analyze_loop_nest =====\n");
2231 if (loop_outer (loop)
2232 && loop_vec_info_for_loop (loop_outer (loop))
2233 && LOOP_VINFO_VECTORIZABLE_P (loop_vec_info_for_loop (loop_outer (loop))))
2235 if (dump_enabled_p ())
2236 dump_printf_loc (MSG_NOTE, vect_location,
2237 "outer-loop already vectorized.\n");
2238 return NULL;
2241 while (1)
2243 /* Check the CFG characteristics of the loop (nesting, entry/exit). */
2244 loop_vinfo = vect_analyze_loop_form (loop);
2245 if (!loop_vinfo)
2247 if (dump_enabled_p ())
2248 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2249 "bad loop form.\n");
2250 return NULL;
2253 bool fatal = false;
2254 if (vect_analyze_loop_2 (loop_vinfo, fatal))
2256 LOOP_VINFO_VECTORIZABLE_P (loop_vinfo) = 1;
2258 return loop_vinfo;
2261 destroy_loop_vec_info (loop_vinfo, true);
2263 vector_sizes &= ~current_vector_size;
2264 if (fatal
2265 || vector_sizes == 0
2266 || current_vector_size == 0)
2267 return NULL;
2269 /* Try the next biggest vector size. */
2270 current_vector_size = 1 << floor_log2 (vector_sizes);
2271 if (dump_enabled_p ())
2272 dump_printf_loc (MSG_NOTE, vect_location,
2273 "***** Re-trying analysis with "
2274 "vector size %d\n", current_vector_size);
2279 /* Function reduction_code_for_scalar_code
2281 Input:
2282 CODE - tree_code of a reduction operations.
2284 Output:
2285 REDUC_CODE - the corresponding tree-code to be used to reduce the
2286 vector of partial results into a single scalar result, or ERROR_MARK
2287 if the operation is a supported reduction operation, but does not have
2288 such a tree-code.
2290 Return FALSE if CODE currently cannot be vectorized as a reduction. */
2292 static bool
2293 reduction_code_for_scalar_code (enum tree_code code,
2294 enum tree_code *reduc_code)
2296 switch (code)
2298 case MAX_EXPR:
2299 *reduc_code = REDUC_MAX_EXPR;
2300 return true;
2302 case MIN_EXPR:
2303 *reduc_code = REDUC_MIN_EXPR;
2304 return true;
2306 case PLUS_EXPR:
2307 *reduc_code = REDUC_PLUS_EXPR;
2308 return true;
2310 case MULT_EXPR:
2311 case MINUS_EXPR:
2312 case BIT_IOR_EXPR:
2313 case BIT_XOR_EXPR:
2314 case BIT_AND_EXPR:
2315 *reduc_code = ERROR_MARK;
2316 return true;
2318 default:
2319 return false;
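/* For example, a PLUS_EXPR reduction of four partial sums {1, 2, 3, 4}
   held in one vector is collapsed by REDUC_PLUS_EXPR into the scalar 10,
   while a MULT_EXPR reduction gets ERROR_MARK here and is instead reduced
   in the epilogue by whole-vector shifts or element extracts
   (see vect_model_reduction_cost).  */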
2324 /* Error reporting helper for vect_is_simple_reduction below. GIMPLE statement
2325 STMT is printed with a message MSG. */
2327 static void
2328 report_vect_op (int msg_type, gimple *stmt, const char *msg)
2330 dump_printf_loc (msg_type, vect_location, "%s", msg);
2331 dump_gimple_stmt (msg_type, TDF_SLIM, stmt, 0);
2335 /* Detect SLP reduction of the form:
2337 #a1 = phi <a5, a0>
2338 a2 = operation (a1)
2339 a3 = operation (a2)
2340 a4 = operation (a3)
2341 a5 = operation (a4)
2343 #a = phi <a5>
2345 PHI is the reduction phi node (#a1 = phi <a5, a0> above)
2346 FIRST_STMT is the first reduction stmt in the chain
2347 (a2 = operation (a1)).
2349 Return TRUE if a reduction chain was detected. */
2351 static bool
2352 vect_is_slp_reduction (loop_vec_info loop_info, gimple *phi,
2353 gimple *first_stmt)
2355 struct loop *loop = (gimple_bb (phi))->loop_father;
2356 struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
2357 enum tree_code code;
2358 gimple *current_stmt = NULL, *loop_use_stmt = NULL, *first, *next_stmt;
2359 stmt_vec_info use_stmt_info, current_stmt_info;
2360 tree lhs;
2361 imm_use_iterator imm_iter;
2362 use_operand_p use_p;
2363 int nloop_uses, size = 0, n_out_of_loop_uses;
2364 bool found = false;
2366 if (loop != vect_loop)
2367 return false;
2369 lhs = PHI_RESULT (phi);
2370 code = gimple_assign_rhs_code (first_stmt);
2371 while (1)
2373 nloop_uses = 0;
2374 n_out_of_loop_uses = 0;
2375 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
2377 gimple *use_stmt = USE_STMT (use_p);
2378 if (is_gimple_debug (use_stmt))
2379 continue;
2381 /* Check if we got back to the reduction phi. */
2382 if (use_stmt == phi)
2384 loop_use_stmt = use_stmt;
2385 found = true;
2386 break;
2389 if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
2391 loop_use_stmt = use_stmt;
2392 nloop_uses++;
2394 else
2395 n_out_of_loop_uses++;
2397 /* There can be either a single use in the loop or two uses in
2398 phi nodes. */
2399 if (nloop_uses > 1 || (n_out_of_loop_uses && nloop_uses))
2400 return false;
2403 if (found)
2404 break;
2406 /* We reached a statement with no loop uses. */
2407 if (nloop_uses == 0)
2408 return false;
2410 /* This is a loop exit phi, and we haven't reached the reduction phi. */
2411 if (gimple_code (loop_use_stmt) == GIMPLE_PHI)
2412 return false;
2414 if (!is_gimple_assign (loop_use_stmt)
2415 || code != gimple_assign_rhs_code (loop_use_stmt)
2416 || !flow_bb_inside_loop_p (loop, gimple_bb (loop_use_stmt)))
2417 return false;
2419 /* Insert USE_STMT into reduction chain. */
2420 use_stmt_info = vinfo_for_stmt (loop_use_stmt);
2421 if (current_stmt)
2423 current_stmt_info = vinfo_for_stmt (current_stmt);
2424 GROUP_NEXT_ELEMENT (current_stmt_info) = loop_use_stmt;
2425 GROUP_FIRST_ELEMENT (use_stmt_info)
2426 = GROUP_FIRST_ELEMENT (current_stmt_info);
2428 else
2429 GROUP_FIRST_ELEMENT (use_stmt_info) = loop_use_stmt;
2431 lhs = gimple_assign_lhs (loop_use_stmt);
2432 current_stmt = loop_use_stmt;
2433 size++;
2436 if (!found || loop_use_stmt != phi || size < 2)
2437 return false;
2439 /* Swap the operands, if needed, to make the reduction operand be the second
2440 operand. */
2441 lhs = PHI_RESULT (phi);
2442 next_stmt = GROUP_FIRST_ELEMENT (vinfo_for_stmt (current_stmt));
2443 while (next_stmt)
2445 if (gimple_assign_rhs2 (next_stmt) == lhs)
2447 tree op = gimple_assign_rhs1 (next_stmt);
2448 gimple *def_stmt = NULL;
2450 if (TREE_CODE (op) == SSA_NAME)
2451 def_stmt = SSA_NAME_DEF_STMT (op);
2453 /* Check that the other def is either defined in the loop
2454 ("vect_internal_def"), or it's an induction (defined by a
2455 loop-header phi-node). */
2456 if (def_stmt
2457 && gimple_bb (def_stmt)
2458 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
2459 && (is_gimple_assign (def_stmt)
2460 || is_gimple_call (def_stmt)
2461 || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
2462 == vect_induction_def
2463 || (gimple_code (def_stmt) == GIMPLE_PHI
2464 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
2465 == vect_internal_def
2466 && !is_loop_header_bb_p (gimple_bb (def_stmt)))))
2468 lhs = gimple_assign_lhs (next_stmt);
2469 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
2470 continue;
2473 return false;
2475 else
2477 tree op = gimple_assign_rhs2 (next_stmt);
2478 gimple *def_stmt = NULL;
2480 if (TREE_CODE (op) == SSA_NAME)
2481 def_stmt = SSA_NAME_DEF_STMT (op);
2483 /* Check that the other def is either defined in the loop
2484 ("vect_internal_def"), or it's an induction (defined by a
2485 loop-header phi-node). */
2486 if (def_stmt
2487 && gimple_bb (def_stmt)
2488 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
2489 && (is_gimple_assign (def_stmt)
2490 || is_gimple_call (def_stmt)
2491 || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
2492 == vect_induction_def
2493 || (gimple_code (def_stmt) == GIMPLE_PHI
2494 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
2495 == vect_internal_def
2496 && !is_loop_header_bb_p (gimple_bb (def_stmt)))))
2498 if (dump_enabled_p ())
2500 dump_printf_loc (MSG_NOTE, vect_location, "swapping oprnds: ");
2501 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, next_stmt, 0);
2504 swap_ssa_operands (next_stmt,
2505 gimple_assign_rhs1_ptr (next_stmt),
2506 gimple_assign_rhs2_ptr (next_stmt));
2507 update_stmt (next_stmt);
2509 if (CONSTANT_CLASS_P (gimple_assign_rhs1 (next_stmt)))
2510 LOOP_VINFO_OPERANDS_SWAPPED (loop_info) = true;
2512 else
2513 return false;
2516 lhs = gimple_assign_lhs (next_stmt);
2517 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
2520 /* Save the chain for further analysis in SLP detection. */
2521 first = GROUP_FIRST_ELEMENT (vinfo_for_stmt (current_stmt));
2522 LOOP_VINFO_REDUCTION_CHAINS (loop_info).safe_push (first);
2523 GROUP_SIZE (vinfo_for_stmt (first)) = size;
2525 return true;
2529 /* Function vect_is_simple_reduction
2531 (1) Detect a cross-iteration def-use cycle that represents a simple
2532 reduction computation. We look for the following pattern:
2534 loop_header:
2535 a1 = phi < a0, a2 >
2536 a3 = ...
2537 a2 = operation (a3, a1)
2541 a3 = ...
2542 loop_header:
2543 a1 = phi < a0, a2 >
2544 a2 = operation (a3, a1)
2546 such that:
2547 1. operation is commutative and associative and it is safe to
2548 change the order of the computation (if CHECK_REDUCTION is true)
2549 2. no uses for a2 in the loop (a2 is used out of the loop)
2550 3. no uses of a1 in the loop besides the reduction operation
2551 4. no uses of a1 outside the loop.
2553 Conditions 1,4 are tested here.
2554 Conditions 2,3 are tested in vect_mark_stmts_to_be_vectorized.
2556 (2) Detect a cross-iteration def-use cycle in nested loops, i.e.,
2557 nested cycles, if CHECK_REDUCTION is false.
2559 (3) Detect cycles of phi nodes in outer-loop vectorization, i.e., double
2560 reductions:
2562 a1 = phi < a0, a2 >
2563 inner loop (def of a3)
2564 a2 = phi < a3 >
2566 (4) Detect condition expressions, i.e.:
2567 for (int i = 0; i < N; i++)
2568 if (a[i] < val)
2569 ret_val = a[i];
2573 static gimple *
2574 vect_is_simple_reduction (loop_vec_info loop_info, gimple *phi,
2575 bool check_reduction, bool *double_reduc,
2576 bool need_wrapping_integral_overflow,
2577 enum vect_reduction_type *v_reduc_type)
2579 struct loop *loop = (gimple_bb (phi))->loop_father;
2580 struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
2581 edge latch_e = loop_latch_edge (loop);
2582 tree loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
2583 gimple *def_stmt, *def1 = NULL, *def2 = NULL, *phi_use_stmt = NULL;
2584 enum tree_code orig_code, code;
2585 tree op1, op2, op3 = NULL_TREE, op4 = NULL_TREE;
2586 tree type;
2587 int nloop_uses;
2588 tree name;
2589 imm_use_iterator imm_iter;
2590 use_operand_p use_p;
2591 bool phi_def;
2593 *double_reduc = false;
2594 *v_reduc_type = TREE_CODE_REDUCTION;
2596 /* If CHECK_REDUCTION is true, we assume inner-most loop vectorization,
2597 otherwise, we assume outer loop vectorization. */
2598 gcc_assert ((check_reduction && loop == vect_loop)
2599 || (!check_reduction && flow_loop_nested_p (vect_loop, loop)));
2601 name = PHI_RESULT (phi);
2602 /* ??? If there are no uses of the PHI result the inner loop reduction
2603 won't be detected as possibly double-reduction by vectorizable_reduction
2604 because that tries to walk the PHI arg from the preheader edge which
2605 can be constant. See PR60382. */
2606 if (has_zero_uses (name))
2607 return NULL;
2608 nloop_uses = 0;
2609 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
2611 gimple *use_stmt = USE_STMT (use_p);
2612 if (is_gimple_debug (use_stmt))
2613 continue;
2615 if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
2617 if (dump_enabled_p ())
2618 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2619 "intermediate value used outside loop.\n");
2621 return NULL;
2624 nloop_uses++;
2625 if (nloop_uses > 1)
2627 if (dump_enabled_p ())
2628 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2629 "reduction used in loop.\n");
2630 return NULL;
2633 phi_use_stmt = use_stmt;
2636 if (TREE_CODE (loop_arg) != SSA_NAME)
2638 if (dump_enabled_p ())
2640 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2641 "reduction: not ssa_name: ");
2642 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, loop_arg);
2643 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
2645 return NULL;
2648 def_stmt = SSA_NAME_DEF_STMT (loop_arg);
2649 if (!def_stmt)
2651 if (dump_enabled_p ())
2652 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2653 "reduction: no def_stmt.\n");
2654 return NULL;
2657 if (!is_gimple_assign (def_stmt) && gimple_code (def_stmt) != GIMPLE_PHI)
2659 if (dump_enabled_p ())
2660 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
2661 return NULL;
2664 if (is_gimple_assign (def_stmt))
2666 name = gimple_assign_lhs (def_stmt);
2667 phi_def = false;
2669 else
2671 name = PHI_RESULT (def_stmt);
2672 phi_def = true;
2675 nloop_uses = 0;
2676 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
2678 gimple *use_stmt = USE_STMT (use_p);
2679 if (is_gimple_debug (use_stmt))
2680 continue;
2681 if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
2682 nloop_uses++;
2683 if (nloop_uses > 1)
2685 if (dump_enabled_p ())
2686 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2687 "reduction used in loop.\n");
2688 return NULL;
2692 /* If DEF_STMT is a phi node itself, we expect it to have a single argument
2693 defined in the inner loop. */
2694 if (phi_def)
2696 op1 = PHI_ARG_DEF (def_stmt, 0);
2698 if (gimple_phi_num_args (def_stmt) != 1
2699 || TREE_CODE (op1) != SSA_NAME)
2701 if (dump_enabled_p ())
2702 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2703 "unsupported phi node definition.\n");
2705 return NULL;
2708 def1 = SSA_NAME_DEF_STMT (op1);
2709 if (gimple_bb (def1)
2710 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
2711 && loop->inner
2712 && flow_bb_inside_loop_p (loop->inner, gimple_bb (def1))
2713 && is_gimple_assign (def1)
2714 && flow_bb_inside_loop_p (loop->inner, gimple_bb (phi_use_stmt)))
2716 if (dump_enabled_p ())
2717 report_vect_op (MSG_NOTE, def_stmt,
2718 "detected double reduction: ");
2720 *double_reduc = true;
2721 return def_stmt;
2724 return NULL;
2727 code = orig_code = gimple_assign_rhs_code (def_stmt);
2729 /* We can handle "res -= x[i]", which is non-associative by
2730 simply rewriting this into "res += -x[i]". Avoid changing
2731 gimple instruction for the first simple tests and only do this
2732 if we're allowed to change code at all. */
2733 if (code == MINUS_EXPR
2734 && (op1 = gimple_assign_rhs1 (def_stmt))
2735 && TREE_CODE (op1) == SSA_NAME
2736 && SSA_NAME_DEF_STMT (op1) == phi)
2737 code = PLUS_EXPR;
2739 if (code == COND_EXPR)
2741 if (check_reduction)
2742 *v_reduc_type = COND_REDUCTION;
2744 else if (!commutative_tree_code (code) || !associative_tree_code (code))
2746 if (dump_enabled_p ())
2747 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
2748 "reduction: not commutative/associative: ");
2749 return NULL;
2752 if (get_gimple_rhs_class (code) != GIMPLE_BINARY_RHS)
2754 if (code != COND_EXPR)
2756 if (dump_enabled_p ())
2757 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
2758 "reduction: not binary operation: ");
2760 return NULL;
2763 op3 = gimple_assign_rhs1 (def_stmt);
2764 if (COMPARISON_CLASS_P (op3))
2766 op4 = TREE_OPERAND (op3, 1);
2767 op3 = TREE_OPERAND (op3, 0);
2770 op1 = gimple_assign_rhs2 (def_stmt);
2771 op2 = gimple_assign_rhs3 (def_stmt);
2773 if (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op2) != SSA_NAME)
2775 if (dump_enabled_p ())
2776 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
2777 "reduction: uses not ssa_names: ");
2779 return NULL;
2782 else
2784 op1 = gimple_assign_rhs1 (def_stmt);
2785 op2 = gimple_assign_rhs2 (def_stmt);
2787 if (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op2) != SSA_NAME)
2789 if (dump_enabled_p ())
2790 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
2791 "reduction: uses not ssa_names: ");
2793 return NULL;
2797 type = TREE_TYPE (gimple_assign_lhs (def_stmt));
2798 if ((TREE_CODE (op1) == SSA_NAME
2799 && !types_compatible_p (type,TREE_TYPE (op1)))
2800 || (TREE_CODE (op2) == SSA_NAME
2801 && !types_compatible_p (type, TREE_TYPE (op2)))
2802 || (op3 && TREE_CODE (op3) == SSA_NAME
2803 && !types_compatible_p (type, TREE_TYPE (op3)))
2804 || (op4 && TREE_CODE (op4) == SSA_NAME
2805 && !types_compatible_p (type, TREE_TYPE (op4))))
2807 if (dump_enabled_p ())
2809 dump_printf_loc (MSG_NOTE, vect_location,
2810 "reduction: multiple types: operation type: ");
2811 dump_generic_expr (MSG_NOTE, TDF_SLIM, type);
2812 dump_printf (MSG_NOTE, ", operands types: ");
2813 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2814 TREE_TYPE (op1));
2815 dump_printf (MSG_NOTE, ",");
2816 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2817 TREE_TYPE (op2));
2818 if (op3)
2820 dump_printf (MSG_NOTE, ",");
2821 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2822 TREE_TYPE (op3));
2825 if (op4)
2827 dump_printf (MSG_NOTE, ",");
2828 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2829 TREE_TYPE (op4));
2831 dump_printf (MSG_NOTE, "\n");
2834 return NULL;
2837 /* Check that it's ok to change the order of the computation.
2838 Generally, when vectorizing a reduction we change the order of the
2839 computation. This may change the behavior of the program in some
2840 cases, so we need to check that this is ok. One exception is when
2841 vectorizing an outer-loop: the inner-loop is executed sequentially,
2842 and therefore vectorizing reductions in the inner-loop during
2843 outer-loop vectorization is safe. */
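 /* For instance, with a signed char sum and -ftrapv, adding the values
    {127, -128, 1} in source order never overflows, but a reordered
    partial-sum evaluation could compute 127 + 1 first and trap; the checks
    below reject such reorderings unless the type wraps or the operation
    cannot trap.  */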
2845 if (*v_reduc_type != COND_REDUCTION
2846 && check_reduction)
2848 /* CHECKME: check for !flag_finite_math_only too? */
2849 if (SCALAR_FLOAT_TYPE_P (type) && !flag_associative_math)
2851 /* Changing the order of operations changes the semantics. */
2852 if (dump_enabled_p ())
2853 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
2854 "reduction: unsafe fp math optimization: ");
2855 return NULL;
2857 else if (INTEGRAL_TYPE_P (type))
2859 if (!operation_no_trapping_overflow (type, code))
2861 /* Changing the order of operations changes the semantics. */
2862 if (dump_enabled_p ())
2863 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
2864 "reduction: unsafe int math optimization"
2865 " (overflow traps): ");
2866 return NULL;
2868 if (need_wrapping_integral_overflow
2869 && !TYPE_OVERFLOW_WRAPS (type)
2870 && operation_can_overflow (code))
2872 /* Changing the order of operations changes the semantics. */
2873 if (dump_enabled_p ())
2874 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
2875 "reduction: unsafe int math optimization"
2876 " (overflow doesn't wrap): ");
2877 return NULL;
2880 else if (SAT_FIXED_POINT_TYPE_P (type))
2882 /* Changing the order of operations changes the semantics. */
2883 if (dump_enabled_p ())
2884 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
2885 "reduction: unsafe fixed-point math optimization: ");
2886 return NULL;
2890 /* Reduction is safe. We're dealing with one of the following:
2891 1) integer arithmetic and no trapv
2892 2) floating point arithmetic, and special flags permit this optimization
2893 3) nested cycle (i.e., outer loop vectorization). */
2894 if (TREE_CODE (op1) == SSA_NAME)
2895 def1 = SSA_NAME_DEF_STMT (op1);
2897 if (TREE_CODE (op2) == SSA_NAME)
2898 def2 = SSA_NAME_DEF_STMT (op2);
2900 if (code != COND_EXPR
2901 && ((!def1 || gimple_nop_p (def1)) && (!def2 || gimple_nop_p (def2))))
2903 if (dump_enabled_p ())
2904 report_vect_op (MSG_NOTE, def_stmt, "reduction: no defs for operands: ");
2905 return NULL;
2908 /* Check that one def is the reduction def, defined by PHI,
2909 the other def is either defined in the loop ("vect_internal_def"),
2910 or it's an induction (defined by a loop-header phi-node). */
2912 if (def2 && def2 == phi
2913 && (code == COND_EXPR
2914 || !def1 || gimple_nop_p (def1)
2915 || !flow_bb_inside_loop_p (loop, gimple_bb (def1))
2916 || (def1 && flow_bb_inside_loop_p (loop, gimple_bb (def1))
2917 && (is_gimple_assign (def1)
2918 || is_gimple_call (def1)
2919 || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1))
2920 == vect_induction_def
2921 || (gimple_code (def1) == GIMPLE_PHI
2922 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1))
2923 == vect_internal_def
2924 && !is_loop_header_bb_p (gimple_bb (def1)))))))
2926 if (dump_enabled_p ())
2927 report_vect_op (MSG_NOTE, def_stmt, "detected reduction: ");
2928 return def_stmt;
2931 if (def1 && def1 == phi
2932 && (code == COND_EXPR
2933 || !def2 || gimple_nop_p (def2)
2934 || !flow_bb_inside_loop_p (loop, gimple_bb (def2))
2935 || (def2 && flow_bb_inside_loop_p (loop, gimple_bb (def2))
2936 && (is_gimple_assign (def2)
2937 || is_gimple_call (def2)
2938 || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2))
2939 == vect_induction_def
2940 || (gimple_code (def2) == GIMPLE_PHI
2941 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2))
2942 == vect_internal_def
2943 && !is_loop_header_bb_p (gimple_bb (def2)))))))
2945 if (check_reduction
2946 && orig_code != MINUS_EXPR)
2948 if (code == COND_EXPR)
2950 /* No current known use where this case would be useful. */
2951 if (dump_enabled_p ())
2952 report_vect_op (MSG_NOTE, def_stmt,
2953 "detected reduction: cannot currently swap "
2954 "operands for cond_expr");
2955 return NULL;
2958 /* Swap operands (just for simplicity - so that the rest of the code
2959 can assume that the reduction variable is always the last (second)
2960 argument). */
2961 if (dump_enabled_p ())
2962 report_vect_op (MSG_NOTE, def_stmt,
2963 "detected reduction: need to swap operands: ");
2965 swap_ssa_operands (def_stmt, gimple_assign_rhs1_ptr (def_stmt),
2966 gimple_assign_rhs2_ptr (def_stmt));
2968 if (CONSTANT_CLASS_P (gimple_assign_rhs1 (def_stmt)))
2969 LOOP_VINFO_OPERANDS_SWAPPED (loop_info) = true;
2971 else
2973 if (dump_enabled_p ())
2974 report_vect_op (MSG_NOTE, def_stmt, "detected reduction: ");
2977 return def_stmt;
2980 /* Try to find SLP reduction chain. */
2981 if (check_reduction && code != COND_EXPR
2982 && vect_is_slp_reduction (loop_info, phi, def_stmt))
2984 if (dump_enabled_p ())
2985 report_vect_op (MSG_NOTE, def_stmt,
2986 "reduction: detected reduction chain: ");
2988 return def_stmt;
2991 if (dump_enabled_p ())
2992 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
2993 "reduction: unknown pattern: ");
2995 return NULL;
2998 /* Wrapper around vect_is_simple_reduction, which will modify code
2999 in-place if it enables detection of more reductions. Arguments
3000 as there. */
3002 gimple *
3003 vect_force_simple_reduction (loop_vec_info loop_info, gimple *phi,
3004 bool check_reduction, bool *double_reduc,
3005 bool need_wrapping_integral_overflow)
3007 enum vect_reduction_type v_reduc_type;
3008 return vect_is_simple_reduction (loop_info, phi, check_reduction,
3009 double_reduc,
3010 need_wrapping_integral_overflow,
3011 &v_reduc_type);
3014 /* Calculate cost of peeling the loop PEEL_ITERS_PROLOGUE times. */
3016 vect_get_known_peeling_cost (loop_vec_info loop_vinfo, int peel_iters_prologue,
3017 int *peel_iters_epilogue,
3018 stmt_vector_for_cost *scalar_cost_vec,
3019 stmt_vector_for_cost *prologue_cost_vec,
3020 stmt_vector_for_cost *epilogue_cost_vec)
3022 int retval = 0;
3023 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
3025 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
3027 *peel_iters_epilogue = vf/2;
3028 if (dump_enabled_p ())
3029 dump_printf_loc (MSG_NOTE, vect_location,
3030 "cost model: epilogue peel iters set to vf/2 "
3031 "because loop iterations are unknown .\n");
3033 /* If peeled iterations are known but number of scalar loop
3034 iterations are unknown, count a taken branch per peeled loop. */
3035 retval = record_stmt_cost (prologue_cost_vec, 1, cond_branch_taken,
3036 NULL, 0, vect_prologue);
3037 retval = record_stmt_cost (prologue_cost_vec, 1, cond_branch_taken,
3038 NULL, 0, vect_epilogue);
3040 else
3042 int niters = LOOP_VINFO_INT_NITERS (loop_vinfo);
3043 peel_iters_prologue = niters < peel_iters_prologue ?
3044 niters : peel_iters_prologue;
3045 *peel_iters_epilogue = (niters - peel_iters_prologue) % vf;
3046 /* If we need to peel for gaps, but no peeling is required, we have to
3047 peel VF iterations. */
3048 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) && !*peel_iters_epilogue)
3049 *peel_iters_epilogue = vf;
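 /* E.g. (example values): niters = 23, peel_iters_prologue = 3 and vf = 4
    give *peel_iters_epilogue = (23 - 3) % 4 = 0; if the loop also peels
    for gaps, the epilogue is then forced to a full vf = 4 iterations.  */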
3052 stmt_info_for_cost *si;
3053 int j;
3054 if (peel_iters_prologue)
3055 FOR_EACH_VEC_ELT (*scalar_cost_vec, j, si)
3056 retval += record_stmt_cost (prologue_cost_vec,
3057 si->count * peel_iters_prologue,
3058 si->kind, NULL, si->misalign,
3059 vect_prologue);
3060 if (*peel_iters_epilogue)
3061 FOR_EACH_VEC_ELT (*scalar_cost_vec, j, si)
3062 retval += record_stmt_cost (epilogue_cost_vec,
3063 si->count * *peel_iters_epilogue,
3064 si->kind, NULL, si->misalign,
3065 vect_epilogue);
3067 return retval;
3070 /* Function vect_estimate_min_profitable_iters
3072 Return the number of iterations required for the vector version of the
3073 loop to be profitable relative to the cost of the scalar version of the
3074 loop. */
3076 static void
3077 vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo,
3078 int *ret_min_profitable_niters,
3079 int *ret_min_profitable_estimate)
3081 int min_profitable_iters;
3082 int min_profitable_estimate;
3083 int peel_iters_prologue;
3084 int peel_iters_epilogue;
3085 unsigned vec_inside_cost = 0;
3086 int vec_outside_cost = 0;
3087 unsigned vec_prologue_cost = 0;
3088 unsigned vec_epilogue_cost = 0;
3089 int scalar_single_iter_cost = 0;
3090 int scalar_outside_cost = 0;
3091 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
3092 int npeel = LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
3093 void *target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
3095 /* Cost model disabled. */
3096 if (unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
3098 dump_printf_loc (MSG_NOTE, vect_location, "cost model disabled.\n");
3099 *ret_min_profitable_niters = 0;
3100 *ret_min_profitable_estimate = 0;
3101 return;
3104 /* Requires loop versioning tests to handle misalignment. */
3105 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
3107 /* FIXME: Make cost depend on complexity of individual check. */
3108 unsigned len = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).length ();
3109 (void) add_stmt_cost (target_cost_data, len, vector_stmt, NULL, 0,
3110 vect_prologue);
3111 dump_printf (MSG_NOTE,
3112 "cost model: Adding cost of checks for loop "
3113 "versioning to treat misalignment.\n");
3116 /* Requires loop versioning with alias checks. */
3117 if (LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
3119 /* FIXME: Make cost depend on complexity of individual check. */
3120 unsigned len = LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo).length ();
3121 (void) add_stmt_cost (target_cost_data, len, vector_stmt, NULL, 0,
3122 vect_prologue);
3123 dump_printf (MSG_NOTE,
3124 "cost model: Adding cost of checks for loop "
3125 "versioning aliasing.\n");
3128 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
3129 || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
3130 (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken, NULL, 0,
3131 vect_prologue);
3133 /* Count statements in scalar loop. Using this as scalar cost for a single
3134 iteration for now.
3136 TODO: Add outer loop support.
3138 TODO: Consider assigning different costs to different scalar
3139 statements. */
3141 scalar_single_iter_cost
3142 = LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST (loop_vinfo);
3144 /* Add additional cost for the peeled instructions in prologue and epilogue
3145 loop.
3147 FORNOW: If we don't know the value of peel_iters for prologue or epilogue
3148 at compile-time - we assume it's vf/2 (the worst would be vf-1).
3150 TODO: Build an expression that represents peel_iters for prologue and
3151 epilogue to be used in a run-time test. */
3153 if (npeel < 0)
3155 peel_iters_prologue = vf/2;
3156 dump_printf (MSG_NOTE, "cost model: "
3157 "prologue peel iters set to vf/2.\n");
3159 /* If peeling for alignment is unknown, loop bound of main loop becomes
3160 unknown. */
3161 peel_iters_epilogue = vf/2;
3162 dump_printf (MSG_NOTE, "cost model: "
3163 "epilogue peel iters set to vf/2 because "
3164 "peeling for alignment is unknown.\n");
3166 /* If peeled iterations are unknown, count a taken branch and a not taken
3167 branch per peeled loop. Even if scalar loop iterations are known,
3168 vector iterations are not known since peeled prologue iterations are
3169 not known. Hence guards remain the same. */
3170 (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken,
3171 NULL, 0, vect_prologue);
3172 (void) add_stmt_cost (target_cost_data, 1, cond_branch_not_taken,
3173 NULL, 0, vect_prologue);
3174 (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken,
3175 NULL, 0, vect_epilogue);
3176 (void) add_stmt_cost (target_cost_data, 1, cond_branch_not_taken,
3177 NULL, 0, vect_epilogue);
3178 stmt_info_for_cost *si;
3179 int j;
3180 FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo), j, si)
3182 struct _stmt_vec_info *stmt_info
3183 = si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
3184 (void) add_stmt_cost (target_cost_data,
3185 si->count * peel_iters_prologue,
3186 si->kind, stmt_info, si->misalign,
3187 vect_prologue);
3188 (void) add_stmt_cost (target_cost_data,
3189 si->count * peel_iters_epilogue,
3190 si->kind, stmt_info, si->misalign,
3191 vect_epilogue);
3194 else
3196 stmt_vector_for_cost prologue_cost_vec, epilogue_cost_vec;
3197 stmt_info_for_cost *si;
3198 int j;
3199 void *data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
3201 prologue_cost_vec.create (2);
3202 epilogue_cost_vec.create (2);
3203 peel_iters_prologue = npeel;
3205 (void) vect_get_known_peeling_cost (loop_vinfo, peel_iters_prologue,
3206 &peel_iters_epilogue,
3207 &LOOP_VINFO_SCALAR_ITERATION_COST
3208 (loop_vinfo),
3209 &prologue_cost_vec,
3210 &epilogue_cost_vec);
3212 FOR_EACH_VEC_ELT (prologue_cost_vec, j, si)
3214 struct _stmt_vec_info *stmt_info
3215 = si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
3216 (void) add_stmt_cost (data, si->count, si->kind, stmt_info,
3217 si->misalign, vect_prologue);
3220 FOR_EACH_VEC_ELT (epilogue_cost_vec, j, si)
3222 struct _stmt_vec_info *stmt_info
3223 = si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
3224 (void) add_stmt_cost (data, si->count, si->kind, stmt_info,
3225 si->misalign, vect_epilogue);
3228 prologue_cost_vec.release ();
3229 epilogue_cost_vec.release ();
3232 /* FORNOW: The scalar outside cost is incremented in one of the
3233 following ways:
3235 1. The vectorizer checks for alignment and aliasing and generates
3236 a condition that allows dynamic vectorization. A cost model
3237 check is ANDED with the versioning condition. Hence scalar code
3238 path now has the added cost of the versioning check.
3240 if (cost > th & versioning_check)
3241 jmp to vector code
3243 Hence the run-time scalar cost is incremented by a not-taken branch cost.
3245 2. The vectorizer then checks if a prologue is required. If the
3246 cost model check was not done before during versioning, it has to
3247 be done before the prologue check.
3249 if (cost <= th)
3250 prologue = scalar_iters
3251 if (prologue == 0)
3252 jmp to vector code
3253 else
3254 execute prologue
3255 if (prologue == num_iters)
3256 go to exit
3258 Hence the run-time scalar cost is incremented by a taken branch,
3259 plus a not-taken branch, plus a taken branch cost.
3261 3. The vectorizer then checks if an epilogue is required. If the
3262 cost model check was not done before during prologue check, it
3263 has to be done with the epilogue check.
3265 if (prologue == 0)
3266 jmp to vector code
3267 else
3268 execute prologue
3269 if (prologue == num_iters)
3270 go to exit
3271 vector code:
3272 if ((cost <= th) | (scalar_iters-prologue-epilogue == 0))
3273 jmp to epilogue
3275 Hence the run-time scalar cost should be incremented by 2 taken
3276 branches.
3278 TODO: The back end may reorder the BBs differently and reverse
3279 conditions/branch directions. Change the estimates below to
3280 something more reasonable. */
3282 /* If the number of iterations is known and we do not do versioning, we can
3283 decide whether to vectorize at compile time. Hence the scalar version
3284 does not carry cost model guard costs.
3285 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
3286 || LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
3287 || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
3289 /* Cost model check occurs at versioning. */
3290 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
3291 || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
3292 scalar_outside_cost += vect_get_stmt_cost (cond_branch_not_taken);
3293 else
3295 /* Cost model check occurs at prologue generation. */
3296 if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0)
3297 scalar_outside_cost += 2 * vect_get_stmt_cost (cond_branch_taken)
3298 + vect_get_stmt_cost (cond_branch_not_taken);
3299 /* Cost model check occurs at epilogue generation. */
3300 else
3301 scalar_outside_cost += 2 * vect_get_stmt_cost (cond_branch_taken);
3305 /* Complete the target-specific cost calculations. */
3306 finish_cost (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo), &vec_prologue_cost,
3307 &vec_inside_cost, &vec_epilogue_cost);
3309 vec_outside_cost = (int)(vec_prologue_cost + vec_epilogue_cost);
3311 if (dump_enabled_p ())
3313 dump_printf_loc (MSG_NOTE, vect_location, "Cost model analysis: \n");
3314 dump_printf (MSG_NOTE, " Vector inside of loop cost: %d\n",
3315 vec_inside_cost);
3316 dump_printf (MSG_NOTE, " Vector prologue cost: %d\n",
3317 vec_prologue_cost);
3318 dump_printf (MSG_NOTE, " Vector epilogue cost: %d\n",
3319 vec_epilogue_cost);
3320 dump_printf (MSG_NOTE, " Scalar iteration cost: %d\n",
3321 scalar_single_iter_cost);
3322 dump_printf (MSG_NOTE, " Scalar outside cost: %d\n",
3323 scalar_outside_cost);
3324 dump_printf (MSG_NOTE, " Vector outside cost: %d\n",
3325 vec_outside_cost);
3326 dump_printf (MSG_NOTE, " prologue iterations: %d\n",
3327 peel_iters_prologue);
3328 dump_printf (MSG_NOTE, " epilogue iterations: %d\n",
3329 peel_iters_epilogue);
3332 /* Calculate number of iterations required to make the vector version
3333 profitable, relative to the loop bodies only. The following condition
3334 must hold true:
3335 SIC * niters + SOC > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC
3336 where
3337 SIC = scalar iteration cost, VIC = vector iteration cost,
3338 VOC = vector outside cost, VF = vectorization factor,
3339 PL_ITERS = prologue iterations, EP_ITERS= epilogue iterations
3340 SOC = scalar outside cost for run time cost model check. */
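 /* Worked example with assumed costs: SIC = 1, VIC = 2, VOC = 10, SOC = 0,
    VF = 4 and no peeling (PL_ITERS = EP_ITERS = 0) give
    1 * niters > 2 * (niters / 4) + 10, i.e. niters / 2 > 10, so the vector
    version starts to win after about 20 scalar iterations; the code below
    also adjusts the result of the integer division so that the returned
    count is strictly profitable.  */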
3342 if ((scalar_single_iter_cost * vf) > (int) vec_inside_cost)
3344 if (vec_outside_cost <= 0)
3345 min_profitable_iters = 1;
3346 else
3348 min_profitable_iters = ((vec_outside_cost - scalar_outside_cost) * vf
3349 - vec_inside_cost * peel_iters_prologue
3350 - vec_inside_cost * peel_iters_epilogue)
3351 / ((scalar_single_iter_cost * vf)
3352 - vec_inside_cost);
3354 if ((scalar_single_iter_cost * vf * min_profitable_iters)
3355 <= (((int) vec_inside_cost * min_profitable_iters)
3356 + (((int) vec_outside_cost - scalar_outside_cost) * vf)))
3357 min_profitable_iters++;
3360 /* vector version will never be profitable. */
3361 else
3363 if (LOOP_VINFO_LOOP (loop_vinfo)->force_vectorize)
3364 warning_at (vect_location, OPT_Wopenmp_simd, "vectorization "
3365 "did not happen for a simd loop");
3367 if (dump_enabled_p ())
3368 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3369 "cost model: the vector iteration cost = %d "
3370 "divided by the scalar iteration cost = %d "
3371 "is greater or equal to the vectorization factor = %d"
3372 ".\n",
3373 vec_inside_cost, scalar_single_iter_cost, vf);
3374 *ret_min_profitable_niters = -1;
3375 *ret_min_profitable_estimate = -1;
3376 return;
3379 dump_printf (MSG_NOTE,
3380 " Calculated minimum iters for profitability: %d\n",
3381 min_profitable_iters);
3383 min_profitable_iters =
3384 min_profitable_iters < vf ? vf : min_profitable_iters;
3386 /* Because the condition we create is:
3387 if (niters <= min_profitable_iters)
3388 then skip the vectorized loop. */
3389 min_profitable_iters--;
3391 if (dump_enabled_p ())
3392 dump_printf_loc (MSG_NOTE, vect_location,
3393 " Runtime profitability threshold = %d\n",
3394 min_profitable_iters);
3396 *ret_min_profitable_niters = min_profitable_iters;
3398 /* Calculate number of iterations required to make the vector version
3399 profitable, relative to the loop bodies only.
3401 The non-vectorized variant costs SIC * niters and it must win over the vector
3402 variant on the expected loop trip count. The following condition must hold true:
3403 SIC * niters > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC + SOC */
3405 if (vec_outside_cost <= 0)
3406 min_profitable_estimate = 1;
3407 else
3409 min_profitable_estimate = ((vec_outside_cost + scalar_outside_cost) * vf
3410 - vec_inside_cost * peel_iters_prologue
3411 - vec_inside_cost * peel_iters_epilogue)
3412 / ((scalar_single_iter_cost * vf)
3413 - vec_inside_cost);
3415 min_profitable_estimate --;
3416 min_profitable_estimate = MAX (min_profitable_estimate, min_profitable_iters);
3417 if (dump_enabled_p ())
3418 dump_printf_loc (MSG_NOTE, vect_location,
3419 " Static estimate profitability threshold = %d\n",
3420 min_profitable_estimate);
3422 *ret_min_profitable_estimate = min_profitable_estimate;
3425 /* Writes into SEL a mask for a vec_perm, equivalent to a vec_shr by OFFSET
3426 vector elements (not bits) for a vector of mode MODE. */
3427 static void
3428 calc_vec_perm_mask_for_shift (enum machine_mode mode, unsigned int offset,
3429 unsigned char *sel)
3431 unsigned int i, nelt = GET_MODE_NUNITS (mode);
3433 for (i = 0; i < nelt; i++)
3434 sel[i] = (i + offset) & (2*nelt - 1);
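/* For example, for a 4-element vector mode and OFFSET = 1 this produces
   SEL = {1, 2, 3, 4}: a vec_perm selecting elements 1..3 of the first
   input vector followed by element 0 of the second, i.e. a whole-vector
   shift by one element.  */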
3437 /* Checks whether the target supports whole-vector shifts for vectors of mode
3438 MODE. This is the case if _either_ the platform handles vec_shr_optab, _or_
3439 it supports vec_perm_const with masks for all necessary shift amounts. */
3440 static bool
3441 have_whole_vector_shift (enum machine_mode mode)
3443 if (optab_handler (vec_shr_optab, mode) != CODE_FOR_nothing)
3444 return true;
3446 if (direct_optab_handler (vec_perm_const_optab, mode) == CODE_FOR_nothing)
3447 return false;
3449 unsigned int i, nelt = GET_MODE_NUNITS (mode);
3450 unsigned char *sel = XALLOCAVEC (unsigned char, nelt);
3452 for (i = nelt/2; i >= 1; i/=2)
3454 calc_vec_perm_mask_for_shift (mode, i, sel);
3455 if (!can_vec_perm_p (mode, false, sel))
3456 return false;
3458 return true;
3461 /* Return the reduction operand (with index REDUC_INDEX) of STMT. */
3463 static tree
3464 get_reduction_op (gimple *stmt, int reduc_index)
3466 switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
3468 case GIMPLE_SINGLE_RHS:
3469 gcc_assert (TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt))
3470 == ternary_op);
3471 return TREE_OPERAND (gimple_assign_rhs1 (stmt), reduc_index);
3472 case GIMPLE_UNARY_RHS:
3473 return gimple_assign_rhs1 (stmt);
3474 case GIMPLE_BINARY_RHS:
3475 return (reduc_index
3476 ? gimple_assign_rhs2 (stmt) : gimple_assign_rhs1 (stmt));
3477 case GIMPLE_TERNARY_RHS:
3478 return gimple_op (stmt, reduc_index + 1);
3479 default:
3480 gcc_unreachable ();
3484 /* TODO: There is a close dependency between the vect_model_*_cost and vectorizable_*
3485 functions; design them better to avoid maintenance issues. */
3487 /* Function vect_model_reduction_cost.
3489 Models cost for a reduction operation, including the vector ops
3490 generated within the strip-mine loop, the initial definition before
3491 the loop, and the epilogue code that must be generated. */
3493 static bool
3494 vect_model_reduction_cost (stmt_vec_info stmt_info, enum tree_code reduc_code,
3495 int ncopies, int reduc_index)
3497 int prologue_cost = 0, epilogue_cost = 0;
3498 enum tree_code code;
3499 optab optab;
3500 tree vectype;
3501 gimple *stmt, *orig_stmt;
3502 tree reduction_op;
3503 machine_mode mode;
3504 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3505 struct loop *loop = NULL;
3506 void *target_cost_data;
3508 if (loop_vinfo)
3510 loop = LOOP_VINFO_LOOP (loop_vinfo);
3511 target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
3513 else
3514 target_cost_data = BB_VINFO_TARGET_COST_DATA (STMT_VINFO_BB_VINFO (stmt_info));
3516 /* Condition reductions generate two reductions in the loop. */
3517 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION)
3518 ncopies *= 2;
3520 /* Cost of reduction op inside loop. */
3521 unsigned inside_cost = add_stmt_cost (target_cost_data, ncopies, vector_stmt,
3522 stmt_info, 0, vect_body);
3523 stmt = STMT_VINFO_STMT (stmt_info);
3525 reduction_op = get_reduction_op (stmt, reduc_index);
3527 vectype = get_vectype_for_scalar_type (TREE_TYPE (reduction_op));
3528 if (!vectype)
3530 if (dump_enabled_p ())
3532 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3533 "unsupported data-type ");
3534 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
3535 TREE_TYPE (reduction_op));
3536 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3538 return false;
3541 mode = TYPE_MODE (vectype);
3542 orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
3544 if (!orig_stmt)
3545 orig_stmt = STMT_VINFO_STMT (stmt_info);
3547 code = gimple_assign_rhs_code (orig_stmt);
3549 /* Add in cost for initial definition.
3550 For cond reduction we have four vectors: initial index, step, initial
3551 result of the data reduction, initial value of the index reduction. */
3552 int prologue_stmts = STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
3553 == COND_REDUCTION ? 4 : 1;
3554 prologue_cost += add_stmt_cost (target_cost_data, prologue_stmts,
3555 scalar_to_vec, stmt_info, 0,
3556 vect_prologue);
3558 /* Determine cost of epilogue code.
3560 We have a reduction operator that will reduce the vector in one statement.
3561 Also requires scalar extract. */
3563 if (!loop || !nested_in_vect_loop_p (loop, orig_stmt))
3565 if (reduc_code != ERROR_MARK)
3567 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION)
3569 /* An EQ stmt and a COND_EXPR stmt. */
3570 epilogue_cost += add_stmt_cost (target_cost_data, 2,
3571 vector_stmt, stmt_info, 0,
3572 vect_epilogue);
3573 /* Reduction of the max index and a reduction of the found
3574 values. */
3575 epilogue_cost += add_stmt_cost (target_cost_data, 2,
3576 vec_to_scalar, stmt_info, 0,
3577 vect_epilogue);
3578 /* A broadcast of the max value. */
3579 epilogue_cost += add_stmt_cost (target_cost_data, 1,
3580 scalar_to_vec, stmt_info, 0,
3581 vect_epilogue);
3583 else
3585 epilogue_cost += add_stmt_cost (target_cost_data, 1, vector_stmt,
3586 stmt_info, 0, vect_epilogue);
3587 epilogue_cost += add_stmt_cost (target_cost_data, 1,
3588 vec_to_scalar, stmt_info, 0,
3589 vect_epilogue);
3592 else
3594 int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype));
3595 tree bitsize =
3596 TYPE_SIZE (TREE_TYPE (gimple_assign_lhs (orig_stmt)));
3597 int element_bitsize = tree_to_uhwi (bitsize);
3598 int nelements = vec_size_in_bits / element_bitsize;
3600 optab = optab_for_tree_code (code, vectype, optab_default);
3602 /* We have a whole vector shift available. */
3603 if (VECTOR_MODE_P (mode)
3604 && optab_handler (optab, mode) != CODE_FOR_nothing
3605 && have_whole_vector_shift (mode))
3607 /* Final reduction via vector shifts and the reduction operator.
3608 Also requires scalar extract. */
3609 epilogue_cost += add_stmt_cost (target_cost_data,
3610 exact_log2 (nelements) * 2,
3611 vector_stmt, stmt_info, 0,
3612 vect_epilogue);
3613 epilogue_cost += add_stmt_cost (target_cost_data, 1,
3614 vec_to_scalar, stmt_info, 0,
3615 vect_epilogue);
3617 else
3618 /* Use extracts and reduction op for final reduction. For N
3619 elements, we have N extracts and N-1 reduction ops. */
3620 epilogue_cost += add_stmt_cost (target_cost_data,
3621 nelements + nelements - 1,
3622 vector_stmt, stmt_info, 0,
3623 vect_epilogue);
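/* E.g. for a 4-element vector this branch costs 4 + 3 = 7 statements,
   compared with exact_log2 (4) * 2 + 1 = 5 when the whole-vector shift
   sequence above is available.  */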
3627 if (dump_enabled_p ())
3628 dump_printf (MSG_NOTE,
3629 "vect_model_reduction_cost: inside_cost = %d, "
3630 "prologue_cost = %d, epilogue_cost = %d .\n", inside_cost,
3631 prologue_cost, epilogue_cost);
3633 return true;
3637 /* Function vect_model_induction_cost.
3639 Models cost for induction operations. */
3641 static void
3642 vect_model_induction_cost (stmt_vec_info stmt_info, int ncopies)
3644 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3645 void *target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
3646 unsigned inside_cost, prologue_cost;
3648 /* loop cost for vec_loop. */
3649 inside_cost = add_stmt_cost (target_cost_data, ncopies, vector_stmt,
3650 stmt_info, 0, vect_body);
3652 /* prologue cost for vec_init and vec_step. */
3653 prologue_cost = add_stmt_cost (target_cost_data, 2, scalar_to_vec,
3654 stmt_info, 0, vect_prologue);
3656 if (dump_enabled_p ())
3657 dump_printf_loc (MSG_NOTE, vect_location,
3658 "vect_model_induction_cost: inside_cost = %d, "
3659 "prologue_cost = %d .\n", inside_cost, prologue_cost);
3663 /* Function get_initial_def_for_induction
3665 Input:
3666 STMT - a stmt that performs an induction operation in the loop.
3667 IV_PHI - the initial value of the induction variable
3669 Output:
3670 Return a vector variable, initialized with the first VF values of
3671 the induction variable. E.g., for an iv with IV_PHI='X' and
3672 evolution S, for a vector of 4 units, we want to return:
3673 [X, X + S, X + 2*S, X + 3*S]. */
3675 static tree
3676 get_initial_def_for_induction (gimple *iv_phi)
3678 stmt_vec_info stmt_vinfo = vinfo_for_stmt (iv_phi);
3679 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
3680 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
3681 tree vectype;
3682 int nunits;
3683 edge pe = loop_preheader_edge (loop);
3684 struct loop *iv_loop;
3685 basic_block new_bb;
3686 tree new_vec, vec_init, vec_step, t;
3687 tree new_name;
3688 gimple *new_stmt;
3689 gphi *induction_phi;
3690 tree induc_def, vec_def, vec_dest;
3691 tree init_expr, step_expr;
3692 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
3693 int i;
3694 int ncopies;
3695 tree expr;
3696 stmt_vec_info phi_info = vinfo_for_stmt (iv_phi);
3697 bool nested_in_vect_loop = false;
3698 gimple_seq stmts;
3699 imm_use_iterator imm_iter;
3700 use_operand_p use_p;
3701 gimple *exit_phi;
3702 edge latch_e;
3703 tree loop_arg;
3704 gimple_stmt_iterator si;
3705 basic_block bb = gimple_bb (iv_phi);
3706 tree stepvectype;
3707 tree resvectype;
3709 /* Is phi in an inner-loop, while vectorizing an enclosing outer-loop? */
3710 if (nested_in_vect_loop_p (loop, iv_phi))
3712 nested_in_vect_loop = true;
3713 iv_loop = loop->inner;
3715 else
3716 iv_loop = loop;
3717 gcc_assert (iv_loop == (gimple_bb (iv_phi))->loop_father);
3719 latch_e = loop_latch_edge (iv_loop);
3720 loop_arg = PHI_ARG_DEF_FROM_EDGE (iv_phi, latch_e);
3722 step_expr = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (phi_info);
3723 gcc_assert (step_expr != NULL_TREE);
3725 pe = loop_preheader_edge (iv_loop);
3726 init_expr = PHI_ARG_DEF_FROM_EDGE (iv_phi,
3727 loop_preheader_edge (iv_loop));
3729 vectype = get_vectype_for_scalar_type (TREE_TYPE (init_expr));
3730 resvectype = get_vectype_for_scalar_type (TREE_TYPE (PHI_RESULT (iv_phi)));
3731 gcc_assert (vectype);
3732 nunits = TYPE_VECTOR_SUBPARTS (vectype);
3733 ncopies = vf / nunits;
3735 gcc_assert (phi_info);
3736 gcc_assert (ncopies >= 1);
3738 /* Convert the step to the desired type. */
3739 stmts = NULL;
3740 step_expr = gimple_convert (&stmts, TREE_TYPE (vectype), step_expr);
3741 if (stmts)
3743 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
3744 gcc_assert (!new_bb);
3747 /* Find the first insertion point in the BB. */
3748 si = gsi_after_labels (bb);
3750 /* Create the vector that holds the initial_value of the induction. */
3751 if (nested_in_vect_loop)
3753 /* iv_loop is nested in the loop to be vectorized. init_expr had already
3754 been created during vectorization of previous stmts. We obtain it
3755 from the STMT_VINFO_VEC_STMT of the defining stmt. */
3756 vec_init = vect_get_vec_def_for_operand (init_expr, iv_phi);
3757 /* If the initial value is not of proper type, convert it. */
3758 if (!useless_type_conversion_p (vectype, TREE_TYPE (vec_init)))
3760 new_stmt
3761 = gimple_build_assign (vect_get_new_ssa_name (vectype,
3762 vect_simple_var,
3763 "vec_iv_"),
3764 VIEW_CONVERT_EXPR,
3765 build1 (VIEW_CONVERT_EXPR, vectype,
3766 vec_init));
3767 vec_init = gimple_assign_lhs (new_stmt);
3768 new_bb = gsi_insert_on_edge_immediate (loop_preheader_edge (iv_loop),
3769 new_stmt);
3770 gcc_assert (!new_bb);
3771 set_vinfo_for_stmt (new_stmt,
3772 new_stmt_vec_info (new_stmt, loop_vinfo));
3775 else
3777 vec<constructor_elt, va_gc> *v;
3779 /* iv_loop is the loop to be vectorized. Create:
3780 vec_init = [X, X+S, X+2*S, X+3*S] (S = step_expr, X = init_expr) */
3781 stmts = NULL;
3782 new_name = gimple_convert (&stmts, TREE_TYPE (vectype), init_expr);
3784 vec_alloc (v, nunits);
3785 bool constant_p = is_gimple_min_invariant (new_name);
3786 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, new_name);
3787 for (i = 1; i < nunits; i++)
3789 /* Create: new_name_i = new_name + step_expr */
3790 new_name = gimple_build (&stmts, PLUS_EXPR, TREE_TYPE (new_name),
3791 new_name, step_expr);
3792 if (!is_gimple_min_invariant (new_name))
3793 constant_p = false;
3794 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, new_name);
3796 if (stmts)
3798 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
3799 gcc_assert (!new_bb);
3802 /* Create a vector from [new_name_0, new_name_1, ..., new_name_nunits-1] */
3803 if (constant_p)
3804 new_vec = build_vector_from_ctor (vectype, v);
3805 else
3806 new_vec = build_constructor (vectype, v);
3807 vec_init = vect_init_vector (iv_phi, new_vec, vectype, NULL);
3811 /* Create the vector that holds the step of the induction. */
3812 if (nested_in_vect_loop)
3813 /* iv_loop is nested in the loop to be vectorized. Generate:
3814 vec_step = [S, S, S, S] */
3815 new_name = step_expr;
3816 else
3818 /* iv_loop is the loop to be vectorized. Generate:
3819 vec_step = [VF*S, VF*S, VF*S, VF*S] */
3820 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
3822 expr = build_int_cst (integer_type_node, vf);
3823 expr = fold_convert (TREE_TYPE (step_expr), expr);
3825 else
3826 expr = build_int_cst (TREE_TYPE (step_expr), vf);
3827 new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
3828 expr, step_expr);
3829 if (TREE_CODE (step_expr) == SSA_NAME)
3830 new_name = vect_init_vector (iv_phi, new_name,
3831 TREE_TYPE (step_expr), NULL);
3834 t = unshare_expr (new_name);
3835 gcc_assert (CONSTANT_CLASS_P (new_name)
3836 || TREE_CODE (new_name) == SSA_NAME);
3837 stepvectype = get_vectype_for_scalar_type (TREE_TYPE (new_name));
3838 gcc_assert (stepvectype);
3839 new_vec = build_vector_from_val (stepvectype, t);
3840 vec_step = vect_init_vector (iv_phi, new_vec, stepvectype, NULL);
3843 /* Create the following def-use cycle:
3844 loop prolog:
3845 vec_init = ...
3846 vec_step = ...
3847 loop:
3848 vec_iv = PHI <vec_init, vec_loop>
3850 STMT
3852 vec_loop = vec_iv + vec_step; */
3854 /* Create the induction-phi that defines the induction-operand. */
3855 vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_");
3856 induction_phi = create_phi_node (vec_dest, iv_loop->header);
3857 set_vinfo_for_stmt (induction_phi,
3858 new_stmt_vec_info (induction_phi, loop_vinfo));
3859 induc_def = PHI_RESULT (induction_phi);
3861 /* Create the iv update inside the loop */
3862 new_stmt = gimple_build_assign (vec_dest, PLUS_EXPR, induc_def, vec_step);
3863 vec_def = make_ssa_name (vec_dest, new_stmt);
3864 gimple_assign_set_lhs (new_stmt, vec_def);
3865 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
3866 set_vinfo_for_stmt (new_stmt, new_stmt_vec_info (new_stmt, loop_vinfo));
3868 /* Set the arguments of the phi node: */
3869 add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION);
3870 add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
3871 UNKNOWN_LOCATION);
3874 /* In case the vectorization factor (VF) is bigger than the number
3875 of elements that we can fit in a vectype (nunits), we have to generate
3876 more than one vector stmt - i.e., we need to "unroll" the
3877 vector stmt by a factor of VF/nunits. For more details see the
3878 documentation in vectorizable_operation. */
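      /* Editor's example (not part of the original source): with VF == 8 and
	 nunits == 4 we get ncopies == 2.  The code below produces the second
	 copy by adding a step vector of { 4*S, 4*S, 4*S, 4*S } (nunits * S
	 per lane) to the first vector induction value.  */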
3880 if (ncopies > 1)
3882 stmt_vec_info prev_stmt_vinfo;
3883 /* FORNOW. This restriction should be relaxed. */
3884 gcc_assert (!nested_in_vect_loop);
3886 /* Create the vector that holds the step of the induction. */
3887 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
3889 expr = build_int_cst (integer_type_node, nunits);
3890 expr = fold_convert (TREE_TYPE (step_expr), expr);
3892 else
3893 expr = build_int_cst (TREE_TYPE (step_expr), nunits);
3894 new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
3895 expr, step_expr);
3896 if (TREE_CODE (step_expr) == SSA_NAME)
3897 new_name = vect_init_vector (iv_phi, new_name,
3898 TREE_TYPE (step_expr), NULL);
3899 t = unshare_expr (new_name);
3900 gcc_assert (CONSTANT_CLASS_P (new_name)
3901 || TREE_CODE (new_name) == SSA_NAME);
3902 new_vec = build_vector_from_val (stepvectype, t);
3903 vec_step = vect_init_vector (iv_phi, new_vec, stepvectype, NULL);
3905 vec_def = induc_def;
3906 prev_stmt_vinfo = vinfo_for_stmt (induction_phi);
3907 for (i = 1; i < ncopies; i++)
3909 /* vec_i = vec_prev + vec_step */
3910 new_stmt = gimple_build_assign (vec_dest, PLUS_EXPR,
3911 vec_def, vec_step);
3912 vec_def = make_ssa_name (vec_dest, new_stmt);
3913 gimple_assign_set_lhs (new_stmt, vec_def);
3915 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
3916 if (!useless_type_conversion_p (resvectype, vectype))
3918 new_stmt
3919 = gimple_build_assign
3920 (vect_get_new_vect_var (resvectype, vect_simple_var,
3921 "vec_iv_"),
3922 VIEW_CONVERT_EXPR,
3923 build1 (VIEW_CONVERT_EXPR, resvectype,
3924 gimple_assign_lhs (new_stmt)));
3925 gimple_assign_set_lhs (new_stmt,
3926 make_ssa_name
3927 (gimple_assign_lhs (new_stmt), new_stmt));
3928 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
3930 set_vinfo_for_stmt (new_stmt,
3931 new_stmt_vec_info (new_stmt, loop_vinfo));
3932 STMT_VINFO_RELATED_STMT (prev_stmt_vinfo) = new_stmt;
3933 prev_stmt_vinfo = vinfo_for_stmt (new_stmt);
3937 if (nested_in_vect_loop)
3939 /* Find the loop-closed exit-phi of the induction, and record
3940 the final vector of induction results: */
3941 exit_phi = NULL;
3942 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
3944 gimple *use_stmt = USE_STMT (use_p);
3945 if (is_gimple_debug (use_stmt))
3946 continue;
3948 if (!flow_bb_inside_loop_p (iv_loop, gimple_bb (use_stmt)))
3950 exit_phi = use_stmt;
3951 break;
3954 if (exit_phi)
3956 stmt_vec_info stmt_vinfo = vinfo_for_stmt (exit_phi);
3957 /* FORNOW. Currently not supporting the case that an inner-loop induction
3958 is not used in the outer-loop (i.e. only outside the outer-loop). */
3959 gcc_assert (STMT_VINFO_RELEVANT_P (stmt_vinfo)
3960 && !STMT_VINFO_LIVE_P (stmt_vinfo));
3962 STMT_VINFO_VEC_STMT (stmt_vinfo) = new_stmt;
3963 if (dump_enabled_p ())
3965 dump_printf_loc (MSG_NOTE, vect_location,
3966 "vector of inductions after inner-loop:");
3967 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
3973 if (dump_enabled_p ())
3975 dump_printf_loc (MSG_NOTE, vect_location,
3976 "transform induction: created def-use cycle: ");
3977 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, induction_phi, 0);
3978 dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
3979 SSA_NAME_DEF_STMT (vec_def), 0);
3982 STMT_VINFO_VEC_STMT (phi_info) = induction_phi;
3983 if (!useless_type_conversion_p (resvectype, vectype))
3985 new_stmt = gimple_build_assign (vect_get_new_vect_var (resvectype,
3986 vect_simple_var,
3987 "vec_iv_"),
3988 VIEW_CONVERT_EXPR,
3989 build1 (VIEW_CONVERT_EXPR, resvectype,
3990 induc_def));
3991 induc_def = make_ssa_name (gimple_assign_lhs (new_stmt), new_stmt);
3992 gimple_assign_set_lhs (new_stmt, induc_def);
3993 si = gsi_after_labels (bb);
3994 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
3995 set_vinfo_for_stmt (new_stmt,
3996 new_stmt_vec_info (new_stmt, loop_vinfo));
3997 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (new_stmt))
3998 = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (induction_phi));
4001 return induc_def;
4005 /* Function get_initial_def_for_reduction
4007 Input:
4008 STMT - a stmt that performs a reduction operation in the loop.
4009 INIT_VAL - the initial value of the reduction variable
4011 Output:
4012 ADJUSTMENT_DEF - a tree that holds a value to be added to the final result
4013 of the reduction (used for adjusting the epilog - see below).
4014 Return a vector variable, initialized according to the operation that STMT
4015 performs. This vector will be used as the initial value of the
4016 vector of partial results.
4018 Option1 (adjust in epilog): Initialize the vector as follows:
4019 add/bit or/xor: [0,0,...,0,0]
4020 mult/bit and: [1,1,...,1,1]
4021 min/max/cond_expr: [init_val,init_val,..,init_val,init_val]
4022 and when necessary (e.g. add/mult case) let the caller know
4023 that it needs to adjust the result by init_val.
4025 Option2: Initialize the vector as follows:
4026 add/bit or/xor: [init_val,0,0,...,0]
4027 mult/bit and: [init_val,1,1,...,1]
4028 min/max/cond_expr: [init_val,init_val,...,init_val]
4029 and no adjustments are needed.
4031 For example, for the following code:
4033 s = init_val;
4034 for (i=0;i<n;i++)
4035 s = s + a[i];
4037 STMT is 's = s + a[i]', and the reduction variable is 's'.
4038 For a vector of 4 units, we want to return either [0,0,0,init_val],
4039 or [0,0,0,0] and let the caller know that it needs to adjust
4040 the result at the end by 'init_val'.
4042 FORNOW, we are using the 'adjust in epilog' scheme (Option1) when
4043 ADJUSTMENT_DEF is not NULL, because its initialization vector is
4044 simpler (the same element in all entries), and Option2 otherwise.
4046 A cost model should help decide between these two schemes. */
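   /* Editor's example (not part of the original source): for the sum
      reduction above with init_val == 5 and a 4-element vector,
      Option1 returns { 0, 0, 0, 0 } and reports ADJUSTMENT_DEF == 5, so
      the epilog adds 5 back to the reduced result, while Option2 returns
      { 5, 0, 0, 0 } and needs no adjustment.  For a MULT_EXPR reduction
      the neutral element is 1, so Option1 would use { 1, 1, 1, 1 }.  */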
4048 tree
4049 get_initial_def_for_reduction (gimple *stmt, tree init_val,
4050 tree *adjustment_def)
4052 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
4053 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
4054 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
4055 tree scalar_type = TREE_TYPE (init_val);
4056 tree vectype = get_vectype_for_scalar_type (scalar_type);
4057 int nunits;
4058 enum tree_code code = gimple_assign_rhs_code (stmt);
4059 tree def_for_init;
4060 tree init_def;
4061 tree *elts;
4062 int i;
4063 bool nested_in_vect_loop = false;
4064 REAL_VALUE_TYPE real_init_val = dconst0;
4065 int int_init_val = 0;
4066 gimple *def_stmt = NULL;
4067 gimple_seq stmts = NULL;
4069 gcc_assert (vectype);
4070 nunits = TYPE_VECTOR_SUBPARTS (vectype);
4072 gcc_assert (POINTER_TYPE_P (scalar_type) || INTEGRAL_TYPE_P (scalar_type)
4073 || SCALAR_FLOAT_TYPE_P (scalar_type));
4075 if (nested_in_vect_loop_p (loop, stmt))
4076 nested_in_vect_loop = true;
4077 else
4078 gcc_assert (loop == (gimple_bb (stmt))->loop_father);
4080 /* In case of double reduction we only create a vector variable to be put
4081 in the reduction phi node. The actual statement creation is done in
4082 vect_create_epilog_for_reduction. */
4083 if (adjustment_def && nested_in_vect_loop
4084 && TREE_CODE (init_val) == SSA_NAME
4085 && (def_stmt = SSA_NAME_DEF_STMT (init_val))
4086 && gimple_code (def_stmt) == GIMPLE_PHI
4087 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
4088 && vinfo_for_stmt (def_stmt)
4089 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
4090 == vect_double_reduction_def)
4092 *adjustment_def = NULL;
4093 return vect_create_destination_var (init_val, vectype);
4096 /* In case of a nested reduction do not use an adjustment def, as
4097 that case is not handled correctly by the epilogue generation
4098 if ncopies is not one. */
4099 if (adjustment_def && nested_in_vect_loop)
4101 *adjustment_def = NULL;
4102 return vect_get_vec_def_for_operand (init_val, stmt);
4105 switch (code)
4107 case WIDEN_SUM_EXPR:
4108 case DOT_PROD_EXPR:
4109 case SAD_EXPR:
4110 case PLUS_EXPR:
4111 case MINUS_EXPR:
4112 case BIT_IOR_EXPR:
4113 case BIT_XOR_EXPR:
4114 case MULT_EXPR:
4115 case BIT_AND_EXPR:
4116 /* ADJUSTMENT_DEF is NULL when called from
4117 vect_create_epilog_for_reduction to vectorize a double reduction. */
4118 if (adjustment_def)
4119 *adjustment_def = init_val;
4121 if (code == MULT_EXPR)
4123 real_init_val = dconst1;
4124 int_init_val = 1;
4127 if (code == BIT_AND_EXPR)
4128 int_init_val = -1;
4130 if (SCALAR_FLOAT_TYPE_P (scalar_type))
4131 def_for_init = build_real (scalar_type, real_init_val);
4132 else
4133 def_for_init = build_int_cst (scalar_type, int_init_val);
4135 /* Fill all but the first element with '0' or '1'; the first is set below. */
4136 elts = XALLOCAVEC (tree, nunits);
4137 for (i = nunits - 2; i >= 0; --i)
4138 elts[i + 1] = def_for_init;
4140 /* Option1: the first element is '0' or '1' as well. */
4141 if (adjustment_def)
4143 elts[0] = def_for_init;
4144 init_def = build_vector (vectype, elts);
4145 break;
4148 /* Option2: the first element is INIT_VAL. */
4149 elts[0] = init_val;
4150 if (TREE_CONSTANT (init_val))
4151 init_def = build_vector (vectype, elts);
4152 else
4154 vec<constructor_elt, va_gc> *v;
4155 vec_alloc (v, nunits);
4156 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, init_val);
4157 for (i = 1; i < nunits; ++i)
4158 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, elts[i]);
4159 init_def = build_constructor (vectype, v);
4162 break;
4164 case MIN_EXPR:
4165 case MAX_EXPR:
4166 case COND_EXPR:
4167 if (adjustment_def)
4169 *adjustment_def = NULL_TREE;
4170 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_vinfo) != COND_REDUCTION)
4172 init_def = vect_get_vec_def_for_operand (init_val, stmt);
4173 break;
4176 init_val = gimple_convert (&stmts, TREE_TYPE (vectype), init_val);
4177 if (! gimple_seq_empty_p (stmts))
4178 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
4179 init_def = build_vector_from_val (vectype, init_val);
4180 break;
4182 default:
4183 gcc_unreachable ();
4186 return init_def;
4189 /* Function vect_create_epilog_for_reduction
4191 Create code at the loop-epilog to finalize the result of a reduction
4192 computation.
4194 VECT_DEFS is list of vector of partial results, i.e., the lhs's of vector
4195 reduction statements.
4196 STMT is the scalar reduction stmt that is being vectorized.
4197 NCOPIES is > 1 in case the vectorization factor (VF) is bigger than the
4198 number of elements that we can fit in a vectype (nunits). In this case
4199 we have to generate more than one vector stmt - i.e - we need to "unroll"
4200 the vector stmt by a factor VF/nunits. For more details see documentation
4201 in vectorizable_operation.
4202 REDUC_CODE is the tree-code for the epilog reduction.
4203 REDUCTION_PHIS is a list of the phi-nodes that carry the reduction
4204 computation.
4205 REDUC_INDEX is the index of the operand in the right hand side of the
4206 statement that is defined by REDUCTION_PHI.
4207 DOUBLE_REDUC is TRUE if double reduction phi nodes should be handled.
4208 SLP_NODE is an SLP node containing a group of reduction statements. The
4209 first one in this group is STMT.
4210 INDUCTION_INDEX is the index of the loop for condition reductions.
4211 Otherwise it is undefined.
4213 This function:
4214 1. Creates the reduction def-use cycles: sets the arguments for
4215 REDUCTION_PHIS:
4216 The loop-entry argument is the vectorized initial-value of the reduction.
4217 The loop-latch argument is taken from VECT_DEFS - the vector of partial
4218 sums.
4219 2. "Reduces" each vector of partial results VECT_DEFS into a single result,
4220 by applying the operation specified by REDUC_CODE if available, or by
4221 other means (whole-vector shifts or a scalar loop).
4222 The function also creates a new phi node at the loop exit to preserve
4223 loop-closed form, as illustrated below.
4225 The flow at the entry to this function:
4227 loop:
4228 vec_def = phi <null, null> # REDUCTION_PHI
4229 VECT_DEF = vector_stmt # vectorized form of STMT
4230 s_loop = scalar_stmt # (scalar) STMT
4231 loop_exit:
4232 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
4233 use <s_out0>
4234 use <s_out0>
4236 The above is transformed by this function into:
4238 loop:
4239 vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI
4240 VECT_DEF = vector_stmt # vectorized form of STMT
4241 s_loop = scalar_stmt # (scalar) STMT
4242 loop_exit:
4243 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
4244 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
4245 v_out2 = reduce <v_out1>
4246 s_out3 = extract_field <v_out2, 0>
4247 s_out4 = adjust_result <s_out3>
4248 use <s_out4>
4249 use <s_out4>
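   /* Editor's example (not part of the original source): for a simple sum
      "s += a[i]" vectorized with V4SI, VECT_DEF carries four partial sums.
      After the loop, v_out1 = PHI <VECT_DEF> preserves loop-closed SSA,
      v_out2 reduces the four lanes to one value, s_out3 extracts lane 0,
      and s_out4 adds back the scalar initial value when the "adjust in
      epilog" scheme of get_initial_def_for_reduction was used.  */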
4252 static void
4253 vect_create_epilog_for_reduction (vec<tree> vect_defs, gimple *stmt,
4254 int ncopies, enum tree_code reduc_code,
4255 vec<gimple *> reduction_phis,
4256 int reduc_index, bool double_reduc,
4257 slp_tree slp_node, tree induction_index)
4259 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4260 stmt_vec_info prev_phi_info;
4261 tree vectype;
4262 machine_mode mode;
4263 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4264 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo), *outer_loop = NULL;
4265 basic_block exit_bb;
4266 tree scalar_dest;
4267 tree scalar_type;
4268 gimple *new_phi = NULL, *phi;
4269 gimple_stmt_iterator exit_gsi;
4270 tree vec_dest;
4271 tree new_temp = NULL_TREE, new_dest, new_name, new_scalar_dest;
4272 gimple *epilog_stmt = NULL;
4273 enum tree_code code = gimple_assign_rhs_code (stmt);
4274 gimple *exit_phi;
4275 tree bitsize;
4276 tree adjustment_def = NULL;
4277 tree vec_initial_def = NULL;
4278 tree reduction_op, expr, def, initial_def = NULL;
4279 tree orig_name, scalar_result;
4280 imm_use_iterator imm_iter, phi_imm_iter;
4281 use_operand_p use_p, phi_use_p;
4282 gimple *use_stmt, *orig_stmt, *reduction_phi = NULL;
4283 bool nested_in_vect_loop = false;
4284 auto_vec<gimple *> new_phis;
4285 auto_vec<gimple *> inner_phis;
4286 enum vect_def_type dt = vect_unknown_def_type;
4287 int j, i;
4288 auto_vec<tree> scalar_results;
4289 unsigned int group_size = 1, k, ratio;
4290 auto_vec<tree> vec_initial_defs;
4291 auto_vec<gimple *> phis;
4292 bool slp_reduc = false;
4293 tree new_phi_result;
4294 gimple *inner_phi = NULL;
4296 if (slp_node)
4297 group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();
4299 if (nested_in_vect_loop_p (loop, stmt))
4301 outer_loop = loop;
4302 loop = loop->inner;
4303 nested_in_vect_loop = true;
4304 gcc_assert (!slp_node);
4307 reduction_op = get_reduction_op (stmt, reduc_index);
4309 vectype = get_vectype_for_scalar_type (TREE_TYPE (reduction_op));
4310 gcc_assert (vectype);
4311 mode = TYPE_MODE (vectype);
4313 /* 1. Create the reduction def-use cycle:
4314 Set the arguments of REDUCTION_PHIS, i.e., transform
4316 loop:
4317 vec_def = phi <null, null> # REDUCTION_PHI
4318 VECT_DEF = vector_stmt # vectorized form of STMT
4321 into:
4323 loop:
4324 vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI
4325 VECT_DEF = vector_stmt # vectorized form of STMT
4328 (in case of SLP, do it for all the phis). */
4330 /* Get the loop-entry arguments. */
4331 enum vect_def_type initial_def_dt = vect_unknown_def_type;
4332 if (slp_node)
4333 vect_get_vec_defs (reduction_op, NULL_TREE, stmt, &vec_initial_defs,
4334 NULL, slp_node, reduc_index);
4335 else
4337 /* Get at the scalar def before the loop, that defines the initial value
4338 of the reduction variable. */
4339 gimple *def_stmt = SSA_NAME_DEF_STMT (reduction_op);
4340 initial_def = PHI_ARG_DEF_FROM_EDGE (def_stmt,
4341 loop_preheader_edge (loop));
4342 vect_is_simple_use (initial_def, loop_vinfo, &def_stmt, &initial_def_dt);
4343 vec_initial_def = get_initial_def_for_reduction (stmt, initial_def,
4344 &adjustment_def);
4345 vec_initial_defs.create (1);
4346 vec_initial_defs.quick_push (vec_initial_def);
4349 /* Set phi nodes arguments. */
4350 FOR_EACH_VEC_ELT (reduction_phis, i, phi)
4352 tree vec_init_def, def;
4353 gimple_seq stmts;
4354 vec_init_def = force_gimple_operand (vec_initial_defs[i], &stmts,
4355 true, NULL_TREE);
4356 if (stmts)
4357 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
4359 def = vect_defs[i];
4360 for (j = 0; j < ncopies; j++)
4362 if (j != 0)
4364 phi = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi));
4365 if (nested_in_vect_loop)
4366 vec_init_def
4367 = vect_get_vec_def_for_stmt_copy (initial_def_dt,
4368 vec_init_def);
4371 /* Set the loop-entry arg of the reduction-phi. */
4373 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
4374 == INTEGER_INDUC_COND_REDUCTION)
4376 /* Initialise the reduction phi to zero. This prevents non-zero
4377 initial values from interfering with the reduction op. */
4378 gcc_assert (ncopies == 1);
4379 gcc_assert (i == 0);
4381 tree vec_init_def_type = TREE_TYPE (vec_init_def);
4382 tree zero_vec = build_zero_cst (vec_init_def_type);
4384 add_phi_arg (as_a <gphi *> (phi), zero_vec,
4385 loop_preheader_edge (loop), UNKNOWN_LOCATION);
4387 else
4388 add_phi_arg (as_a <gphi *> (phi), vec_init_def,
4389 loop_preheader_edge (loop), UNKNOWN_LOCATION);
4391 /* Set the loop-latch arg for the reduction-phi. */
4392 if (j > 0)
4393 def = vect_get_vec_def_for_stmt_copy (vect_unknown_def_type, def);
4395 add_phi_arg (as_a <gphi *> (phi), def, loop_latch_edge (loop),
4396 UNKNOWN_LOCATION);
4398 if (dump_enabled_p ())
4400 dump_printf_loc (MSG_NOTE, vect_location,
4401 "transform reduction: created def-use cycle: ");
4402 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
4403 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, SSA_NAME_DEF_STMT (def), 0);
4408 /* 2. Create epilog code.
4409 The reduction epilog code operates across the elements of the vector
4410 of partial results computed by the vectorized loop.
4411 The reduction epilog code consists of:
4413 step 1: compute the scalar result in a vector (v_out2)
4414 step 2: extract the scalar result (s_out3) from the vector (v_out2)
4415 step 3: adjust the scalar result (s_out3) if needed.
4417 Step 1 can be accomplished using one of the following three schemes:
4418 (scheme 1) using reduc_code, if available.
4419 (scheme 2) using whole-vector shifts, if available.
4420 (scheme 3) using a scalar loop. In this case steps 1+2 above are
4421 combined.
4423 The overall epilog code looks like this:
4425 s_out0 = phi <s_loop> # original EXIT_PHI
4426 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
4427 v_out2 = reduce <v_out1> # step 1
4428 s_out3 = extract_field <v_out2, 0> # step 2
4429 s_out4 = adjust_result <s_out3> # step 3
4431 (step 3 is optional, and steps 1 and 2 may be combined).
4432 Lastly, the uses of s_out0 are replaced by s_out4. */
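   /* Editor's sketch of scheme 2 (not part of the original source): for a
      4-lane add reduction the whole-vector-shift epilogue is conceptually

	v = v_out1;
	v' = vec_shift <v, 2>;   v = v + v';   // lane 0: a0+a2
	v' = vec_shift <v, 1>;   v = v + v';   // lane 0: a0+a1+a2+a3
	s_out3 = extract_field <v, 0>;

      i.e. log2 (nelements) shift/op pairs followed by one extract, matching
      the cost charged in vect_model_reduction_cost.  */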
4435 /* 2.1 Create new loop-exit-phis to preserve loop-closed form:
4436 v_out1 = phi <VECT_DEF>
4437 Store them in NEW_PHIS. */
4439 exit_bb = single_exit (loop)->dest;
4440 prev_phi_info = NULL;
4441 new_phis.create (vect_defs.length ());
4442 FOR_EACH_VEC_ELT (vect_defs, i, def)
4444 for (j = 0; j < ncopies; j++)
4446 tree new_def = copy_ssa_name (def);
4447 phi = create_phi_node (new_def, exit_bb);
4448 set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, loop_vinfo));
4449 if (j == 0)
4450 new_phis.quick_push (phi);
4451 else
4453 def = vect_get_vec_def_for_stmt_copy (dt, def);
4454 STMT_VINFO_RELATED_STMT (prev_phi_info) = phi;
4457 SET_PHI_ARG_DEF (phi, single_exit (loop)->dest_idx, def);
4458 prev_phi_info = vinfo_for_stmt (phi);
4462 /* The epilogue is created for the outer-loop, i.e., for the loop being
4463 vectorized. Create exit phis for the outer loop. */
4464 if (double_reduc)
4466 loop = outer_loop;
4467 exit_bb = single_exit (loop)->dest;
4468 inner_phis.create (vect_defs.length ());
4469 FOR_EACH_VEC_ELT (new_phis, i, phi)
4471 tree new_result = copy_ssa_name (PHI_RESULT (phi));
4472 gphi *outer_phi = create_phi_node (new_result, exit_bb);
4473 SET_PHI_ARG_DEF (outer_phi, single_exit (loop)->dest_idx,
4474 PHI_RESULT (phi));
4475 set_vinfo_for_stmt (outer_phi, new_stmt_vec_info (outer_phi,
4476 loop_vinfo));
4477 inner_phis.quick_push (phi);
4478 new_phis[i] = outer_phi;
4479 prev_phi_info = vinfo_for_stmt (outer_phi);
4480 while (STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi)))
4482 phi = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi));
4483 new_result = copy_ssa_name (PHI_RESULT (phi));
4484 outer_phi = create_phi_node (new_result, exit_bb);
4485 SET_PHI_ARG_DEF (outer_phi, single_exit (loop)->dest_idx,
4486 PHI_RESULT (phi));
4487 set_vinfo_for_stmt (outer_phi, new_stmt_vec_info (outer_phi,
4488 loop_vinfo));
4489 STMT_VINFO_RELATED_STMT (prev_phi_info) = outer_phi;
4490 prev_phi_info = vinfo_for_stmt (outer_phi);
4495 exit_gsi = gsi_after_labels (exit_bb);
4497 /* 2.2 Get the relevant tree-code to use in the epilog for schemes 2,3
4498 (i.e. when reduc_code is not available) and in the final adjustment
4499 code (if needed). Also get the original scalar reduction variable as
4500 defined in the loop. In case STMT is a "pattern-stmt" (i.e. - it
4501 represents a reduction pattern), the tree-code and scalar-def are
4502 taken from the original stmt that the pattern-stmt (STMT) replaces.
4503 Otherwise (it is a regular reduction) - the tree-code and scalar-def
4504 are taken from STMT. */
4506 orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
4507 if (!orig_stmt)
4509 /* Regular reduction */
4510 orig_stmt = stmt;
4512 else
4514 /* Reduction pattern */
4515 stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt);
4516 gcc_assert (STMT_VINFO_IN_PATTERN_P (stmt_vinfo));
4517 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo) == stmt);
4520 code = gimple_assign_rhs_code (orig_stmt);
4521 /* For MINUS_EXPR the initial vector is [init_val,0,...,0], therefore,
4522 partial results are added and not subtracted. */
4523 if (code == MINUS_EXPR)
4524 code = PLUS_EXPR;
4526 scalar_dest = gimple_assign_lhs (orig_stmt);
4527 scalar_type = TREE_TYPE (scalar_dest);
4528 scalar_results.create (group_size);
4529 new_scalar_dest = vect_create_destination_var (scalar_dest, NULL);
4530 bitsize = TYPE_SIZE (scalar_type);
4532 /* In case this is a reduction in an inner-loop while vectorizing an outer
4533 loop - we don't need to extract a single scalar result at the end of the
4534 inner-loop (unless it is double reduction, i.e., the use of reduction is
4535 outside the outer-loop). The final vector of partial results will be used
4536 in the vectorized outer-loop, or reduced to a scalar result at the end of
4537 the outer-loop. */
4538 if (nested_in_vect_loop && !double_reduc)
4539 goto vect_finalize_reduction;
4541 /* SLP reduction without reduction chain, e.g.,
4542 # a1 = phi <a2, a0>
4543 # b1 = phi <b2, b0>
4544 a2 = operation (a1)
4545 b2 = operation (b1) */
4546 slp_reduc = (slp_node && !GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)));
4548 /* In case of reduction chain, e.g.,
4549 # a1 = phi <a3, a0>
4550 a2 = operation (a1)
4551 a3 = operation (a2),
4553 we may end up with more than one vector result. Here we reduce them to
4554 one vector. */
4555 if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
4557 tree first_vect = PHI_RESULT (new_phis[0]);
4558 tree tmp;
4559 gassign *new_vec_stmt = NULL;
4561 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4562 for (k = 1; k < new_phis.length (); k++)
4564 gimple *next_phi = new_phis[k];
4565 tree second_vect = PHI_RESULT (next_phi);
4567 tmp = build2 (code, vectype, first_vect, second_vect);
4568 new_vec_stmt = gimple_build_assign (vec_dest, tmp);
4569 first_vect = make_ssa_name (vec_dest, new_vec_stmt);
4570 gimple_assign_set_lhs (new_vec_stmt, first_vect);
4571 gsi_insert_before (&exit_gsi, new_vec_stmt, GSI_SAME_STMT);
4574 new_phi_result = first_vect;
4575 if (new_vec_stmt)
4577 new_phis.truncate (0);
4578 new_phis.safe_push (new_vec_stmt);
4581 else
4582 new_phi_result = PHI_RESULT (new_phis[0]);
4584 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION)
4586 /* For condition reductions, we have a vector (NEW_PHI_RESULT) containing
4587 various data values where the condition matched and another vector
4588 (INDUCTION_INDEX) containing all the indexes of those matches. We
4589 need to extract the last matching index (which will be the index with
4590 highest value) and use this to index into the data vector.
4591 For the case where there were no matches, the data vector will contain
4592 all default values and the index vector will be all zeros. */
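      /* Editor's example (not part of the original source): for
	 "if (a[i] < value) last = a[i];" with 4 lanes, suppose
	   NEW_PHI_RESULT  = { d0, d1, d2, d3 }
	   INDUCTION_INDEX = { 0, 3, 0, 1 }  (0 marks lanes that never matched).
	 The code below computes max_index == 3, compares it against
	 INDUCTION_INDEX giving the mask { 0, -1, 0, 0 }, selects
	 { 0, d1, 0, 0 } with VEC_COND_EXPR, and finally reduces that to d1
	 using a MAX reduction on an unsigned view of the data.  */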
4594 /* Get various versions of the type of the vector of indexes. */
4595 tree index_vec_type = TREE_TYPE (induction_index);
4596 gcc_checking_assert (TYPE_UNSIGNED (index_vec_type));
4597 tree index_scalar_type = TREE_TYPE (index_vec_type);
4598 tree index_vec_cmp_type = build_same_sized_truth_vector_type
4599 (index_vec_type);
4601 /* Get an unsigned integer version of the type of the data vector. */
4602 int scalar_precision = GET_MODE_PRECISION (TYPE_MODE (scalar_type));
4603 tree scalar_type_unsigned = make_unsigned_type (scalar_precision);
4604 tree vectype_unsigned = build_vector_type
4605 (scalar_type_unsigned, TYPE_VECTOR_SUBPARTS (vectype));
4607 /* First we need to create a vector (ZERO_VEC) of zeros and another
4608 vector (MAX_INDEX_VEC) filled with the last matching index, which we
4609 can create using a MAX reduction and then expanding.
4610 In the case where the loop never made any matches, the max index will
4611 be zero. */
4613 /* Vector of {0, 0, 0,...}. */
4614 tree zero_vec = make_ssa_name (vectype);
4615 tree zero_vec_rhs = build_zero_cst (vectype);
4616 gimple *zero_vec_stmt = gimple_build_assign (zero_vec, zero_vec_rhs);
4617 gsi_insert_before (&exit_gsi, zero_vec_stmt, GSI_SAME_STMT);
4619 /* Find maximum value from the vector of found indexes. */
4620 tree max_index = make_ssa_name (index_scalar_type);
4621 gimple *max_index_stmt = gimple_build_assign (max_index, REDUC_MAX_EXPR,
4622 induction_index);
4623 gsi_insert_before (&exit_gsi, max_index_stmt, GSI_SAME_STMT);
4625 /* Vector of {max_index, max_index, max_index,...}. */
4626 tree max_index_vec = make_ssa_name (index_vec_type);
4627 tree max_index_vec_rhs = build_vector_from_val (index_vec_type,
4628 max_index);
4629 gimple *max_index_vec_stmt = gimple_build_assign (max_index_vec,
4630 max_index_vec_rhs);
4631 gsi_insert_before (&exit_gsi, max_index_vec_stmt, GSI_SAME_STMT);
4633 /* Next we compare the new vector (MAX_INDEX_VEC) full of max indexes
4634 with the vector (INDUCTION_INDEX) of found indexes, choosing values
4635 from the data vector (NEW_PHI_RESULT) for matches, 0 (ZERO_VEC)
4636 otherwise. Only one value should match, resulting in a vector
4637 (VEC_COND) with one data value and the rest zeros.
4638 In the case where the loop never made any matches, every index will
4639 match, resulting in a vector with all data values (which will all be
4640 the default value). */
4642 /* Compare the max index vector to the vector of found indexes to find
4643 the position of the max value. */
4644 tree vec_compare = make_ssa_name (index_vec_cmp_type);
4645 gimple *vec_compare_stmt = gimple_build_assign (vec_compare, EQ_EXPR,
4646 induction_index,
4647 max_index_vec);
4648 gsi_insert_before (&exit_gsi, vec_compare_stmt, GSI_SAME_STMT);
4650 /* Use the compare to choose either values from the data vector or
4651 zero. */
4652 tree vec_cond = make_ssa_name (vectype);
4653 gimple *vec_cond_stmt = gimple_build_assign (vec_cond, VEC_COND_EXPR,
4654 vec_compare, new_phi_result,
4655 zero_vec);
4656 gsi_insert_before (&exit_gsi, vec_cond_stmt, GSI_SAME_STMT);
4658 /* Finally we need to extract the data value from the vector (VEC_COND)
4659 into a scalar (MATCHED_DATA_REDUC). Logically we want to do an OR
4660 reduction, but because this doesn't exist, we can use a MAX reduction
4661 instead. The data value might be signed or a float, so we need to cast
4662 it first.
4663 In the case where the loop never made any matches, the data values are
4664 all identical, and so will reduce down correctly. */
4666 /* Make the matched data values unsigned. */
4667 tree vec_cond_cast = make_ssa_name (vectype_unsigned);
4668 tree vec_cond_cast_rhs = build1 (VIEW_CONVERT_EXPR, vectype_unsigned,
4669 vec_cond);
4670 gimple *vec_cond_cast_stmt = gimple_build_assign (vec_cond_cast,
4671 VIEW_CONVERT_EXPR,
4672 vec_cond_cast_rhs);
4673 gsi_insert_before (&exit_gsi, vec_cond_cast_stmt, GSI_SAME_STMT);
4675 /* Reduce down to a scalar value. */
4676 tree data_reduc = make_ssa_name (scalar_type_unsigned);
4677 optab ot = optab_for_tree_code (REDUC_MAX_EXPR, vectype_unsigned,
4678 optab_default);
4679 gcc_assert (optab_handler (ot, TYPE_MODE (vectype_unsigned))
4680 != CODE_FOR_nothing);
4681 gimple *data_reduc_stmt = gimple_build_assign (data_reduc,
4682 REDUC_MAX_EXPR,
4683 vec_cond_cast);
4684 gsi_insert_before (&exit_gsi, data_reduc_stmt, GSI_SAME_STMT);
4686 /* Convert the reduced value back to the result type and set as the
4687 result. */
4688 tree data_reduc_cast = build1 (VIEW_CONVERT_EXPR, scalar_type,
4689 data_reduc);
4690 epilog_stmt = gimple_build_assign (new_scalar_dest, data_reduc_cast);
4691 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
4692 gimple_assign_set_lhs (epilog_stmt, new_temp);
4693 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4694 scalar_results.safe_push (new_temp);
4697 /* 2.3 Create the reduction code, using one of the three schemes described
4698 above. In SLP we simply need to extract all the elements from the
4699 vector (without reducing them), so we use scalar shifts. */
4700 else if (reduc_code != ERROR_MARK && !slp_reduc)
4702 tree tmp;
4703 tree vec_elem_type;
4705 /*** Case 1: Create:
4706 v_out2 = reduc_expr <v_out1> */
4708 if (dump_enabled_p ())
4709 dump_printf_loc (MSG_NOTE, vect_location,
4710 "Reduce using direct vector reduction.\n");
4712 vec_elem_type = TREE_TYPE (TREE_TYPE (new_phi_result));
4713 if (!useless_type_conversion_p (scalar_type, vec_elem_type))
4715 tree tmp_dest =
4716 vect_create_destination_var (scalar_dest, vec_elem_type);
4717 tmp = build1 (reduc_code, vec_elem_type, new_phi_result);
4718 epilog_stmt = gimple_build_assign (tmp_dest, tmp);
4719 new_temp = make_ssa_name (tmp_dest, epilog_stmt);
4720 gimple_assign_set_lhs (epilog_stmt, new_temp);
4721 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4723 tmp = build1 (NOP_EXPR, scalar_type, new_temp);
4725 else
4726 tmp = build1 (reduc_code, scalar_type, new_phi_result);
4728 epilog_stmt = gimple_build_assign (new_scalar_dest, tmp);
4729 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
4730 gimple_assign_set_lhs (epilog_stmt, new_temp);
4731 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4733 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
4734 == INTEGER_INDUC_COND_REDUCTION)
4736 /* Earlier we set the initial value to zero. Check the result,
4737 and if it is zero then replace it with the original initial
4738 value. */
4739 tree zero = build_zero_cst (scalar_type);
4740 tree zcompare = build2 (EQ_EXPR, boolean_type_node, new_temp, zero);
4742 tmp = make_ssa_name (new_scalar_dest);
4743 epilog_stmt = gimple_build_assign (tmp, COND_EXPR, zcompare,
4744 initial_def, new_temp);
4745 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4746 new_temp = tmp;
4749 scalar_results.safe_push (new_temp);
4751 else
4753 bool reduce_with_shift = have_whole_vector_shift (mode);
4754 int element_bitsize = tree_to_uhwi (bitsize);
4755 int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype));
4756 tree vec_temp;
4758 /* Regardless of whether we have a whole vector shift, if we're
4759 emulating the operation via tree-vect-generic, we don't want
4760 to use it. Only the first round of the reduction is likely
4761 to still be profitable via emulation. */
4762 /* ??? It might be better to emit a reduction tree code here, so that
4763 tree-vect-generic can expand the first round via bit tricks. */
4764 if (!VECTOR_MODE_P (mode))
4765 reduce_with_shift = false;
4766 else
4768 optab optab = optab_for_tree_code (code, vectype, optab_default);
4769 if (optab_handler (optab, mode) == CODE_FOR_nothing)
4770 reduce_with_shift = false;
4773 if (reduce_with_shift && !slp_reduc)
4775 int nelements = vec_size_in_bits / element_bitsize;
4776 unsigned char *sel = XALLOCAVEC (unsigned char, nelements);
4778 int elt_offset;
4780 tree zero_vec = build_zero_cst (vectype);
4781 /*** Case 2: Create:
4782 for (offset = nelements/2; offset >= 1; offset/=2)
4784 Create: va' = vec_shift <va, offset>
4785 Create: va = vop <va, va'>
4786 } */
4788 tree rhs;
4790 if (dump_enabled_p ())
4791 dump_printf_loc (MSG_NOTE, vect_location,
4792 "Reduce using vector shifts\n");
4794 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4795 new_temp = new_phi_result;
4796 for (elt_offset = nelements / 2;
4797 elt_offset >= 1;
4798 elt_offset /= 2)
4800 calc_vec_perm_mask_for_shift (mode, elt_offset, sel);
4801 tree mask = vect_gen_perm_mask_any (vectype, sel);
4802 epilog_stmt = gimple_build_assign (vec_dest, VEC_PERM_EXPR,
4803 new_temp, zero_vec, mask);
4804 new_name = make_ssa_name (vec_dest, epilog_stmt);
4805 gimple_assign_set_lhs (epilog_stmt, new_name);
4806 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4808 epilog_stmt = gimple_build_assign (vec_dest, code, new_name,
4809 new_temp);
4810 new_temp = make_ssa_name (vec_dest, epilog_stmt);
4811 gimple_assign_set_lhs (epilog_stmt, new_temp);
4812 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4815 /* 2.4 Extract the final scalar result. Create:
4816 s_out3 = extract_field <v_out2, bitpos> */
4818 if (dump_enabled_p ())
4819 dump_printf_loc (MSG_NOTE, vect_location,
4820 "extract scalar result\n");
4822 rhs = build3 (BIT_FIELD_REF, scalar_type, new_temp,
4823 bitsize, bitsize_zero_node);
4824 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
4825 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
4826 gimple_assign_set_lhs (epilog_stmt, new_temp);
4827 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4828 scalar_results.safe_push (new_temp);
4830 else
4832 /*** Case 3: Create:
4833 s = extract_field <v_out2, 0>
4834 for (offset = element_size;
4835 offset < vector_size;
4836 offset += element_size;)
4838 Create: s' = extract_field <v_out2, offset>
4839 Create: s = op <s, s'> // For non SLP cases
4840 } */
4842 if (dump_enabled_p ())
4843 dump_printf_loc (MSG_NOTE, vect_location,
4844 "Reduce using scalar code.\n");
4846 vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype));
4847 FOR_EACH_VEC_ELT (new_phis, i, new_phi)
4849 int bit_offset;
4850 if (gimple_code (new_phi) == GIMPLE_PHI)
4851 vec_temp = PHI_RESULT (new_phi);
4852 else
4853 vec_temp = gimple_assign_lhs (new_phi);
4854 tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp, bitsize,
4855 bitsize_zero_node);
4856 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
4857 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
4858 gimple_assign_set_lhs (epilog_stmt, new_temp);
4859 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4861 /* In SLP we don't need to apply the reduction operation, so we just
4862 collect s' values in SCALAR_RESULTS. */
4863 if (slp_reduc)
4864 scalar_results.safe_push (new_temp);
4866 for (bit_offset = element_bitsize;
4867 bit_offset < vec_size_in_bits;
4868 bit_offset += element_bitsize)
4870 tree bitpos = bitsize_int (bit_offset);
4871 tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp,
4872 bitsize, bitpos);
4874 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
4875 new_name = make_ssa_name (new_scalar_dest, epilog_stmt);
4876 gimple_assign_set_lhs (epilog_stmt, new_name);
4877 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4879 if (slp_reduc)
4881 /* In SLP we don't need to apply the reduction operation, so
4882 we just collect s' values in SCALAR_RESULTS. */
4883 new_temp = new_name;
4884 scalar_results.safe_push (new_name);
4886 else
4888 epilog_stmt = gimple_build_assign (new_scalar_dest, code,
4889 new_name, new_temp);
4890 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
4891 gimple_assign_set_lhs (epilog_stmt, new_temp);
4892 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4897 /* The only case where we need to reduce scalar results in SLP is
4898 unrolling. If the size of SCALAR_RESULTS is greater than
4899 GROUP_SIZE, we reduce them by combining elements modulo
4900 GROUP_SIZE. */
4901 if (slp_reduc)
4903 tree res, first_res, new_res;
4904 gimple *new_stmt;
4906 /* Reduce multiple scalar results in case of SLP unrolling. */
4907 for (j = group_size; scalar_results.iterate (j, &res);
4908 j++)
4910 first_res = scalar_results[j % group_size];
4911 new_stmt = gimple_build_assign (new_scalar_dest, code,
4912 first_res, res);
4913 new_res = make_ssa_name (new_scalar_dest, new_stmt);
4914 gimple_assign_set_lhs (new_stmt, new_res);
4915 gsi_insert_before (&exit_gsi, new_stmt, GSI_SAME_STMT);
4916 scalar_results[j % group_size] = new_res;
4919 else
4920 /* Not SLP - we have one scalar to keep in SCALAR_RESULTS. */
4921 scalar_results.safe_push (new_temp);
4925 vect_finalize_reduction:
4927 if (double_reduc)
4928 loop = loop->inner;
4930 /* 2.5 Adjust the final result by the initial value of the reduction
4931 variable. (When such adjustment is not needed, then
4932 'adjustment_def' is zero). For example, if code is PLUS we create:
4933 new_temp = loop_exit_def + adjustment_def */
4935 if (adjustment_def)
4937 gcc_assert (!slp_reduc);
4938 if (nested_in_vect_loop)
4940 new_phi = new_phis[0];
4941 gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) == VECTOR_TYPE);
4942 expr = build2 (code, vectype, PHI_RESULT (new_phi), adjustment_def);
4943 new_dest = vect_create_destination_var (scalar_dest, vectype);
4945 else
4947 new_temp = scalar_results[0];
4948 gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) != VECTOR_TYPE);
4949 expr = build2 (code, scalar_type, new_temp, adjustment_def);
4950 new_dest = vect_create_destination_var (scalar_dest, scalar_type);
4953 epilog_stmt = gimple_build_assign (new_dest, expr);
4954 new_temp = make_ssa_name (new_dest, epilog_stmt);
4955 gimple_assign_set_lhs (epilog_stmt, new_temp);
4956 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4957 if (nested_in_vect_loop)
4959 set_vinfo_for_stmt (epilog_stmt,
4960 new_stmt_vec_info (epilog_stmt, loop_vinfo));
4961 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (epilog_stmt)) =
4962 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (new_phi));
4964 if (!double_reduc)
4965 scalar_results.quick_push (new_temp);
4966 else
4967 scalar_results[0] = new_temp;
4969 else
4970 scalar_results[0] = new_temp;
4972 new_phis[0] = epilog_stmt;
4975 /* 2.6 Handle the loop-exit phis. Replace the uses of scalar loop-exit
4976 phis with new adjusted scalar results, i.e., replace use <s_out0>
4977 with use <s_out4>.
4979 Transform:
4980 loop_exit:
4981 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
4982 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
4983 v_out2 = reduce <v_out1>
4984 s_out3 = extract_field <v_out2, 0>
4985 s_out4 = adjust_result <s_out3>
4986 use <s_out0>
4987 use <s_out0>
4989 into:
4991 loop_exit:
4992 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
4993 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
4994 v_out2 = reduce <v_out1>
4995 s_out3 = extract_field <v_out2, 0>
4996 s_out4 = adjust_result <s_out3>
4997 use <s_out4>
4998 use <s_out4> */
5001 /* In SLP reduction chain we reduce vector results into one vector if
5002 necessary, hence we set here GROUP_SIZE to 1. SCALAR_DEST is the LHS of
5003 the last stmt in the reduction chain, since we are looking for the loop
5004 exit phi node. */
5005 if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
5007 gimple *dest_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[group_size - 1];
5008 /* Handle reduction patterns. */
5009 if (STMT_VINFO_RELATED_STMT (vinfo_for_stmt (dest_stmt)))
5010 dest_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (dest_stmt));
5012 scalar_dest = gimple_assign_lhs (dest_stmt);
5013 group_size = 1;
5016 /* In SLP we may have several statements in NEW_PHIS and REDUCTION_PHIS (in
5017 case GROUP_SIZE is greater than the vectorization factor). Therefore, we
5018 need to match SCALAR_RESULTS with corresponding statements. The first
5019 (GROUP_SIZE / number of new vector stmts) scalar results correspond to
5020 the first vector stmt, etc.
5021 (RATIO is equal to (GROUP_SIZE / number of new vector stmts)). */
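   /* Editor's example (not part of the original source): with GROUP_SIZE == 4
      and two entries in NEW_PHIS, RATIO == 2, so scalar_results[0..1] are
      matched with new_phis[0]/reduction_phis[0] and scalar_results[2..3]
      with new_phis[1]/reduction_phis[1] in the loop below.  */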
5022 if (group_size > new_phis.length ())
5024 ratio = group_size / new_phis.length ();
5025 gcc_assert (!(group_size % new_phis.length ()));
5027 else
5028 ratio = 1;
5030 for (k = 0; k < group_size; k++)
5032 if (k % ratio == 0)
5034 epilog_stmt = new_phis[k / ratio];
5035 reduction_phi = reduction_phis[k / ratio];
5036 if (double_reduc)
5037 inner_phi = inner_phis[k / ratio];
5040 if (slp_reduc)
5042 gimple *current_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[k];
5044 orig_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (current_stmt));
5045 /* SLP statements can't participate in patterns. */
5046 gcc_assert (!orig_stmt);
5047 scalar_dest = gimple_assign_lhs (current_stmt);
5050 phis.create (3);
5051 /* Find the loop-closed-use at the loop exit of the original scalar
5052 result. (The reduction result is expected to have two immediate uses -
5053 one at the latch block, and one at the loop exit). */
5054 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
5055 if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p)))
5056 && !is_gimple_debug (USE_STMT (use_p)))
5057 phis.safe_push (USE_STMT (use_p));
5059 /* While we expect to have found an exit_phi because of loop-closed-ssa
5060 form we can end up without one if the scalar cycle is dead. */
5062 FOR_EACH_VEC_ELT (phis, i, exit_phi)
5064 if (outer_loop)
5066 stmt_vec_info exit_phi_vinfo = vinfo_for_stmt (exit_phi);
5067 gphi *vect_phi;
5069 /* FORNOW. Currently not supporting the case that an inner-loop
5070 reduction is not used in the outer-loop (but only outside the
5071 outer-loop), unless it is double reduction. */
5072 gcc_assert ((STMT_VINFO_RELEVANT_P (exit_phi_vinfo)
5073 && !STMT_VINFO_LIVE_P (exit_phi_vinfo))
5074 || double_reduc);
5076 if (double_reduc)
5077 STMT_VINFO_VEC_STMT (exit_phi_vinfo) = inner_phi;
5078 else
5079 STMT_VINFO_VEC_STMT (exit_phi_vinfo) = epilog_stmt;
5080 if (!double_reduc
5081 || STMT_VINFO_DEF_TYPE (exit_phi_vinfo)
5082 != vect_double_reduction_def)
5083 continue;
5085 /* Handle double reduction:
5087 stmt1: s1 = phi <s0, s2> - double reduction phi (outer loop)
5088 stmt2: s3 = phi <s1, s4> - (regular) reduc phi (inner loop)
5089 stmt3: s4 = use (s3) - (regular) reduc stmt (inner loop)
5090 stmt4: s2 = phi <s4> - double reduction stmt (outer loop)
5092 At that point the regular reduction (stmt2 and stmt3) is
5093 already vectorized, as well as the exit phi node, stmt4.
5094 Here we vectorize the phi node of double reduction, stmt1, and
5095 update all relevant statements. */
5097 /* Go through all the uses of s2 to find double reduction phi
5098 node, i.e., stmt1 above. */
5099 orig_name = PHI_RESULT (exit_phi);
5100 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
5102 stmt_vec_info use_stmt_vinfo;
5103 stmt_vec_info new_phi_vinfo;
5104 tree vect_phi_init, preheader_arg, vect_phi_res, init_def;
5105 basic_block bb = gimple_bb (use_stmt);
5106 gimple *use;
5108 /* Check that USE_STMT is really a double reduction phi
5109 node. */
5110 if (gimple_code (use_stmt) != GIMPLE_PHI
5111 || gimple_phi_num_args (use_stmt) != 2
5112 || bb->loop_father != outer_loop)
5113 continue;
5114 use_stmt_vinfo = vinfo_for_stmt (use_stmt);
5115 if (!use_stmt_vinfo
5116 || STMT_VINFO_DEF_TYPE (use_stmt_vinfo)
5117 != vect_double_reduction_def)
5118 continue;
5120 /* Create vector phi node for double reduction:
5121 vs1 = phi <vs0, vs2>
5122 vs1 was created previously in this function by a call to
5123 vect_get_vec_def_for_operand and is stored in
5124 vec_initial_def;
5125 vs2 is defined by INNER_PHI, the vectorized EXIT_PHI;
5126 vs0 is created here. */
5128 /* Create vector phi node. */
5129 vect_phi = create_phi_node (vec_initial_def, bb);
5130 new_phi_vinfo = new_stmt_vec_info (vect_phi,
5131 loop_vec_info_for_loop (outer_loop));
5132 set_vinfo_for_stmt (vect_phi, new_phi_vinfo);
5134 /* Create vs0 - initial def of the double reduction phi. */
5135 preheader_arg = PHI_ARG_DEF_FROM_EDGE (use_stmt,
5136 loop_preheader_edge (outer_loop));
5137 init_def = get_initial_def_for_reduction (stmt,
5138 preheader_arg, NULL);
5139 vect_phi_init = vect_init_vector (use_stmt, init_def,
5140 vectype, NULL);
5142 /* Update phi node arguments with vs0 and vs2. */
5143 add_phi_arg (vect_phi, vect_phi_init,
5144 loop_preheader_edge (outer_loop),
5145 UNKNOWN_LOCATION);
5146 add_phi_arg (vect_phi, PHI_RESULT (inner_phi),
5147 loop_latch_edge (outer_loop), UNKNOWN_LOCATION);
5148 if (dump_enabled_p ())
5150 dump_printf_loc (MSG_NOTE, vect_location,
5151 "created double reduction phi node: ");
5152 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vect_phi, 0);
5155 vect_phi_res = PHI_RESULT (vect_phi);
5157 /* Replace the use, i.e., set the correct vs1 in the regular
5158 reduction phi node. FORNOW, NCOPIES is always 1, so the
5159 loop is redundant. */
5160 use = reduction_phi;
5161 for (j = 0; j < ncopies; j++)
5163 edge pr_edge = loop_preheader_edge (loop);
5164 SET_PHI_ARG_DEF (use, pr_edge->dest_idx, vect_phi_res);
5165 use = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (use));
5171 phis.release ();
5172 if (nested_in_vect_loop)
5174 if (double_reduc)
5175 loop = outer_loop;
5176 else
5177 continue;
5180 phis.create (3);
5181 /* Find the loop-closed-use at the loop exit of the original scalar
5182 result. (The reduction result is expected to have two immediate uses,
5183 one at the latch block, and one at the loop exit). For double
5184 reductions we are looking for exit phis of the outer loop. */
5185 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
5187 if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p))))
5189 if (!is_gimple_debug (USE_STMT (use_p)))
5190 phis.safe_push (USE_STMT (use_p));
5192 else
5194 if (double_reduc && gimple_code (USE_STMT (use_p)) == GIMPLE_PHI)
5196 tree phi_res = PHI_RESULT (USE_STMT (use_p));
5198 FOR_EACH_IMM_USE_FAST (phi_use_p, phi_imm_iter, phi_res)
5200 if (!flow_bb_inside_loop_p (loop,
5201 gimple_bb (USE_STMT (phi_use_p)))
5202 && !is_gimple_debug (USE_STMT (phi_use_p)))
5203 phis.safe_push (USE_STMT (phi_use_p));
5209 FOR_EACH_VEC_ELT (phis, i, exit_phi)
5211 /* Replace the uses: */
5212 orig_name = PHI_RESULT (exit_phi);
5213 scalar_result = scalar_results[k];
5214 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
5215 FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
5216 SET_USE (use_p, scalar_result);
5219 phis.release ();
5224 /* Function is_nonwrapping_integer_induction.
5226 Check that STMT (which is part of loop LOOP) describes an induction
5227 that both increments and cannot overflow. */
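   /* Editor's example (not part of the original source): for base == 0,
      step == 4, a 32-bit unsigned type and at most 100 iterations, the code
      below computes max_loop_value == 0 + 4 * 100 == 400, which needs only
      9 bits < 32, so the induction cannot wrap and true is returned.  If
      the loop could run 2^30 times, 4 * 2^30 would need 33 bits > 32 and
      false is returned.  */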
5229 static bool
5230 is_nonwrapping_integer_induction (gimple *stmt, struct loop *loop)
5232 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
5233 tree base = STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo);
5234 tree step = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo);
5235 tree lhs_type = TREE_TYPE (gimple_phi_result (stmt));
5236 widest_int ni, max_loop_value, lhs_max;
5237 bool overflow = false;
5239 /* Make sure the loop is integer based. */
5240 if (TREE_CODE (base) != INTEGER_CST
5241 || TREE_CODE (step) != INTEGER_CST)
5242 return false;
5244 /* Check that the induction increments. */
5245 if (tree_int_cst_sgn (step) == -1)
5246 return false;
5248 /* Check that the max size of the loop will not wrap. */
5250 if (TYPE_OVERFLOW_UNDEFINED (lhs_type))
5251 return true;
5253 if (! max_stmt_executions (loop, &ni))
5254 return false;
5256 max_loop_value = wi::mul (wi::to_widest (step), ni, TYPE_SIGN (lhs_type),
5257 &overflow);
5258 if (overflow)
5259 return false;
5261 max_loop_value = wi::add (wi::to_widest (base), max_loop_value,
5262 TYPE_SIGN (lhs_type), &overflow);
5263 if (overflow)
5264 return false;
5266 return (wi::min_precision (max_loop_value, TYPE_SIGN (lhs_type))
5267 <= TYPE_PRECISION (lhs_type));
5270 /* Function vectorizable_reduction.
5272 Check if STMT performs a reduction operation that can be vectorized.
5273 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
5274 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
5275 Return FALSE if not a vectorizable STMT, TRUE otherwise.
5277 This function also handles reduction idioms (patterns) that have been
5278 recognized in advance during vect_pattern_recog. In this case, STMT may be
5279 of this form:
5280 X = pattern_expr (arg0, arg1, ..., X)
5281 and its STMT_VINFO_RELATED_STMT points to the last stmt in the original
5282 sequence that had been detected and replaced by the pattern-stmt (STMT).
5284 This function also handles reduction of condition expressions, for example:
5285 for (int i = 0; i < N; i++)
5286 if (a[i] < value)
5287 last = a[i];
5288 This is handled by vectorizing the loop and creating an additional vector
5289 containing the loop indexes for which "a[i] < value" was true. In the
5290 function epilogue this is reduced to a single max value and then used to
5291 index into the vector of results.
5293 In some cases of reduction patterns, the type of the reduction variable X is
5294 different than the type of the other arguments of STMT.
5295 In such cases, the vectype that is used when transforming STMT into a vector
5296 stmt is different than the vectype that is used to determine the
5297 vectorization factor, because it consists of a different number of elements
5298 than the actual number of elements that are being operated upon in parallel.
5300 For example, consider an accumulation of shorts into an int accumulator.
5301 On some targets it's possible to vectorize this pattern operating on 8
5302 shorts at a time (hence, the vectype for purposes of determining the
5303 vectorization factor should be V8HI); on the other hand, the vectype that
5304 is used to create the vector form is actually V4SI (the type of the result).
5306 Upon entry to this function, STMT_VINFO_VECTYPE records the vectype that
5307 indicates the actual level of parallelism (V8HI in the example), so
5308 that the right vectorization factor is derived. This vectype
5309 corresponds to the type of arguments to the reduction stmt, and should *NOT*
5310 be used to create the vectorized stmt. The right vectype for the vectorized
5311 stmt is obtained from the type of the result X:
5312 get_vectype_for_scalar_type (TREE_TYPE (X))
5314 This means that, contrary to "regular" reductions (or "regular" stmts in
5315 general), the following equation:
5316 STMT_VINFO_VECTYPE == get_vectype_for_scalar_type (TREE_TYPE (X))
5317 does *NOT* necessarily hold for reduction patterns. */
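   /* Editor's example (not part of the original source): the shorts-to-int
      accumulation described above is

	short a[N]; int sum = 0;
	for (i = 0; i < N; i++)
	  sum += a[i];

      here STMT_VINFO_VECTYPE is V8HI (eight shorts per vector iteration,
      which determines the vectorization factor), while the vectype used to
      build the vectorized reduction stmt is V4SI, obtained from
      get_vectype_for_scalar_type (TREE_TYPE (sum)).  */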
5319 bool
5320 vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
5321 gimple **vec_stmt, slp_tree slp_node)
5323 tree vec_dest;
5324 tree scalar_dest;
5325 tree loop_vec_def0 = NULL_TREE, loop_vec_def1 = NULL_TREE;
5326 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5327 tree vectype_out = STMT_VINFO_VECTYPE (stmt_info);
5328 tree vectype_in = NULL_TREE;
5329 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5330 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
5331 enum tree_code code, orig_code, epilog_reduc_code;
5332 machine_mode vec_mode;
5333 int op_type;
5334 optab optab, reduc_optab;
5335 tree new_temp = NULL_TREE;
5336 gimple *def_stmt;
5337 enum vect_def_type dt;
5338 gphi *new_phi = NULL;
5339 tree scalar_type;
5340 bool is_simple_use;
5341 gimple *orig_stmt;
5342 stmt_vec_info orig_stmt_info;
5343 tree expr = NULL_TREE;
5344 int i;
5345 int ncopies;
5346 int epilog_copies;
5347 stmt_vec_info prev_stmt_info, prev_phi_info;
5348 bool single_defuse_cycle = false;
5349 tree reduc_def = NULL_TREE;
5350 gimple *new_stmt = NULL;
5351 int j;
5352 tree ops[3];
5353 bool nested_cycle = false, found_nested_cycle_def = false;
5354 gimple *reduc_def_stmt = NULL;
5355 bool double_reduc = false, dummy;
5356 basic_block def_bb;
5357 struct loop * def_stmt_loop, *outer_loop = NULL;
5358 tree def_arg;
5359 gimple *def_arg_stmt;
5360 auto_vec<tree> vec_oprnds0;
5361 auto_vec<tree> vec_oprnds1;
5362 auto_vec<tree> vect_defs;
5363 auto_vec<gimple *> phis;
5364 int vec_num;
5365 tree def0, def1, tem, op0, op1 = NULL_TREE;
5366 bool first_p = true;
5367 tree cr_index_scalar_type = NULL_TREE, cr_index_vector_type = NULL_TREE;
5368 gimple *cond_expr_induction_def_stmt = NULL;
5370 /* In case of reduction chain we switch to the first stmt in the chain, but
5371 we don't update STMT_INFO, since only the last stmt is marked as reduction
5372 and has reduction properties. */
5373 if (GROUP_FIRST_ELEMENT (stmt_info)
5374 && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
5376 stmt = GROUP_FIRST_ELEMENT (stmt_info);
5377 first_p = false;
5380 if (nested_in_vect_loop_p (loop, stmt))
5382 outer_loop = loop;
5383 loop = loop->inner;
5384 nested_cycle = true;
5387 /* 1. Is vectorizable reduction? */
5388 /* Not supportable if the reduction variable is used in the loop, unless
5389 it's a reduction chain. */
5390 if (STMT_VINFO_RELEVANT (stmt_info) > vect_used_in_outer
5391 && !GROUP_FIRST_ELEMENT (stmt_info))
5392 return false;
5394 /* Reductions that are not used even in an enclosing outer-loop,
5395 are expected to be "live" (used out of the loop). */
5396 if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_scope
5397 && !STMT_VINFO_LIVE_P (stmt_info))
5398 return false;
5400 /* Make sure it was already recognized as a reduction computation. */
5401 if (STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmt)) != vect_reduction_def
5402 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmt)) != vect_nested_cycle)
5403 return false;
5405 /* 2. Has this been recognized as a reduction pattern?
5407 Check if STMT represents a pattern that has been recognized
5408 in earlier analysis stages. For stmts that represent a pattern,
5409 the STMT_VINFO_RELATED_STMT field records the last stmt in
5410 the original sequence that constitutes the pattern. */
5412 orig_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt));
5413 if (orig_stmt)
5415 orig_stmt_info = vinfo_for_stmt (orig_stmt);
5416 gcc_assert (STMT_VINFO_IN_PATTERN_P (orig_stmt_info));
5417 gcc_assert (!STMT_VINFO_IN_PATTERN_P (stmt_info));
5420 /* 3. Check the operands of the operation. The first operands are defined
5421 inside the loop body. The last operand is the reduction variable,
5422 which is defined by the loop-header-phi. */
5424 gcc_assert (is_gimple_assign (stmt));
5426 /* Flatten RHS. */
5427 switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
5429 case GIMPLE_SINGLE_RHS:
5430 op_type = TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt));
5431 if (op_type == ternary_op)
5433 tree rhs = gimple_assign_rhs1 (stmt);
5434 ops[0] = TREE_OPERAND (rhs, 0);
5435 ops[1] = TREE_OPERAND (rhs, 1);
5436 ops[2] = TREE_OPERAND (rhs, 2);
5437 code = TREE_CODE (rhs);
5439 else
5440 return false;
5441 break;
5443 case GIMPLE_BINARY_RHS:
5444 code = gimple_assign_rhs_code (stmt);
5445 op_type = TREE_CODE_LENGTH (code);
5446 gcc_assert (op_type == binary_op);
5447 ops[0] = gimple_assign_rhs1 (stmt);
5448 ops[1] = gimple_assign_rhs2 (stmt);
5449 break;
5451 case GIMPLE_TERNARY_RHS:
5452 code = gimple_assign_rhs_code (stmt);
5453 op_type = TREE_CODE_LENGTH (code);
5454 gcc_assert (op_type == ternary_op);
5455 ops[0] = gimple_assign_rhs1 (stmt);
5456 ops[1] = gimple_assign_rhs2 (stmt);
5457 ops[2] = gimple_assign_rhs3 (stmt);
5458 break;
5460 case GIMPLE_UNARY_RHS:
5461 return false;
5463 default:
5464 gcc_unreachable ();
5466 /* The default is that the reduction variable is the last operand in the statement. */
5467 int reduc_index = op_type - 1;
5468 if (code == MINUS_EXPR)
5469 reduc_index = 0;
5471 if (code == COND_EXPR && slp_node)
5472 return false;
5474 scalar_dest = gimple_assign_lhs (stmt);
5475 scalar_type = TREE_TYPE (scalar_dest);
5476 if (!POINTER_TYPE_P (scalar_type) && !INTEGRAL_TYPE_P (scalar_type)
5477 && !SCALAR_FLOAT_TYPE_P (scalar_type))
5478 return false;
5480 /* Do not try to vectorize bit-precision reductions. */
5481 if ((TYPE_PRECISION (scalar_type)
5482 != GET_MODE_PRECISION (TYPE_MODE (scalar_type))))
5483 return false;
5485 /* All uses but the last are expected to be defined in the loop.
5486 The last use is the reduction variable. In case of nested cycle this
5487 assumption is not true: we use reduc_index to record the index of the
5488 reduction variable. */
5489 for (i = 0; i < op_type; i++)
5491 if (i == reduc_index)
5492 continue;
5494 /* The condition of COND_EXPR is checked in vectorizable_condition(). */
5495 if (i == 0 && code == COND_EXPR)
5496 continue;
5498 is_simple_use = vect_is_simple_use (ops[i], loop_vinfo,
5499 &def_stmt, &dt, &tem);
5500 if (!vectype_in)
5501 vectype_in = tem;
5502 gcc_assert (is_simple_use);
5504 if (dt != vect_internal_def
5505 && dt != vect_external_def
5506 && dt != vect_constant_def
5507 && dt != vect_induction_def
5508 && !(dt == vect_nested_cycle && nested_cycle))
5509 return false;
5511 if (dt == vect_nested_cycle)
5513 found_nested_cycle_def = true;
5514 reduc_def_stmt = def_stmt;
5515 reduc_index = i;
5518 if (i == 1 && code == COND_EXPR && dt == vect_induction_def)
5519 cond_expr_induction_def_stmt = def_stmt;
5522 is_simple_use = vect_is_simple_use (ops[reduc_index], loop_vinfo,
5523 &def_stmt, &dt, &tem);
5524 if (!vectype_in)
5525 vectype_in = tem;
5526 gcc_assert (is_simple_use);
5527 if (!found_nested_cycle_def)
5528 reduc_def_stmt = def_stmt;
5530 if (reduc_def_stmt && gimple_code (reduc_def_stmt) != GIMPLE_PHI)
5531 return false;
5533 if (!(dt == vect_reduction_def
5534 || dt == vect_nested_cycle
5535 || ((dt == vect_internal_def || dt == vect_external_def
5536 || dt == vect_constant_def || dt == vect_induction_def)
5537 && nested_cycle && found_nested_cycle_def)))
5539 /* For pattern recognized stmts, orig_stmt might be a reduction,
5540 but some helper statements for the pattern might not, or
5541 might be COND_EXPRs with reduction uses in the condition. */
5542 gcc_assert (orig_stmt);
5543 return false;
5546 enum vect_reduction_type v_reduc_type;
5547 gimple *tmp = vect_is_simple_reduction (loop_vinfo, reduc_def_stmt,
5548 !nested_cycle, &dummy, false,
5549 &v_reduc_type);
5551 /* If we have a condition reduction, see if we can simplify it further. */
5552 if (v_reduc_type == COND_REDUCTION
5553 && cond_expr_induction_def_stmt != NULL
5554 && is_nonwrapping_integer_induction (cond_expr_induction_def_stmt, loop))
5556 if (dump_enabled_p ())
5557 dump_printf_loc (MSG_NOTE, vect_location,
5558 "condition expression based on integer induction.\n");
5559 STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) = INTEGER_INDUC_COND_REDUCTION;
5561 else
5562 STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) = v_reduc_type;
5564 if (orig_stmt)
5565 gcc_assert (tmp == orig_stmt
5566 || GROUP_FIRST_ELEMENT (vinfo_for_stmt (tmp)) == orig_stmt);
5567 else
5568 /* We changed STMT to be the first stmt in reduction chain, hence we
5569 check that in this case the first element in the chain is STMT. */
5570 gcc_assert (stmt == tmp
5571 || GROUP_FIRST_ELEMENT (vinfo_for_stmt (tmp)) == stmt);
5573 if (STMT_VINFO_LIVE_P (vinfo_for_stmt (reduc_def_stmt)))
5574 return false;
5576 if (slp_node)
5577 ncopies = 1;
5578 else
5579 ncopies = (LOOP_VINFO_VECT_FACTOR (loop_vinfo)
5580 / TYPE_VECTOR_SUBPARTS (vectype_in));
5582 gcc_assert (ncopies >= 1);
5584 vec_mode = TYPE_MODE (vectype_in);
5586 if (code == COND_EXPR)
5588 /* Only call during the analysis stage, otherwise we'll lose
5589 STMT_VINFO_TYPE. */
5590 if (!vec_stmt && !vectorizable_condition (stmt, gsi, NULL,
5591 ops[reduc_index], 0, NULL))
5593 if (dump_enabled_p ())
5594 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5595 "unsupported condition in reduction\n");
5596 return false;
5599 else
5601 /* 4. Supportable by target? */
5603 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR
5604 || code == LROTATE_EXPR || code == RROTATE_EXPR)
5606 /* Shifts and rotates are only supported by vectorizable_shifts,
5607 not vectorizable_reduction. */
5608 if (dump_enabled_p ())
5609 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5610 "unsupported shift or rotation.\n");
5611 return false;
5614 /* 4.1. check support for the operation in the loop */
5615 optab = optab_for_tree_code (code, vectype_in, optab_default);
5616 if (!optab)
5618 if (dump_enabled_p ())
5619 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5620 "no optab.\n");
5622 return false;
5625 if (optab_handler (optab, vec_mode) == CODE_FOR_nothing)
5627 if (dump_enabled_p ())
5628 dump_printf (MSG_NOTE, "op not supported by target.\n");
5630 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
5631 || LOOP_VINFO_VECT_FACTOR (loop_vinfo)
5632 < vect_min_worthwhile_factor (code))
5633 return false;
5635 if (dump_enabled_p ())
5636 dump_printf (MSG_NOTE, "proceeding using word mode.\n");
5639 /* Worthwhile without SIMD support? */
5640 if (!VECTOR_MODE_P (TYPE_MODE (vectype_in))
5641 && LOOP_VINFO_VECT_FACTOR (loop_vinfo)
5642 < vect_min_worthwhile_factor (code))
5644 if (dump_enabled_p ())
5645 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5646 "not worthwhile without SIMD support.\n");
5648 return false;
5652 /* 4.2. Check support for the epilog operation.
5654 If STMT represents a reduction pattern, then the type of the
5655 reduction variable may be different than the type of the rest
5656 of the arguments. For example, consider the case of accumulation
5657 of shorts into an int accumulator; The original code:
5658 S1: int_a = (int) short_a;
5659 orig_stmt-> S2: int_acc = plus <int_a ,int_acc>;
5661 was replaced with:
5662 STMT: int_acc = widen_sum <short_a, int_acc>
5664 This means that:
5665 1. The tree-code that is used to create the vector operation in the
5666 epilog code (that reduces the partial results) is not the
5667 tree-code of STMT, but is rather the tree-code of the original
5668 stmt from the pattern that STMT is replacing.  I.e., in the example
5669 above we want to use 'widen_sum' in the loop, but 'plus' in the
5670 epilog.
5671 2. The type (mode) we use to check available target support
5672 for the vector operation to be created in the *epilog*, is
5673 determined by the type of the reduction variable (in the example
5674 above we'd check this: optab_handler (plus_optab, vect_int_mode])).
5675 However the type (mode) we use to check available target support
5676 for the vector operation to be created *inside the loop*, is
5677 determined by the type of the other arguments to STMT (in the
5678 example we'd check this: optab_handler (widen_sum_optab,
5679 vect_short_mode)).
5681 This is contrary to "regular" reductions, in which the types of all
5682 the arguments are the same as the type of the reduction variable.
5683 For "regular" reductions we can therefore use the same vector type
5684 (and also the same tree-code) when generating the epilog code and
5685 when generating the code inside the loop. */
5687 if (orig_stmt)
5689 /* This is a reduction pattern: get the vectype from the type of the
5690 reduction variable, and get the tree-code from orig_stmt. */
5691 gcc_assert (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
5692 == TREE_CODE_REDUCTION);
5693 orig_code = gimple_assign_rhs_code (orig_stmt);
5694 gcc_assert (vectype_out);
5695 vec_mode = TYPE_MODE (vectype_out);
5697 else
5699 /* Regular reduction: the same vectype and tree-code that are used for
5700 the vector code inside the loop can also be used for the epilog code. */
5701 orig_code = code;
5703 if (code == MINUS_EXPR)
5704 orig_code = PLUS_EXPR;
5706 /* For simple condition reductions, replace with the actual expression
5707 we want to base our reduction around. */
5708 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
5709 == INTEGER_INDUC_COND_REDUCTION)
5710 orig_code = MAX_EXPR;
5713 if (nested_cycle)
5715 def_bb = gimple_bb (reduc_def_stmt);
5716 def_stmt_loop = def_bb->loop_father;
5717 def_arg = PHI_ARG_DEF_FROM_EDGE (reduc_def_stmt,
5718 loop_preheader_edge (def_stmt_loop));
5719 if (TREE_CODE (def_arg) == SSA_NAME
5720 && (def_arg_stmt = SSA_NAME_DEF_STMT (def_arg))
5721 && gimple_code (def_arg_stmt) == GIMPLE_PHI
5722 && flow_bb_inside_loop_p (outer_loop, gimple_bb (def_arg_stmt))
5723 && vinfo_for_stmt (def_arg_stmt)
5724 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_arg_stmt))
5725 == vect_double_reduction_def)
5726 double_reduc = true;
5729 epilog_reduc_code = ERROR_MARK;
5731 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == TREE_CODE_REDUCTION
5732 || STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
5733 == INTEGER_INDUC_COND_REDUCTION)
5735 if (reduction_code_for_scalar_code (orig_code, &epilog_reduc_code))
5737 reduc_optab = optab_for_tree_code (epilog_reduc_code, vectype_out,
5738 optab_default);
5739 if (!reduc_optab)
5741 if (dump_enabled_p ())
5742 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5743 "no optab for reduction.\n");
5745 epilog_reduc_code = ERROR_MARK;
5747 else if (optab_handler (reduc_optab, vec_mode) == CODE_FOR_nothing)
5749 if (dump_enabled_p ())
5750 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5751 "reduc op not supported by target.\n");
5753 epilog_reduc_code = ERROR_MARK;
5756 /* When epilog_reduc_code is ERROR_MARK then a reduction will be
5757 generated in the epilog using multiple expressions. This does not
5758 work for condition reductions. */
5759 if (epilog_reduc_code == ERROR_MARK
5760 && STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
5761 == INTEGER_INDUC_COND_REDUCTION)
5763 if (dump_enabled_p ())
5764 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5765 "no reduc code for scalar code.\n");
5766 return false;
5769 else
5771 if (!nested_cycle || double_reduc)
5773 if (dump_enabled_p ())
5774 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5775 "no reduc code for scalar code.\n");
5777 return false;
5781 else
5783 int scalar_precision = GET_MODE_PRECISION (TYPE_MODE (scalar_type));
5784 cr_index_scalar_type = make_unsigned_type (scalar_precision);
5785 cr_index_vector_type = build_vector_type
5786 (cr_index_scalar_type, TYPE_VECTOR_SUBPARTS (vectype_out));
5788 epilog_reduc_code = REDUC_MAX_EXPR;
5789 optab = optab_for_tree_code (REDUC_MAX_EXPR, cr_index_vector_type,
5790 optab_default);
5791 if (optab_handler (optab, TYPE_MODE (cr_index_vector_type))
5792 == CODE_FOR_nothing)
5794 if (dump_enabled_p ())
5795 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5796 "reduc max op not supported by target.\n");
5797 return false;
5801 if ((double_reduc
5802 || STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION
5803 || STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
5804 == INTEGER_INDUC_COND_REDUCTION)
5805 && ncopies > 1)
5807 if (dump_enabled_p ())
5808 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5809 "multiple types in double reduction or condition "
5810 "reduction.\n");
5811 return false;
5814 /* In case of widening multiplication by a constant, we update the type
5815 of the constant to be the type of the other operand. We check that the
5816 constant fits the type in the pattern recognition pass. */
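/* A hypothetical example of the constant adjustment described above (the
   array and the constant are made up for illustration):

     short a[N];
     int acc = 0;
     for (int i = 0; i < N; i++)
       acc += a[i] * 7;        <-- widening multiply-accumulate, DOT_PROD

   The INTEGER_CST 7 is converted to the type of the other multiplication
   operand (short here), so both inputs of the product share one vector
   type; that the constant actually fits that type was already verified
   during pattern recognition.  */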
5817 if (code == DOT_PROD_EXPR
5818 && !types_compatible_p (TREE_TYPE (ops[0]), TREE_TYPE (ops[1])))
5820 if (TREE_CODE (ops[0]) == INTEGER_CST)
5821 ops[0] = fold_convert (TREE_TYPE (ops[1]), ops[0]);
5822 else if (TREE_CODE (ops[1]) == INTEGER_CST)
5823 ops[1] = fold_convert (TREE_TYPE (ops[0]), ops[1]);
5824 else
5826 if (dump_enabled_p ())
5827 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5828 "invalid types in dot-prod\n");
5830 return false;
5834 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION)
5836 widest_int ni;
5838 if (! max_loop_iterations (loop, &ni))
5840 if (dump_enabled_p ())
5841 dump_printf_loc (MSG_NOTE, vect_location,
5842 "loop count not known, cannot create cond "
5843 "reduction.\n");
5844 return false;
5846 /* Convert backedges to iterations. */
5847 ni += 1;
5849 /* The additional index will be the same type as the condition.  Check
5850 that the loop iteration count fits into this type less one (because we
5851 use up the zero slot for the case where there are no matches). */
5852 tree max_index = TYPE_MAX_VALUE (cr_index_scalar_type);
5853 if (wi::geu_p (ni, wi::to_widest (max_index)))
5855 if (dump_enabled_p ())
5856 dump_printf_loc (MSG_NOTE, vect_location,
5857 "loop size is greater than data size.\n");
5858 return false;
5862 if (!vec_stmt) /* transformation not required. */
5864 if (first_p
5865 && !vect_model_reduction_cost (stmt_info, epilog_reduc_code, ncopies,
5866 reduc_index))
5867 return false;
5868 STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
5869 return true;
5872 /** Transform. **/
5874 if (dump_enabled_p ())
5875 dump_printf_loc (MSG_NOTE, vect_location, "transform reduction.\n");
5877 /* FORNOW: Multiple types are not supported for condition. */
5878 if (code == COND_EXPR)
5879 gcc_assert (ncopies == 1);
5881 /* Create the destination vector */
5882 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
5884 /* In case the vectorization factor (VF) is bigger than the number
5885 of elements that we can fit in a vectype (nunits), we have to generate
5886 more than one vector stmt - i.e. we need to "unroll" the
5887 vector stmt by a factor VF/nunits. For more details see documentation
5888 in vectorizable_operation. */
5890 /* If the reduction is used in an outer loop we need to generate
5891 VF intermediate results, like so (e.g. for ncopies=2):
5892 r0 = phi (init, r0)
5893 r1 = phi (init, r1)
5894 r0 = x0 + r0;
5895 r1 = x1 + r1;
5896 (i.e. we generate VF results in 2 registers).
5897 In this case we have a separate def-use cycle for each copy, and therefore
5898 for each copy we get the vector def for the reduction variable from the
5899 respective phi node created for this copy.
5901 Otherwise (the reduction is unused in the loop nest), we can combine
5902 together intermediate results, like so (e.g. for ncopies=2):
5903 r = phi (init, r)
5904 r = x0 + r;
5905 r = x1 + r;
5906 (i.e. we generate VF/2 results in a single register).
5907 In this case for each copy we get the vector def for the reduction variable
5908 from the vectorized reduction operation generated in the previous iteration.
5911 if (STMT_VINFO_RELEVANT (stmt_info) <= vect_used_only_live)
5913 single_defuse_cycle = true;
5914 epilog_copies = 1;
5916 else
5917 epilog_copies = ncopies;
5919 prev_stmt_info = NULL;
5920 prev_phi_info = NULL;
5921 if (slp_node)
5922 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
5923 else
5925 vec_num = 1;
5926 vec_oprnds0.create (1);
5927 if (op_type == ternary_op)
5928 vec_oprnds1.create (1);
5931 phis.create (vec_num);
5932 vect_defs.create (vec_num);
5933 if (!slp_node)
5934 vect_defs.quick_push (NULL_TREE);
5936 for (j = 0; j < ncopies; j++)
5938 if (j == 0 || !single_defuse_cycle)
5940 for (i = 0; i < vec_num; i++)
5942 /* Create the reduction-phi that defines the reduction
5943 operand. */
5944 new_phi = create_phi_node (vec_dest, loop->header);
5945 set_vinfo_for_stmt (new_phi,
5946 new_stmt_vec_info (new_phi, loop_vinfo));
5947 if (j == 0 || slp_node)
5948 phis.quick_push (new_phi);
5952 if (code == COND_EXPR)
5954 gcc_assert (!slp_node);
5955 vectorizable_condition (stmt, gsi, vec_stmt,
5956 PHI_RESULT (phis[0]),
5957 reduc_index, NULL);
5958 /* Multiple types are not supported for condition. */
5959 break;
5962 /* Handle uses. */
5963 if (j == 0)
5965 op0 = ops[!reduc_index];
5966 if (op_type == ternary_op)
5968 if (reduc_index == 0)
5969 op1 = ops[2];
5970 else
5971 op1 = ops[1];
5974 if (slp_node)
5975 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
5976 slp_node, -1);
5977 else
5979 loop_vec_def0 = vect_get_vec_def_for_operand (ops[!reduc_index],
5980 stmt);
5981 vec_oprnds0.quick_push (loop_vec_def0);
5982 if (op_type == ternary_op)
5984 loop_vec_def1 = vect_get_vec_def_for_operand (op1, stmt);
5985 vec_oprnds1.quick_push (loop_vec_def1);
5989 else
5991 if (!slp_node)
5993 enum vect_def_type dt;
5994 gimple *dummy_stmt;
5996 vect_is_simple_use (ops[!reduc_index], loop_vinfo,
5997 &dummy_stmt, &dt);
5998 loop_vec_def0 = vect_get_vec_def_for_stmt_copy (dt,
5999 loop_vec_def0);
6000 vec_oprnds0[0] = loop_vec_def0;
6001 if (op_type == ternary_op)
6003 vect_is_simple_use (op1, loop_vinfo, &dummy_stmt, &dt);
6004 loop_vec_def1 = vect_get_vec_def_for_stmt_copy (dt,
6005 loop_vec_def1);
6006 vec_oprnds1[0] = loop_vec_def1;
6010 if (single_defuse_cycle)
6011 reduc_def = gimple_assign_lhs (new_stmt);
6013 STMT_VINFO_RELATED_STMT (prev_phi_info) = new_phi;
6016 FOR_EACH_VEC_ELT (vec_oprnds0, i, def0)
6018 if (slp_node)
6019 reduc_def = PHI_RESULT (phis[i]);
6020 else
6022 if (!single_defuse_cycle || j == 0)
6023 reduc_def = PHI_RESULT (new_phi);
6026 def1 = ((op_type == ternary_op)
6027 ? vec_oprnds1[i] : NULL);
6028 if (op_type == binary_op)
6030 if (reduc_index == 0)
6031 expr = build2 (code, vectype_out, reduc_def, def0);
6032 else
6033 expr = build2 (code, vectype_out, def0, reduc_def);
6035 else
6037 if (reduc_index == 0)
6038 expr = build3 (code, vectype_out, reduc_def, def0, def1);
6039 else
6041 if (reduc_index == 1)
6042 expr = build3 (code, vectype_out, def0, reduc_def, def1);
6043 else
6044 expr = build3 (code, vectype_out, def0, def1, reduc_def);
6048 new_stmt = gimple_build_assign (vec_dest, expr);
6049 new_temp = make_ssa_name (vec_dest, new_stmt);
6050 gimple_assign_set_lhs (new_stmt, new_temp);
6051 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6053 if (slp_node)
6055 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
6056 vect_defs.quick_push (new_temp);
6058 else
6059 vect_defs[0] = new_temp;
6062 if (slp_node)
6063 continue;
6065 if (j == 0)
6066 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
6067 else
6068 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
6070 prev_stmt_info = vinfo_for_stmt (new_stmt);
6071 prev_phi_info = vinfo_for_stmt (new_phi);
6074 tree indx_before_incr, indx_after_incr, cond_name = NULL;
6076 /* Finalize the reduction-phi (set its arguments) and create the
6077 epilog reduction code. */
6078 if ((!single_defuse_cycle || code == COND_EXPR) && !slp_node)
6080 new_temp = gimple_assign_lhs (*vec_stmt);
6081 vect_defs[0] = new_temp;
6083 /* For cond reductions we want to create a new vector (INDEX_COND_EXPR)
6084 which is updated with the current index of the loop for every match of
6085 the original loop's cond_expr (VEC_STMT). This results in a vector
6086 containing the last time the condition passed for that vector lane.
6087 The first match will be a 1 to allow 0 to be used for non-matching
6088 indexes. If there are no matches at all then the vector will be all
6089 zeroes. */
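/* A worked illustration with assumed values: for 4 lanes and a loop in
   which the condition held only in iterations 3, 6 and 7 (1-based), the
   phi vector ends up as {0, 6, 7, 0} - each lane holds the 1-based index
   of the last iteration mapped to that lane for which the condition was
   true, or 0 if it never was.  The epilog reduces this with REDUC_MAX_EXPR
   (7 here) and uses the resulting index to select the answer from the
   vector of results.  */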
6090 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION)
6092 int nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
6093 int k;
6095 gcc_assert (gimple_assign_rhs_code (*vec_stmt) == VEC_COND_EXPR);
6097 /* First we create a simple vector induction variable which starts
6098 with the values {1,2,3,...} (SERIES_VECT) and increments by the
6099 vector size (STEP). */
6101 /* Create a {1,2,3,...} vector. */
6102 tree *vtemp = XALLOCAVEC (tree, nunits_out);
6103 for (k = 0; k < nunits_out; ++k)
6104 vtemp[k] = build_int_cst (cr_index_scalar_type, k + 1);
6105 tree series_vect = build_vector (cr_index_vector_type, vtemp);
6107 /* Create a vector of the step value. */
6108 tree step = build_int_cst (cr_index_scalar_type, nunits_out);
6109 tree vec_step = build_vector_from_val (cr_index_vector_type, step);
6111 /* Create an induction variable. */
6112 gimple_stmt_iterator incr_gsi;
6113 bool insert_after;
6114 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
6115 create_iv (series_vect, vec_step, NULL_TREE, loop, &incr_gsi,
6116 insert_after, &indx_before_incr, &indx_after_incr);
6118 /* Next create a new phi node vector (NEW_PHI_TREE) which starts
6119 filled with zeros (VEC_ZERO). */
6121 /* Create a vector of 0s. */
6122 tree zero = build_zero_cst (cr_index_scalar_type);
6123 tree vec_zero = build_vector_from_val (cr_index_vector_type, zero);
6125 /* Create a vector phi node. */
6126 tree new_phi_tree = make_ssa_name (cr_index_vector_type);
6127 new_phi = create_phi_node (new_phi_tree, loop->header);
6128 set_vinfo_for_stmt (new_phi,
6129 new_stmt_vec_info (new_phi, loop_vinfo));
6130 add_phi_arg (new_phi, vec_zero, loop_preheader_edge (loop),
6131 UNKNOWN_LOCATION);
6133 /* Now take the condition from the loops original cond_expr
6134 (VEC_STMT) and produce a new cond_expr (INDEX_COND_EXPR) which for
6135 every match uses values from the induction variable
6136 (INDEX_BEFORE_INCR) otherwise uses values from the phi node
6137 (NEW_PHI_TREE).
6138 Finally, we update the phi (NEW_PHI_TREE) to take the value of
6139 the new cond_expr (INDEX_COND_EXPR). */
6141 /* Duplicate the condition from vec_stmt. */
6142 tree ccompare = unshare_expr (gimple_assign_rhs1 (*vec_stmt));
6144 /* Create a conditional, where the condition is taken from vec_stmt
6145 (CCOMPARE), then is the induction index (INDEX_BEFORE_INCR) and
6146 else is the phi (NEW_PHI_TREE). */
6147 tree index_cond_expr = build3 (VEC_COND_EXPR, cr_index_vector_type,
6148 ccompare, indx_before_incr,
6149 new_phi_tree);
6150 cond_name = make_ssa_name (cr_index_vector_type);
6151 gimple *index_condition = gimple_build_assign (cond_name,
6152 index_cond_expr);
6153 gsi_insert_before (&incr_gsi, index_condition, GSI_SAME_STMT);
6154 stmt_vec_info index_vec_info = new_stmt_vec_info (index_condition,
6155 loop_vinfo);
6156 STMT_VINFO_VECTYPE (index_vec_info) = cr_index_vector_type;
6157 set_vinfo_for_stmt (index_condition, index_vec_info);
6159 /* Update the phi with the vec cond. */
6160 add_phi_arg (new_phi, cond_name, loop_latch_edge (loop),
6161 UNKNOWN_LOCATION);
6165 vect_create_epilog_for_reduction (vect_defs, stmt, epilog_copies,
6166 epilog_reduc_code, phis, reduc_index,
6167 double_reduc, slp_node, cond_name);
6169 return true;
6172 /* Function vect_min_worthwhile_factor.
6174 For a loop where we could vectorize the operation indicated by CODE,
6175 return the minimum vectorization factor that makes it worthwhile
6176 to use generic vectors. */
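/* One plausible reading of the thresholds below, added for illustration
   (the emulation sketch is an assumption, not lifted from the generic
   lowering code): element-wise addition in a plain word-sized register
   needs extra masking so that carries do not leak between lanes, e.g. for
   four 16-bit lanes in a 64-bit word:

     uint64_t add_v4hi (uint64_t a, uint64_t b)
     {
       uint64_t m = 0x7fff7fff7fff7fffULL;      <-- low 15 bits of each lane
       uint64_t low = (a & m) + (b & m);        <-- lane-local addition
       return low ^ ((a ^ b) & ~m);             <-- fix up the lane MSBs
     }

   Bitwise AND/IOR/XOR need no such fixup, which is why they are considered
   worthwhile already at a smaller factor.  */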
6178 vect_min_worthwhile_factor (enum tree_code code)
6180 switch (code)
6182 case PLUS_EXPR:
6183 case MINUS_EXPR:
6184 case NEGATE_EXPR:
6185 return 4;
6187 case BIT_AND_EXPR:
6188 case BIT_IOR_EXPR:
6189 case BIT_XOR_EXPR:
6190 case BIT_NOT_EXPR:
6191 return 2;
6193 default:
6194 return INT_MAX;
6199 /* Function vectorizable_induction
6201 Check if PHI performs an induction computation that can be vectorized.
6202 If VEC_STMT is also passed, vectorize the induction PHI: create a vectorized
6203 phi to replace it, put it in VEC_STMT, and add it to the same basic block.
6204 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
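/* A minimal sketch, added for exposition (VF == 4 and the typedef are
   assumptions), of what vectorizing an induction PHI amounts to at the
   source level:

     typedef int __attribute__ ((mode (V4SI))) v4si;

     for (i = 0; i < N; i++)            scalar IV: 0, 1, 2, ...
       a[i] = i;

   conceptually becomes

     v4si iv   = { 0, 1, 2, 3 };        initial vector def for the PHI
     v4si step = { 4, 4, 4, 4 };
     for (i = 0; i < N; i += 4)
       {
         *(v4si *) &a[i] = iv;
         iv = iv + step;                vectorized latch update
       }
  */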
6206 bool
6207 vectorizable_induction (gimple *phi,
6208 gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
6209 gimple **vec_stmt)
6211 stmt_vec_info stmt_info = vinfo_for_stmt (phi);
6212 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
6213 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
6214 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
6215 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
6216 int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
6217 tree vec_def;
6219 gcc_assert (ncopies >= 1);
6220 /* FORNOW. These restrictions should be relaxed. */
6221 if (nested_in_vect_loop_p (loop, phi))
6223 imm_use_iterator imm_iter;
6224 use_operand_p use_p;
6225 gimple *exit_phi;
6226 edge latch_e;
6227 tree loop_arg;
6229 if (ncopies > 1)
6231 if (dump_enabled_p ())
6232 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6233 "multiple types in nested loop.\n");
6234 return false;
6237 exit_phi = NULL;
6238 latch_e = loop_latch_edge (loop->inner);
6239 loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
6240 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
6242 gimple *use_stmt = USE_STMT (use_p);
6243 if (is_gimple_debug (use_stmt))
6244 continue;
6246 if (!flow_bb_inside_loop_p (loop->inner, gimple_bb (use_stmt)))
6248 exit_phi = use_stmt;
6249 break;
6252 if (exit_phi)
6254 stmt_vec_info exit_phi_vinfo = vinfo_for_stmt (exit_phi);
6255 if (!(STMT_VINFO_RELEVANT_P (exit_phi_vinfo)
6256 && !STMT_VINFO_LIVE_P (exit_phi_vinfo)))
6258 if (dump_enabled_p ())
6259 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6260 "inner-loop induction only used outside "
6261 "of the outer vectorized loop.\n");
6262 return false;
6267 if (!STMT_VINFO_RELEVANT_P (stmt_info))
6268 return false;
6270 /* FORNOW: SLP not supported. */
6271 if (STMT_SLP_TYPE (stmt_info))
6272 return false;
6274 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def);
6276 if (gimple_code (phi) != GIMPLE_PHI)
6277 return false;
6279 if (!vec_stmt) /* transformation not required. */
6281 STMT_VINFO_TYPE (stmt_info) = induc_vec_info_type;
6282 if (dump_enabled_p ())
6283 dump_printf_loc (MSG_NOTE, vect_location,
6284 "=== vectorizable_induction ===\n");
6285 vect_model_induction_cost (stmt_info, ncopies);
6286 return true;
6289 /** Transform. **/
6291 if (dump_enabled_p ())
6292 dump_printf_loc (MSG_NOTE, vect_location, "transform induction phi.\n");
6294 vec_def = get_initial_def_for_induction (phi);
6295 *vec_stmt = SSA_NAME_DEF_STMT (vec_def);
6296 return true;
6299 /* Function vectorizable_live_operation.
6301 STMT computes a value that is used outside the loop. Check if
6302 it can be supported. */
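/* A small sketch, added for exposition (names and VF are assumptions), of
   a "live" operation:

     int last;
     for (int i = 0; i < N; i++)
       last = a[i] + b[i];
     use (last);                        value escapes the loop

   After vectorization the loop only produces vector sums; the scalar LAST
   used after the loop is recovered on the loop exit edge by extracting the
   final lane of the last vector def (a BIT_FIELD_REF), which is what the
   transformation below arranges.  */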
6304 bool
6305 vectorizable_live_operation (gimple *stmt,
6306 gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
6307 slp_tree slp_node, int slp_index,
6308 gimple **vec_stmt)
6310 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
6311 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
6312 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
6313 imm_use_iterator imm_iter;
6314 tree lhs, lhs_type, bitsize, vec_bitsize;
6315 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
6316 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
6317 int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
6318 gimple *use_stmt;
6319 auto_vec<tree> vec_oprnds;
6321 gcc_assert (STMT_VINFO_LIVE_P (stmt_info));
6323 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def)
6324 return false;
6326 /* FORNOW. CHECKME. */
6327 if (nested_in_vect_loop_p (loop, stmt))
6328 return false;
6330 /* If STMT is not relevant and it is a simple assignment and its inputs are
6331 invariant then it can remain in place, unvectorized. The original last
6332 scalar value that it computes will be used. */
6333 if (!STMT_VINFO_RELEVANT_P (stmt_info))
6335 gcc_assert (is_simple_and_all_uses_invariant (stmt, loop_vinfo));
6336 if (dump_enabled_p ())
6337 dump_printf_loc (MSG_NOTE, vect_location,
6338 "statement is simple and uses invariant. Leaving in "
6339 "place.\n");
6340 return true;
6343 if (!vec_stmt)
6344 /* No transformation required. */
6345 return true;
6347 /* If stmt has a related stmt, then use that for getting the lhs. */
6348 if (is_pattern_stmt_p (stmt_info))
6349 stmt = STMT_VINFO_RELATED_STMT (stmt_info);
6351 lhs = (is_a <gphi *> (stmt)) ? gimple_phi_result (stmt)
6352 : gimple_get_lhs (stmt);
6353 lhs_type = TREE_TYPE (lhs);
6355 /* Find all uses of STMT outside the loop - there should be exactly one. */
6356 auto_vec<gimple *, 4> worklist;
6357 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, lhs)
6358 if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
6359 worklist.safe_push (use_stmt);
6360 gcc_assert (worklist.length () >= 1);
6362 bitsize = TYPE_SIZE (TREE_TYPE (vectype));
6363 vec_bitsize = TYPE_SIZE (vectype);
6365 /* Get the vectorized lhs of STMT and the lane to use (counted in bits). */
6366 tree vec_lhs, bitstart;
6367 if (slp_node)
6369 gcc_assert (slp_index >= 0);
6371 int num_scalar = SLP_TREE_SCALAR_STMTS (slp_node).length ();
6372 int num_vec = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
6374 /* Get the last occurrence of the scalar index from the concatenation of
6375 all the slp vectors. Calculate which slp vector it is and the index
6376 within. */
6377 int pos = (num_vec * nunits) - num_scalar + slp_index;
6378 int vec_entry = pos / nunits;
6379 int vec_index = pos % nunits;
6381 /* Get the correct slp vectorized stmt. */
6382 vec_lhs = gimple_get_lhs (SLP_TREE_VEC_STMTS (slp_node)[vec_entry]);
6384 /* Get entry to use. */
6385 bitstart = build_int_cst (unsigned_type_node, vec_index);
6386 bitstart = int_const_binop (MULT_EXPR, bitsize, bitstart);
6388 else
6390 enum vect_def_type dt = STMT_VINFO_DEF_TYPE (stmt_info);
6391 vec_lhs = vect_get_vec_def_for_operand_1 (stmt, dt);
6393 /* For multiple copies, get the last copy. */
6394 for (int i = 1; i < ncopies; ++i)
6395 vec_lhs = vect_get_vec_def_for_stmt_copy (vect_unknown_def_type,
6396 vec_lhs);
6398 /* Get the last lane in the vector. */
6399 bitstart = int_const_binop (MINUS_EXPR, vec_bitsize, bitsize);
6402 /* Create a new vectorized stmt for the uses of STMT and insert outside the
6403 loop. */
6404 gimple_seq stmts = NULL;
6405 tree new_tree = build3 (BIT_FIELD_REF, TREE_TYPE (vectype), vec_lhs, bitsize,
6406 bitstart);
6407 new_tree = force_gimple_operand (fold_convert (lhs_type, new_tree), &stmts,
6408 true, NULL_TREE);
6409 if (stmts)
6410 gsi_insert_seq_on_edge_immediate (single_exit (loop), stmts);
6412 /* Replace all uses of the USE_STMT in the worklist with the newly inserted
6413 statement. */
6414 while (!worklist.is_empty ())
6416 use_stmt = worklist.pop ();
6417 replace_uses_by (gimple_phi_result (use_stmt), new_tree);
6418 update_stmt (use_stmt);
6421 return true;
6424 /* Kill any debug uses outside LOOP of SSA names defined in STMT. */
6426 static void
6427 vect_loop_kill_debug_uses (struct loop *loop, gimple *stmt)
6429 ssa_op_iter op_iter;
6430 imm_use_iterator imm_iter;
6431 def_operand_p def_p;
6432 gimple *ustmt;
6434 FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
6436 FOR_EACH_IMM_USE_STMT (ustmt, imm_iter, DEF_FROM_PTR (def_p))
6438 basic_block bb;
6440 if (!is_gimple_debug (ustmt))
6441 continue;
6443 bb = gimple_bb (ustmt);
6445 if (!flow_bb_inside_loop_p (loop, bb))
6447 if (gimple_debug_bind_p (ustmt))
6449 if (dump_enabled_p ())
6450 dump_printf_loc (MSG_NOTE, vect_location,
6451 "killing debug use\n");
6453 gimple_debug_bind_reset_value (ustmt);
6454 update_stmt (ustmt);
6456 else
6457 gcc_unreachable ();
6464 /* This function builds ni_name = number of iterations. Statements
6465 are emitted on the loop preheader edge. */
6467 static tree
6468 vect_build_loop_niters (loop_vec_info loop_vinfo)
6470 tree ni = unshare_expr (LOOP_VINFO_NITERS (loop_vinfo));
6471 if (TREE_CODE (ni) == INTEGER_CST)
6472 return ni;
6473 else
6475 tree ni_name, var;
6476 gimple_seq stmts = NULL;
6477 edge pe = loop_preheader_edge (LOOP_VINFO_LOOP (loop_vinfo));
6479 var = create_tmp_var (TREE_TYPE (ni), "niters");
6480 ni_name = force_gimple_operand (ni, &stmts, false, var);
6481 if (stmts)
6482 gsi_insert_seq_on_edge_immediate (pe, stmts);
6484 return ni_name;
6489 /* This function generates the following statements:
6491 ni_name = number of iterations loop executes
6492 ratio = ni_name / vf
6493 ratio_mult_vf_name = ratio * vf
6495 and places them on the loop preheader edge. */
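/* A worked example with assumed numbers: for ni_name == 37 and vf == 4,
     ratio          = 37 >> 2  = 9    vector iterations
     ratio_mult_vf  = 9 << 2   = 36   scalar iterations covered
   leaving 37 - 36 = 1 iteration for the scalar epilog.  The overflow-safe
   form used below gives the same value: ((37 - 4) >> 2) + 1 = 9.  */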
6497 static void
6498 vect_generate_tmps_on_preheader (loop_vec_info loop_vinfo,
6499 tree ni_name,
6500 tree *ratio_mult_vf_name_ptr,
6501 tree *ratio_name_ptr)
6503 tree ni_minus_gap_name;
6504 tree var;
6505 tree ratio_name;
6506 tree ratio_mult_vf_name;
6507 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
6508 edge pe = loop_preheader_edge (LOOP_VINFO_LOOP (loop_vinfo));
6509 tree log_vf;
6511 log_vf = build_int_cst (TREE_TYPE (ni_name), exact_log2 (vf));
6513 /* If epilogue loop is required because of data accesses with gaps, we
6514 subtract one iteration from the total number of iterations here for
6515 correct calculation of RATIO. */
6516 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo))
6518 ni_minus_gap_name = fold_build2 (MINUS_EXPR, TREE_TYPE (ni_name),
6519 ni_name,
6520 build_one_cst (TREE_TYPE (ni_name)));
6521 if (!is_gimple_val (ni_minus_gap_name))
6523 var = create_tmp_var (TREE_TYPE (ni_name), "ni_gap");
6524 gimple *stmts = NULL;
6525 ni_minus_gap_name = force_gimple_operand (ni_minus_gap_name, &stmts,
6526 true, var);
6527 gsi_insert_seq_on_edge_immediate (pe, stmts);
6530 else
6531 ni_minus_gap_name = ni_name;
6533 /* Create: ratio = ni >> log2(vf) */
6534 /* ??? As we have ni == number of latch executions + 1, ni could
6535 have overflowed to zero.  So avoid computing ratio based on ni
6536 but compute it using the fact that we know ratio will be at least
6537 one, thus via (ni - vf) >> log2(vf) + 1. */
6538 ratio_name
6539 = fold_build2 (PLUS_EXPR, TREE_TYPE (ni_name),
6540 fold_build2 (RSHIFT_EXPR, TREE_TYPE (ni_name),
6541 fold_build2 (MINUS_EXPR, TREE_TYPE (ni_name),
6542 ni_minus_gap_name,
6543 build_int_cst
6544 (TREE_TYPE (ni_name), vf)),
6545 log_vf),
6546 build_int_cst (TREE_TYPE (ni_name), 1));
6547 if (!is_gimple_val (ratio_name))
6549 var = create_tmp_var (TREE_TYPE (ni_name), "bnd");
6550 gimple *stmts = NULL;
6551 ratio_name = force_gimple_operand (ratio_name, &stmts, true, var);
6552 gsi_insert_seq_on_edge_immediate (pe, stmts);
6554 *ratio_name_ptr = ratio_name;
6556 /* Create: ratio_mult_vf = ratio << log2 (vf). */
6558 if (ratio_mult_vf_name_ptr)
6560 ratio_mult_vf_name = fold_build2 (LSHIFT_EXPR, TREE_TYPE (ratio_name),
6561 ratio_name, log_vf);
6562 if (!is_gimple_val (ratio_mult_vf_name))
6564 var = create_tmp_var (TREE_TYPE (ni_name), "ratio_mult_vf");
6565 gimple *stmts = NULL;
6566 ratio_mult_vf_name = force_gimple_operand (ratio_mult_vf_name, &stmts,
6567 true, var);
6568 gsi_insert_seq_on_edge_immediate (pe, stmts);
6570 *ratio_mult_vf_name_ptr = ratio_mult_vf_name;
6573 return;
6577 /* Function vect_transform_loop.
6579 The analysis phase has determined that the loop is vectorizable.
6580 Vectorize the loop - created vectorized stmts to replace the scalar
6581 stmts in the loop, and update the loop exit condition. */
6583 void
6584 vect_transform_loop (loop_vec_info loop_vinfo)
6586 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
6587 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
6588 int nbbs = loop->num_nodes;
6589 int i;
6590 tree ratio = NULL;
6591 int vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
6592 bool grouped_store;
6593 bool slp_scheduled = false;
6594 gimple *stmt, *pattern_stmt;
6595 gimple_seq pattern_def_seq = NULL;
6596 gimple_stmt_iterator pattern_def_si = gsi_none ();
6597 bool transform_pattern_stmt = false;
6598 bool check_profitability = false;
6599 int th;
6600 /* Record number of iterations before we started tampering with the profile. */
6601 gcov_type expected_iterations = expected_loop_iterations_unbounded (loop);
6603 if (dump_enabled_p ())
6604 dump_printf_loc (MSG_NOTE, vect_location, "=== vec_transform_loop ===\n");
6606 /* If the profile is imprecise, we have a chance to fix it up. */
6607 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
6608 expected_iterations = LOOP_VINFO_INT_NITERS (loop_vinfo);
6610 /* Use the more conservative vectorization threshold. If the number
6611 of iterations is constant assume the cost check has been performed
6612 by our caller. If the threshold makes all loops profitable that
6613 run at least the vectorization factor number of times checking
6614 is pointless, too. */
6615 th = LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo);
6616 if (th >= LOOP_VINFO_VECT_FACTOR (loop_vinfo) - 1
6617 && !LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
6619 if (dump_enabled_p ())
6620 dump_printf_loc (MSG_NOTE, vect_location,
6621 "Profitability threshold is %d loop iterations.\n",
6622 th);
6623 check_profitability = true;
6626 /* Version the loop first, if required, so the profitability check
6627 comes first. */
6629 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
6630 || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
6632 vect_loop_versioning (loop_vinfo, th, check_profitability);
6633 check_profitability = false;
6636 tree ni_name = vect_build_loop_niters (loop_vinfo);
6637 LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = ni_name;
6639 /* Peel the loop if there are data refs with unknown alignment.
6640 Only one data ref with unknown alignment is allowed. */
6642 if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo))
6644 vect_do_peeling_for_alignment (loop_vinfo, ni_name,
6645 th, check_profitability);
6646 check_profitability = false;
6647 /* The above adjusts LOOP_VINFO_NITERS, so cause ni_name to
6648 be re-computed. */
6649 ni_name = NULL_TREE;
6652 /* If the loop has a symbolic number of iterations 'n' (i.e. it's not a
6653 compile time constant), or it is a constant that doesn't divide by the
6654 vectorization factor, then an epilog loop needs to be created.
6655 We therefore duplicate the loop: the original loop will be vectorized,
6656 and will compute the first (n/VF) iterations. The second copy of the loop
6657 will remain scalar and will compute the remaining (n%VF) iterations.
6658 (VF is the vectorization factor). */
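/* A sketch of the resulting loop structure, added for exposition, for a
   symbolic trip count n and an assumed VF of 4:

     for (i = 0; i < (n / 4) * 4; i += 4)     vectorized main loop
       ...vector stmts...
     for ( ; i < n; i++)                      scalar epilog, n % 4 iters
       ...original scalar stmts...
  */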
6660 if (LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo)
6661 || LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo))
6663 tree ratio_mult_vf;
6664 if (!ni_name)
6665 ni_name = vect_build_loop_niters (loop_vinfo);
6666 vect_generate_tmps_on_preheader (loop_vinfo, ni_name, &ratio_mult_vf,
6667 &ratio);
6668 vect_do_peeling_for_loop_bound (loop_vinfo, ni_name, ratio_mult_vf,
6669 th, check_profitability);
6671 else if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
6672 ratio = build_int_cst (TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo)),
6673 LOOP_VINFO_INT_NITERS (loop_vinfo) / vectorization_factor);
6674 else
6676 if (!ni_name)
6677 ni_name = vect_build_loop_niters (loop_vinfo);
6678 vect_generate_tmps_on_preheader (loop_vinfo, ni_name, NULL, &ratio);
6681 /* 1) Make sure the loop header has exactly two entries
6682 2) Make sure we have a preheader basic block. */
6684 gcc_assert (EDGE_COUNT (loop->header->preds) == 2);
6686 split_edge (loop_preheader_edge (loop));
6688 /* FORNOW: the vectorizer supports only loops whose body consists
6689 of one basic block (header + empty latch).  When the vectorizer
6690 supports more involved loop forms, the order in which the BBs are
6691 traversed will need to be reconsidered. */
6693 for (i = 0; i < nbbs; i++)
6695 basic_block bb = bbs[i];
6696 stmt_vec_info stmt_info;
6698 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
6699 gsi_next (&si))
6701 gphi *phi = si.phi ();
6702 if (dump_enabled_p ())
6704 dump_printf_loc (MSG_NOTE, vect_location,
6705 "------>vectorizing phi: ");
6706 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
6708 stmt_info = vinfo_for_stmt (phi);
6709 if (!stmt_info)
6710 continue;
6712 if (MAY_HAVE_DEBUG_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
6713 vect_loop_kill_debug_uses (loop, phi);
6715 if (!STMT_VINFO_RELEVANT_P (stmt_info)
6716 && !STMT_VINFO_LIVE_P (stmt_info))
6717 continue;
6719 if (STMT_VINFO_VECTYPE (stmt_info)
6720 && (TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info))
6721 != (unsigned HOST_WIDE_INT) vectorization_factor)
6722 && dump_enabled_p ())
6723 dump_printf_loc (MSG_NOTE, vect_location, "multiple-types.\n");
6725 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def)
6727 if (dump_enabled_p ())
6728 dump_printf_loc (MSG_NOTE, vect_location, "transform phi.\n");
6729 vect_transform_stmt (phi, NULL, NULL, NULL, NULL);
6733 pattern_stmt = NULL;
6734 for (gimple_stmt_iterator si = gsi_start_bb (bb);
6735 !gsi_end_p (si) || transform_pattern_stmt;)
6737 bool is_store;
6739 if (transform_pattern_stmt)
6740 stmt = pattern_stmt;
6741 else
6743 stmt = gsi_stmt (si);
6744 /* During vectorization remove existing clobber stmts. */
6745 if (gimple_clobber_p (stmt))
6747 unlink_stmt_vdef (stmt);
6748 gsi_remove (&si, true);
6749 release_defs (stmt);
6750 continue;
6754 if (dump_enabled_p ())
6756 dump_printf_loc (MSG_NOTE, vect_location,
6757 "------>vectorizing statement: ");
6758 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
6761 stmt_info = vinfo_for_stmt (stmt);
6763 /* vector stmts created in the outer-loop during vectorization of
6764 stmts in an inner-loop may not have a stmt_info, and do not
6765 need to be vectorized. */
6766 if (!stmt_info)
6768 gsi_next (&si);
6769 continue;
6772 if (MAY_HAVE_DEBUG_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
6773 vect_loop_kill_debug_uses (loop, stmt);
6775 if (!STMT_VINFO_RELEVANT_P (stmt_info)
6776 && !STMT_VINFO_LIVE_P (stmt_info))
6778 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
6779 && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info))
6780 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
6781 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
6783 stmt = pattern_stmt;
6784 stmt_info = vinfo_for_stmt (stmt);
6786 else
6788 gsi_next (&si);
6789 continue;
6792 else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
6793 && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info))
6794 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
6795 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
6796 transform_pattern_stmt = true;
6798 /* If pattern statement has def stmts, vectorize them too. */
6799 if (is_pattern_stmt_p (stmt_info))
6801 if (pattern_def_seq == NULL)
6803 pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
6804 pattern_def_si = gsi_start (pattern_def_seq);
6806 else if (!gsi_end_p (pattern_def_si))
6807 gsi_next (&pattern_def_si);
6808 if (pattern_def_seq != NULL)
6810 gimple *pattern_def_stmt = NULL;
6811 stmt_vec_info pattern_def_stmt_info = NULL;
6813 while (!gsi_end_p (pattern_def_si))
6815 pattern_def_stmt = gsi_stmt (pattern_def_si);
6816 pattern_def_stmt_info
6817 = vinfo_for_stmt (pattern_def_stmt);
6818 if (STMT_VINFO_RELEVANT_P (pattern_def_stmt_info)
6819 || STMT_VINFO_LIVE_P (pattern_def_stmt_info))
6820 break;
6821 gsi_next (&pattern_def_si);
6824 if (!gsi_end_p (pattern_def_si))
6826 if (dump_enabled_p ())
6828 dump_printf_loc (MSG_NOTE, vect_location,
6829 "==> vectorizing pattern def "
6830 "stmt: ");
6831 dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
6832 pattern_def_stmt, 0);
6835 stmt = pattern_def_stmt;
6836 stmt_info = pattern_def_stmt_info;
6838 else
6840 pattern_def_si = gsi_none ();
6841 transform_pattern_stmt = false;
6844 else
6845 transform_pattern_stmt = false;
6848 if (STMT_VINFO_VECTYPE (stmt_info))
6850 unsigned int nunits
6851 = (unsigned int)
6852 TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
6853 if (!STMT_SLP_TYPE (stmt_info)
6854 && nunits != (unsigned int) vectorization_factor
6855 && dump_enabled_p ())
6856 /* For SLP the VF is set according to the unrolling factor, and not
6857 to the vector size, hence for SLP this print is not valid. */
6858 dump_printf_loc (MSG_NOTE, vect_location, "multiple-types.\n");
6861 /* SLP. Schedule all the SLP instances when the first SLP stmt is
6862 reached. */
6863 if (STMT_SLP_TYPE (stmt_info))
6865 if (!slp_scheduled)
6867 slp_scheduled = true;
6869 if (dump_enabled_p ())
6870 dump_printf_loc (MSG_NOTE, vect_location,
6871 "=== scheduling SLP instances ===\n");
6873 vect_schedule_slp (loop_vinfo);
6876 /* Hybrid SLP stmts must be vectorized in addition to SLP. */
6877 if (!vinfo_for_stmt (stmt) || PURE_SLP_STMT (stmt_info))
6879 if (!transform_pattern_stmt && gsi_end_p (pattern_def_si))
6881 pattern_def_seq = NULL;
6882 gsi_next (&si);
6884 continue;
6888 /* -------- vectorize statement ------------ */
6889 if (dump_enabled_p ())
6890 dump_printf_loc (MSG_NOTE, vect_location, "transform statement.\n");
6892 grouped_store = false;
6893 is_store = vect_transform_stmt (stmt, &si, &grouped_store, NULL, NULL);
6894 if (is_store)
6896 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
6898 /* Interleaving. If IS_STORE is TRUE, the vectorization of the
6899 interleaving chain was completed - free all the stores in
6900 the chain. */
6901 gsi_next (&si);
6902 vect_remove_stores (GROUP_FIRST_ELEMENT (stmt_info));
6904 else
6906 /* Free the attached stmt_vec_info and remove the stmt. */
6907 gimple *store = gsi_stmt (si);
6908 free_stmt_vec_info (store);
6909 unlink_stmt_vdef (store);
6910 gsi_remove (&si, true);
6911 release_defs (store);
6914 /* Stores can only appear at the end of pattern statements. */
6915 gcc_assert (!transform_pattern_stmt);
6916 pattern_def_seq = NULL;
6918 else if (!transform_pattern_stmt && gsi_end_p (pattern_def_si))
6920 pattern_def_seq = NULL;
6921 gsi_next (&si);
6923 } /* stmts in BB */
6924 } /* BBs in loop */
6926 slpeel_make_loop_iterate_ntimes (loop, ratio);
6928 /* Reduce loop iterations by the vectorization factor. */
6929 scale_loop_profile (loop, GCOV_COMPUTE_SCALE (1, vectorization_factor),
6930 expected_iterations / vectorization_factor);
6931 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo))
6933 if (loop->nb_iterations_upper_bound != 0)
6934 loop->nb_iterations_upper_bound = loop->nb_iterations_upper_bound - 1;
6935 if (loop->nb_iterations_likely_upper_bound != 0)
6936 loop->nb_iterations_likely_upper_bound
6937 = loop->nb_iterations_likely_upper_bound - 1;
6939 loop->nb_iterations_upper_bound
6940 = wi::udiv_floor (loop->nb_iterations_upper_bound + 1,
6941 vectorization_factor) - 1;
6942 loop->nb_iterations_likely_upper_bound
6943 = wi::udiv_floor (loop->nb_iterations_likely_upper_bound + 1,
6944 vectorization_factor) - 1;
6946 if (loop->any_estimate)
6948 loop->nb_iterations_estimate
6949 = wi::udiv_floor (loop->nb_iterations_estimate, vectorization_factor);
6950 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
6951 && loop->nb_iterations_estimate != 0)
6952 loop->nb_iterations_estimate = loop->nb_iterations_estimate - 1;
6955 if (dump_enabled_p ())
6957 dump_printf_loc (MSG_NOTE, vect_location,
6958 "LOOP VECTORIZED\n");
6959 if (loop->inner)
6960 dump_printf_loc (MSG_NOTE, vect_location,
6961 "OUTER LOOP VECTORIZED\n");
6962 dump_printf (MSG_NOTE, "\n");
6965 /* Free SLP instances here because otherwise stmt reference counting
6966 won't work. */
6967 slp_instance instance;
6968 FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), i, instance)
6969 vect_free_slp_instance (instance);
6970 LOOP_VINFO_SLP_INSTANCES (loop_vinfo).release ();
6973 /* The code below tries to perform a simple optimization - reverting
6974 if-conversion for masked stores: if the mask of a store is zero, do not
6975 perform the store and, where possible, skip the producers of the stored values as well.
6976 For example,
6977 for (i=0; i<n; i++)
6978 if (c[i])
6980 p1[i] += 1;
6981 p2[i] = p3[i] +2;
6983 this transformation will produce the following semi-hammock:
6985 if (!mask__ifc__42.18_165 == { 0, 0, 0, 0, 0, 0, 0, 0 })
6987 vect__11.19_170 = MASK_LOAD (vectp_p1.20_168, 0B, mask__ifc__42.18_165);
6988 vect__12.22_172 = vect__11.19_170 + vect_cst__171;
6989 MASK_STORE (vectp_p1.23_175, 0B, mask__ifc__42.18_165, vect__12.22_172);
6990 vect__18.25_182 = MASK_LOAD (vectp_p3.26_180, 0B, mask__ifc__42.18_165);
6991 vect__19.28_184 = vect__18.25_182 + vect_cst__183;
6992 MASK_STORE (vectp_p2.29_187, 0B, mask__ifc__42.18_165, vect__19.28_184);
6996 void
6997 optimize_mask_stores (struct loop *loop)
6999 basic_block *bbs = get_loop_body (loop);
7000 unsigned nbbs = loop->num_nodes;
7001 unsigned i;
7002 basic_block bb;
7003 gimple_stmt_iterator gsi;
7004 gimple *stmt;
7005 auto_vec<gimple *> worklist;
7007 vect_location = find_loop_location (loop);
7008 /* Pick up all masked stores in loop if any. */
7009 for (i = 0; i < nbbs; i++)
7011 bb = bbs[i];
7012 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
7013 gsi_next (&gsi))
7015 stmt = gsi_stmt (gsi);
7016 if (is_gimple_call (stmt)
7017 && gimple_call_internal_p (stmt)
7018 && gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
7019 worklist.safe_push (stmt);
7023 free (bbs);
7024 if (worklist.is_empty ())
7025 return;
7027 /* Loop has masked stores. */
7028 while (!worklist.is_empty ())
7030 gimple *last, *last_store;
7031 edge e, efalse;
7032 tree mask;
7033 basic_block store_bb, join_bb;
7034 gimple_stmt_iterator gsi_to;
7035 tree vdef, new_vdef;
7036 gphi *phi;
7037 tree vectype;
7038 tree zero;
7040 last = worklist.pop ();
7041 mask = gimple_call_arg (last, 2);
7042 bb = gimple_bb (last);
7043 /* Create new bb. */
7044 e = split_block (bb, last);
7045 join_bb = e->dest;
7046 store_bb = create_empty_bb (bb);
7047 add_bb_to_loop (store_bb, loop);
7048 e->flags = EDGE_TRUE_VALUE;
7049 efalse = make_edge (bb, store_bb, EDGE_FALSE_VALUE);
7050 /* Put STORE_BB to likely part. */
7051 efalse->probability = PROB_UNLIKELY;
7052 store_bb->frequency = PROB_ALWAYS - EDGE_FREQUENCY (efalse);
7053 make_edge (store_bb, join_bb, EDGE_FALLTHRU);
7054 if (dom_info_available_p (CDI_DOMINATORS))
7055 set_immediate_dominator (CDI_DOMINATORS, store_bb, bb);
7056 if (dump_enabled_p ())
7057 dump_printf_loc (MSG_NOTE, vect_location,
7058 "Create new block %d to sink mask stores.",
7059 store_bb->index);
7060 /* Create vector comparison with boolean result. */
7061 vectype = TREE_TYPE (mask);
7062 zero = build_zero_cst (vectype);
7063 stmt = gimple_build_cond (EQ_EXPR, mask, zero, NULL_TREE, NULL_TREE);
7064 gsi = gsi_last_bb (bb);
7065 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
7066 /* Create new PHI node for vdef of the last masked store:
7067 .MEM_2 = VDEF <.MEM_1>
7068 will be converted to
7069 .MEM_3 = VDEF <.MEM_1>
7070 and new PHI node will be created in join bb
7071 .MEM_2 = PHI <.MEM_1, .MEM_3>
7073 vdef = gimple_vdef (last);
7074 new_vdef = make_ssa_name (gimple_vop (cfun), last);
7075 gimple_set_vdef (last, new_vdef);
7076 phi = create_phi_node (vdef, join_bb);
7077 add_phi_arg (phi, new_vdef, EDGE_SUCC (store_bb, 0), UNKNOWN_LOCATION);
7079 /* Put all masked stores with the same mask to STORE_BB if possible. */
7080 while (true)
7082 gimple_stmt_iterator gsi_from;
7083 gimple *stmt1 = NULL;
7085 /* Move masked store to STORE_BB. */
7086 last_store = last;
7087 gsi = gsi_for_stmt (last);
7088 gsi_from = gsi;
7089 /* Shift GSI to the previous stmt for further traversal. */
7090 gsi_prev (&gsi);
7091 gsi_to = gsi_start_bb (store_bb);
7092 gsi_move_before (&gsi_from, &gsi_to);
7093 /* Setup GSI_TO to the non-empty block start. */
7094 gsi_to = gsi_start_bb (store_bb);
7095 if (dump_enabled_p ())
7097 dump_printf_loc (MSG_NOTE, vect_location,
7098 "Move stmt to created bb\n");
7099 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, last, 0);
7101 /* Move all stored value producers if possible. */
7102 while (!gsi_end_p (gsi))
7104 tree lhs;
7105 imm_use_iterator imm_iter;
7106 use_operand_p use_p;
7107 bool res;
7109 /* Skip debug statements. */
7110 if (is_gimple_debug (gsi_stmt (gsi)))
7112 gsi_prev (&gsi);
7113 continue;
7115 stmt1 = gsi_stmt (gsi);
7116 /* Do not consider statements writing to memory or having
7117 volatile operand. */
7118 if (gimple_vdef (stmt1)
7119 || gimple_has_volatile_ops (stmt1))
7120 break;
7121 gsi_from = gsi;
7122 gsi_prev (&gsi);
7123 lhs = gimple_get_lhs (stmt1);
7124 if (!lhs)
7125 break;
7127 /* LHS of vectorized stmt must be SSA_NAME. */
7128 if (TREE_CODE (lhs) != SSA_NAME)
7129 break;
7131 if (!VECTOR_TYPE_P (TREE_TYPE (lhs)))
7133 /* Remove dead scalar statement. */
7134 if (has_zero_uses (lhs))
7136 gsi_remove (&gsi_from, true);
7137 continue;
7141 /* Check that LHS does not have uses outside of STORE_BB. */
7142 res = true;
7143 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
7145 gimple *use_stmt;
7146 use_stmt = USE_STMT (use_p);
7147 if (is_gimple_debug (use_stmt))
7148 continue;
7149 if (gimple_bb (use_stmt) != store_bb)
7151 res = false;
7152 break;
7155 if (!res)
7156 break;
7158 if (gimple_vuse (stmt1)
7159 && gimple_vuse (stmt1) != gimple_vuse (last_store))
7160 break;
7162 /* Can move STMT1 to STORE_BB. */
7163 if (dump_enabled_p ())
7165 dump_printf_loc (MSG_NOTE, vect_location,
7166 "Move stmt to created bb\n");
7167 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt1, 0);
7169 gsi_move_before (&gsi_from, &gsi_to);
7170 /* Shift GSI_TO for further insertion. */
7171 gsi_prev (&gsi_to);
7173 /* Put other masked stores with the same mask to STORE_BB. */
7174 if (worklist.is_empty ()
7175 || gimple_call_arg (worklist.last (), 2) != mask
7176 || worklist.last () != stmt1)
7177 break;
7178 last = worklist.pop ();
7180 add_phi_arg (phi, gimple_vuse (last_store), e, UNKNOWN_LOCATION);