re PR c++/79790 ([C++17] ICE class template argument deduction failed)
[official-gcc.git] / gcc / tree-vect-loop.c
blob 8b2a61e733be4134ecbdfca77b7cd86f1f35318f
1 /* Loop Vectorization
2 Copyright (C) 2003-2017 Free Software Foundation, Inc.
3 Contributed by Dorit Naishlos <dorit@il.ibm.com> and
4 Ira Rosen <irar@il.ibm.com>
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "backend.h"
26 #include "target.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "gimple.h"
30 #include "cfghooks.h"
31 #include "tree-pass.h"
32 #include "ssa.h"
33 #include "optabs-tree.h"
34 #include "diagnostic-core.h"
35 #include "fold-const.h"
36 #include "stor-layout.h"
37 #include "cfganal.h"
38 #include "gimplify.h"
39 #include "gimple-iterator.h"
40 #include "gimplify-me.h"
41 #include "tree-ssa-loop-ivopts.h"
42 #include "tree-ssa-loop-manip.h"
43 #include "tree-ssa-loop-niter.h"
44 #include "tree-ssa-loop.h"
45 #include "cfgloop.h"
46 #include "params.h"
47 #include "tree-scalar-evolution.h"
48 #include "tree-vectorizer.h"
49 #include "gimple-fold.h"
50 #include "cgraph.h"
51 #include "tree-cfg.h"
52 #include "tree-if-conv.h"
54 /* Loop Vectorization Pass.
56 This pass tries to vectorize loops.
58 For example, the vectorizer transforms the following simple loop:
60 short a[N]; short b[N]; short c[N]; int i;
62 for (i=0; i<N; i++){
63 a[i] = b[i] + c[i];
64 }
66 as if it was manually vectorized by rewriting the source code into:
68 typedef int __attribute__((mode(V8HI))) v8hi;
69 short a[N]; short b[N]; short c[N]; int i;
70 v8hi *pa = (v8hi*)a, *pb = (v8hi*)b, *pc = (v8hi*)c;
71 v8hi va, vb, vc;
73 for (i=0; i<N/8; i++){
74 vb = pb[i];
75 vc = pc[i];
76 va = vb + vc;
77 pa[i] = va;
78 }
80 The main entry to this pass is vectorize_loops(), in which
81 the vectorizer applies a set of analyses on a given set of loops,
82 followed by the actual vectorization transformation for the loops that
83 had successfully passed the analysis phase.
84 Throughout this pass we make a distinction between two types of
85 data: scalars (which are represented by SSA_NAMES), and memory references
86 ("data-refs"). These two types of data require different handling both
87 during analysis and transformation. The types of data-refs that the
88 vectorizer currently supports are ARRAY_REFS whose base is an array DECL
89 (not a pointer), and INDIRECT_REFS through pointers; both array and pointer
90 accesses are required to have a simple (consecutive) access pattern.
92 Analysis phase:
93 ===============
94 The driver for the analysis phase is vect_analyze_loop().
95 It applies a set of analyses, some of which rely on the scalar evolution
96 analyzer (scev) developed by Sebastian Pop.
98 During the analysis phase the vectorizer records some information
99 per stmt in a "stmt_vec_info" struct which is attached to each stmt in the
100 loop, as well as general information about the loop as a whole, which is
101 recorded in a "loop_vec_info" struct attached to each loop.
103 Transformation phase:
104 =====================
105 The loop transformation phase scans all the stmts in the loop, and
106 creates a vector stmt (or a sequence of stmts) for each scalar stmt S in
107 the loop that needs to be vectorized. It inserts the vector code sequence
108 just before the scalar stmt S, and records a pointer to the vector code
109 in STMT_VINFO_VEC_STMT (stmt_info) (stmt_info is the stmt_vec_info struct
110 attached to S). This pointer will be used for the vectorization of following
111 stmts which use the def of stmt S. Stmt S is removed if it writes to memory;
112 otherwise, we rely on dead code elimination for removing it.
114 For example, say stmt S1 was vectorized into stmt VS1:
116 VS1: vb = px[i];
117 S1: b = x[i]; STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
118 S2: a = b;
120 To vectorize stmt S2, the vectorizer first finds the stmt that defines
121 the operand 'b' (S1), and gets the relevant vector def 'vb' from the
122 vector stmt VS1 pointed to by STMT_VINFO_VEC_STMT (stmt_info (S1)). The
123 resulting sequence would be:
125 VS1: vb = px[i];
126 S1: b = x[i]; STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
127 VS2: va = vb;
128 S2: a = b; STMT_VINFO_VEC_STMT (stmt_info (S2)) = VS2
130 Operands that are not SSA_NAMEs are data-refs that appear in
131 load/store operations (like 'x[i]' in S1), and are handled differently.
133 Target modeling:
134 =================
135 Currently the only target specific information that is used is the
136 size of the vector (in bytes) - "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD".
137 Targets that can support different sizes of vectors, for now will need
138 to specify one value for "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD". More
139 flexibility will be added in the future.
141 Since we only vectorize operations whose vector form can be
142 expressed using existing tree codes, to verify that an operation is
143 supported, the vectorizer checks the relevant optab at the relevant
144 machine_mode (e.g, optab_handler (add_optab, V8HImode)). If
145 the value found is CODE_FOR_nothing, then there's no target support, and
146 we can't vectorize the stmt.
148 For additional information on this project see:
149 http://gcc.gnu.org/projects/tree-ssa/vectorization.html
150 */
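/* A minimal standalone sketch of the transformation described in the
   comment above, assuming a 128-bit vector target and N a multiple of 8.
   It uses the GCC vector_size attribute rather than mode(V8HI); the
   names below are illustrative, not part of this file.  */

#define N 64
typedef short v8hi __attribute__ ((vector_size (16)));   /* 8 x short */

short a[N], b[N], c[N];

void
scalar_loop (void)
{
  for (int i = 0; i < N; i++)
    a[i] = b[i] + c[i];
}

void
hand_vectorized_loop (void)
{
  v8hi *pa = (v8hi *) a, *pb = (v8hi *) b, *pc = (v8hi *) c;
  for (int i = 0; i < N / 8; i++)
    pa[i] = pb[i] + pc[i];      /* one vector add covers 8 shorts */
}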
152 static void vect_estimate_min_profitable_iters (loop_vec_info, int *, int *);
154 /* Function vect_determine_vectorization_factor
156 Determine the vectorization factor (VF). VF is the number of data elements
157 that are operated upon in parallel in a single iteration of the vectorized
158 loop. For example, when vectorizing a loop that operates on 4byte elements,
159 on a target with vector size (VS) 16byte, the VF is set to 4, since 4
160 elements can fit in a single vector register.
162 We currently support vectorization of loops in which all types operated upon
163 are of the same size. Therefore this function currently sets VF according to
164 the size of the types operated upon, and fails if there are multiple sizes
165 in the loop.
167 VF is also the factor by which the loop iterations are strip-mined, e.g.:
168 original loop:
169 for (i=0; i<N; i++){
170 a[i] = b[i] + c[i];
171 }
173 vectorized loop:
174 for (i=0; i<N; i+=VF){
175 a[i:VF] = b[i:VF] + c[i:VF];
176 }
179 static bool
180 vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
182 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
183 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
184 unsigned nbbs = loop->num_nodes;
185 unsigned int vectorization_factor = 0;
186 tree scalar_type = NULL_TREE;
187 gphi *phi;
188 tree vectype;
189 unsigned int nunits;
190 stmt_vec_info stmt_info;
191 unsigned i;
192 HOST_WIDE_INT dummy;
193 gimple *stmt, *pattern_stmt = NULL;
194 gimple_seq pattern_def_seq = NULL;
195 gimple_stmt_iterator pattern_def_si = gsi_none ();
196 bool analyze_pattern_stmt = false;
197 bool bool_result;
198 auto_vec<stmt_vec_info> mask_producers;
200 if (dump_enabled_p ())
201 dump_printf_loc (MSG_NOTE, vect_location,
202 "=== vect_determine_vectorization_factor ===\n");
204 for (i = 0; i < nbbs; i++)
206 basic_block bb = bbs[i];
208 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
209 gsi_next (&si))
211 phi = si.phi ();
212 stmt_info = vinfo_for_stmt (phi);
213 if (dump_enabled_p ())
215 dump_printf_loc (MSG_NOTE, vect_location, "==> examining phi: ");
216 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
219 gcc_assert (stmt_info);
221 if (STMT_VINFO_RELEVANT_P (stmt_info)
222 || STMT_VINFO_LIVE_P (stmt_info))
224 gcc_assert (!STMT_VINFO_VECTYPE (stmt_info));
225 scalar_type = TREE_TYPE (PHI_RESULT (phi));
227 if (dump_enabled_p ())
229 dump_printf_loc (MSG_NOTE, vect_location,
230 "get vectype for scalar type: ");
231 dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
232 dump_printf (MSG_NOTE, "\n");
235 vectype = get_vectype_for_scalar_type (scalar_type);
236 if (!vectype)
238 if (dump_enabled_p ())
240 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
241 "not vectorized: unsupported "
242 "data-type ");
243 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
244 scalar_type);
245 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
247 return false;
249 STMT_VINFO_VECTYPE (stmt_info) = vectype;
251 if (dump_enabled_p ())
253 dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
254 dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
255 dump_printf (MSG_NOTE, "\n");
258 nunits = TYPE_VECTOR_SUBPARTS (vectype);
259 if (dump_enabled_p ())
260 dump_printf_loc (MSG_NOTE, vect_location, "nunits = %d\n",
261 nunits);
263 if (!vectorization_factor
264 || (nunits > vectorization_factor))
265 vectorization_factor = nunits;
269 for (gimple_stmt_iterator si = gsi_start_bb (bb);
270 !gsi_end_p (si) || analyze_pattern_stmt;)
272 tree vf_vectype;
274 if (analyze_pattern_stmt)
275 stmt = pattern_stmt;
276 else
277 stmt = gsi_stmt (si);
279 stmt_info = vinfo_for_stmt (stmt);
281 if (dump_enabled_p ())
283 dump_printf_loc (MSG_NOTE, vect_location,
284 "==> examining statement: ");
285 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
288 gcc_assert (stmt_info);
290 /* Skip stmts which do not need to be vectorized. */
291 if ((!STMT_VINFO_RELEVANT_P (stmt_info)
292 && !STMT_VINFO_LIVE_P (stmt_info))
293 || gimple_clobber_p (stmt))
295 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
296 && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info))
297 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
298 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
300 stmt = pattern_stmt;
301 stmt_info = vinfo_for_stmt (pattern_stmt);
302 if (dump_enabled_p ())
304 dump_printf_loc (MSG_NOTE, vect_location,
305 "==> examining pattern statement: ");
306 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
309 else
311 if (dump_enabled_p ())
312 dump_printf_loc (MSG_NOTE, vect_location, "skip.\n");
313 gsi_next (&si);
314 continue;
317 else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
318 && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info))
319 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
320 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
321 analyze_pattern_stmt = true;
323 /* If a pattern statement has def stmts, analyze them too. */
324 if (is_pattern_stmt_p (stmt_info))
326 if (pattern_def_seq == NULL)
328 pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
329 pattern_def_si = gsi_start (pattern_def_seq);
331 else if (!gsi_end_p (pattern_def_si))
332 gsi_next (&pattern_def_si);
333 if (pattern_def_seq != NULL)
335 gimple *pattern_def_stmt = NULL;
336 stmt_vec_info pattern_def_stmt_info = NULL;
338 while (!gsi_end_p (pattern_def_si))
340 pattern_def_stmt = gsi_stmt (pattern_def_si);
341 pattern_def_stmt_info
342 = vinfo_for_stmt (pattern_def_stmt);
343 if (STMT_VINFO_RELEVANT_P (pattern_def_stmt_info)
344 || STMT_VINFO_LIVE_P (pattern_def_stmt_info))
345 break;
346 gsi_next (&pattern_def_si);
349 if (!gsi_end_p (pattern_def_si))
351 if (dump_enabled_p ())
353 dump_printf_loc (MSG_NOTE, vect_location,
354 "==> examining pattern def stmt: ");
355 dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
356 pattern_def_stmt, 0);
359 stmt = pattern_def_stmt;
360 stmt_info = pattern_def_stmt_info;
362 else
364 pattern_def_si = gsi_none ();
365 analyze_pattern_stmt = false;
368 else
369 analyze_pattern_stmt = false;
372 if (gimple_get_lhs (stmt) == NULL_TREE
373 /* MASK_STORE has no lhs, but is ok. */
374 && (!is_gimple_call (stmt)
375 || !gimple_call_internal_p (stmt)
376 || gimple_call_internal_fn (stmt) != IFN_MASK_STORE))
378 if (is_gimple_call (stmt))
380 /* Ignore calls with no lhs. These must be calls to
381 #pragma omp simd functions, and what vectorization factor
382 it really needs can't be determined until
383 vectorizable_simd_clone_call. */
384 if (!analyze_pattern_stmt && gsi_end_p (pattern_def_si))
386 pattern_def_seq = NULL;
387 gsi_next (&si);
389 continue;
391 if (dump_enabled_p ())
393 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
394 "not vectorized: irregular stmt.");
395 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt,
398 return false;
401 if (VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))))
403 if (dump_enabled_p ())
405 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
406 "not vectorized: vector stmt in loop:");
407 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
409 return false;
412 bool_result = false;
414 if (STMT_VINFO_VECTYPE (stmt_info))
416 /* The only case when a vectype had been already set is for stmts
417 that contain a dataref, or for "pattern-stmts" (stmts
418 generated by the vectorizer to represent/replace a certain
419 idiom). */
420 gcc_assert (STMT_VINFO_DATA_REF (stmt_info)
421 || is_pattern_stmt_p (stmt_info)
422 || !gsi_end_p (pattern_def_si));
423 vectype = STMT_VINFO_VECTYPE (stmt_info);
425 else
427 gcc_assert (!STMT_VINFO_DATA_REF (stmt_info));
428 if (gimple_call_internal_p (stmt, IFN_MASK_STORE))
429 scalar_type = TREE_TYPE (gimple_call_arg (stmt, 3));
430 else
431 scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
433 /* Bool ops don't participate in vectorization factor
434 computation. For comparisons, use the compared types to
435 compute a factor. */
436 if (VECT_SCALAR_BOOLEAN_TYPE_P (scalar_type)
437 && is_gimple_assign (stmt)
438 && gimple_assign_rhs_code (stmt) != COND_EXPR)
440 if (STMT_VINFO_RELEVANT_P (stmt_info)
441 || STMT_VINFO_LIVE_P (stmt_info))
442 mask_producers.safe_push (stmt_info);
443 bool_result = true;
445 if (TREE_CODE_CLASS (gimple_assign_rhs_code (stmt))
446 == tcc_comparison
447 && !VECT_SCALAR_BOOLEAN_TYPE_P
448 (TREE_TYPE (gimple_assign_rhs1 (stmt))))
449 scalar_type = TREE_TYPE (gimple_assign_rhs1 (stmt));
450 else
452 if (!analyze_pattern_stmt && gsi_end_p (pattern_def_si))
454 pattern_def_seq = NULL;
455 gsi_next (&si);
457 continue;
461 if (dump_enabled_p ())
463 dump_printf_loc (MSG_NOTE, vect_location,
464 "get vectype for scalar type: ");
465 dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
466 dump_printf (MSG_NOTE, "\n");
468 vectype = get_vectype_for_scalar_type (scalar_type);
469 if (!vectype)
471 if (dump_enabled_p ())
473 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
474 "not vectorized: unsupported "
475 "data-type ");
476 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
477 scalar_type);
478 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
480 return false;
483 if (!bool_result)
484 STMT_VINFO_VECTYPE (stmt_info) = vectype;
486 if (dump_enabled_p ())
488 dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
489 dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
490 dump_printf (MSG_NOTE, "\n");
494 /* Don't try to compute the VF from scalar types if the stmt
495 produces a boolean vector.  Use the result vectype instead. */
496 if (VECTOR_BOOLEAN_TYPE_P (vectype))
497 vf_vectype = vectype;
498 else
500 /* The vectorization factor is according to the smallest
501 scalar type (or the largest vector size, but we only
502 support one vector size per loop). */
503 if (!bool_result)
504 scalar_type = vect_get_smallest_scalar_type (stmt, &dummy,
505 &dummy);
506 if (dump_enabled_p ())
508 dump_printf_loc (MSG_NOTE, vect_location,
509 "get vectype for scalar type: ");
510 dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
511 dump_printf (MSG_NOTE, "\n");
513 vf_vectype = get_vectype_for_scalar_type (scalar_type);
515 if (!vf_vectype)
517 if (dump_enabled_p ())
519 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
520 "not vectorized: unsupported data-type ");
521 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
522 scalar_type);
523 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
525 return false;
528 if ((GET_MODE_SIZE (TYPE_MODE (vectype))
529 != GET_MODE_SIZE (TYPE_MODE (vf_vectype))))
531 if (dump_enabled_p ())
533 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
534 "not vectorized: different sized vector "
535 "types in statement, ");
536 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
537 vectype);
538 dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
539 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
540 vf_vectype);
541 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
543 return false;
546 if (dump_enabled_p ())
548 dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
549 dump_generic_expr (MSG_NOTE, TDF_SLIM, vf_vectype);
550 dump_printf (MSG_NOTE, "\n");
553 nunits = TYPE_VECTOR_SUBPARTS (vf_vectype);
554 if (dump_enabled_p ())
555 dump_printf_loc (MSG_NOTE, vect_location, "nunits = %d\n", nunits);
556 if (!vectorization_factor
557 || (nunits > vectorization_factor))
558 vectorization_factor = nunits;
560 if (!analyze_pattern_stmt && gsi_end_p (pattern_def_si))
562 pattern_def_seq = NULL;
563 gsi_next (&si);
568 /* TODO: Analyze cost. Decide if worth while to vectorize. */
569 if (dump_enabled_p ())
570 dump_printf_loc (MSG_NOTE, vect_location, "vectorization factor = %d\n",
571 vectorization_factor);
572 if (vectorization_factor <= 1)
574 if (dump_enabled_p ())
575 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
576 "not vectorized: unsupported data-type\n");
577 return false;
579 LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;
581 for (i = 0; i < mask_producers.length (); i++)
583 tree mask_type = NULL;
585 stmt = STMT_VINFO_STMT (mask_producers[i]);
587 if (is_gimple_assign (stmt)
588 && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison
589 && !VECT_SCALAR_BOOLEAN_TYPE_P
590 (TREE_TYPE (gimple_assign_rhs1 (stmt))))
592 scalar_type = TREE_TYPE (gimple_assign_rhs1 (stmt));
593 mask_type = get_mask_type_for_scalar_type (scalar_type);
595 if (!mask_type)
597 if (dump_enabled_p ())
598 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
599 "not vectorized: unsupported mask\n");
600 return false;
603 else
605 tree rhs;
606 ssa_op_iter iter;
607 gimple *def_stmt;
608 enum vect_def_type dt;
610 FOR_EACH_SSA_TREE_OPERAND (rhs, stmt, iter, SSA_OP_USE)
612 if (!vect_is_simple_use (rhs, mask_producers[i]->vinfo,
613 &def_stmt, &dt, &vectype))
615 if (dump_enabled_p ())
617 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
618 "not vectorized: can't compute mask type "
619 "for statement, ");
620 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt,
623 return false;
626 /* No vectype probably means external definition.
627 Allow it in case there is another operand which
628 allows the mask type to be determined. */
629 if (!vectype)
630 continue;
632 if (!mask_type)
633 mask_type = vectype;
634 else if (TYPE_VECTOR_SUBPARTS (mask_type)
635 != TYPE_VECTOR_SUBPARTS (vectype))
637 if (dump_enabled_p ())
639 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
640 "not vectorized: different sized masks "
641 "types in statement, ");
642 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
643 mask_type);
644 dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
645 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
646 vectype);
647 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
649 return false;
651 else if (VECTOR_BOOLEAN_TYPE_P (mask_type)
652 != VECTOR_BOOLEAN_TYPE_P (vectype))
654 if (dump_enabled_p ())
656 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
657 "not vectorized: mixed mask and "
658 "nonmask vector types in statement, ");
659 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
660 mask_type);
661 dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
662 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
663 vectype);
664 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
666 return false;
670 /* We may compare boolean value loaded as vector of integers.
671 Fix mask_type in such case. */
672 if (mask_type
673 && !VECTOR_BOOLEAN_TYPE_P (mask_type)
674 && gimple_code (stmt) == GIMPLE_ASSIGN
675 && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
676 mask_type = build_same_sized_truth_vector_type (mask_type);
679 /* No mask_type should mean loop invariant predicate.
680 This is probably a subject for optimization in
681 if-conversion. */
682 if (!mask_type)
684 if (dump_enabled_p ())
686 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
687 "not vectorized: can't compute mask type "
688 "for statement, ");
689 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt,
692 return false;
695 STMT_VINFO_VECTYPE (mask_producers[i]) = mask_type;
698 return true;
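/* A rough standalone illustration of the rule vect_determine_vectorization_factor
   implements: with one vector size per loop, VF comes from the smallest scalar
   type operated on (the type that yields the most subparts per vector).  The
   16-byte vector size and the helper below are assumptions for this sketch,
   not GCC internals.  */

#include <stddef.h>

static unsigned
sketch_vf (const size_t *elem_sizes, unsigned n, size_t vector_bytes)
{
  unsigned vf = 0;
  for (unsigned i = 0; i < n; i++)
    {
      unsigned nunits = vector_bytes / elem_sizes[i];  /* TYPE_VECTOR_SUBPARTS */
      if (nunits > vf)
        vf = nunits;    /* largest nunits, i.e. smallest element type */
    }
  return vf;            /* e.g. element sizes {2, 4} with 16B vectors -> VF 8 */
}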
702 /* Function vect_is_simple_iv_evolution.
704 FORNOW: A simple evolution of an induction variable in the loop is
705 considered a polynomial evolution. */
707 static bool
708 vect_is_simple_iv_evolution (unsigned loop_nb, tree access_fn, tree * init,
709 tree * step)
711 tree init_expr;
712 tree step_expr;
713 tree evolution_part = evolution_part_in_loop_num (access_fn, loop_nb);
714 basic_block bb;
716 /* When there is no evolution in this loop, the evolution function
717 is not "simple". */
718 if (evolution_part == NULL_TREE)
719 return false;
721 /* When the evolution is a polynomial of degree >= 2
722 the evolution function is not "simple". */
723 if (tree_is_chrec (evolution_part))
724 return false;
726 step_expr = evolution_part;
727 init_expr = unshare_expr (initial_condition_in_loop_num (access_fn, loop_nb));
729 if (dump_enabled_p ())
731 dump_printf_loc (MSG_NOTE, vect_location, "step: ");
732 dump_generic_expr (MSG_NOTE, TDF_SLIM, step_expr);
733 dump_printf (MSG_NOTE, ", init: ");
734 dump_generic_expr (MSG_NOTE, TDF_SLIM, init_expr);
735 dump_printf (MSG_NOTE, "\n");
738 *init = init_expr;
739 *step = step_expr;
741 if (TREE_CODE (step_expr) != INTEGER_CST
742 && (TREE_CODE (step_expr) != SSA_NAME
743 || ((bb = gimple_bb (SSA_NAME_DEF_STMT (step_expr)))
744 && flow_bb_inside_loop_p (get_loop (cfun, loop_nb), bb))
745 || (!INTEGRAL_TYPE_P (TREE_TYPE (step_expr))
746 && (!SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr))
747 || !flag_associative_math)))
748 && (TREE_CODE (step_expr) != REAL_CST
749 || !flag_associative_math))
751 if (dump_enabled_p ())
752 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
753 "step unknown.\n");
754 return false;
757 return true;
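/* Plain-C loops illustrating what vect_is_simple_iv_evolution accepts;
   the examples are hypothetical, not taken from this file.  */

void
iv_evolution_examples (int *out, float *fout, int n, float fstep)
{
  /* i evolves as {3, +, 4}_loop: init 3, constant step 4 -> "simple".  */
  for (int i = 3, j = 0; j < n; i += 4, j++)
    out[j] = i;

  /* A float IV is only accepted when its step is a REAL_CST or a
     loop-invariant SSA name and -fassociative-math is enabled; a step
     defined inside the loop would be rejected as "step unknown".  */
  float f = 0.0f;
  for (int j = 0; j < n; j++, f += fstep)
    fout[j] = f;
}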
760 /* Function vect_analyze_scalar_cycles_1.
762 Examine the cross iteration def-use cycles of scalar variables
763 in LOOP. LOOP_VINFO represents the loop that is now being
764 considered for vectorization (can be LOOP, or an outer-loop
765 enclosing LOOP). */
767 static void
768 vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
770 basic_block bb = loop->header;
771 tree init, step;
772 auto_vec<gimple *, 64> worklist;
773 gphi_iterator gsi;
774 bool double_reduc;
776 if (dump_enabled_p ())
777 dump_printf_loc (MSG_NOTE, vect_location,
778 "=== vect_analyze_scalar_cycles ===\n");
780 /* First - identify all inductions. Reduction detection assumes that all the
781 inductions have been identified, therefore, this order must not be
782 changed. */
783 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
785 gphi *phi = gsi.phi ();
786 tree access_fn = NULL;
787 tree def = PHI_RESULT (phi);
788 stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);
790 if (dump_enabled_p ())
792 dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: ");
793 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
796 /* Skip virtual phi's. The data dependences that are associated with
797 virtual defs/uses (i.e., memory accesses) are analyzed elsewhere. */
798 if (virtual_operand_p (def))
799 continue;
801 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_unknown_def_type;
803 /* Analyze the evolution function. */
804 access_fn = analyze_scalar_evolution (loop, def);
805 if (access_fn)
807 STRIP_NOPS (access_fn);
808 if (dump_enabled_p ())
810 dump_printf_loc (MSG_NOTE, vect_location,
811 "Access function of PHI: ");
812 dump_generic_expr (MSG_NOTE, TDF_SLIM, access_fn);
813 dump_printf (MSG_NOTE, "\n");
815 STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo)
816 = initial_condition_in_loop_num (access_fn, loop->num);
817 STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo)
818 = evolution_part_in_loop_num (access_fn, loop->num);
821 if (!access_fn
822 || !vect_is_simple_iv_evolution (loop->num, access_fn, &init, &step)
823 || (LOOP_VINFO_LOOP (loop_vinfo) != loop
824 && TREE_CODE (step) != INTEGER_CST))
826 worklist.safe_push (phi);
827 continue;
830 gcc_assert (STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo)
831 != NULL_TREE);
832 gcc_assert (STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo) != NULL_TREE);
834 if (dump_enabled_p ())
835 dump_printf_loc (MSG_NOTE, vect_location, "Detected induction.\n");
836 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_induction_def;
840 /* Second - identify all reductions and nested cycles. */
841 while (worklist.length () > 0)
843 gimple *phi = worklist.pop ();
844 tree def = PHI_RESULT (phi);
845 stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);
846 gimple *reduc_stmt;
848 if (dump_enabled_p ())
850 dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: ");
851 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
854 gcc_assert (!virtual_operand_p (def)
855 && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_unknown_def_type);
857 reduc_stmt = vect_force_simple_reduction (loop_vinfo, phi,
858 &double_reduc, false);
859 if (reduc_stmt)
861 if (double_reduc)
863 if (dump_enabled_p ())
864 dump_printf_loc (MSG_NOTE, vect_location,
865 "Detected double reduction.\n");
867 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_double_reduction_def;
868 STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
869 vect_double_reduction_def;
871 else
873 if (loop != LOOP_VINFO_LOOP (loop_vinfo))
875 if (dump_enabled_p ())
876 dump_printf_loc (MSG_NOTE, vect_location,
877 "Detected vectorizable nested cycle.\n");
879 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_nested_cycle;
880 STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
881 vect_nested_cycle;
883 else
885 if (dump_enabled_p ())
886 dump_printf_loc (MSG_NOTE, vect_location,
887 "Detected reduction.\n");
889 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_reduction_def;
890 STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
891 vect_reduction_def;
892 /* Store the reduction cycles for possible vectorization in
893 loop-aware SLP if it was not detected as reduction
894 chain. */
895 if (! GROUP_FIRST_ELEMENT (vinfo_for_stmt (reduc_stmt)))
896 LOOP_VINFO_REDUCTIONS (loop_vinfo).safe_push (reduc_stmt);
900 else
901 if (dump_enabled_p ())
902 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
903 "Unknown def-use cycle pattern.\n");
908 /* Function vect_analyze_scalar_cycles.
910 Examine the cross iteration def-use cycles of scalar variables, by
911 analyzing the loop-header PHIs of scalar variables. Classify each
912 cycle as one of the following: invariant, induction, reduction, unknown.
913 We do that for the loop represented by LOOP_VINFO, and also for its
914 inner-loop, if it exists.
915 Examples for scalar cycles:
917 Example1: reduction:
919 loop1:
920 for (i=0; i<N; i++)
921 sum += a[i];
923 Example2: induction:
925 loop2:
926 for (i=0; i<N; i++)
927 a[i] = i; */
929 static void
930 vect_analyze_scalar_cycles (loop_vec_info loop_vinfo)
932 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
934 vect_analyze_scalar_cycles_1 (loop_vinfo, loop);
936 /* When vectorizing an outer-loop, the inner-loop is executed sequentially.
937 Reductions in such inner-loop therefore have different properties than
938 the reductions in the nest that gets vectorized:
939 1. When vectorized, they are executed in the same order as in the original
940 scalar loop, so we can't change the order of computation when
941 vectorizing them.
942 2. FIXME: Inner-loop reductions can be used in the inner-loop, so the
943 current checks are too strict. */
945 if (loop->inner)
946 vect_analyze_scalar_cycles_1 (loop_vinfo, loop->inner);
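/* Sketch of the situation the comment above describes: when the outer
   loop is vectorized, the inner loop still executes sequentially, so an
   inner-loop reduction keeps its original evaluation order.  Hypothetical
   example.  */

void
outer_loop_reduction (float a[][64], float *s, int n)
{
  for (int i = 0; i < n; i++)       /* candidate for outer-loop vectorization */
    {
      float sum = 0.0f;
      for (int j = 0; j < 64; j++)  /* runs sequentially per outer iteration */
        sum += a[i][j];
      s[i] = sum;
    }
}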
949 /* Transfer group and reduction information from STMT to its pattern stmt. */
951 static void
952 vect_fixup_reduc_chain (gimple *stmt)
954 gimple *firstp = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt));
955 gimple *stmtp;
956 gcc_assert (!GROUP_FIRST_ELEMENT (vinfo_for_stmt (firstp))
957 && GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)));
958 GROUP_SIZE (vinfo_for_stmt (firstp)) = GROUP_SIZE (vinfo_for_stmt (stmt));
961 stmtp = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt));
962 GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmtp)) = firstp;
963 stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt));
964 if (stmt)
965 GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmtp))
966 = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt));
968 while (stmt);
969 STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmtp)) = vect_reduction_def;
972 /* Fixup scalar cycles that now have their stmts detected as patterns. */
974 static void
975 vect_fixup_scalar_cycles_with_patterns (loop_vec_info loop_vinfo)
977 gimple *first;
978 unsigned i;
980 FOR_EACH_VEC_ELT (LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo), i, first)
981 if (STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (first)))
983 gimple *next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (first));
984 while (next)
986 if (! STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (next)))
987 break;
988 next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
990 /* If not all stmts in the chain are patterns, try to handle
991 the chain without patterns. */
992 if (! next)
994 vect_fixup_reduc_chain (first);
995 LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo)[i]
996 = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (first));
1001 /* Function vect_get_loop_niters.
1003 Determine how many iterations the loop is executed and place it
1004 in NUMBER_OF_ITERATIONS. Place the number of latch iterations
1005 in NUMBER_OF_ITERATIONSM1. Place the condition under which the
1006 niter information holds in ASSUMPTIONS.
1008 Return the loop exit condition. */
1011 static gcond *
1012 vect_get_loop_niters (struct loop *loop, tree *assumptions,
1013 tree *number_of_iterations, tree *number_of_iterationsm1)
1015 edge exit = single_exit (loop);
1016 struct tree_niter_desc niter_desc;
1017 tree niter_assumptions, niter, may_be_zero;
1018 gcond *cond = get_loop_exit_condition (loop);
1020 *assumptions = boolean_true_node;
1021 *number_of_iterationsm1 = chrec_dont_know;
1022 *number_of_iterations = chrec_dont_know;
1023 if (dump_enabled_p ())
1024 dump_printf_loc (MSG_NOTE, vect_location,
1025 "=== get_loop_niters ===\n");
1027 if (!exit)
1028 return cond;
1030 niter = chrec_dont_know;
1031 may_be_zero = NULL_TREE;
1032 niter_assumptions = boolean_true_node;
1033 if (!number_of_iterations_exit_assumptions (loop, exit, &niter_desc, NULL)
1034 || chrec_contains_undetermined (niter_desc.niter))
1035 return cond;
1037 niter_assumptions = niter_desc.assumptions;
1038 may_be_zero = niter_desc.may_be_zero;
1039 niter = niter_desc.niter;
1041 if (may_be_zero && integer_zerop (may_be_zero))
1042 may_be_zero = NULL_TREE;
1044 if (may_be_zero)
1046 if (COMPARISON_CLASS_P (may_be_zero))
1048 /* Try to combine may_be_zero with assumptions, this can simplify
1049 computation of niter expression. */
1050 if (niter_assumptions && !integer_nonzerop (niter_assumptions))
1051 niter_assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
1052 niter_assumptions,
1053 fold_build1 (TRUTH_NOT_EXPR,
1054 boolean_type_node,
1055 may_be_zero));
1056 else
1057 niter = fold_build3 (COND_EXPR, TREE_TYPE (niter), may_be_zero,
1058 build_int_cst (TREE_TYPE (niter), 0), niter);
1060 may_be_zero = NULL_TREE;
1062 else if (integer_nonzerop (may_be_zero))
1064 *number_of_iterationsm1 = build_int_cst (TREE_TYPE (niter), 0);
1065 *number_of_iterations = build_int_cst (TREE_TYPE (niter), 1);
1066 return cond;
1068 else
1069 return cond;
1072 *assumptions = niter_assumptions;
1073 *number_of_iterationsm1 = niter;
1075 /* We want the number of loop header executions which is the number
1076 of latch executions plus one.
1077 ??? For UINT_MAX latch executions this number overflows to zero
1078 for loops like do { n++; } while (n != 0); */
1079 if (niter && !chrec_contains_undetermined (niter))
1080 niter = fold_build2 (PLUS_EXPR, TREE_TYPE (niter), unshare_expr (niter),
1081 build_int_cst (TREE_TYPE (niter), 1));
1082 *number_of_iterations = niter;
1084 return cond;
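/* Back-of-the-envelope version of what vect_get_loop_niters returns,
   for two concrete loops.  Illustrative only.  */

unsigned int
niter_examples (unsigned int n)
{
  unsigned int body = 0;

  /* For n > 0 the latch runs n - 1 times and the header n times, so
     NUMBER_OF_ITERATIONSM1 = n - 1 and NUMBER_OF_ITERATIONS = n.  */
  for (unsigned int i = 0; i < n; i++)
    body++;

  /* The "+ 1" can wrap: here the latch runs UINT_MAX times, so the
     header count UINT_MAX + 1 overflows to zero - the ??? caveat in
     the comment above.  */
  unsigned int m = 0;
  do
    m++;
  while (m != 0);

  return body + m;
}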
1087 /* Function bb_in_loop_p
1089 Used as predicate for dfs order traversal of the loop bbs. */
1091 static bool
1092 bb_in_loop_p (const_basic_block bb, const void *data)
1094 const struct loop *const loop = (const struct loop *)data;
1095 if (flow_bb_inside_loop_p (loop, bb))
1096 return true;
1097 return false;
1101 /* Create and initialize a new loop_vec_info struct for LOOP_IN, as well as
1102 stmt_vec_info structs for all the stmts in LOOP_IN. */
1104 _loop_vec_info::_loop_vec_info (struct loop *loop_in)
1105 : vec_info (vec_info::loop, init_cost (loop_in)),
1106 loop (loop_in),
1107 bbs (XCNEWVEC (basic_block, loop->num_nodes)),
1108 num_itersm1 (NULL_TREE),
1109 num_iters (NULL_TREE),
1110 num_iters_unchanged (NULL_TREE),
1111 num_iters_assumptions (NULL_TREE),
1112 th (0),
1113 vectorization_factor (0),
1114 unaligned_dr (NULL),
1115 peeling_for_alignment (0),
1116 ptr_mask (0),
1117 slp_unrolling_factor (1),
1118 single_scalar_iteration_cost (0),
1119 vectorizable (false),
1120 peeling_for_gaps (false),
1121 peeling_for_niter (false),
1122 operands_swapped (false),
1123 no_data_dependencies (false),
1124 has_mask_store (false),
1125 scalar_loop (NULL),
1126 orig_loop_info (NULL)
1128 /* Create/Update stmt_info for all stmts in the loop. */
1129 basic_block *body = get_loop_body (loop);
1130 for (unsigned int i = 0; i < loop->num_nodes; i++)
1132 basic_block bb = body[i];
1133 gimple_stmt_iterator si;
1135 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
1137 gimple *phi = gsi_stmt (si);
1138 gimple_set_uid (phi, 0);
1139 set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, this));
1142 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
1144 gimple *stmt = gsi_stmt (si);
1145 gimple_set_uid (stmt, 0);
1146 set_vinfo_for_stmt (stmt, new_stmt_vec_info (stmt, this));
1149 free (body);
1151 /* CHECKME: We want to visit all BBs before their successors (except for
1152 latch blocks, for which this assertion wouldn't hold). In the simple
1153 case of the loop forms we allow, a dfs order of the BBs would be the same
1154 as reversed postorder traversal, so we are safe. */
1156 unsigned int nbbs = dfs_enumerate_from (loop->header, 0, bb_in_loop_p,
1157 bbs, loop->num_nodes, loop);
1158 gcc_assert (nbbs == loop->num_nodes);
1162 /* Free all memory used by the _loop_vec_info, as well as all the
1163 stmt_vec_info structs of all the stmts in the loop. */
1165 _loop_vec_info::~_loop_vec_info ()
1167 int nbbs;
1168 gimple_stmt_iterator si;
1169 int j;
1171 nbbs = loop->num_nodes;
1172 for (j = 0; j < nbbs; j++)
1174 basic_block bb = bbs[j];
1175 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
1176 free_stmt_vec_info (gsi_stmt (si));
1178 for (si = gsi_start_bb (bb); !gsi_end_p (si); )
1180 gimple *stmt = gsi_stmt (si);
1182 /* We may have broken canonical form by moving a constant
1183 into RHS1 of a commutative op. Fix such occurrences. */
1184 if (operands_swapped && is_gimple_assign (stmt))
1186 enum tree_code code = gimple_assign_rhs_code (stmt);
1188 if ((code == PLUS_EXPR
1189 || code == POINTER_PLUS_EXPR
1190 || code == MULT_EXPR)
1191 && CONSTANT_CLASS_P (gimple_assign_rhs1 (stmt)))
1192 swap_ssa_operands (stmt,
1193 gimple_assign_rhs1_ptr (stmt),
1194 gimple_assign_rhs2_ptr (stmt));
1195 else if (code == COND_EXPR
1196 && CONSTANT_CLASS_P (gimple_assign_rhs2 (stmt)))
1198 tree cond_expr = gimple_assign_rhs1 (stmt);
1199 enum tree_code cond_code = TREE_CODE (cond_expr);
1201 if (TREE_CODE_CLASS (cond_code) == tcc_comparison)
1203 bool honor_nans = HONOR_NANS (TREE_OPERAND (cond_expr,
1204 0));
1205 cond_code = invert_tree_comparison (cond_code,
1206 honor_nans);
1207 if (cond_code != ERROR_MARK)
1209 TREE_SET_CODE (cond_expr, cond_code);
1210 swap_ssa_operands (stmt,
1211 gimple_assign_rhs2_ptr (stmt),
1212 gimple_assign_rhs3_ptr (stmt));
1218 /* Free stmt_vec_info. */
1219 free_stmt_vec_info (stmt);
1220 gsi_next (&si);
1224 free (bbs);
1226 loop->aux = NULL;
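/* What "canonical form" means in the clean-up above, at source level:
   GIMPLE keeps a constant operand of a commutative operation in rhs2,
   so a swap made during analysis (constant moved into rhs1) has to be
   undone here; for a swapped COND_EXPR the comparison code is inverted
   back as well.  Hypothetical illustration.  */

int
canonical_form_example (int x, int y)
{
  int a = x + 1;            /* canonical: PLUS_EXPR <x, 1>, constant second */
  int b = (x < y) ? a : 0;  /* COND_EXPR whose arms may have been swapped
                               together with inverting '<' to '>='         */
  return b;
}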
1230 /* Calculate the cost of one scalar iteration of the loop. */
1231 static void
1232 vect_compute_single_scalar_iteration_cost (loop_vec_info loop_vinfo)
1234 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1235 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
1236 int nbbs = loop->num_nodes, factor, scalar_single_iter_cost = 0;
1237 int innerloop_iters, i;
1239 /* Count statements in scalar loop. Using this as scalar cost for a single
1240 iteration for now.
1242 TODO: Add outer loop support.
1244 TODO: Consider assigning different costs to different scalar
1245 statements. */
1247 /* FORNOW. */
1248 innerloop_iters = 1;
1249 if (loop->inner)
1250 innerloop_iters = 50; /* FIXME */
1252 for (i = 0; i < nbbs; i++)
1254 gimple_stmt_iterator si;
1255 basic_block bb = bbs[i];
1257 if (bb->loop_father == loop->inner)
1258 factor = innerloop_iters;
1259 else
1260 factor = 1;
1262 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
1264 gimple *stmt = gsi_stmt (si);
1265 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1267 if (!is_gimple_assign (stmt) && !is_gimple_call (stmt))
1268 continue;
1270 /* Skip stmts that are not vectorized inside the loop. */
1271 if (stmt_info
1272 && !STMT_VINFO_RELEVANT_P (stmt_info)
1273 && (!STMT_VINFO_LIVE_P (stmt_info)
1274 || !VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
1275 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
1276 continue;
1278 vect_cost_for_stmt kind;
1279 if (STMT_VINFO_DATA_REF (stmt_info))
1281 if (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)))
1282 kind = scalar_load;
1283 else
1284 kind = scalar_store;
1286 else
1287 kind = scalar_stmt;
1289 scalar_single_iter_cost
1290 += record_stmt_cost (&LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
1291 factor, kind, stmt_info, 0, vect_prologue);
1294 LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST (loop_vinfo)
1295 = scalar_single_iter_cost;
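/* A much simplified model of the accumulation above: each relevant scalar
   statement contributes one unit of cost (as a scalar load, store or
   generic stmt), weighted by the FORNOW factor of 50 when it sits in an
   inner loop.  The helper is a sketch, not GCC's cost model.  */

static int
sketch_scalar_iter_cost (const int *stmt_in_inner_loop, int nstmts)
{
  int cost = 0;
  for (int i = 0; i < nstmts; i++)
    cost += stmt_in_inner_loop[i] ? 50 : 1;   /* factor * one unit per stmt */
  return cost;
}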
1299 /* Function vect_analyze_loop_form_1.
1301 Verify that certain CFG restrictions hold, including:
1302 - the loop has a pre-header
1303 - the loop has a single entry and exit
1304 - the loop exit condition is simple enough
1305 - the number of iterations can be analyzed, i.e, a countable loop. The
1306 niter could be analyzed under some assumptions. */
1308 bool
1309 vect_analyze_loop_form_1 (struct loop *loop, gcond **loop_cond,
1310 tree *assumptions, tree *number_of_iterationsm1,
1311 tree *number_of_iterations, gcond **inner_loop_cond)
1313 if (dump_enabled_p ())
1314 dump_printf_loc (MSG_NOTE, vect_location,
1315 "=== vect_analyze_loop_form ===\n");
1317 /* Different restrictions apply when we are considering an inner-most loop,
1318 vs. an outer (nested) loop.
1319 (FORNOW. May want to relax some of these restrictions in the future). */
1321 if (!loop->inner)
1323 /* Inner-most loop. We currently require that the number of BBs is
1324 exactly 2 (the header and latch). Vectorizable inner-most loops
1325 look like this:
1327 (pre-header)
1329 header <--------+
1330 | | |
1331 | +--> latch --+
1333 (exit-bb) */
1335 if (loop->num_nodes != 2)
1337 if (dump_enabled_p ())
1338 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1339 "not vectorized: control flow in loop.\n");
1340 return false;
1343 if (empty_block_p (loop->header))
1345 if (dump_enabled_p ())
1346 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1347 "not vectorized: empty loop.\n");
1348 return false;
1351 else
1353 struct loop *innerloop = loop->inner;
1354 edge entryedge;
1356 /* Nested loop. We currently require that the loop is doubly-nested,
1357 contains a single inner loop, and the number of BBs is exactly 5.
1358 Vectorizable outer-loops look like this:
1360 (pre-header)
1362 header <---+
1364 inner-loop |
1366 tail ------+
1368 (exit-bb)
1370 The inner-loop has the properties expected of inner-most loops
1371 as described above. */
1373 if ((loop->inner)->inner || (loop->inner)->next)
1375 if (dump_enabled_p ())
1376 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1377 "not vectorized: multiple nested loops.\n");
1378 return false;
1381 if (loop->num_nodes != 5)
1383 if (dump_enabled_p ())
1384 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1385 "not vectorized: control flow in loop.\n");
1386 return false;
1389 entryedge = loop_preheader_edge (innerloop);
1390 if (entryedge->src != loop->header
1391 || !single_exit (innerloop)
1392 || single_exit (innerloop)->dest != EDGE_PRED (loop->latch, 0)->src)
1394 if (dump_enabled_p ())
1395 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1396 "not vectorized: unsupported outerloop form.\n");
1397 return false;
1400 /* Analyze the inner-loop. */
1401 tree inner_niterm1, inner_niter, inner_assumptions;
1402 if (! vect_analyze_loop_form_1 (loop->inner, inner_loop_cond,
1403 &inner_assumptions, &inner_niterm1,
1404 &inner_niter, NULL)
1405 /* Don't support analyzing niter under assumptions for inner
1406 loop. */
1407 || !integer_onep (inner_assumptions))
1409 if (dump_enabled_p ())
1410 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1411 "not vectorized: Bad inner loop.\n");
1412 return false;
1415 if (!expr_invariant_in_loop_p (loop, inner_niter))
1417 if (dump_enabled_p ())
1418 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1419 "not vectorized: inner-loop count not"
1420 " invariant.\n");
1421 return false;
1424 if (dump_enabled_p ())
1425 dump_printf_loc (MSG_NOTE, vect_location,
1426 "Considering outer-loop vectorization.\n");
1429 if (!single_exit (loop)
1430 || EDGE_COUNT (loop->header->preds) != 2)
1432 if (dump_enabled_p ())
1434 if (!single_exit (loop))
1435 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1436 "not vectorized: multiple exits.\n");
1437 else if (EDGE_COUNT (loop->header->preds) != 2)
1438 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1439 "not vectorized: too many incoming edges.\n");
1441 return false;
1444 /* We assume that the loop exit condition is at the end of the loop. i.e,
1445 that the loop is represented as a do-while (with a proper if-guard
1446 before the loop if needed), where the loop header contains all the
1447 executable statements, and the latch is empty. */
1448 if (!empty_block_p (loop->latch)
1449 || !gimple_seq_empty_p (phi_nodes (loop->latch)))
1451 if (dump_enabled_p ())
1452 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1453 "not vectorized: latch block not empty.\n");
1454 return false;
1457 /* Make sure the exit is not abnormal. */
1458 edge e = single_exit (loop);
1459 if (e->flags & EDGE_ABNORMAL)
1461 if (dump_enabled_p ())
1462 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1463 "not vectorized: abnormal loop exit edge.\n");
1464 return false;
1467 *loop_cond = vect_get_loop_niters (loop, assumptions, number_of_iterations,
1468 number_of_iterationsm1);
1469 if (!*loop_cond)
1471 if (dump_enabled_p ())
1472 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1473 "not vectorized: complicated exit condition.\n");
1474 return false;
1477 if (integer_zerop (*assumptions)
1478 || !*number_of_iterations
1479 || chrec_contains_undetermined (*number_of_iterations))
1481 if (dump_enabled_p ())
1482 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1483 "not vectorized: number of iterations cannot be "
1484 "computed.\n");
1485 return false;
1488 if (integer_zerop (*number_of_iterations))
1490 if (dump_enabled_p ())
1491 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1492 "not vectorized: number of iterations = 0.\n");
1493 return false;
1496 return true;
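/* Source-level shapes corresponding to the CFG checks above
   (illustrative only):  */

void
loop_form_examples (int *a, int *b, int n)
{
  /* Accepted inner-most form: a single-exit counted loop whose body has
     no extra control flow - header plus latch, i.e. two basic blocks.  */
  for (int i = 0; i < n; i++)
    a[i] = b[i] + 1;

  /* Rejected as-is: the branch adds basic blocks, so loop->num_nodes != 2
     and the "control flow in loop" path triggers (unless if-conversion
     flattens the body first).  */
  for (int i = 0; i < n; i++)
    if (b[i] > 0)
      a[i] = b[i];
}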
1499 /* Analyze LOOP form and return a loop_vec_info if it is of suitable form. */
1501 loop_vec_info
1502 vect_analyze_loop_form (struct loop *loop)
1504 tree assumptions, number_of_iterations, number_of_iterationsm1;
1505 gcond *loop_cond, *inner_loop_cond = NULL;
1507 if (! vect_analyze_loop_form_1 (loop, &loop_cond,
1508 &assumptions, &number_of_iterationsm1,
1509 &number_of_iterations, &inner_loop_cond))
1510 return NULL;
1512 loop_vec_info loop_vinfo = new _loop_vec_info (loop);
1513 LOOP_VINFO_NITERSM1 (loop_vinfo) = number_of_iterationsm1;
1514 LOOP_VINFO_NITERS (loop_vinfo) = number_of_iterations;
1515 LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = number_of_iterations;
1516 if (!integer_onep (assumptions))
1518 /* We consider to vectorize this loop by versioning it under
1519 some assumptions. In order to do this, we need to clear
1520 existing information computed by scev and niter analyzer. */
1521 scev_reset_htab ();
1522 free_numbers_of_iterations_estimates (loop);
1523 /* Also set flag for this loop so that following scev and niter
1524 analysis are done under the assumptions. */
1525 loop_constraint_set (loop, LOOP_C_FINITE);
1526 /* Also record the assumptions for versioning. */
1527 LOOP_VINFO_NITERS_ASSUMPTIONS (loop_vinfo) = assumptions;
1530 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
1532 if (dump_enabled_p ())
1534 dump_printf_loc (MSG_NOTE, vect_location,
1535 "Symbolic number of iterations is ");
1536 dump_generic_expr (MSG_NOTE, TDF_DETAILS, number_of_iterations);
1537 dump_printf (MSG_NOTE, "\n");
1541 STMT_VINFO_TYPE (vinfo_for_stmt (loop_cond)) = loop_exit_ctrl_vec_info_type;
1542 if (inner_loop_cond)
1543 STMT_VINFO_TYPE (vinfo_for_stmt (inner_loop_cond))
1544 = loop_exit_ctrl_vec_info_type;
1546 gcc_assert (!loop->aux);
1547 loop->aux = loop_vinfo;
1548 return loop_vinfo;
1553 /* Scan the loop stmts and, depending on whether there are any (non-)SLP
1554 statements update the vectorization factor. */
1556 static void
1557 vect_update_vf_for_slp (loop_vec_info loop_vinfo)
1559 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1560 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
1561 int nbbs = loop->num_nodes;
1562 unsigned int vectorization_factor;
1563 int i;
1565 if (dump_enabled_p ())
1566 dump_printf_loc (MSG_NOTE, vect_location,
1567 "=== vect_update_vf_for_slp ===\n");
1569 vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1570 gcc_assert (vectorization_factor != 0);
1572 /* If all the stmts in the loop can be SLPed, we perform only SLP, and
1573 the vectorization factor of the loop is the unrolling factor required by
1574 the SLP instances.  If that unrolling factor is 1, we say that we
1575 perform pure SLP on the loop - cross-iteration parallelism is not
1576 exploited. */
1577 bool only_slp_in_loop = true;
1578 for (i = 0; i < nbbs; i++)
1580 basic_block bb = bbs[i];
1581 for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
1582 gsi_next (&si))
1584 gimple *stmt = gsi_stmt (si);
1585 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1586 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
1587 && STMT_VINFO_RELATED_STMT (stmt_info))
1589 stmt = STMT_VINFO_RELATED_STMT (stmt_info);
1590 stmt_info = vinfo_for_stmt (stmt);
1592 if ((STMT_VINFO_RELEVANT_P (stmt_info)
1593 || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
1594 && !PURE_SLP_STMT (stmt_info))
1595 /* STMT needs both SLP and loop-based vectorization. */
1596 only_slp_in_loop = false;
1600 if (only_slp_in_loop)
1602 dump_printf_loc (MSG_NOTE, vect_location,
1603 "Loop contains only SLP stmts\n");
1604 vectorization_factor = LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo);
1606 else
1608 dump_printf_loc (MSG_NOTE, vect_location,
1609 "Loop contains SLP and non-SLP stmts\n");
1610 vectorization_factor
1611 = least_common_multiple (vectorization_factor,
1612 LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo));
1615 LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;
1616 if (dump_enabled_p ())
1617 dump_printf_loc (MSG_NOTE, vect_location,
1618 "Updating vectorization factor to %d\n",
1619 vectorization_factor);
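/* The arithmetic behind the update above, as a standalone sketch: with
   hybrid SLP the final VF is the least common multiple of the loop VF
   and the SLP unrolling factor; with pure SLP it is the SLP unrolling
   factor alone.  The helper below is an assumption for the sketch.  */

static unsigned
sketch_lcm (unsigned a, unsigned b)
{
  unsigned x = a, y = b;
  while (y)                     /* Euclid's algorithm for gcd */
    {
      unsigned t = x % y;
      x = y;
      y = t;
    }
  return a / x * b;             /* lcm (a, b) = a / gcd (a, b) * b */
}

/* E.g. loop VF 4 with SLP unrolling factor 2 stays at 4;
   loop VF 4 with SLP unrolling factor 8 becomes 8.  */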
1622 /* Function vect_analyze_loop_operations.
1624 Scan the loop stmts and make sure they are all vectorizable. */
1626 static bool
1627 vect_analyze_loop_operations (loop_vec_info loop_vinfo)
1629 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1630 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
1631 int nbbs = loop->num_nodes;
1632 int i;
1633 stmt_vec_info stmt_info;
1634 bool need_to_vectorize = false;
1635 bool ok;
1637 if (dump_enabled_p ())
1638 dump_printf_loc (MSG_NOTE, vect_location,
1639 "=== vect_analyze_loop_operations ===\n");
1641 for (i = 0; i < nbbs; i++)
1643 basic_block bb = bbs[i];
1645 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
1646 gsi_next (&si))
1648 gphi *phi = si.phi ();
1649 ok = true;
1651 stmt_info = vinfo_for_stmt (phi);
1652 if (dump_enabled_p ())
1654 dump_printf_loc (MSG_NOTE, vect_location, "examining phi: ");
1655 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
1657 if (virtual_operand_p (gimple_phi_result (phi)))
1658 continue;
1660 /* Inner-loop loop-closed exit phi in outer-loop vectorization
1661 (i.e., a phi in the tail of the outer-loop). */
1662 if (! is_loop_header_bb_p (bb))
1664 /* FORNOW: we currently don't support the case that these phis
1665 are not used in the outerloop (unless it is double reduction,
1666 i.e., this phi is vect_reduction_def), because this case
1667 requires us to actually do something here. */
1668 if (STMT_VINFO_LIVE_P (stmt_info)
1669 && STMT_VINFO_DEF_TYPE (stmt_info)
1670 != vect_double_reduction_def)
1672 if (dump_enabled_p ())
1673 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1674 "Unsupported loop-closed phi in "
1675 "outer-loop.\n");
1676 return false;
1679 /* If PHI is used in the outer loop, we check that its operand
1680 is defined in the inner loop. */
1681 if (STMT_VINFO_RELEVANT_P (stmt_info))
1683 tree phi_op;
1684 gimple *op_def_stmt;
1686 if (gimple_phi_num_args (phi) != 1)
1687 return false;
1689 phi_op = PHI_ARG_DEF (phi, 0);
1690 if (TREE_CODE (phi_op) != SSA_NAME)
1691 return false;
1693 op_def_stmt = SSA_NAME_DEF_STMT (phi_op);
1694 if (gimple_nop_p (op_def_stmt)
1695 || !flow_bb_inside_loop_p (loop, gimple_bb (op_def_stmt))
1696 || !vinfo_for_stmt (op_def_stmt))
1697 return false;
1699 if (STMT_VINFO_RELEVANT (vinfo_for_stmt (op_def_stmt))
1700 != vect_used_in_outer
1701 && STMT_VINFO_RELEVANT (vinfo_for_stmt (op_def_stmt))
1702 != vect_used_in_outer_by_reduction)
1703 return false;
1706 continue;
1709 gcc_assert (stmt_info);
1711 if ((STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope
1712 || STMT_VINFO_LIVE_P (stmt_info))
1713 && STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def)
1715 /* A scalar-dependence cycle that we don't support. */
1716 if (dump_enabled_p ())
1717 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1718 "not vectorized: scalar dependence cycle.\n");
1719 return false;
1722 if (STMT_VINFO_RELEVANT_P (stmt_info))
1724 need_to_vectorize = true;
1725 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def
1726 && ! PURE_SLP_STMT (stmt_info))
1727 ok = vectorizable_induction (phi, NULL, NULL, NULL);
1728 else if ((STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def
1729 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle)
1730 && ! PURE_SLP_STMT (stmt_info))
1731 ok = vectorizable_reduction (phi, NULL, NULL, NULL, NULL);
1734 if (ok && STMT_VINFO_LIVE_P (stmt_info))
1735 ok = vectorizable_live_operation (phi, NULL, NULL, -1, NULL);
1737 if (!ok)
1739 if (dump_enabled_p ())
1741 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1742 "not vectorized: relevant phi not "
1743 "supported: ");
1744 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, phi, 0);
1746 return false;
1750 for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
1751 gsi_next (&si))
1753 gimple *stmt = gsi_stmt (si);
1754 if (!gimple_clobber_p (stmt)
1755 && !vect_analyze_stmt (stmt, &need_to_vectorize, NULL, NULL))
1756 return false;
1758 } /* bbs */
1760 /* All operations in the loop are either irrelevant (deal with loop
1761 control, or dead), or only used outside the loop and can be moved
1762 out of the loop (e.g. invariants, inductions). The loop can be
1763 optimized away by scalar optimizations. We're better off not
1764 touching this loop. */
1765 if (!need_to_vectorize)
1767 if (dump_enabled_p ())
1768 dump_printf_loc (MSG_NOTE, vect_location,
1769 "All the computation can be taken out of the loop.\n");
1770 if (dump_enabled_p ())
1771 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1772 "not vectorized: redundant loop. no profit to "
1773 "vectorize.\n");
1774 return false;
1777 return true;
1781 /* Function vect_analyze_loop_2.
1783 Apply a set of analyses on LOOP, and create a loop_vec_info struct
1784 for it. The different analyses will record information in the
1785 loop_vec_info struct. */
1786 static bool
1787 vect_analyze_loop_2 (loop_vec_info loop_vinfo, bool &fatal)
1789 bool ok;
1790 int max_vf = MAX_VECTORIZATION_FACTOR;
1791 int min_vf = 2;
1792 unsigned int n_stmts = 0;
1794 /* The first group of checks is independent of the vector size. */
1795 fatal = true;
1797 /* Find all data references in the loop (which correspond to vdefs/vuses)
1798 and analyze their evolution in the loop. */
1800 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
1802 loop_p loop = LOOP_VINFO_LOOP (loop_vinfo);
1803 if (!find_loop_nest (loop, &LOOP_VINFO_LOOP_NEST (loop_vinfo)))
1805 if (dump_enabled_p ())
1806 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1807 "not vectorized: loop nest containing two "
1808 "or more consecutive inner loops cannot be "
1809 "vectorized\n");
1810 return false;
1813 for (unsigned i = 0; i < loop->num_nodes; i++)
1814 for (gimple_stmt_iterator gsi = gsi_start_bb (bbs[i]);
1815 !gsi_end_p (gsi); gsi_next (&gsi))
1817 gimple *stmt = gsi_stmt (gsi);
1818 if (is_gimple_debug (stmt))
1819 continue;
1820 ++n_stmts;
1821 if (!find_data_references_in_stmt (loop, stmt,
1822 &LOOP_VINFO_DATAREFS (loop_vinfo)))
1824 if (is_gimple_call (stmt) && loop->safelen)
1826 tree fndecl = gimple_call_fndecl (stmt), op;
1827 if (fndecl != NULL_TREE)
1829 cgraph_node *node = cgraph_node::get (fndecl);
1830 if (node != NULL && node->simd_clones != NULL)
1832 unsigned int j, n = gimple_call_num_args (stmt);
1833 for (j = 0; j < n; j++)
1835 op = gimple_call_arg (stmt, j);
1836 if (DECL_P (op)
1837 || (REFERENCE_CLASS_P (op)
1838 && get_base_address (op)))
1839 break;
1841 op = gimple_call_lhs (stmt);
1842 /* Ignore #pragma omp declare simd functions
1843 if they don't have data references in the
1844 call stmt itself. */
1845 if (j == n
1846 && !(op
1847 && (DECL_P (op)
1848 || (REFERENCE_CLASS_P (op)
1849 && get_base_address (op)))))
1850 continue;
1854 if (dump_enabled_p ())
1855 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1856 "not vectorized: loop contains function "
1857 "calls or data references that cannot "
1858 "be analyzed\n");
1859 return false;
1863 /* Analyze the data references and also adjust the minimal
1864 vectorization factor according to the loads and stores. */
1866 ok = vect_analyze_data_refs (loop_vinfo, &min_vf);
1867 if (!ok)
1869 if (dump_enabled_p ())
1870 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1871 "bad data references.\n");
1872 return false;
1875 /* Classify all cross-iteration scalar data-flow cycles.
1876 Cross-iteration cycles caused by virtual phis are analyzed separately. */
1877 vect_analyze_scalar_cycles (loop_vinfo);
1879 vect_pattern_recog (loop_vinfo);
1881 vect_fixup_scalar_cycles_with_patterns (loop_vinfo);
1883 /* Analyze the access patterns of the data-refs in the loop (consecutive,
1884 complex, etc.). FORNOW: Only handle consecutive access pattern. */
1886 ok = vect_analyze_data_ref_accesses (loop_vinfo);
1887 if (!ok)
1889 if (dump_enabled_p ())
1890 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1891 "bad data access.\n");
1892 return false;
1895 /* Data-flow analysis to detect stmts that do not need to be vectorized. */
1897 ok = vect_mark_stmts_to_be_vectorized (loop_vinfo);
1898 if (!ok)
1900 if (dump_enabled_p ())
1901 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1902 "unexpected pattern.\n");
1903 return false;
1906 /* While the rest of the analysis below depends on it in some way. */
1907 fatal = false;
1909 /* Analyze data dependences between the data-refs in the loop
1910 and adjust the maximum vectorization factor according to
1911 the dependences.
1912 FORNOW: fail at the first data dependence that we encounter. */
1914 ok = vect_analyze_data_ref_dependences (loop_vinfo, &max_vf);
1915 if (!ok
1916 || max_vf < min_vf)
1918 if (dump_enabled_p ())
1919 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1920 "bad data dependence.\n");
1921 return false;
1924 ok = vect_determine_vectorization_factor (loop_vinfo);
1925 if (!ok)
1927 if (dump_enabled_p ())
1928 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1929 "can't determine vectorization factor.\n");
1930 return false;
1932 if (max_vf < LOOP_VINFO_VECT_FACTOR (loop_vinfo))
1934 if (dump_enabled_p ())
1935 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1936 "bad data dependence.\n");
1937 return false;
1940 /* Compute the scalar iteration cost. */
1941 vect_compute_single_scalar_iteration_cost (loop_vinfo);
1943 int saved_vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1944 HOST_WIDE_INT estimated_niter;
1945 unsigned th;
1946 int min_scalar_loop_bound;
1948 /* Check the SLP opportunities in the loop, analyze and build SLP trees. */
1949 ok = vect_analyze_slp (loop_vinfo, n_stmts);
1950 if (!ok)
1951 return false;
1953 /* If there are any SLP instances mark them as pure_slp. */
1954 bool slp = vect_make_slp_decision (loop_vinfo);
1955 if (slp)
1957 /* Find stmts that need to be both vectorized and SLPed. */
1958 vect_detect_hybrid_slp (loop_vinfo);
1960 /* Update the vectorization factor based on the SLP decision. */
1961 vect_update_vf_for_slp (loop_vinfo);
1964 /* This is the point where we can re-start analysis with SLP forced off. */
1965 start_over:
1967 /* Now the vectorization factor is final. */
1968 unsigned vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1969 gcc_assert (vectorization_factor != 0);
1971 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) && dump_enabled_p ())
1972 dump_printf_loc (MSG_NOTE, vect_location,
1973 "vectorization_factor = %d, niters = "
1974 HOST_WIDE_INT_PRINT_DEC "\n", vectorization_factor,
1975 LOOP_VINFO_INT_NITERS (loop_vinfo));
1977 HOST_WIDE_INT max_niter
1978 = likely_max_stmt_executions_int (LOOP_VINFO_LOOP (loop_vinfo));
1979 if ((LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
1980 && (LOOP_VINFO_INT_NITERS (loop_vinfo) < vectorization_factor))
1981 || (max_niter != -1
1982 && (unsigned HOST_WIDE_INT) max_niter < vectorization_factor))
1984 if (dump_enabled_p ())
1985 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1986 "not vectorized: iteration count smaller than "
1987 "vectorization factor.\n");
1988 return false;
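/* For illustration (numbers are made up, not from any target): with a
   vectorization factor of 8, a loop whose known or likely-maximum trip
   count is 6 can never complete a single vector iteration, so it is
   rejected here before any further, more expensive analysis is done.  */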
1991 /* Analyze the alignment of the data-refs in the loop.
1992 Fail if a data reference is found that cannot be vectorized. */
1994 ok = vect_analyze_data_refs_alignment (loop_vinfo);
1995 if (!ok)
1997 if (dump_enabled_p ())
1998 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1999 "bad data alignment.\n");
2000 return false;
2003 /* Prune the list of ddrs to be tested at run-time by versioning for alias.
2004 It is important to call pruning after vect_analyze_data_ref_accesses,
2005 since we use grouping information gathered by interleaving analysis. */
2006 ok = vect_prune_runtime_alias_test_list (loop_vinfo);
2007 if (!ok)
2008 return false;
2010 /* Do not invoke vect_enhance_data_refs_alignment for epilogue
2011 vectorization. */
2012 if (!LOOP_VINFO_EPILOGUE_P (loop_vinfo))
2014 /* This pass will decide on using loop versioning and/or loop peeling in
2015 order to enhance the alignment of data references in the loop. */
2016 ok = vect_enhance_data_refs_alignment (loop_vinfo);
2017 if (!ok)
2019 if (dump_enabled_p ())
2020 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2021 "bad data alignment.\n");
2022 return false;
2026 if (slp)
2028 /* Analyze operations in the SLP instances. Note this may
2029 remove unsupported SLP instances which makes the above
2030 SLP kind detection invalid. */
2031 unsigned old_size = LOOP_VINFO_SLP_INSTANCES (loop_vinfo).length ();
2032 vect_slp_analyze_operations (LOOP_VINFO_SLP_INSTANCES (loop_vinfo),
2033 LOOP_VINFO_TARGET_COST_DATA (loop_vinfo));
2034 if (LOOP_VINFO_SLP_INSTANCES (loop_vinfo).length () != old_size)
2035 goto again;
2038 /* Scan all the remaining operations in the loop that are not subject
2039 to SLP and make sure they are vectorizable. */
2040 ok = vect_analyze_loop_operations (loop_vinfo);
2041 if (!ok)
2043 if (dump_enabled_p ())
2044 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2045 "bad operation or unsupported loop bound.\n");
2046 return false;
2049 /* If epilog loop is required because of data accesses with gaps,
2050 one additional iteration needs to be peeled. Check if there is
2051 enough iterations for vectorization. */
2052 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
2053 && LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
2055 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2056 tree scalar_niters = LOOP_VINFO_NITERSM1 (loop_vinfo);
2058 if (wi::to_widest (scalar_niters) < vf)
2060 if (dump_enabled_p ())
2061 dump_printf_loc (MSG_NOTE, vect_location,
2062 "loop has no enough iterations to support"
2063 " peeling for gaps.\n");
2064 return false;
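/* For illustration: with a vectorization factor of 4 and peeling for gaps,
   one scalar iteration must be left for the epilogue, so a loop with
   exactly 4 iterations (NITERSM1 == 3) is rejected here; at least VF + 1
   iterations are needed.  */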
2068 /* Analyze cost. Decide if worth while to vectorize. */
2069 int min_profitable_estimate, min_profitable_iters;
2070 vect_estimate_min_profitable_iters (loop_vinfo, &min_profitable_iters,
2071 &min_profitable_estimate);
2073 if (min_profitable_iters < 0)
2075 if (dump_enabled_p ())
2076 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2077 "not vectorized: vectorization not profitable.\n");
2078 if (dump_enabled_p ())
2079 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2080 "not vectorized: vector version will never be "
2081 "profitable.\n");
2082 goto again;
2085 min_scalar_loop_bound = (PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND)
2086 * vectorization_factor);
2088 /* Use the cost model only if it is more conservative than the user-specified
2089 threshold. */
2090 th = (unsigned) MAX (min_scalar_loop_bound, min_profitable_iters);
2092 LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo) = th;
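/* Worked example with made-up values: --param min-vect-loop-bound=2 and a
   vectorization factor of 4 give min_scalar_loop_bound == 8; if the cost
   model computed min_profitable_iters == 6, the threshold TH becomes 8,
   i.e. whichever of the two is more conservative.  */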
2094 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
2095 && LOOP_VINFO_INT_NITERS (loop_vinfo) < th)
2097 if (dump_enabled_p ())
2098 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2099 "not vectorized: vectorization not profitable.\n");
2100 if (dump_enabled_p ())
2101 dump_printf_loc (MSG_NOTE, vect_location,
2102 "not vectorized: iteration count smaller than user "
2103 "specified loop bound parameter or minimum profitable "
2104 "iterations (whichever is more conservative).\n");
2105 goto again;
2108 estimated_niter
2109 = estimated_stmt_executions_int (LOOP_VINFO_LOOP (loop_vinfo));
2110 if (estimated_niter == -1)
2111 estimated_niter = max_niter;
2112 if (estimated_niter != -1
2113 && ((unsigned HOST_WIDE_INT) estimated_niter
2114 < MAX (th, (unsigned) min_profitable_estimate)))
2116 if (dump_enabled_p ())
2117 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2118 "not vectorized: estimated iteration count too "
2119 "small.\n");
2120 if (dump_enabled_p ())
2121 dump_printf_loc (MSG_NOTE, vect_location,
2122 "not vectorized: estimated iteration count smaller "
2123 "than specified loop bound parameter or minimum "
2124 "profitable iterations (whichever is more "
2125 "conservative).\n");
2126 goto again;
2129 /* Decide whether we need to create an epilogue loop to handle
2130 remaining scalar iterations. */
2131 th = ((LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo)
2132 / LOOP_VINFO_VECT_FACTOR (loop_vinfo))
2133 * LOOP_VINFO_VECT_FACTOR (loop_vinfo));
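/* The computation above rounds the threshold down to a multiple of the
   vectorization factor: e.g. a cost-model threshold of 10 with VF == 4
   yields TH == 8 for the epilogue decision below.  */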
2135 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
2136 && LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) > 0)
2138 if (ctz_hwi (LOOP_VINFO_INT_NITERS (loop_vinfo)
2139 - LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo))
2140 < exact_log2 (LOOP_VINFO_VECT_FACTOR (loop_vinfo)))
2141 LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = true;
2143 else if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo)
2144 || (tree_ctz (LOOP_VINFO_NITERS (loop_vinfo))
2145 < (unsigned)exact_log2 (LOOP_VINFO_VECT_FACTOR (loop_vinfo))
2146 /* In case of versioning, check if the maximum number of
2147 iterations is greater than th. If they are identical,
2148 the epilogue is unnecessary. */
2149 && (!LOOP_REQUIRES_VERSIONING (loop_vinfo)
2150 || (unsigned HOST_WIDE_INT) max_niter > th)))
2151 LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = true;
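/* A sketch of the first case above, with assumed numbers: for NITERS == 20,
   2 iterations peeled for alignment and VF == 4, ctz (18) == 1 is smaller
   than log2 (4) == 2, so the remaining 18 iterations are not a multiple of
   VF and an epilogue loop is needed; with NITERS == 18 instead, ctz (16) == 4
   and this branch requires no epilogue.  */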
2153 /* If an epilogue loop is required make sure we can create one. */
2154 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
2155 || LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo))
2157 if (dump_enabled_p ())
2158 dump_printf_loc (MSG_NOTE, vect_location, "epilog loop required\n");
2159 if (!vect_can_advance_ivs_p (loop_vinfo)
2160 || !slpeel_can_duplicate_loop_p (LOOP_VINFO_LOOP (loop_vinfo),
2161 single_exit (LOOP_VINFO_LOOP
2162 (loop_vinfo))))
2164 if (dump_enabled_p ())
2165 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2166 "not vectorized: can't create required "
2167 "epilog loop\n");
2168 goto again;
2172 /* During peeling, we need to check if number of loop iterations is
2173 enough for both peeled prolog loop and vector loop. This check
2174 can be merged along with threshold check of loop versioning, so
2175 increase threshold for this case if necessary. */
2176 if (LOOP_REQUIRES_VERSIONING (loop_vinfo)
2177 && (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
2178 || LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo)))
2180 unsigned niters_th;
2182 /* Niters for peeled prolog loop. */
2183 if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0)
2185 struct data_reference *dr = LOOP_VINFO_UNALIGNED_DR (loop_vinfo);
2186 tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (DR_STMT (dr)));
2188 niters_th = TYPE_VECTOR_SUBPARTS (vectype) - 1;
2190 else
2191 niters_th = LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
2193 /* Niters for at least one iteration of vectorized loop. */
2194 niters_th += LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2195 /* One additional iteration because of peeling for gap. */
2196 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo))
2197 niters_th++;
2198 if (LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo) < niters_th)
2199 LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo) = niters_th;
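/* Illustrative numbers: for a four-element vector type on the unaligned
   access with unknown misalignment (PEELING_FOR_ALIGNMENT < 0) and VF == 4,
   up to 3 prologue iterations may be peeled, plus 4 iterations for one
   vector iteration, plus 1 when peeling for gaps, giving NITERS_TH == 8;
   the versioning threshold is raised to that value if it was smaller.  */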
2202 gcc_assert (vectorization_factor
2203 == (unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo));
2205 /* Ok to vectorize! */
2206 return true;
2208 again:
2209 /* Try again with SLP forced off but if we didn't do any SLP there is
2210 no point in re-trying. */
2211 if (!slp)
2212 return false;
2214 /* If there are reduction chains re-trying will fail anyway. */
2215 if (! LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo).is_empty ())
2216 return false;
2218 /* Likewise if the grouped loads or stores in the SLP cannot be handled
2219 via interleaving or lane instructions. */
2220 slp_instance instance;
2221 slp_tree node;
2222 unsigned i, j;
2223 FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), i, instance)
2225 stmt_vec_info vinfo;
2226 vinfo = vinfo_for_stmt
2227 (SLP_TREE_SCALAR_STMTS (SLP_INSTANCE_TREE (instance))[0]);
2228 if (! STMT_VINFO_GROUPED_ACCESS (vinfo))
2229 continue;
2230 vinfo = vinfo_for_stmt (STMT_VINFO_GROUP_FIRST_ELEMENT (vinfo));
2231 unsigned int size = STMT_VINFO_GROUP_SIZE (vinfo);
2232 tree vectype = STMT_VINFO_VECTYPE (vinfo);
2233 if (! vect_store_lanes_supported (vectype, size)
2234 && ! vect_grouped_store_supported (vectype, size))
2235 return false;
2236 FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (instance), j, node)
2238 vinfo = vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (node)[0]);
2239 vinfo = vinfo_for_stmt (STMT_VINFO_GROUP_FIRST_ELEMENT (vinfo));
2240 bool single_element_p = !STMT_VINFO_GROUP_NEXT_ELEMENT (vinfo);
2241 size = STMT_VINFO_GROUP_SIZE (vinfo);
2242 vectype = STMT_VINFO_VECTYPE (vinfo);
2243 if (! vect_load_lanes_supported (vectype, size)
2244 && ! vect_grouped_load_supported (vectype, single_element_p,
2245 size))
2246 return false;
2250 if (dump_enabled_p ())
2251 dump_printf_loc (MSG_NOTE, vect_location,
2252 "re-trying with SLP disabled\n");
2254 /* Roll back state appropriately. No SLP this time. */
2255 slp = false;
2256 /* Restore the vectorization factor as it was before the SLP decision. */
2257 LOOP_VINFO_VECT_FACTOR (loop_vinfo) = saved_vectorization_factor;
2258 /* Free the SLP instances. */
2259 FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), j, instance)
2260 vect_free_slp_instance (instance);
2261 LOOP_VINFO_SLP_INSTANCES (loop_vinfo).release ();
2262 /* Reset SLP type to loop_vect on all stmts. */
2263 for (i = 0; i < LOOP_VINFO_LOOP (loop_vinfo)->num_nodes; ++i)
2265 basic_block bb = LOOP_VINFO_BBS (loop_vinfo)[i];
2266 for (gimple_stmt_iterator si = gsi_start_phis (bb);
2267 !gsi_end_p (si); gsi_next (&si))
2269 stmt_vec_info stmt_info = vinfo_for_stmt (gsi_stmt (si));
2270 STMT_SLP_TYPE (stmt_info) = loop_vect;
2272 for (gimple_stmt_iterator si = gsi_start_bb (bb);
2273 !gsi_end_p (si); gsi_next (&si))
2275 stmt_vec_info stmt_info = vinfo_for_stmt (gsi_stmt (si));
2276 STMT_SLP_TYPE (stmt_info) = loop_vect;
2277 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
2279 stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
2280 STMT_SLP_TYPE (stmt_info) = loop_vect;
2281 for (gimple_stmt_iterator pi
2282 = gsi_start (STMT_VINFO_PATTERN_DEF_SEQ (stmt_info));
2283 !gsi_end_p (pi); gsi_next (&pi))
2285 gimple *pstmt = gsi_stmt (pi);
2286 STMT_SLP_TYPE (vinfo_for_stmt (pstmt)) = loop_vect;
2291 /* Free optimized alias test DDRS. */
2292 LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo).release ();
2293 LOOP_VINFO_CHECK_UNEQUAL_ADDRS (loop_vinfo).release ();
2294 /* Reset target cost data. */
2295 destroy_cost_data (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo));
2296 LOOP_VINFO_TARGET_COST_DATA (loop_vinfo)
2297 = init_cost (LOOP_VINFO_LOOP (loop_vinfo));
2298 /* Reset assorted flags. */
2299 LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = false;
2300 LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = false;
2301 LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo) = 0;
2303 goto start_over;
2306 /* Function vect_analyze_loop.
2308 Apply a set of analyses on LOOP, and create a loop_vec_info struct
2309 for it. The different analyses will record information in the
2310 loop_vec_info struct. If ORIG_LOOP_VINFO is not NULL, the epilogue must
2311 be vectorized. */
2312 loop_vec_info
2313 vect_analyze_loop (struct loop *loop, loop_vec_info orig_loop_vinfo)
2315 loop_vec_info loop_vinfo;
2316 unsigned int vector_sizes;
2318 /* Autodetect first vector size we try. */
2319 current_vector_size = 0;
2320 vector_sizes = targetm.vectorize.autovectorize_vector_sizes ();
2322 if (dump_enabled_p ())
2323 dump_printf_loc (MSG_NOTE, vect_location,
2324 "===== analyze_loop_nest =====\n");
2326 if (loop_outer (loop)
2327 && loop_vec_info_for_loop (loop_outer (loop))
2328 && LOOP_VINFO_VECTORIZABLE_P (loop_vec_info_for_loop (loop_outer (loop))))
2330 if (dump_enabled_p ())
2331 dump_printf_loc (MSG_NOTE, vect_location,
2332 "outer-loop already vectorized.\n");
2333 return NULL;
2336 while (1)
2338 /* Check the CFG characteristics of the loop (nesting, entry/exit). */
2339 loop_vinfo = vect_analyze_loop_form (loop);
2340 if (!loop_vinfo)
2342 if (dump_enabled_p ())
2343 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2344 "bad loop form.\n");
2345 return NULL;
2348 bool fatal = false;
2350 if (orig_loop_vinfo)
2351 LOOP_VINFO_ORIG_LOOP_INFO (loop_vinfo) = orig_loop_vinfo;
2353 if (vect_analyze_loop_2 (loop_vinfo, fatal))
2355 LOOP_VINFO_VECTORIZABLE_P (loop_vinfo) = 1;
2357 return loop_vinfo;
2360 delete loop_vinfo;
2362 vector_sizes &= ~current_vector_size;
2363 if (fatal
2364 || vector_sizes == 0
2365 || current_vector_size == 0)
2366 return NULL;
2368 /* Try the next biggest vector size. */
2369 current_vector_size = 1 << floor_log2 (vector_sizes);
2370 if (dump_enabled_p ())
2371 dump_printf_loc (MSG_NOTE, vect_location,
2372 "***** Re-trying analysis with "
2373 "vector size %d\n", current_vector_size);
2378 /* Function reduction_code_for_scalar_code
2380 Input:
2381 CODE - tree_code of a reduction operation.
2383 Output:
2384 REDUC_CODE - the corresponding tree-code to be used to reduce the
2385 vector of partial results into a single scalar result, or ERROR_MARK
2386 if the operation is a supported reduction operation, but does not have
2387 such a tree-code.
2389 Return FALSE if CODE currently cannot be vectorized as reduction. */
2391 static bool
2392 reduction_code_for_scalar_code (enum tree_code code,
2393 enum tree_code *reduc_code)
2395 switch (code)
2397 case MAX_EXPR:
2398 *reduc_code = REDUC_MAX_EXPR;
2399 return true;
2401 case MIN_EXPR:
2402 *reduc_code = REDUC_MIN_EXPR;
2403 return true;
2405 case PLUS_EXPR:
2406 *reduc_code = REDUC_PLUS_EXPR;
2407 return true;
2409 case MULT_EXPR:
2410 case MINUS_EXPR:
2411 case BIT_IOR_EXPR:
2412 case BIT_XOR_EXPR:
2413 case BIT_AND_EXPR:
2414 *reduc_code = ERROR_MARK;
2415 return true;
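/* Roughly speaking, the ERROR_MARK cases above are still vectorizable
   reductions, but the epilogue cannot use a single REDUC_*_EXPR; the
   vector of partial results is instead reduced step by step, e.g. by
   repeated whole-vector shifts (see have_whole_vector_shift below) or by
   extracting and combining the elements one at a time.  */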
2417 default:
2418 return false;
2423 /* Error reporting helper for vect_is_simple_reduction below. GIMPLE statement
2424 STMT is printed with a message MSG. */
2426 static void
2427 report_vect_op (dump_flags_t msg_type, gimple *stmt, const char *msg)
2429 dump_printf_loc (msg_type, vect_location, "%s", msg);
2430 dump_gimple_stmt (msg_type, TDF_SLIM, stmt, 0);
2434 /* Detect SLP reduction of the form:
2436 #a1 = phi <a5, a0>
2437 a2 = operation (a1)
2438 a3 = operation (a2)
2439 a4 = operation (a3)
2440 a5 = operation (a4)
2442 #a = phi <a5>
2444 PHI is the reduction phi node (#a1 = phi <a5, a0> above)
2445 FIRST_STMT is the first reduction stmt in the chain
2446 (a2 = operation (a1)).
2448 Return TRUE if a reduction chain was detected. */
2450 static bool
2451 vect_is_slp_reduction (loop_vec_info loop_info, gimple *phi,
2452 gimple *first_stmt)
2454 struct loop *loop = (gimple_bb (phi))->loop_father;
2455 struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
2456 enum tree_code code;
2457 gimple *current_stmt = NULL, *loop_use_stmt = NULL, *first, *next_stmt;
2458 stmt_vec_info use_stmt_info, current_stmt_info;
2459 tree lhs;
2460 imm_use_iterator imm_iter;
2461 use_operand_p use_p;
2462 int nloop_uses, size = 0, n_out_of_loop_uses;
2463 bool found = false;
2465 if (loop != vect_loop)
2466 return false;
2468 lhs = PHI_RESULT (phi);
2469 code = gimple_assign_rhs_code (first_stmt);
2470 while (1)
2472 nloop_uses = 0;
2473 n_out_of_loop_uses = 0;
2474 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
2476 gimple *use_stmt = USE_STMT (use_p);
2477 if (is_gimple_debug (use_stmt))
2478 continue;
2480 /* Check if we got back to the reduction phi. */
2481 if (use_stmt == phi)
2483 loop_use_stmt = use_stmt;
2484 found = true;
2485 break;
2488 if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
2490 loop_use_stmt = use_stmt;
2491 nloop_uses++;
2493 else
2494 n_out_of_loop_uses++;
2496 /* There can be either a single use in the loop or two uses in
2497 phi nodes. */
2498 if (nloop_uses > 1 || (n_out_of_loop_uses && nloop_uses))
2499 return false;
2502 if (found)
2503 break;
2505 /* We reached a statement with no loop uses. */
2506 if (nloop_uses == 0)
2507 return false;
2509 /* This is a loop exit phi, and we haven't reached the reduction phi. */
2510 if (gimple_code (loop_use_stmt) == GIMPLE_PHI)
2511 return false;
2513 if (!is_gimple_assign (loop_use_stmt)
2514 || code != gimple_assign_rhs_code (loop_use_stmt)
2515 || !flow_bb_inside_loop_p (loop, gimple_bb (loop_use_stmt)))
2516 return false;
2518 /* Insert USE_STMT into reduction chain. */
2519 use_stmt_info = vinfo_for_stmt (loop_use_stmt);
2520 if (current_stmt)
2522 current_stmt_info = vinfo_for_stmt (current_stmt);
2523 GROUP_NEXT_ELEMENT (current_stmt_info) = loop_use_stmt;
2524 GROUP_FIRST_ELEMENT (use_stmt_info)
2525 = GROUP_FIRST_ELEMENT (current_stmt_info);
2527 else
2528 GROUP_FIRST_ELEMENT (use_stmt_info) = loop_use_stmt;
2530 lhs = gimple_assign_lhs (loop_use_stmt);
2531 current_stmt = loop_use_stmt;
2532 size++;
2535 if (!found || loop_use_stmt != phi || size < 2)
2536 return false;
2538 /* Swap the operands, if needed, to make the reduction operand be the second
2539 operand. */
2540 lhs = PHI_RESULT (phi);
2541 next_stmt = GROUP_FIRST_ELEMENT (vinfo_for_stmt (current_stmt));
2542 while (next_stmt)
2544 if (gimple_assign_rhs2 (next_stmt) == lhs)
2546 tree op = gimple_assign_rhs1 (next_stmt);
2547 gimple *def_stmt = NULL;
2549 if (TREE_CODE (op) == SSA_NAME)
2550 def_stmt = SSA_NAME_DEF_STMT (op);
2552 /* Check that the other def is either defined in the loop
2553 ("vect_internal_def"), or it's an induction (defined by a
2554 loop-header phi-node). */
2555 if (def_stmt
2556 && gimple_bb (def_stmt)
2557 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
2558 && (is_gimple_assign (def_stmt)
2559 || is_gimple_call (def_stmt)
2560 || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
2561 == vect_induction_def
2562 || (gimple_code (def_stmt) == GIMPLE_PHI
2563 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
2564 == vect_internal_def
2565 && !is_loop_header_bb_p (gimple_bb (def_stmt)))))
2567 lhs = gimple_assign_lhs (next_stmt);
2568 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
2569 continue;
2572 return false;
2574 else
2576 tree op = gimple_assign_rhs2 (next_stmt);
2577 gimple *def_stmt = NULL;
2579 if (TREE_CODE (op) == SSA_NAME)
2580 def_stmt = SSA_NAME_DEF_STMT (op);
2582 /* Check that the other def is either defined in the loop
2583 ("vect_internal_def"), or it's an induction (defined by a
2584 loop-header phi-node). */
2585 if (def_stmt
2586 && gimple_bb (def_stmt)
2587 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
2588 && (is_gimple_assign (def_stmt)
2589 || is_gimple_call (def_stmt)
2590 || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
2591 == vect_induction_def
2592 || (gimple_code (def_stmt) == GIMPLE_PHI
2593 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
2594 == vect_internal_def
2595 && !is_loop_header_bb_p (gimple_bb (def_stmt)))))
2597 if (dump_enabled_p ())
2599 dump_printf_loc (MSG_NOTE, vect_location, "swapping oprnds: ");
2600 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, next_stmt, 0);
2603 swap_ssa_operands (next_stmt,
2604 gimple_assign_rhs1_ptr (next_stmt),
2605 gimple_assign_rhs2_ptr (next_stmt));
2606 update_stmt (next_stmt);
2608 if (CONSTANT_CLASS_P (gimple_assign_rhs1 (next_stmt)))
2609 LOOP_VINFO_OPERANDS_SWAPPED (loop_info) = true;
2611 else
2612 return false;
2615 lhs = gimple_assign_lhs (next_stmt);
2616 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
2619 /* Save the chain for further analysis in SLP detection. */
2620 first = GROUP_FIRST_ELEMENT (vinfo_for_stmt (current_stmt));
2621 LOOP_VINFO_REDUCTION_CHAINS (loop_info).safe_push (first);
2622 GROUP_SIZE (vinfo_for_stmt (first)) = size;
2624 return true;
2628 /* Function vect_is_simple_reduction
2630 (1) Detect a cross-iteration def-use cycle that represents a simple
2631 reduction computation. We look for the following pattern:
2633 loop_header:
2634 a1 = phi < a0, a2 >
2635 a3 = ...
2636 a2 = operation (a3, a1)
or
2640 a3 = ...
2641 loop_header:
2642 a1 = phi < a0, a2 >
2643 a2 = operation (a3, a1)
2645 such that:
2646 1. operation is commutative and associative and it is safe to
2647 change the order of the computation
2648 2. no uses for a2 in the loop (a2 is used out of the loop)
2649 3. no uses of a1 in the loop besides the reduction operation
2650 4. no uses of a1 outside the loop.
2652 Conditions 1,4 are tested here.
2653 Conditions 2,3 are tested in vect_mark_stmts_to_be_vectorized.
2655 (2) Detect a cross-iteration def-use cycle in nested loops, i.e.,
2656 nested cycles.
2658 (3) Detect cycles of phi nodes in outer-loop vectorization, i.e., double
2659 reductions:
2661 a1 = phi < a0, a2 >
2662 inner loop (def of a3)
2663 a2 = phi < a3 >
2665 (4) Detect condition expressions, i.e.:
2666 for (int i = 0; i < N; i++)
2667 if (a[i] < val)
2668 ret_val = a[i];
2672 static gimple *
2673 vect_is_simple_reduction (loop_vec_info loop_info, gimple *phi,
2674 bool *double_reduc,
2675 bool need_wrapping_integral_overflow,
2676 enum vect_reduction_type *v_reduc_type)
2678 struct loop *loop = (gimple_bb (phi))->loop_father;
2679 struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
2680 gimple *def_stmt, *def1 = NULL, *def2 = NULL, *phi_use_stmt = NULL;
2681 enum tree_code orig_code, code;
2682 tree op1, op2, op3 = NULL_TREE, op4 = NULL_TREE;
2683 tree type;
2684 int nloop_uses;
2685 tree name;
2686 imm_use_iterator imm_iter;
2687 use_operand_p use_p;
2688 bool phi_def;
2690 *double_reduc = false;
2691 *v_reduc_type = TREE_CODE_REDUCTION;
2693 name = PHI_RESULT (phi);
2694 /* ??? If there are no uses of the PHI result the inner loop reduction
2695 won't be detected as possibly double-reduction by vectorizable_reduction
2696 because that tries to walk the PHI arg from the preheader edge which
2697 can be constant. See PR60382. */
2698 if (has_zero_uses (name))
2699 return NULL;
2700 nloop_uses = 0;
2701 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
2703 gimple *use_stmt = USE_STMT (use_p);
2704 if (is_gimple_debug (use_stmt))
2705 continue;
2707 if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
2709 if (dump_enabled_p ())
2710 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2711 "intermediate value used outside loop.\n");
2713 return NULL;
2716 nloop_uses++;
2717 if (nloop_uses > 1)
2719 if (dump_enabled_p ())
2720 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2721 "reduction value used in loop.\n");
2722 return NULL;
2725 phi_use_stmt = use_stmt;
2728 edge latch_e = loop_latch_edge (loop);
2729 tree loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
2730 if (TREE_CODE (loop_arg) != SSA_NAME)
2732 if (dump_enabled_p ())
2734 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2735 "reduction: not ssa_name: ");
2736 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, loop_arg);
2737 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
2739 return NULL;
2742 def_stmt = SSA_NAME_DEF_STMT (loop_arg);
2743 if (is_gimple_assign (def_stmt))
2745 name = gimple_assign_lhs (def_stmt);
2746 phi_def = false;
2748 else if (gimple_code (def_stmt) == GIMPLE_PHI)
2750 name = PHI_RESULT (def_stmt);
2751 phi_def = true;
2753 else
2755 if (dump_enabled_p ())
2757 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2758 "reduction: unhandled reduction operation: ");
2759 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, def_stmt, 0);
2761 return NULL;
2764 if (! flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
2765 return NULL;
2767 nloop_uses = 0;
2768 auto_vec<gphi *, 3> lcphis;
2769 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
2771 gimple *use_stmt = USE_STMT (use_p);
2772 if (is_gimple_debug (use_stmt))
2773 continue;
2774 if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
2775 nloop_uses++;
2776 else
2777 /* We can have more than one loop-closed PHI. */
2778 lcphis.safe_push (as_a <gphi *> (use_stmt));
2779 if (nloop_uses > 1)
2781 if (dump_enabled_p ())
2782 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2783 "reduction used in loop.\n");
2784 return NULL;
2788 /* If DEF_STMT is a phi node itself, we expect it to have a single argument
2789 defined in the inner loop. */
2790 if (phi_def)
2792 op1 = PHI_ARG_DEF (def_stmt, 0);
2794 if (gimple_phi_num_args (def_stmt) != 1
2795 || TREE_CODE (op1) != SSA_NAME)
2797 if (dump_enabled_p ())
2798 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2799 "unsupported phi node definition.\n");
2801 return NULL;
2804 def1 = SSA_NAME_DEF_STMT (op1);
2805 if (gimple_bb (def1)
2806 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
2807 && loop->inner
2808 && flow_bb_inside_loop_p (loop->inner, gimple_bb (def1))
2809 && is_gimple_assign (def1)
2810 && flow_bb_inside_loop_p (loop->inner, gimple_bb (phi_use_stmt)))
2812 if (dump_enabled_p ())
2813 report_vect_op (MSG_NOTE, def_stmt,
2814 "detected double reduction: ");
2816 *double_reduc = true;
2817 return def_stmt;
2820 return NULL;
2823 /* If we are vectorizing an inner reduction we are executing that
2824 in the original order only in case we are not dealing with a
2825 double reduction. */
2826 bool check_reduction = true;
2827 if (flow_loop_nested_p (vect_loop, loop))
2829 gphi *lcphi;
2830 unsigned i;
2831 check_reduction = false;
2832 FOR_EACH_VEC_ELT (lcphis, i, lcphi)
2833 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, gimple_phi_result (lcphi))
2835 gimple *use_stmt = USE_STMT (use_p);
2836 if (is_gimple_debug (use_stmt))
2837 continue;
2838 if (! flow_bb_inside_loop_p (vect_loop, gimple_bb (use_stmt)))
2839 check_reduction = true;
2843 bool nested_in_vect_loop = flow_loop_nested_p (vect_loop, loop);
2844 code = orig_code = gimple_assign_rhs_code (def_stmt);
2846 /* We can handle "res -= x[i]", which is non-associative by
2847 simply rewriting this into "res += -x[i]". Avoid changing
2848 gimple instruction for the first simple tests and only do this
2849 if we're allowed to change code at all. */
2850 if (code == MINUS_EXPR
2851 && ! ((op1 = gimple_assign_rhs2 (def_stmt))
2852 && TREE_CODE (op1) == SSA_NAME
2853 && SSA_NAME_DEF_STMT (op1) == phi))
2854 code = PLUS_EXPR;
2856 if (code == COND_EXPR)
2858 if (! nested_in_vect_loop)
2859 *v_reduc_type = COND_REDUCTION;
2861 op3 = gimple_assign_rhs1 (def_stmt);
2862 if (COMPARISON_CLASS_P (op3))
2864 op4 = TREE_OPERAND (op3, 1);
2865 op3 = TREE_OPERAND (op3, 0);
2868 op1 = gimple_assign_rhs2 (def_stmt);
2869 op2 = gimple_assign_rhs3 (def_stmt);
2871 else if (!commutative_tree_code (code) || !associative_tree_code (code))
2873 if (dump_enabled_p ())
2874 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
2875 "reduction: not commutative/associative: ");
2876 return NULL;
2878 else if (get_gimple_rhs_class (code) == GIMPLE_BINARY_RHS)
2880 op1 = gimple_assign_rhs1 (def_stmt);
2881 op2 = gimple_assign_rhs2 (def_stmt);
2883 else
2885 if (dump_enabled_p ())
2886 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
2887 "reduction: not handled operation: ");
2888 return NULL;
2891 if (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op2) != SSA_NAME)
2893 if (dump_enabled_p ())
2894 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
2895 "reduction: both uses not ssa_names: ");
2897 return NULL;
2900 type = TREE_TYPE (gimple_assign_lhs (def_stmt));
2901 if ((TREE_CODE (op1) == SSA_NAME
2902 && !types_compatible_p (type,TREE_TYPE (op1)))
2903 || (TREE_CODE (op2) == SSA_NAME
2904 && !types_compatible_p (type, TREE_TYPE (op2)))
2905 || (op3 && TREE_CODE (op3) == SSA_NAME
2906 && !types_compatible_p (type, TREE_TYPE (op3)))
2907 || (op4 && TREE_CODE (op4) == SSA_NAME
2908 && !types_compatible_p (type, TREE_TYPE (op4))))
2910 if (dump_enabled_p ())
2912 dump_printf_loc (MSG_NOTE, vect_location,
2913 "reduction: multiple types: operation type: ");
2914 dump_generic_expr (MSG_NOTE, TDF_SLIM, type);
2915 dump_printf (MSG_NOTE, ", operands types: ");
2916 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2917 TREE_TYPE (op1));
2918 dump_printf (MSG_NOTE, ",");
2919 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2920 TREE_TYPE (op2));
2921 if (op3)
2923 dump_printf (MSG_NOTE, ",");
2924 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2925 TREE_TYPE (op3));
2928 if (op4)
2930 dump_printf (MSG_NOTE, ",");
2931 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2932 TREE_TYPE (op4));
2934 dump_printf (MSG_NOTE, "\n");
2937 return NULL;
2940 /* Check that it's ok to change the order of the computation.
2941 Generally, when vectorizing a reduction we change the order of the
2942 computation. This may change the behavior of the program in some
2943 cases, so we need to check that this is ok. One exception is when
2944 vectorizing an outer-loop: the inner-loop is executed sequentially,
2945 and therefore vectorizing reductions in the inner-loop during
2946 outer-loop vectorization is safe. */
2948 if (*v_reduc_type != COND_REDUCTION
2949 && check_reduction)
2951 /* CHECKME: check for !flag_finite_math_only too? */
2952 if (SCALAR_FLOAT_TYPE_P (type) && !flag_associative_math)
2954 /* Changing the order of operations changes the semantics. */
2955 if (dump_enabled_p ())
2956 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
2957 "reduction: unsafe fp math optimization: ");
2958 return NULL;
2960 else if (INTEGRAL_TYPE_P (type))
2962 if (!operation_no_trapping_overflow (type, code))
2964 /* Changing the order of operations changes the semantics. */
2965 if (dump_enabled_p ())
2966 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
2967 "reduction: unsafe int math optimization"
2968 " (overflow traps): ");
2969 return NULL;
2971 if (need_wrapping_integral_overflow
2972 && !TYPE_OVERFLOW_WRAPS (type)
2973 && operation_can_overflow (code))
2975 /* Changing the order of operations changes the semantics. */
2976 if (dump_enabled_p ())
2977 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
2978 "reduction: unsafe int math optimization"
2979 " (overflow doesn't wrap): ");
2980 return NULL;
2983 else if (SAT_FIXED_POINT_TYPE_P (type))
2985 /* Changing the order of operations changes the semantics. */
2986 if (dump_enabled_p ())
2987 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
2988 "reduction: unsafe fixed-point math optimization: ");
2989 return NULL;
2993 /* Reduction is safe. We're dealing with one of the following:
2994 1) integer arithmetic and no trapv
2995 2) floating point arithmetic, and special flags permit this optimization
2996 3) nested cycle (i.e., outer loop vectorization). */
2997 if (TREE_CODE (op1) == SSA_NAME)
2998 def1 = SSA_NAME_DEF_STMT (op1);
3000 if (TREE_CODE (op2) == SSA_NAME)
3001 def2 = SSA_NAME_DEF_STMT (op2);
3003 if (code != COND_EXPR
3004 && ((!def1 || gimple_nop_p (def1)) && (!def2 || gimple_nop_p (def2))))
3006 if (dump_enabled_p ())
3007 report_vect_op (MSG_NOTE, def_stmt, "reduction: no defs for operands: ");
3008 return NULL;
3011 /* Check that one def is the reduction def, defined by PHI,
3012 the other def is either defined in the loop ("vect_internal_def"),
3013 or it's an induction (defined by a loop-header phi-node). */
3015 if (def2 && def2 == phi
3016 && (code == COND_EXPR
3017 || !def1 || gimple_nop_p (def1)
3018 || !flow_bb_inside_loop_p (loop, gimple_bb (def1))
3019 || (def1 && flow_bb_inside_loop_p (loop, gimple_bb (def1))
3020 && (is_gimple_assign (def1)
3021 || is_gimple_call (def1)
3022 || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1))
3023 == vect_induction_def
3024 || (gimple_code (def1) == GIMPLE_PHI
3025 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1))
3026 == vect_internal_def
3027 && !is_loop_header_bb_p (gimple_bb (def1)))))))
3029 if (dump_enabled_p ())
3030 report_vect_op (MSG_NOTE, def_stmt, "detected reduction: ");
3031 return def_stmt;
3034 if (def1 && def1 == phi
3035 && (code == COND_EXPR
3036 || !def2 || gimple_nop_p (def2)
3037 || !flow_bb_inside_loop_p (loop, gimple_bb (def2))
3038 || (def2 && flow_bb_inside_loop_p (loop, gimple_bb (def2))
3039 && (is_gimple_assign (def2)
3040 || is_gimple_call (def2)
3041 || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2))
3042 == vect_induction_def
3043 || (gimple_code (def2) == GIMPLE_PHI
3044 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2))
3045 == vect_internal_def
3046 && !is_loop_header_bb_p (gimple_bb (def2)))))))
3048 if (! nested_in_vect_loop && orig_code != MINUS_EXPR)
3050 /* Check if we can swap operands (just for simplicity - so that
3051 the rest of the code can assume that the reduction variable
3052 is always the last (second) argument). */
3053 if (code == COND_EXPR)
3055 /* Swap cond_expr by inverting the condition. */
3056 tree cond_expr = gimple_assign_rhs1 (def_stmt);
3057 enum tree_code invert_code = ERROR_MARK;
3058 enum tree_code cond_code = TREE_CODE (cond_expr);
3060 if (TREE_CODE_CLASS (cond_code) == tcc_comparison)
3062 bool honor_nans = HONOR_NANS (TREE_OPERAND (cond_expr, 0));
3063 invert_code = invert_tree_comparison (cond_code, honor_nans);
3065 if (invert_code != ERROR_MARK)
3067 TREE_SET_CODE (cond_expr, invert_code);
3068 swap_ssa_operands (def_stmt,
3069 gimple_assign_rhs2_ptr (def_stmt),
3070 gimple_assign_rhs3_ptr (def_stmt));
3072 else
3074 if (dump_enabled_p ())
3075 report_vect_op (MSG_NOTE, def_stmt,
3076 "detected reduction: cannot swap operands "
3077 "for cond_expr");
3078 return NULL;
3081 else
3082 swap_ssa_operands (def_stmt, gimple_assign_rhs1_ptr (def_stmt),
3083 gimple_assign_rhs2_ptr (def_stmt));
3085 if (dump_enabled_p ())
3086 report_vect_op (MSG_NOTE, def_stmt,
3087 "detected reduction: need to swap operands: ");
3089 if (CONSTANT_CLASS_P (gimple_assign_rhs1 (def_stmt)))
3090 LOOP_VINFO_OPERANDS_SWAPPED (loop_info) = true;
3092 else
3094 if (dump_enabled_p ())
3095 report_vect_op (MSG_NOTE, def_stmt, "detected reduction: ");
3098 return def_stmt;
3101 /* Try to find SLP reduction chain. */
3102 if (! nested_in_vect_loop
3103 && code != COND_EXPR
3104 && orig_code != MINUS_EXPR
3105 && vect_is_slp_reduction (loop_info, phi, def_stmt))
3107 if (dump_enabled_p ())
3108 report_vect_op (MSG_NOTE, def_stmt,
3109 "reduction: detected reduction chain: ");
3111 return def_stmt;
3114 /* Dissolve a group possibly half-built by vect_is_slp_reduction. */
3115 gimple *first = GROUP_FIRST_ELEMENT (vinfo_for_stmt (def_stmt));
3116 while (first)
3118 gimple *next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (first));
3119 GROUP_FIRST_ELEMENT (vinfo_for_stmt (first)) = NULL;
3120 GROUP_NEXT_ELEMENT (vinfo_for_stmt (first)) = NULL;
3121 first = next;
3124 /* Look for the expression computing loop_arg from loop PHI result. */
3125 auto_vec<std::pair<ssa_op_iter, use_operand_p> > path;
3126 auto_bitmap visited;
3127 tree lookfor = PHI_RESULT (phi);
3128 ssa_op_iter curri;
3129 use_operand_p curr = op_iter_init_phiuse (&curri, as_a <gphi *>(phi),
3130 SSA_OP_USE);
3131 while (USE_FROM_PTR (curr) != loop_arg)
3132 curr = op_iter_next_use (&curri);
3133 curri.i = curri.numops;
3136 path.safe_push (std::make_pair (curri, curr));
3137 tree use = USE_FROM_PTR (curr);
3138 if (use == lookfor)
3139 break;
3140 gimple *def = SSA_NAME_DEF_STMT (use);
3141 if (gimple_nop_p (def)
3142 || ! flow_bb_inside_loop_p (loop, gimple_bb (def)))
3144 pop:
3147 std::pair<ssa_op_iter, use_operand_p> x = path.pop ();
3148 curri = x.first;
3149 curr = x.second;
3151 curr = op_iter_next_use (&curri);
3152 /* Skip already visited or non-SSA operands (from iterating
3153 over PHI args). */
3154 while (curr != NULL_USE_OPERAND_P
3155 && (TREE_CODE (USE_FROM_PTR (curr)) != SSA_NAME
3156 || ! bitmap_set_bit (visited,
3157 SSA_NAME_VERSION
3158 (USE_FROM_PTR (curr)))));
3160 while (curr == NULL_USE_OPERAND_P && ! path.is_empty ());
3161 if (curr == NULL_USE_OPERAND_P)
3162 break;
3164 else
3166 if (gimple_code (def) == GIMPLE_PHI)
3167 curr = op_iter_init_phiuse (&curri, as_a <gphi *>(def), SSA_OP_USE);
3168 else
3169 curr = op_iter_init_use (&curri, def, SSA_OP_USE);
3170 while (curr != NULL_USE_OPERAND_P
3171 && (TREE_CODE (USE_FROM_PTR (curr)) != SSA_NAME
3172 || ! bitmap_set_bit (visited,
3173 SSA_NAME_VERSION
3174 (USE_FROM_PTR (curr)))))
3175 curr = op_iter_next_use (&curri);
3176 if (curr == NULL_USE_OPERAND_P)
3177 goto pop;
3180 while (1);
3181 if (dump_file && (dump_flags & TDF_DETAILS))
3183 dump_printf_loc (MSG_NOTE, vect_location,
3184 "reduction path: ");
3185 unsigned i;
3186 std::pair<ssa_op_iter, use_operand_p> *x;
3187 FOR_EACH_VEC_ELT (path, i, x)
3189 dump_generic_expr (MSG_NOTE, TDF_SLIM, USE_FROM_PTR (x->second));
3190 dump_printf (MSG_NOTE, " ");
3192 dump_printf (MSG_NOTE, "\n");
3195 /* Check whether the reduction path detected is valid. */
3196 bool fail = path.length () == 0;
3197 bool neg = false;
3198 for (unsigned i = 1; i < path.length (); ++i)
3200 gimple *use_stmt = USE_STMT (path[i].second);
3201 tree op = USE_FROM_PTR (path[i].second);
3202 if (! has_single_use (op)
3203 || ! is_gimple_assign (use_stmt))
3205 fail = true;
3206 break;
3208 if (gimple_assign_rhs_code (use_stmt) != code)
3210 if (code == PLUS_EXPR
3211 && gimple_assign_rhs_code (use_stmt) == MINUS_EXPR)
3213 /* Track whether we negate the reduction value each iteration. */
3214 if (gimple_assign_rhs2 (use_stmt) == op)
3215 neg = ! neg;
3217 else
3219 fail = true;
3220 break;
3224 if (! fail && ! neg)
3225 return def_stmt;
3227 if (dump_enabled_p ())
3229 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
3230 "reduction: unknown pattern: ");
3233 return NULL;
3236 /* Wrapper around vect_is_simple_reduction, which will modify code
3237 in-place if it enables detection of more reductions. Arguments
3238 as there. */
3240 gimple *
3241 vect_force_simple_reduction (loop_vec_info loop_info, gimple *phi,
3242 bool *double_reduc,
3243 bool need_wrapping_integral_overflow)
3245 enum vect_reduction_type v_reduc_type;
3246 gimple *def = vect_is_simple_reduction (loop_info, phi, double_reduc,
3247 need_wrapping_integral_overflow,
3248 &v_reduc_type);
3249 if (def)
3251 stmt_vec_info reduc_def_info = vinfo_for_stmt (phi);
3252 STMT_VINFO_REDUC_TYPE (reduc_def_info) = v_reduc_type;
3253 STMT_VINFO_REDUC_DEF (reduc_def_info) = def;
3254 reduc_def_info = vinfo_for_stmt (def);
3255 STMT_VINFO_REDUC_DEF (reduc_def_info) = phi;
3257 return def;
3260 /* Calculate cost of peeling the loop PEEL_ITERS_PROLOGUE times. */
3261 int
3262 vect_get_known_peeling_cost (loop_vec_info loop_vinfo, int peel_iters_prologue,
3263 int *peel_iters_epilogue,
3264 stmt_vector_for_cost *scalar_cost_vec,
3265 stmt_vector_for_cost *prologue_cost_vec,
3266 stmt_vector_for_cost *epilogue_cost_vec)
3268 int retval = 0;
3269 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
3271 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
3273 *peel_iters_epilogue = vf/2;
3274 if (dump_enabled_p ())
3275 dump_printf_loc (MSG_NOTE, vect_location,
3276 "cost model: epilogue peel iters set to vf/2 "
3277 "because loop iterations are unknown .\n");
3279 /* If peeled iterations are known but the number of scalar loop
3280 iterations is unknown, count a taken branch per peeled loop. */
3281 retval = record_stmt_cost (prologue_cost_vec, 1, cond_branch_taken,
3282 NULL, 0, vect_prologue);
3283 retval = record_stmt_cost (prologue_cost_vec, 1, cond_branch_taken,
3284 NULL, 0, vect_epilogue);
3286 else
3288 int niters = LOOP_VINFO_INT_NITERS (loop_vinfo);
3289 peel_iters_prologue = niters < peel_iters_prologue ?
3290 niters : peel_iters_prologue;
3291 *peel_iters_epilogue = (niters - peel_iters_prologue) % vf;
3292 /* If we need to peel for gaps, but no peeling is required, we have to
3293 peel VF iterations. */
3294 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) && !*peel_iters_epilogue)
3295 *peel_iters_epilogue = vf;
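/* A small worked example, with made-up numbers: for NITERS == 23, a
   3-iteration prologue and VF == 4, the epilogue gets (23 - 3) % 4 == 0
   iterations; if peeling for gaps is required it is then bumped to a full
   VF of 4 so that the last vector iteration can be skipped.  */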
3298 stmt_info_for_cost *si;
3299 int j;
3300 if (peel_iters_prologue)
3301 FOR_EACH_VEC_ELT (*scalar_cost_vec, j, si)
3303 stmt_vec_info stmt_info
3304 = si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
3305 retval += record_stmt_cost (prologue_cost_vec,
3306 si->count * peel_iters_prologue,
3307 si->kind, stmt_info, si->misalign,
3308 vect_prologue);
3310 if (*peel_iters_epilogue)
3311 FOR_EACH_VEC_ELT (*scalar_cost_vec, j, si)
3313 stmt_vec_info stmt_info
3314 = si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
3315 retval += record_stmt_cost (epilogue_cost_vec,
3316 si->count * *peel_iters_epilogue,
3317 si->kind, stmt_info, si->misalign,
3318 vect_epilogue);
3321 return retval;
3324 /* Function vect_estimate_min_profitable_iters
3326 Return the number of iterations required for the vector version of the
3327 loop to be profitable relative to the cost of the scalar version of the
3328 loop.
3330 *RET_MIN_PROFITABLE_NITERS is a cost model profitability threshold
3331 of iterations for vectorization. A value of -1 means loop vectorization
3332 is not profitable. This returned value may be used for dynamic
3333 profitability check.
3335 *RET_MIN_PROFITABLE_ESTIMATE is a profitability threshold to be used
3336 for static check against estimated number of iterations. */
3338 static void
3339 vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo,
3340 int *ret_min_profitable_niters,
3341 int *ret_min_profitable_estimate)
3343 int min_profitable_iters;
3344 int min_profitable_estimate;
3345 int peel_iters_prologue;
3346 int peel_iters_epilogue;
3347 unsigned vec_inside_cost = 0;
3348 int vec_outside_cost = 0;
3349 unsigned vec_prologue_cost = 0;
3350 unsigned vec_epilogue_cost = 0;
3351 int scalar_single_iter_cost = 0;
3352 int scalar_outside_cost = 0;
3353 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
3354 int npeel = LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
3355 void *target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
3357 /* Cost model disabled. */
3358 if (unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
3360 dump_printf_loc (MSG_NOTE, vect_location, "cost model disabled.\n");
3361 *ret_min_profitable_niters = 0;
3362 *ret_min_profitable_estimate = 0;
3363 return;
3366 /* Requires loop versioning tests to handle misalignment. */
3367 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
3369 /* FIXME: Make cost depend on complexity of individual check. */
3370 unsigned len = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).length ();
3371 (void) add_stmt_cost (target_cost_data, len, vector_stmt, NULL, 0,
3372 vect_prologue);
3373 dump_printf (MSG_NOTE,
3374 "cost model: Adding cost of checks for loop "
3375 "versioning to treat misalignment.\n");
3378 /* Requires loop versioning with alias checks. */
3379 if (LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
3381 /* FIXME: Make cost depend on complexity of individual check. */
3382 unsigned len = LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo).length ();
3383 (void) add_stmt_cost (target_cost_data, len, vector_stmt, NULL, 0,
3384 vect_prologue);
3385 len = LOOP_VINFO_CHECK_UNEQUAL_ADDRS (loop_vinfo).length ();
3386 if (len)
3387 /* Count LEN - 1 ANDs and LEN comparisons. */
3388 (void) add_stmt_cost (target_cost_data, len * 2 - 1, scalar_stmt,
3389 NULL, 0, vect_prologue);
3390 dump_printf (MSG_NOTE,
3391 "cost model: Adding cost of checks for loop "
3392 "versioning aliasing.\n");
3395 /* Requires loop versioning with niter checks. */
3396 if (LOOP_REQUIRES_VERSIONING_FOR_NITERS (loop_vinfo))
3398 /* FIXME: Make cost depend on complexity of individual check. */
3399 (void) add_stmt_cost (target_cost_data, 1, vector_stmt, NULL, 0,
3400 vect_prologue);
3401 dump_printf (MSG_NOTE,
3402 "cost model: Adding cost of checks for loop "
3403 "versioning niters.\n");
3406 if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
3407 (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken, NULL, 0,
3408 vect_prologue);
3410 /* Count statements in scalar loop. Using this as scalar cost for a single
3411 iteration for now.
3413 TODO: Add outer loop support.
3415 TODO: Consider assigning different costs to different scalar
3416 statements. */
3418 scalar_single_iter_cost
3419 = LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST (loop_vinfo);
3421 /* Add additional cost for the peeled instructions in prologue and epilogue
3422 loop.
3424 FORNOW: If we don't know the value of peel_iters for prologue or epilogue
3425 at compile-time - we assume it's vf/2 (the worst would be vf-1).
3427 TODO: Build an expression that represents peel_iters for prologue and
3428 epilogue to be used in a run-time test. */
3430 if (npeel < 0)
3432 peel_iters_prologue = vf/2;
3433 dump_printf (MSG_NOTE, "cost model: "
3434 "prologue peel iters set to vf/2.\n");
3436 /* If peeling for alignment is unknown, the loop bound of the main loop
3437 becomes unknown. */
3438 peel_iters_epilogue = vf/2;
3439 dump_printf (MSG_NOTE, "cost model: "
3440 "epilogue peel iters set to vf/2 because "
3441 "peeling for alignment is unknown.\n");
3443 /* If peeled iterations are unknown, count a taken branch and a not taken
3444 branch per peeled loop. Even if scalar loop iterations are known,
3445 vector iterations are not known since peeled prologue iterations are
3446 not known. Hence guards remain the same. */
3447 (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken,
3448 NULL, 0, vect_prologue);
3449 (void) add_stmt_cost (target_cost_data, 1, cond_branch_not_taken,
3450 NULL, 0, vect_prologue);
3451 (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken,
3452 NULL, 0, vect_epilogue);
3453 (void) add_stmt_cost (target_cost_data, 1, cond_branch_not_taken,
3454 NULL, 0, vect_epilogue);
3455 stmt_info_for_cost *si;
3456 int j;
3457 FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo), j, si)
3459 struct _stmt_vec_info *stmt_info
3460 = si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
3461 (void) add_stmt_cost (target_cost_data,
3462 si->count * peel_iters_prologue,
3463 si->kind, stmt_info, si->misalign,
3464 vect_prologue);
3465 (void) add_stmt_cost (target_cost_data,
3466 si->count * peel_iters_epilogue,
3467 si->kind, stmt_info, si->misalign,
3468 vect_epilogue);
3471 else
3473 stmt_vector_for_cost prologue_cost_vec, epilogue_cost_vec;
3474 stmt_info_for_cost *si;
3475 int j;
3476 void *data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
3478 prologue_cost_vec.create (2);
3479 epilogue_cost_vec.create (2);
3480 peel_iters_prologue = npeel;
3482 (void) vect_get_known_peeling_cost (loop_vinfo, peel_iters_prologue,
3483 &peel_iters_epilogue,
3484 &LOOP_VINFO_SCALAR_ITERATION_COST
3485 (loop_vinfo),
3486 &prologue_cost_vec,
3487 &epilogue_cost_vec);
3489 FOR_EACH_VEC_ELT (prologue_cost_vec, j, si)
3491 struct _stmt_vec_info *stmt_info
3492 = si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
3493 (void) add_stmt_cost (data, si->count, si->kind, stmt_info,
3494 si->misalign, vect_prologue);
3497 FOR_EACH_VEC_ELT (epilogue_cost_vec, j, si)
3499 struct _stmt_vec_info *stmt_info
3500 = si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
3501 (void) add_stmt_cost (data, si->count, si->kind, stmt_info,
3502 si->misalign, vect_epilogue);
3505 prologue_cost_vec.release ();
3506 epilogue_cost_vec.release ();
3509 /* FORNOW: The scalar outside cost is incremented in one of the
3510 following ways:
3512 1. The vectorizer checks for alignment and aliasing and generates
3513 a condition that allows dynamic vectorization. A cost model
3514 check is ANDED with the versioning condition. Hence scalar code
3515 path now has the added cost of the versioning check.
3517 if (cost > th & versioning_check)
3518 jmp to vector code
3520 Hence run-time scalar is incremented by not-taken branch cost.
3522 2. The vectorizer then checks if a prologue is required. If the
3523 cost model check was not done before during versioning, it has to
3524 be done before the prologue check.
3526 if (cost <= th)
3527 prologue = scalar_iters
3528 if (prologue == 0)
3529 jmp to vector code
3530 else
3531 execute prologue
3532 if (prologue == num_iters)
3533 go to exit
3535 Hence the run-time scalar cost is incremented by a taken branch,
3536 plus a not-taken branch, plus a taken branch cost.
3538 3. The vectorizer then checks if an epilogue is required. If the
3539 cost model check was not done before during prologue check, it
3540 has to be done with the epilogue check.
3542 if (prologue == 0)
3543 jmp to vector code
3544 else
3545 execute prologue
3546 if (prologue == num_iters)
3547 go to exit
3548 vector code:
3549 if ((cost <= th) | (scalar_iters-prologue-epilogue == 0))
3550 jmp to epilogue
3552 Hence the run-time scalar cost should be incremented by 2 taken
3553 branches.
3555 TODO: The back end may reorder the BBS's differently and reverse
3556 conditions/branch directions. Change the estimates below to
3557 something more reasonable. */
3559 /* If the number of iterations is known and we do not do versioning, we can
3560 decide whether to vectorize at compile time. Hence the scalar version
3561 does not carry cost model guard costs. */
3562 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
3563 || LOOP_REQUIRES_VERSIONING (loop_vinfo))
3565 /* Cost model check occurs at versioning. */
3566 if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
3567 scalar_outside_cost += vect_get_stmt_cost (cond_branch_not_taken);
3568 else
3570 /* Cost model check occurs at prologue generation. */
3571 if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0)
3572 scalar_outside_cost += 2 * vect_get_stmt_cost (cond_branch_taken)
3573 + vect_get_stmt_cost (cond_branch_not_taken);
3574 /* Cost model check occurs at epilogue generation. */
3575 else
3576 scalar_outside_cost += 2 * vect_get_stmt_cost (cond_branch_taken);
3580 /* Complete the target-specific cost calculations. */
3581 finish_cost (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo), &vec_prologue_cost,
3582 &vec_inside_cost, &vec_epilogue_cost);
3584 vec_outside_cost = (int)(vec_prologue_cost + vec_epilogue_cost);
3586 if (dump_enabled_p ())
3588 dump_printf_loc (MSG_NOTE, vect_location, "Cost model analysis: \n");
3589 dump_printf (MSG_NOTE, " Vector inside of loop cost: %d\n",
3590 vec_inside_cost);
3591 dump_printf (MSG_NOTE, " Vector prologue cost: %d\n",
3592 vec_prologue_cost);
3593 dump_printf (MSG_NOTE, " Vector epilogue cost: %d\n",
3594 vec_epilogue_cost);
3595 dump_printf (MSG_NOTE, " Scalar iteration cost: %d\n",
3596 scalar_single_iter_cost);
3597 dump_printf (MSG_NOTE, " Scalar outside cost: %d\n",
3598 scalar_outside_cost);
3599 dump_printf (MSG_NOTE, " Vector outside cost: %d\n",
3600 vec_outside_cost);
3601 dump_printf (MSG_NOTE, " prologue iterations: %d\n",
3602 peel_iters_prologue);
3603 dump_printf (MSG_NOTE, " epilogue iterations: %d\n",
3604 peel_iters_epilogue);
3607 /* Calculate number of iterations required to make the vector version
3608 profitable, relative to the loop bodies only. The following condition
3609 must hold true:
3610 SIC * niters + SOC > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC
3611 where
3612 SIC = scalar iteration cost, VIC = vector iteration cost,
3613 VOC = vector outside cost, VF = vectorization factor,
3614 PL_ITERS = prologue iterations, EP_ITERS= epilogue iterations
3615 SOC = scalar outside cost for run time cost model check. */
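/* Purely illustrative numbers for the computation below: with SIC == 4,
   VIC == 8, VF == 4, VOC == 20 and SOC == PL_ITERS == EP_ITERS == 0, the
   division gives (20 * 4) / (4 * 4 - 8) == 10; at 10 iterations the scalar
   and vector costs are equal, so the final adjustment bumps the runtime
   threshold to 11.  */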
3617 if ((scalar_single_iter_cost * vf) > (int) vec_inside_cost)
3619 if (vec_outside_cost <= 0)
3620 min_profitable_iters = 0;
3621 else
3623 min_profitable_iters = ((vec_outside_cost - scalar_outside_cost) * vf
3624 - vec_inside_cost * peel_iters_prologue
3625 - vec_inside_cost * peel_iters_epilogue)
3626 / ((scalar_single_iter_cost * vf)
3627 - vec_inside_cost);
3629 if ((scalar_single_iter_cost * vf * min_profitable_iters)
3630 <= (((int) vec_inside_cost * min_profitable_iters)
3631 + (((int) vec_outside_cost - scalar_outside_cost) * vf)))
3632 min_profitable_iters++;
3635 /* vector version will never be profitable. */
3636 else
3638 if (LOOP_VINFO_LOOP (loop_vinfo)->force_vectorize)
3639 warning_at (vect_location, OPT_Wopenmp_simd, "vectorization "
3640 "did not happen for a simd loop");
3642 if (dump_enabled_p ())
3643 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3644 "cost model: the vector iteration cost = %d "
3645 "divided by the scalar iteration cost = %d "
3646 "is greater or equal to the vectorization factor = %d"
3647 ".\n",
3648 vec_inside_cost, scalar_single_iter_cost, vf);
3649 *ret_min_profitable_niters = -1;
3650 *ret_min_profitable_estimate = -1;
3651 return;
3654 dump_printf (MSG_NOTE,
3655 " Calculated minimum iters for profitability: %d\n",
3656 min_profitable_iters);
3658 /* We want the vectorized loop to execute at least once. */
3659 if (min_profitable_iters < (vf + peel_iters_prologue + peel_iters_epilogue))
3660 min_profitable_iters = vf + peel_iters_prologue + peel_iters_epilogue;
3662 if (dump_enabled_p ())
3663 dump_printf_loc (MSG_NOTE, vect_location,
3664 " Runtime profitability threshold = %d\n",
3665 min_profitable_iters);
3667 *ret_min_profitable_niters = min_profitable_iters;
3669 /* Calculate number of iterations required to make the vector version
3670 profitable, relative to the loop bodies only.
3672 Non-vectorized variant is SIC * niters and it must win over vector
3673 variant on the expected loop trip count. The following condition must hold true:
3674 SIC * niters > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC + SOC */
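/* Continuing the illustrative numbers above with SOC == 4, the static
   estimate below becomes (20 + 4) * 4 / (4 * 4 - 8) == 12, and it is then
   clamped so that it is never smaller than the runtime threshold.  */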
3676 if (vec_outside_cost <= 0)
3677 min_profitable_estimate = 0;
3678 else
3680 min_profitable_estimate = ((vec_outside_cost + scalar_outside_cost) * vf
3681 - vec_inside_cost * peel_iters_prologue
3682 - vec_inside_cost * peel_iters_epilogue)
3683 / ((scalar_single_iter_cost * vf)
3684 - vec_inside_cost);
3686 min_profitable_estimate = MAX (min_profitable_estimate, min_profitable_iters);
3687 if (dump_enabled_p ())
3688 dump_printf_loc (MSG_NOTE, vect_location,
3689 " Static estimate profitability threshold = %d\n",
3690 min_profitable_estimate);
3692 *ret_min_profitable_estimate = min_profitable_estimate;
3695 /* Writes into SEL a mask for a vec_perm, equivalent to a vec_shr by OFFSET
3696 vector elements (not bits) for a vector of mode MODE. */
3697 static void
3698 calc_vec_perm_mask_for_shift (machine_mode mode, unsigned int offset,
3699 unsigned char *sel)
3701 unsigned int i, nelt = GET_MODE_NUNITS (mode);
3703 for (i = 0; i < nelt; i++)
3704 sel[i] = (i + offset) & (2*nelt - 1);
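/* For instance, with a 4-element vector and OFFSET == 1 this produces the
   permutation mask {1, 2, 3, 4}: indices 0..3 select from the first
   operand and 4..7 from the second, so the mask shifts the first vector
   down by one element and pulls element 0 of the second vector into the
   last lane.  */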
3707 /* Checks whether the target supports whole-vector shifts for vectors of mode
3708 MODE. This is the case if _either_ the platform handles vec_shr_optab, _or_
3709 it supports vec_perm_const with masks for all necessary shift amounts. */
3710 static bool
3711 have_whole_vector_shift (machine_mode mode)
3713 if (optab_handler (vec_shr_optab, mode) != CODE_FOR_nothing)
3714 return true;
3716 if (direct_optab_handler (vec_perm_const_optab, mode) == CODE_FOR_nothing)
3717 return false;
3719 unsigned int i, nelt = GET_MODE_NUNITS (mode);
3720 unsigned char *sel = XALLOCAVEC (unsigned char, nelt);
3722 for (i = nelt/2; i >= 1; i/=2)
3724 calc_vec_perm_mask_for_shift (mode, i, sel);
3725 if (!can_vec_perm_p (mode, false, sel))
3726 return false;
3728 return true;
3731 /* TODO: There is a close dependency between the vect_model_*_cost and
3732 vectorizable_* functions. Design this better to avoid maintenance issues. */
3734 /* Function vect_model_reduction_cost.
3736 Models cost for a reduction operation, including the vector ops
3737 generated within the strip-mine loop, the initial definition before
3738 the loop, and the epilogue code that must be generated. */
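/* For example, reading off the accounting below: a non-nested COND_REDUCTION
   with ncopies == 1 is charged 2 vector_stmts in the loop body, 4
   scalar_to_vec stmts in the prologue and, when a reduc_code is available,
   2 vector_stmts, 2 vec_to_scalar stmts and 1 scalar_to_vec stmt in the
   epilogue. */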
3740 static void
3741 vect_model_reduction_cost (stmt_vec_info stmt_info, enum tree_code reduc_code,
3742 int ncopies)
3744 int prologue_cost = 0, epilogue_cost = 0;
3745 enum tree_code code;
3746 optab optab;
3747 tree vectype;
3748 gimple *orig_stmt;
3749 machine_mode mode;
3750 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3751 struct loop *loop = NULL;
3752 void *target_cost_data;
3754 if (loop_vinfo)
3756 loop = LOOP_VINFO_LOOP (loop_vinfo);
3757 target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
3759 else
3760 target_cost_data = BB_VINFO_TARGET_COST_DATA (STMT_VINFO_BB_VINFO (stmt_info));
3762 /* Condition reductions generate two reductions in the loop. */
3763 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION)
3764 ncopies *= 2;
3766 /* Cost of reduction op inside loop. */
3767 unsigned inside_cost = add_stmt_cost (target_cost_data, ncopies, vector_stmt,
3768 stmt_info, 0, vect_body);
3770 vectype = STMT_VINFO_VECTYPE (stmt_info);
3771 mode = TYPE_MODE (vectype);
3772 orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
3774 if (!orig_stmt)
3775 orig_stmt = STMT_VINFO_STMT (stmt_info);
3777 code = gimple_assign_rhs_code (orig_stmt);
3779 /* Add in cost for initial definition.
3780 For cond reduction we have four vectors: initial index, step, initial
3781 result of the data reduction, initial value of the index reduction. */
3782 int prologue_stmts = STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
3783 == COND_REDUCTION ? 4 : 1;
3784 prologue_cost += add_stmt_cost (target_cost_data, prologue_stmts,
3785 scalar_to_vec, stmt_info, 0,
3786 vect_prologue);
3788 /* Determine cost of epilogue code.
3790 We have a reduction operator that will reduce the vector in one statement.
3791 Also requires scalar extract. */
3793 if (!loop || !nested_in_vect_loop_p (loop, orig_stmt))
3795 if (reduc_code != ERROR_MARK)
3797 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION)
3799 /* An EQ stmt and a COND_EXPR stmt. */
3800 epilogue_cost += add_stmt_cost (target_cost_data, 2,
3801 vector_stmt, stmt_info, 0,
3802 vect_epilogue);
3803 /* Reduction of the max index and a reduction of the found
3804 values. */
3805 epilogue_cost += add_stmt_cost (target_cost_data, 2,
3806 vec_to_scalar, stmt_info, 0,
3807 vect_epilogue);
3808 /* A broadcast of the max value. */
3809 epilogue_cost += add_stmt_cost (target_cost_data, 1,
3810 scalar_to_vec, stmt_info, 0,
3811 vect_epilogue);
3813 else
3815 epilogue_cost += add_stmt_cost (target_cost_data, 1, vector_stmt,
3816 stmt_info, 0, vect_epilogue);
3817 epilogue_cost += add_stmt_cost (target_cost_data, 1,
3818 vec_to_scalar, stmt_info, 0,
3819 vect_epilogue);
3822 else if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION)
3824 unsigned nunits = TYPE_VECTOR_SUBPARTS (vectype);
3825 /* Extraction of scalar elements. */
3826 epilogue_cost += add_stmt_cost (target_cost_data, 2 * nunits,
3827 vec_to_scalar, stmt_info, 0,
3828 vect_epilogue);
3829 /* Scalar max reductions via COND_EXPR / MAX_EXPR. */
3830 epilogue_cost += add_stmt_cost (target_cost_data, 2 * nunits - 3,
3831 scalar_stmt, stmt_info, 0,
3832 vect_epilogue);
3834 else
3836 int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype));
3837 tree bitsize =
3838 TYPE_SIZE (TREE_TYPE (gimple_assign_lhs (orig_stmt)));
3839 int element_bitsize = tree_to_uhwi (bitsize);
3840 int nelements = vec_size_in_bits / element_bitsize;
3842 if (code == COND_EXPR)
3843 code = MAX_EXPR;
3845 optab = optab_for_tree_code (code, vectype, optab_default);
3847 /* We have a whole vector shift available. */
3848 if (optab != unknown_optab
3849 && VECTOR_MODE_P (mode)
3850 && optab_handler (optab, mode) != CODE_FOR_nothing
3851 && have_whole_vector_shift (mode))
3853 /* Final reduction via vector shifts and the reduction operator.
3854 Also requires scalar extract. */
3855 epilogue_cost += add_stmt_cost (target_cost_data,
3856 exact_log2 (nelements) * 2,
3857 vector_stmt, stmt_info, 0,
3858 vect_epilogue);
3859 epilogue_cost += add_stmt_cost (target_cost_data, 1,
3860 vec_to_scalar, stmt_info, 0,
3861 vect_epilogue);
3863 else
3864 /* Use extracts and reduction op for final reduction. For N
3865 elements, we have N extracts and N-1 reduction ops. */
3866 epilogue_cost += add_stmt_cost (target_cost_data,
3867 nelements + nelements - 1,
3868 vector_stmt, stmt_info, 0,
3869 vect_epilogue);
3873 if (dump_enabled_p ())
3874 dump_printf (MSG_NOTE,
3875 "vect_model_reduction_cost: inside_cost = %d, "
3876 "prologue_cost = %d, epilogue_cost = %d .\n", inside_cost,
3877 prologue_cost, epilogue_cost);
3881 /* Function vect_model_induction_cost.
3883 Models cost for induction operations. */
3885 static void
3886 vect_model_induction_cost (stmt_vec_info stmt_info, int ncopies)
3888 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3889 void *target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
3890 unsigned inside_cost, prologue_cost;
3892 if (PURE_SLP_STMT (stmt_info))
3893 return;
3895 /* loop cost for vec_loop. */
3896 inside_cost = add_stmt_cost (target_cost_data, ncopies, vector_stmt,
3897 stmt_info, 0, vect_body);
3899 /* prologue cost for vec_init and vec_step. */
3900 prologue_cost = add_stmt_cost (target_cost_data, 2, scalar_to_vec,
3901 stmt_info, 0, vect_prologue);
3903 if (dump_enabled_p ())
3904 dump_printf_loc (MSG_NOTE, vect_location,
3905 "vect_model_induction_cost: inside_cost = %d, "
3906 "prologue_cost = %d .\n", inside_cost, prologue_cost);
3911 /* Function get_initial_def_for_reduction
3913 Input:
3914 STMT - a stmt that performs a reduction operation in the loop.
3915 INIT_VAL - the initial value of the reduction variable
3917 Output:
3918 ADJUSTMENT_DEF - a tree that holds a value to be added to the final result
3919 of the reduction (used for adjusting the epilog - see below).
3920 Return a vector variable, initialized according to the operation that STMT
3921 performs. This vector will be used as the initial value of the
3922 vector of partial results.
3924 Option1 (adjust in epilog): Initialize the vector as follows:
3925 add/bit or/xor: [0,0,...,0,0]
3926 mult/bit and: [1,1,...,1,1]
3927 min/max/cond_expr: [init_val,init_val,..,init_val,init_val]
3928 and when necessary (e.g. add/mult case) let the caller know
3929 that it needs to adjust the result by init_val.
3931 Option2: Initialize the vector as follows:
3932 add/bit or/xor: [init_val,0,0,...,0]
3933 mult/bit and: [init_val,1,1,...,1]
3934 min/max/cond_expr: [init_val,init_val,...,init_val]
3935 and no adjustments are needed.
3937 For example, for the following code:
3939 s = init_val;
3940 for (i=0;i<n;i++)
3941 s = s + a[i];
3943 STMT is 's = s + a[i]', and the reduction variable is 's'.
3944 For a vector of 4 units, we want to return either [0,0,0,init_val],
3945 or [0,0,0,0] and let the caller know that it needs to adjust
3946 the result at the end by 'init_val'.
3948 FORNOW, we use the 'adjust in epilog' scheme (Option1) when ADJUSTMENT_DEF
3949 is not NULL, because the initialization vector is simpler then (the same
3950 element in all entries), and Option2 otherwise.
3952 A cost model should help decide between these two schemes. */
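/* For instance, for s *= a[i] with init_val 5 and four lanes, Option1
   starts from [1,1,1,1] and the caller multiplies the final reduced
   product by 5, whereas Option2 starts from [5,1,1,1] and needs no
   adjustment. */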
3954 tree
3955 get_initial_def_for_reduction (gimple *stmt, tree init_val,
3956 tree *adjustment_def)
3958 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
3959 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
3960 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
3961 tree scalar_type = TREE_TYPE (init_val);
3962 tree vectype = get_vectype_for_scalar_type (scalar_type);
3963 int nunits;
3964 enum tree_code code = gimple_assign_rhs_code (stmt);
3965 tree def_for_init;
3966 tree init_def;
3967 tree *elts;
3968 int i;
3969 bool nested_in_vect_loop = false;
3970 REAL_VALUE_TYPE real_init_val = dconst0;
3971 int int_init_val = 0;
3972 gimple *def_stmt = NULL;
3973 gimple_seq stmts = NULL;
3975 gcc_assert (vectype);
3976 nunits = TYPE_VECTOR_SUBPARTS (vectype);
3978 gcc_assert (POINTER_TYPE_P (scalar_type) || INTEGRAL_TYPE_P (scalar_type)
3979 || SCALAR_FLOAT_TYPE_P (scalar_type));
3981 if (nested_in_vect_loop_p (loop, stmt))
3982 nested_in_vect_loop = true;
3983 else
3984 gcc_assert (loop == (gimple_bb (stmt))->loop_father);
3986 /* In case of double reduction we only create a vector variable to be put
3987 in the reduction phi node. The actual statement creation is done in
3988 vect_create_epilog_for_reduction. */
3989 if (adjustment_def && nested_in_vect_loop
3990 && TREE_CODE (init_val) == SSA_NAME
3991 && (def_stmt = SSA_NAME_DEF_STMT (init_val))
3992 && gimple_code (def_stmt) == GIMPLE_PHI
3993 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
3994 && vinfo_for_stmt (def_stmt)
3995 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
3996 == vect_double_reduction_def)
3998 *adjustment_def = NULL;
3999 return vect_create_destination_var (init_val, vectype);
4002 /* In case of a nested reduction do not use an adjustment def, as
4003 that case is not handled correctly by the epilogue generation
4004 if ncopies is not one. */
4005 if (adjustment_def && nested_in_vect_loop)
4007 *adjustment_def = NULL;
4008 return vect_get_vec_def_for_operand (init_val, stmt);
4011 switch (code)
4013 case WIDEN_SUM_EXPR:
4014 case DOT_PROD_EXPR:
4015 case SAD_EXPR:
4016 case PLUS_EXPR:
4017 case MINUS_EXPR:
4018 case BIT_IOR_EXPR:
4019 case BIT_XOR_EXPR:
4020 case MULT_EXPR:
4021 case BIT_AND_EXPR:
4022 /* ADJUSTMENT_DEF is NULL when called from
4023 vect_create_epilog_for_reduction to vectorize double reduction. */
4024 if (adjustment_def)
4025 *adjustment_def = init_val;
4027 if (code == MULT_EXPR)
4029 real_init_val = dconst1;
4030 int_init_val = 1;
4033 if (code == BIT_AND_EXPR)
4034 int_init_val = -1;
4036 if (SCALAR_FLOAT_TYPE_P (scalar_type))
4037 def_for_init = build_real (scalar_type, real_init_val);
4038 else
4039 def_for_init = build_int_cst (scalar_type, int_init_val);
4041 /* Fill all elements but the first with '0' or '1'. */
4042 elts = XALLOCAVEC (tree, nunits);
4043 for (i = nunits - 2; i >= 0; --i)
4044 elts[i + 1] = def_for_init;
4046 /* Option1: the first element is '0' or '1' as well. */
4047 if (adjustment_def)
4049 elts[0] = def_for_init;
4050 init_def = build_vector (vectype, elts);
4051 break;
4054 /* Option2: the first element is INIT_VAL. */
4055 elts[0] = init_val;
4056 if (TREE_CONSTANT (init_val))
4057 init_def = build_vector (vectype, elts);
4058 else
4060 vec<constructor_elt, va_gc> *v;
4061 vec_alloc (v, nunits);
4062 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, init_val);
4063 for (i = 1; i < nunits; ++i)
4064 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, elts[i]);
4065 init_def = build_constructor (vectype, v);
4068 break;
4070 case MIN_EXPR:
4071 case MAX_EXPR:
4072 case COND_EXPR:
4073 if (adjustment_def)
4075 *adjustment_def = NULL_TREE;
4076 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_vinfo) != COND_REDUCTION)
4078 init_def = vect_get_vec_def_for_operand (init_val, stmt);
4079 break;
4082 init_val = gimple_convert (&stmts, TREE_TYPE (vectype), init_val);
4083 if (! gimple_seq_empty_p (stmts))
4084 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
4085 init_def = build_vector_from_val (vectype, init_val);
4086 break;
4088 default:
4089 gcc_unreachable ();
4092 return init_def;
4095 /* Get at the initial defs for the reduction PHIs in SLP_NODE.
4096 NUMBER_OF_VECTORS is the number of vector defs to create. */
4098 static void
4099 get_initial_defs_for_reduction (slp_tree slp_node,
4100 vec<tree> *vec_oprnds,
4101 unsigned int number_of_vectors,
4102 enum tree_code code, bool reduc_chain)
4104 vec<gimple *> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
4105 gimple *stmt = stmts[0];
4106 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
4107 unsigned nunits;
4108 tree vec_cst;
4109 tree *elts;
4110 unsigned j, number_of_places_left_in_vector;
4111 tree vector_type, scalar_type;
4112 tree vop;
4113 int group_size = stmts.length ();
4114 unsigned int vec_num, i;
4115 unsigned number_of_copies = 1;
4116 vec<tree> voprnds;
4117 voprnds.create (number_of_vectors);
4118 bool constant_p;
4119 tree neutral_op = NULL;
4120 struct loop *loop;
4121 gimple_seq ctor_seq = NULL;
4123 vector_type = STMT_VINFO_VECTYPE (stmt_vinfo);
4124 scalar_type = TREE_TYPE (vector_type);
4125 nunits = TYPE_VECTOR_SUBPARTS (vector_type);
4127 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def);
4129 loop = (gimple_bb (stmt))->loop_father;
4130 gcc_assert (loop);
4132 /* op is the reduction operand of the first stmt already. */
4133 /* For additional copies (see the explanation of NUMBER_OF_COPIES below)
4134 we need either neutral operands or the original operands. See
4135 get_initial_def_for_reduction() for details. */
4136 switch (code)
4138 case WIDEN_SUM_EXPR:
4139 case DOT_PROD_EXPR:
4140 case SAD_EXPR:
4141 case PLUS_EXPR:
4142 case MINUS_EXPR:
4143 case BIT_IOR_EXPR:
4144 case BIT_XOR_EXPR:
4145 neutral_op = build_zero_cst (scalar_type);
4146 break;
4148 case MULT_EXPR:
4149 neutral_op = build_one_cst (scalar_type);
4150 break;
4152 case BIT_AND_EXPR:
4153 neutral_op = build_all_ones_cst (scalar_type);
4154 break;
4156 /* For MIN/MAX we don't have an easy neutral operand but
4157 the initial values can be used fine here. Only for
4158 a reduction chain do we have to force a neutral element. */
4159 case MAX_EXPR:
4160 case MIN_EXPR:
4161 if (! reduc_chain)
4162 neutral_op = NULL;
4163 else
4164 neutral_op = PHI_ARG_DEF_FROM_EDGE (stmt,
4165 loop_preheader_edge (loop));
4166 break;
4168 default:
4169 gcc_assert (! reduc_chain);
4170 neutral_op = NULL;
4173 /* NUMBER_OF_COPIES is the number of times we need to use the same values in
4174 created vectors. It is greater than 1 if unrolling is performed.
4176 For example, we have two scalar operands, s1 and s2 (e.g., group of
4177 strided accesses of size two), while NUNITS is four (i.e., four scalars
4178 of this type can be packed in a vector). The output vector will contain
4179 two copies of each scalar operand: {s1, s2, s1, s2}. (NUMBER_OF_COPIES
4180 will be 2).
4182 If GROUP_SIZE > NUNITS, the scalars will be split into several vectors
4183 containing the operands.
4185 For example, NUNITS is four as before, and the group size is 8
4186 (s1, s2, ..., s8). We will create two vectors {s1, s2, s3, s4} and
4187 {s5, s6, s7, s8}. */
4189 number_of_copies = nunits * number_of_vectors / group_size;
4191 number_of_places_left_in_vector = nunits;
4192 constant_p = true;
4193 elts = XALLOCAVEC (tree, nunits);
4194 for (j = 0; j < number_of_copies; j++)
4196 for (i = group_size - 1; stmts.iterate (i, &stmt); i--)
4198 tree op;
4199 /* Get the def before the loop. In a reduction chain we have only
4200 one initial value. */
4201 if ((j != (number_of_copies - 1)
4202 || (reduc_chain && i != 0))
4203 && neutral_op)
4204 op = neutral_op;
4205 else
4206 op = PHI_ARG_DEF_FROM_EDGE (stmt,
4207 loop_preheader_edge (loop));
4209 /* Create 'vect_ = {op0,op1,...,opn}'. */
4210 number_of_places_left_in_vector--;
4211 elts[number_of_places_left_in_vector] = op;
4212 if (!CONSTANT_CLASS_P (op))
4213 constant_p = false;
4215 if (number_of_places_left_in_vector == 0)
4217 if (constant_p)
4218 vec_cst = build_vector (vector_type, elts);
4219 else
4221 vec<constructor_elt, va_gc> *v;
4222 unsigned k;
4223 vec_alloc (v, nunits);
4224 for (k = 0; k < nunits; ++k)
4225 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, elts[k]);
4226 vec_cst = build_constructor (vector_type, v);
4228 tree init;
4229 gimple_stmt_iterator gsi;
4230 init = vect_init_vector (stmt, vec_cst, vector_type, NULL);
4231 if (ctor_seq != NULL)
4233 gsi = gsi_for_stmt (SSA_NAME_DEF_STMT (init));
4234 gsi_insert_seq_before_without_update (&gsi, ctor_seq,
4235 GSI_SAME_STMT);
4236 ctor_seq = NULL;
4238 voprnds.quick_push (init);
4240 number_of_places_left_in_vector = nunits;
4241 constant_p = true;
4246 /* Since the vectors are created in the reverse order, we should invert
4247 them. */
4248 vec_num = voprnds.length ();
4249 for (j = vec_num; j != 0; j--)
4251 vop = voprnds[j - 1];
4252 vec_oprnds->quick_push (vop);
4255 voprnds.release ();
4257 /* In case VF is greater than the unrolling factor needed for the SLP
4258 group of stmts, NUMBER_OF_VECTORS to be created is greater than
4259 NUMBER_OF_SCALARS/NUNITS or NUNITS/NUMBER_OF_SCALARS, and hence we have
4260 to replicate the vectors. */
4261 while (number_of_vectors > vec_oprnds->length ())
4263 tree neutral_vec = NULL;
4265 if (neutral_op)
4267 if (!neutral_vec)
4268 neutral_vec = build_vector_from_val (vector_type, neutral_op);
4270 vec_oprnds->quick_push (neutral_vec);
4272 else
4274 for (i = 0; vec_oprnds->iterate (i, &vop) && i < vec_num; i++)
4275 vec_oprnds->quick_push (vop);
4281 /* Function vect_create_epilog_for_reduction
4283 Create code at the loop-epilog to finalize the result of a reduction
4284 computation.
4286 VECT_DEFS is list of vector of partial results, i.e., the lhs's of vector
4287 reduction statements.
4288 STMT is the scalar reduction stmt that is being vectorized.
4289 NCOPIES is > 1 in case the vectorization factor (VF) is bigger than the
4290 number of elements that we can fit in a vectype (nunits). In this case
4291 we have to generate more than one vector stmt - i.e., we need to "unroll"
4292 the vector stmt by a factor VF/nunits. For more details see documentation
4293 in vectorizable_operation.
4294 REDUC_CODE is the tree-code for the epilog reduction.
4295 REDUCTION_PHIS is a list of the phi-nodes that carry the reduction
4296 computation.
4297 REDUC_INDEX is the index of the operand in the right hand side of the
4298 statement that is defined by REDUCTION_PHI.
4299 DOUBLE_REDUC is TRUE if double reduction phi nodes should be handled.
4300 SLP_NODE is an SLP node containing a group of reduction statements. The
4301 first one in this group is STMT.
4303 This function:
4304 1. Creates the reduction def-use cycles: sets the arguments for
4305 REDUCTION_PHIS:
4306 The loop-entry argument is the vectorized initial-value of the reduction.
4307 The loop-latch argument is taken from VECT_DEFS - the vector of partial
4308 sums.
4309 2. "Reduces" each vector of partial results VECT_DEFS into a single result,
4310 by applying the operation specified by REDUC_CODE if available, or by
4311 other means (whole-vector shifts or a scalar loop).
4312 The function also creates a new phi node at the loop exit to preserve
4313 loop-closed form, as illustrated below.
4315 The flow at the entry to this function:
4317 loop:
4318 vec_def = phi <null, null> # REDUCTION_PHI
4319 VECT_DEF = vector_stmt # vectorized form of STMT
4320 s_loop = scalar_stmt # (scalar) STMT
4321 loop_exit:
4322 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
4323 use <s_out0>
4324 use <s_out0>
4326 The above is transformed by this function into:
4328 loop:
4329 vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI
4330 VECT_DEF = vector_stmt # vectorized form of STMT
4331 s_loop = scalar_stmt # (scalar) STMT
4332 loop_exit:
4333 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
4334 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
4335 v_out2 = reduce <v_out1>
4336 s_out3 = extract_field <v_out2, 0>
4337 s_out4 = adjust_result <s_out3>
4338 use <s_out4>
4339 use <s_out4>
4342 static void
4343 vect_create_epilog_for_reduction (vec<tree> vect_defs, gimple *stmt,
4344 gimple *reduc_def_stmt,
4345 int ncopies, enum tree_code reduc_code,
4346 vec<gimple *> reduction_phis,
4347 bool double_reduc,
4348 slp_tree slp_node,
4349 slp_instance slp_node_instance)
4351 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4352 stmt_vec_info prev_phi_info;
4353 tree vectype;
4354 machine_mode mode;
4355 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4356 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo), *outer_loop = NULL;
4357 basic_block exit_bb;
4358 tree scalar_dest;
4359 tree scalar_type;
4360 gimple *new_phi = NULL, *phi;
4361 gimple_stmt_iterator exit_gsi;
4362 tree vec_dest;
4363 tree new_temp = NULL_TREE, new_dest, new_name, new_scalar_dest;
4364 gimple *epilog_stmt = NULL;
4365 enum tree_code code = gimple_assign_rhs_code (stmt);
4366 gimple *exit_phi;
4367 tree bitsize;
4368 tree adjustment_def = NULL;
4369 tree vec_initial_def = NULL;
4370 tree expr, def, initial_def = NULL;
4371 tree orig_name, scalar_result;
4372 imm_use_iterator imm_iter, phi_imm_iter;
4373 use_operand_p use_p, phi_use_p;
4374 gimple *use_stmt, *orig_stmt, *reduction_phi = NULL;
4375 bool nested_in_vect_loop = false;
4376 auto_vec<gimple *> new_phis;
4377 auto_vec<gimple *> inner_phis;
4378 enum vect_def_type dt = vect_unknown_def_type;
4379 int j, i;
4380 auto_vec<tree> scalar_results;
4381 unsigned int group_size = 1, k, ratio;
4382 auto_vec<tree> vec_initial_defs;
4383 auto_vec<gimple *> phis;
4384 bool slp_reduc = false;
4385 tree new_phi_result;
4386 gimple *inner_phi = NULL;
4387 tree induction_index = NULL_TREE;
4389 if (slp_node)
4390 group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();
4392 if (nested_in_vect_loop_p (loop, stmt))
4394 outer_loop = loop;
4395 loop = loop->inner;
4396 nested_in_vect_loop = true;
4397 gcc_assert (!slp_node);
4400 vectype = STMT_VINFO_VECTYPE (stmt_info);
4401 gcc_assert (vectype);
4402 mode = TYPE_MODE (vectype);
4404 /* 1. Create the reduction def-use cycle:
4405 Set the arguments of REDUCTION_PHIS, i.e., transform
4407 loop:
4408 vec_def = phi <null, null> # REDUCTION_PHI
4409 VECT_DEF = vector_stmt # vectorized form of STMT
4412 into:
4414 loop:
4415 vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI
4416 VECT_DEF = vector_stmt # vectorized form of STMT
4419 (in case of SLP, do it for all the phis). */
4421 /* Get the loop-entry arguments. */
4422 enum vect_def_type initial_def_dt = vect_unknown_def_type;
4423 if (slp_node)
4425 unsigned vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
4426 vec_initial_defs.reserve (vec_num);
4427 get_initial_defs_for_reduction (slp_node_instance->reduc_phis,
4428 &vec_initial_defs, vec_num, code,
4429 GROUP_FIRST_ELEMENT (stmt_info));
4431 else
4433 /* Get at the scalar def before the loop, that defines the initial value
4434 of the reduction variable. */
4435 gimple *def_stmt;
4436 initial_def = PHI_ARG_DEF_FROM_EDGE (reduc_def_stmt,
4437 loop_preheader_edge (loop));
4438 vect_is_simple_use (initial_def, loop_vinfo, &def_stmt, &initial_def_dt);
4439 vec_initial_def = get_initial_def_for_reduction (stmt, initial_def,
4440 &adjustment_def);
4441 vec_initial_defs.create (1);
4442 vec_initial_defs.quick_push (vec_initial_def);
4445 /* Set phi nodes arguments. */
4446 FOR_EACH_VEC_ELT (reduction_phis, i, phi)
4448 tree vec_init_def, def;
4449 gimple_seq stmts;
4450 vec_init_def = force_gimple_operand (vec_initial_defs[i], &stmts,
4451 true, NULL_TREE);
4452 if (stmts)
4453 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
4455 def = vect_defs[i];
4456 for (j = 0; j < ncopies; j++)
4458 if (j != 0)
4460 phi = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi));
4461 if (nested_in_vect_loop)
4462 vec_init_def
4463 = vect_get_vec_def_for_stmt_copy (initial_def_dt,
4464 vec_init_def);
4467 /* Set the loop-entry arg of the reduction-phi. */
4469 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
4470 == INTEGER_INDUC_COND_REDUCTION)
4472 /* Initialise the reduction phi to zero. This prevents non-zero
4473 initial values from interfering with the reduction op. */
4474 gcc_assert (ncopies == 1);
4475 gcc_assert (i == 0);
4477 tree vec_init_def_type = TREE_TYPE (vec_init_def);
4478 tree zero_vec = build_zero_cst (vec_init_def_type);
4480 add_phi_arg (as_a <gphi *> (phi), zero_vec,
4481 loop_preheader_edge (loop), UNKNOWN_LOCATION);
4483 else
4484 add_phi_arg (as_a <gphi *> (phi), vec_init_def,
4485 loop_preheader_edge (loop), UNKNOWN_LOCATION);
4487 /* Set the loop-latch arg for the reduction-phi. */
4488 if (j > 0)
4489 def = vect_get_vec_def_for_stmt_copy (vect_unknown_def_type, def);
4491 add_phi_arg (as_a <gphi *> (phi), def, loop_latch_edge (loop),
4492 UNKNOWN_LOCATION);
4494 if (dump_enabled_p ())
4496 dump_printf_loc (MSG_NOTE, vect_location,
4497 "transform reduction: created def-use cycle: ");
4498 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
4499 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, SSA_NAME_DEF_STMT (def), 0);
4504 /* For cond reductions we want to create a new vector (INDEX_COND_EXPR)
4505 which is updated with the current index of the loop for every match of
4506 the original loop's cond_expr (VEC_STMT). This results in a vector
4507 containing the last time the condition passed for that vector lane.
4508 The first match will be a 1 to allow 0 to be used for non-matching
4509 indexes. If there are no matches at all then the vector will be all
4510 zeroes. */
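/* For illustration, take a condition reduction like

     for (i = 0; i < n; i++)
       if (a[i] < b[i])
         last = c[i];

   with four lanes per vector. If the condition holds at (1-based) scalar
   iterations 2, 3 and 7 only, the index vector is {0,2,3,0} after the
   first vector iteration and {0,2,7,0} after the second; the epilogue
   below then looks for the maximum entry (7) to find the last match. */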
4511 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION)
4513 tree indx_before_incr, indx_after_incr;
4514 int nunits_out = TYPE_VECTOR_SUBPARTS (vectype);
4515 int k;
4517 gimple *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
4518 gcc_assert (gimple_assign_rhs_code (vec_stmt) == VEC_COND_EXPR);
4520 int scalar_precision
4521 = GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (vectype)));
4522 tree cr_index_scalar_type = make_unsigned_type (scalar_precision);
4523 tree cr_index_vector_type = build_vector_type
4524 (cr_index_scalar_type, TYPE_VECTOR_SUBPARTS (vectype));
4526 /* First we create a simple vector induction variable which starts
4527 with the values {1,2,3,...} (SERIES_VECT) and increments by the
4528 vector size (STEP). */
4530 /* Create a {1,2,3,...} vector. */
4531 tree *vtemp = XALLOCAVEC (tree, nunits_out);
4532 for (k = 0; k < nunits_out; ++k)
4533 vtemp[k] = build_int_cst (cr_index_scalar_type, k + 1);
4534 tree series_vect = build_vector (cr_index_vector_type, vtemp);
4536 /* Create a vector of the step value. */
4537 tree step = build_int_cst (cr_index_scalar_type, nunits_out);
4538 tree vec_step = build_vector_from_val (cr_index_vector_type, step);
4540 /* Create an induction variable. */
4541 gimple_stmt_iterator incr_gsi;
4542 bool insert_after;
4543 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
4544 create_iv (series_vect, vec_step, NULL_TREE, loop, &incr_gsi,
4545 insert_after, &indx_before_incr, &indx_after_incr);
4547 /* Next create a new phi node vector (NEW_PHI_TREE) which starts
4548 filled with zeros (VEC_ZERO). */
4550 /* Create a vector of 0s. */
4551 tree zero = build_zero_cst (cr_index_scalar_type);
4552 tree vec_zero = build_vector_from_val (cr_index_vector_type, zero);
4554 /* Create a vector phi node. */
4555 tree new_phi_tree = make_ssa_name (cr_index_vector_type);
4556 new_phi = create_phi_node (new_phi_tree, loop->header);
4557 set_vinfo_for_stmt (new_phi,
4558 new_stmt_vec_info (new_phi, loop_vinfo));
4559 add_phi_arg (as_a <gphi *> (new_phi), vec_zero,
4560 loop_preheader_edge (loop), UNKNOWN_LOCATION);
4562 /* Now take the condition from the loop's original cond_expr
4563 (VEC_STMT) and produce a new cond_expr (INDEX_COND_EXPR) which for
4564 every match uses values from the induction variable
4565 (INDEX_BEFORE_INCR) and otherwise uses values from the phi node
4566 (NEW_PHI_TREE).
4567 Finally, we update the phi (NEW_PHI_TREE) to take the value of
4568 the new cond_expr (INDEX_COND_EXPR). */
4570 /* Duplicate the condition from vec_stmt. */
4571 tree ccompare = unshare_expr (gimple_assign_rhs1 (vec_stmt));
4573 /* Create a conditional where the condition is taken from vec_stmt
4574 (CCOMPARE), the 'then' value is the induction index (INDEX_BEFORE_INCR)
4575 and the 'else' value is the phi (NEW_PHI_TREE). */
4576 tree index_cond_expr = build3 (VEC_COND_EXPR, cr_index_vector_type,
4577 ccompare, indx_before_incr,
4578 new_phi_tree);
4579 induction_index = make_ssa_name (cr_index_vector_type);
4580 gimple *index_condition = gimple_build_assign (induction_index,
4581 index_cond_expr);
4582 gsi_insert_before (&incr_gsi, index_condition, GSI_SAME_STMT);
4583 stmt_vec_info index_vec_info = new_stmt_vec_info (index_condition,
4584 loop_vinfo);
4585 STMT_VINFO_VECTYPE (index_vec_info) = cr_index_vector_type;
4586 set_vinfo_for_stmt (index_condition, index_vec_info);
4588 /* Update the phi with the vec cond. */
4589 add_phi_arg (as_a <gphi *> (new_phi), induction_index,
4590 loop_latch_edge (loop), UNKNOWN_LOCATION);
4593 /* 2. Create epilog code.
4594 The reduction epilog code operates across the elements of the vector
4595 of partial results computed by the vectorized loop.
4596 The reduction epilog code consists of:
4598 step 1: compute the scalar result in a vector (v_out2)
4599 step 2: extract the scalar result (s_out3) from the vector (v_out2)
4600 step 3: adjust the scalar result (s_out3) if needed.
4602 Step 1 can be accomplished using one of the following three schemes:
4603 (scheme 1) using reduc_code, if available.
4604 (scheme 2) using whole-vector shifts, if available.
4605 (scheme 3) using a scalar loop. In this case steps 1+2 above are
4606 combined.
4608 The overall epilog code looks like this:
4610 s_out0 = phi <s_loop> # original EXIT_PHI
4611 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
4612 v_out2 = reduce <v_out1> # step 1
4613 s_out3 = extract_field <v_out2, 0> # step 2
4614 s_out4 = adjust_result <s_out3> # step 3
4616 (step 3 is optional, and steps 1 and 2 may be combined).
4617 Lastly, the uses of s_out0 are replaced by s_out4. */
4620 /* 2.1 Create new loop-exit-phis to preserve loop-closed form:
4621 v_out1 = phi <VECT_DEF>
4622 Store them in NEW_PHIS. */
4624 exit_bb = single_exit (loop)->dest;
4625 prev_phi_info = NULL;
4626 new_phis.create (vect_defs.length ());
4627 FOR_EACH_VEC_ELT (vect_defs, i, def)
4629 for (j = 0; j < ncopies; j++)
4631 tree new_def = copy_ssa_name (def);
4632 phi = create_phi_node (new_def, exit_bb);
4633 set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, loop_vinfo));
4634 if (j == 0)
4635 new_phis.quick_push (phi);
4636 else
4638 def = vect_get_vec_def_for_stmt_copy (dt, def);
4639 STMT_VINFO_RELATED_STMT (prev_phi_info) = phi;
4642 SET_PHI_ARG_DEF (phi, single_exit (loop)->dest_idx, def);
4643 prev_phi_info = vinfo_for_stmt (phi);
4647 /* The epilogue is created for the outer-loop, i.e., for the loop being
4648 vectorized. Create exit phis for the outer loop. */
4649 if (double_reduc)
4651 loop = outer_loop;
4652 exit_bb = single_exit (loop)->dest;
4653 inner_phis.create (vect_defs.length ());
4654 FOR_EACH_VEC_ELT (new_phis, i, phi)
4656 tree new_result = copy_ssa_name (PHI_RESULT (phi));
4657 gphi *outer_phi = create_phi_node (new_result, exit_bb);
4658 SET_PHI_ARG_DEF (outer_phi, single_exit (loop)->dest_idx,
4659 PHI_RESULT (phi));
4660 set_vinfo_for_stmt (outer_phi, new_stmt_vec_info (outer_phi,
4661 loop_vinfo));
4662 inner_phis.quick_push (phi);
4663 new_phis[i] = outer_phi;
4664 prev_phi_info = vinfo_for_stmt (outer_phi);
4665 while (STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi)))
4667 phi = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi));
4668 new_result = copy_ssa_name (PHI_RESULT (phi));
4669 outer_phi = create_phi_node (new_result, exit_bb);
4670 SET_PHI_ARG_DEF (outer_phi, single_exit (loop)->dest_idx,
4671 PHI_RESULT (phi));
4672 set_vinfo_for_stmt (outer_phi, new_stmt_vec_info (outer_phi,
4673 loop_vinfo));
4674 STMT_VINFO_RELATED_STMT (prev_phi_info) = outer_phi;
4675 prev_phi_info = vinfo_for_stmt (outer_phi);
4680 exit_gsi = gsi_after_labels (exit_bb);
4682 /* 2.2 Get the relevant tree-code to use in the epilog for schemes 2,3
4683 (i.e. when reduc_code is not available) and in the final adjustment
4684 code (if needed). Also get the original scalar reduction variable as
4685 defined in the loop. In case STMT is a "pattern-stmt" (i.e. - it
4686 represents a reduction pattern), the tree-code and scalar-def are
4687 taken from the original stmt that the pattern-stmt (STMT) replaces.
4688 Otherwise (it is a regular reduction) - the tree-code and scalar-def
4689 are taken from STMT. */
4691 orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
4692 if (!orig_stmt)
4694 /* Regular reduction */
4695 orig_stmt = stmt;
4697 else
4699 /* Reduction pattern */
4700 stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt);
4701 gcc_assert (STMT_VINFO_IN_PATTERN_P (stmt_vinfo));
4702 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo) == stmt);
4705 code = gimple_assign_rhs_code (orig_stmt);
4706 /* For MINUS_EXPR the initial vector is [init_val,0,...,0], therefore,
4707 partial results are added and not subtracted. */
4708 if (code == MINUS_EXPR)
4709 code = PLUS_EXPR;
4711 scalar_dest = gimple_assign_lhs (orig_stmt);
4712 scalar_type = TREE_TYPE (scalar_dest);
4713 scalar_results.create (group_size);
4714 new_scalar_dest = vect_create_destination_var (scalar_dest, NULL);
4715 bitsize = TYPE_SIZE (scalar_type);
4717 /* In case this is a reduction in an inner-loop while vectorizing an outer
4718 loop - we don't need to extract a single scalar result at the end of the
4719 inner-loop (unless it is double reduction, i.e., the use of reduction is
4720 outside the outer-loop). The final vector of partial results will be used
4721 in the vectorized outer-loop, or reduced to a scalar result at the end of
4722 the outer-loop. */
4723 if (nested_in_vect_loop && !double_reduc)
4724 goto vect_finalize_reduction;
4726 /* SLP reduction without reduction chain, e.g.,
4727 # a1 = phi <a2, a0>
4728 # b1 = phi <b2, b0>
4729 a2 = operation (a1)
4730 b2 = operation (b1) */
4731 slp_reduc = (slp_node && !GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)));
4733 /* In case of reduction chain, e.g.,
4734 # a1 = phi <a3, a0>
4735 a2 = operation (a1)
4736 a3 = operation (a2),
4738 we may end up with more than one vector result. Here we reduce them to
4739 one vector. */
4740 if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
4742 tree first_vect = PHI_RESULT (new_phis[0]);
4743 gassign *new_vec_stmt = NULL;
4744 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4745 for (k = 1; k < new_phis.length (); k++)
4747 gimple *next_phi = new_phis[k];
4748 tree second_vect = PHI_RESULT (next_phi);
4749 tree tem = make_ssa_name (vec_dest, new_vec_stmt);
4750 new_vec_stmt = gimple_build_assign (tem, code,
4751 first_vect, second_vect);
4752 gsi_insert_before (&exit_gsi, new_vec_stmt, GSI_SAME_STMT);
4753 first_vect = tem;
4756 new_phi_result = first_vect;
4757 if (new_vec_stmt)
4759 new_phis.truncate (0);
4760 new_phis.safe_push (new_vec_stmt);
4763 /* Likewise if we couldn't use a single def-use cycle. */
4764 else if (ncopies > 1)
4766 gcc_assert (new_phis.length () == 1);
4767 tree first_vect = PHI_RESULT (new_phis[0]);
4768 gassign *new_vec_stmt = NULL;
4769 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4770 gimple *next_phi = new_phis[0];
4771 for (int k = 1; k < ncopies; ++k)
4773 next_phi = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (next_phi));
4774 tree second_vect = PHI_RESULT (next_phi);
4775 tree tem = make_ssa_name (vec_dest, new_vec_stmt);
4776 new_vec_stmt = gimple_build_assign (tem, code,
4777 first_vect, second_vect);
4778 gsi_insert_before (&exit_gsi, new_vec_stmt, GSI_SAME_STMT);
4779 first_vect = tem;
4781 new_phi_result = first_vect;
4782 new_phis.truncate (0);
4783 new_phis.safe_push (new_vec_stmt);
4785 else
4786 new_phi_result = PHI_RESULT (new_phis[0]);
4788 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION
4789 && reduc_code != ERROR_MARK)
4791 /* For condition reductions, we have a vector (NEW_PHI_RESULT) containing
4792 various data values where the condition matched and another vector
4793 (INDUCTION_INDEX) containing all the indexes of those matches. We
4794 need to extract the last matching index (which will be the index with
4795 highest value) and use this to index into the data vector.
4796 For the case where there were no matches, the data vector will contain
4797 all default values and the index vector will be all zeros. */
4799 /* Get various versions of the type of the vector of indexes. */
4800 tree index_vec_type = TREE_TYPE (induction_index);
4801 gcc_checking_assert (TYPE_UNSIGNED (index_vec_type));
4802 tree index_scalar_type = TREE_TYPE (index_vec_type);
4803 tree index_vec_cmp_type = build_same_sized_truth_vector_type
4804 (index_vec_type);
4806 /* Get an unsigned integer version of the type of the data vector. */
4807 int scalar_precision = GET_MODE_PRECISION (TYPE_MODE (scalar_type));
4808 tree scalar_type_unsigned = make_unsigned_type (scalar_precision);
4809 tree vectype_unsigned = build_vector_type
4810 (scalar_type_unsigned, TYPE_VECTOR_SUBPARTS (vectype));
4812 /* First we need to create a vector (ZERO_VEC) of zeros and another
4813 vector (MAX_INDEX_VEC) filled with the last matching index, which we
4814 can create using a MAX reduction and then expanding.
4815 In the case where the loop never made any matches, the max index will
4816 be zero. */
4818 /* Vector of {0, 0, 0,...}. */
4819 tree zero_vec = make_ssa_name (vectype);
4820 tree zero_vec_rhs = build_zero_cst (vectype);
4821 gimple *zero_vec_stmt = gimple_build_assign (zero_vec, zero_vec_rhs);
4822 gsi_insert_before (&exit_gsi, zero_vec_stmt, GSI_SAME_STMT);
4824 /* Find maximum value from the vector of found indexes. */
4825 tree max_index = make_ssa_name (index_scalar_type);
4826 gimple *max_index_stmt = gimple_build_assign (max_index, REDUC_MAX_EXPR,
4827 induction_index);
4828 gsi_insert_before (&exit_gsi, max_index_stmt, GSI_SAME_STMT);
4830 /* Vector of {max_index, max_index, max_index,...}. */
4831 tree max_index_vec = make_ssa_name (index_vec_type);
4832 tree max_index_vec_rhs = build_vector_from_val (index_vec_type,
4833 max_index);
4834 gimple *max_index_vec_stmt = gimple_build_assign (max_index_vec,
4835 max_index_vec_rhs);
4836 gsi_insert_before (&exit_gsi, max_index_vec_stmt, GSI_SAME_STMT);
4838 /* Next we compare the new vector (MAX_INDEX_VEC) full of max indexes
4839 with the vector (INDUCTION_INDEX) of found indexes, choosing values
4840 from the data vector (NEW_PHI_RESULT) for matches, 0 (ZERO_VEC)
4841 otherwise. Only one value should match, resulting in a vector
4842 (VEC_COND) with one data value and the rest zeros.
4843 In the case where the loop never made any matches, every index will
4844 match, resulting in a vector with all data values (which will all be
4845 the default value). */
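/* Continuing the illustration: if INDUCTION_INDEX is {0,2,7,0}, the MAX
   reduction below yields 7, MAX_INDEX_VEC is {7,7,7,7}, the comparison
   matches only lane 2, and the VEC_COND keeps that lane's data value with
   zeros elsewhere; the final reduction then extracts that value. */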
4847 /* Compare the max index vector to the vector of found indexes to find
4848 the position of the max value. */
4849 tree vec_compare = make_ssa_name (index_vec_cmp_type);
4850 gimple *vec_compare_stmt = gimple_build_assign (vec_compare, EQ_EXPR,
4851 induction_index,
4852 max_index_vec);
4853 gsi_insert_before (&exit_gsi, vec_compare_stmt, GSI_SAME_STMT);
4855 /* Use the compare to choose either values from the data vector or
4856 zero. */
4857 tree vec_cond = make_ssa_name (vectype);
4858 gimple *vec_cond_stmt = gimple_build_assign (vec_cond, VEC_COND_EXPR,
4859 vec_compare, new_phi_result,
4860 zero_vec);
4861 gsi_insert_before (&exit_gsi, vec_cond_stmt, GSI_SAME_STMT);
4863 /* Finally we need to extract the data value from the vector (VEC_COND)
4864 into a scalar (MATCHED_DATA_REDUC). Logically we want to do an OR
4865 reduction, but because this doesn't exist, we can use a MAX reduction
4866 instead. The data value might be signed or a float so we need to cast
4867 it first.
4868 In the case where the loop never made any matches, the data values are
4869 all identical, and so will reduce down correctly. */
4871 /* Make the matched data values unsigned. */
4872 tree vec_cond_cast = make_ssa_name (vectype_unsigned);
4873 tree vec_cond_cast_rhs = build1 (VIEW_CONVERT_EXPR, vectype_unsigned,
4874 vec_cond);
4875 gimple *vec_cond_cast_stmt = gimple_build_assign (vec_cond_cast,
4876 VIEW_CONVERT_EXPR,
4877 vec_cond_cast_rhs);
4878 gsi_insert_before (&exit_gsi, vec_cond_cast_stmt, GSI_SAME_STMT);
4880 /* Reduce down to a scalar value. */
4881 tree data_reduc = make_ssa_name (scalar_type_unsigned);
4882 optab ot = optab_for_tree_code (REDUC_MAX_EXPR, vectype_unsigned,
4883 optab_default);
4884 gcc_assert (optab_handler (ot, TYPE_MODE (vectype_unsigned))
4885 != CODE_FOR_nothing);
4886 gimple *data_reduc_stmt = gimple_build_assign (data_reduc,
4887 REDUC_MAX_EXPR,
4888 vec_cond_cast);
4889 gsi_insert_before (&exit_gsi, data_reduc_stmt, GSI_SAME_STMT);
4891 /* Convert the reduced value back to the result type and set as the
4892 result. */
4893 gimple_seq stmts = NULL;
4894 new_temp = gimple_build (&stmts, VIEW_CONVERT_EXPR, scalar_type,
4895 data_reduc);
4896 gsi_insert_seq_before (&exit_gsi, stmts, GSI_SAME_STMT);
4897 scalar_results.safe_push (new_temp);
4899 else if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION
4900 && reduc_code == ERROR_MARK)
4902 /* Condition reduction without a supported REDUC_MAX_EXPR. Generate:
4903 idx_val = induction_index[0];
4904 val = data_reduc[0];
4905 for (i = 1; i < nelts; ++i)
4906 if (induction_index[i] > idx_val)
4907 val = data_reduc[i], idx_val = induction_index[i];
4908 return val; */
4911 tree data_eltype = TREE_TYPE (TREE_TYPE (new_phi_result));
4912 tree idx_eltype = TREE_TYPE (TREE_TYPE (induction_index));
4913 unsigned HOST_WIDE_INT el_size = tree_to_uhwi (TYPE_SIZE (idx_eltype));
4914 unsigned HOST_WIDE_INT v_size
4915 = el_size * TYPE_VECTOR_SUBPARTS (TREE_TYPE (induction_index));
4916 tree idx_val = NULL_TREE, val = NULL_TREE;
4917 for (unsigned HOST_WIDE_INT off = 0; off < v_size; off += el_size)
4919 tree old_idx_val = idx_val;
4920 tree old_val = val;
4921 idx_val = make_ssa_name (idx_eltype);
4922 epilog_stmt = gimple_build_assign (idx_val, BIT_FIELD_REF,
4923 build3 (BIT_FIELD_REF, idx_eltype,
4924 induction_index,
4925 bitsize_int (el_size),
4926 bitsize_int (off)));
4927 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4928 val = make_ssa_name (data_eltype);
4929 epilog_stmt = gimple_build_assign (val, BIT_FIELD_REF,
4930 build3 (BIT_FIELD_REF,
4931 data_eltype,
4932 new_phi_result,
4933 bitsize_int (el_size),
4934 bitsize_int (off)));
4935 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4936 if (off != 0)
4938 tree new_idx_val = idx_val;
4939 tree new_val = val;
4940 if (off != v_size - el_size)
4942 new_idx_val = make_ssa_name (idx_eltype);
4943 epilog_stmt = gimple_build_assign (new_idx_val,
4944 MAX_EXPR, idx_val,
4945 old_idx_val);
4946 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4948 new_val = make_ssa_name (data_eltype);
4949 epilog_stmt = gimple_build_assign (new_val,
4950 COND_EXPR,
4951 build2 (GT_EXPR,
4952 boolean_type_node,
4953 idx_val,
4954 old_idx_val),
4955 val, old_val);
4956 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4957 idx_val = new_idx_val;
4958 val = new_val;
4961 /* Convert the reduced value back to the result type and set as the
4962 result. */
4963 gimple_seq stmts = NULL;
4964 val = gimple_convert (&stmts, scalar_type, val);
4965 gsi_insert_seq_before (&exit_gsi, stmts, GSI_SAME_STMT);
4966 scalar_results.safe_push (val);
4969 /* 2.3 Create the reduction code, using one of the three schemes described
4970 above. In SLP we simply need to extract all the elements from the
4971 vector (without reducing them), so we use the scalar-extraction scheme. */
4972 else if (reduc_code != ERROR_MARK && !slp_reduc)
4974 tree tmp;
4975 tree vec_elem_type;
4977 /* Case 1: Create:
4978 v_out2 = reduc_expr <v_out1> */
4980 if (dump_enabled_p ())
4981 dump_printf_loc (MSG_NOTE, vect_location,
4982 "Reduce using direct vector reduction.\n");
4984 vec_elem_type = TREE_TYPE (TREE_TYPE (new_phi_result));
4985 if (!useless_type_conversion_p (scalar_type, vec_elem_type))
4987 tree tmp_dest =
4988 vect_create_destination_var (scalar_dest, vec_elem_type);
4989 tmp = build1 (reduc_code, vec_elem_type, new_phi_result);
4990 epilog_stmt = gimple_build_assign (tmp_dest, tmp);
4991 new_temp = make_ssa_name (tmp_dest, epilog_stmt);
4992 gimple_assign_set_lhs (epilog_stmt, new_temp);
4993 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4995 tmp = build1 (NOP_EXPR, scalar_type, new_temp);
4997 else
4998 tmp = build1 (reduc_code, scalar_type, new_phi_result);
5000 epilog_stmt = gimple_build_assign (new_scalar_dest, tmp);
5001 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
5002 gimple_assign_set_lhs (epilog_stmt, new_temp);
5003 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5005 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
5006 == INTEGER_INDUC_COND_REDUCTION)
5008 /* Earlier we set the initial value to be zero. Check the result
5009 and if it is zero then replace with the original initial
5010 value. */
5011 tree zero = build_zero_cst (scalar_type);
5012 tree zcompare = build2 (EQ_EXPR, boolean_type_node, new_temp, zero);
5014 tmp = make_ssa_name (new_scalar_dest);
5015 epilog_stmt = gimple_build_assign (tmp, COND_EXPR, zcompare,
5016 initial_def, new_temp);
5017 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5018 new_temp = tmp;
5021 scalar_results.safe_push (new_temp);
5023 else
5025 bool reduce_with_shift = have_whole_vector_shift (mode);
5026 int element_bitsize = tree_to_uhwi (bitsize);
5027 int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype));
5028 tree vec_temp;
5030 /* COND reductions all do the final reduction with MAX_EXPR. */
5031 if (code == COND_EXPR)
5032 code = MAX_EXPR;
5034 /* Regardless of whether we have a whole vector shift, if we're
5035 emulating the operation via tree-vect-generic, we don't want
5036 to use it. Only the first round of the reduction is likely
5037 to still be profitable via emulation. */
5038 /* ??? It might be better to emit a reduction tree code here, so that
5039 tree-vect-generic can expand the first round via bit tricks. */
5040 if (!VECTOR_MODE_P (mode))
5041 reduce_with_shift = false;
5042 else
5044 optab optab = optab_for_tree_code (code, vectype, optab_default);
5045 if (optab_handler (optab, mode) == CODE_FOR_nothing)
5046 reduce_with_shift = false;
5049 if (reduce_with_shift && !slp_reduc)
5051 int nelements = vec_size_in_bits / element_bitsize;
5052 unsigned char *sel = XALLOCAVEC (unsigned char, nelements);
5054 int elt_offset;
5056 tree zero_vec = build_zero_cst (vectype);
5057 /* Case 2: Create:
5058 for (offset = nelements/2; offset >= 1; offset/=2)
5060 Create: va' = vec_shift <va, offset>
5061 Create: va = vop <va, va'>
5062 } */
5064 tree rhs;
5066 if (dump_enabled_p ())
5067 dump_printf_loc (MSG_NOTE, vect_location,
5068 "Reduce using vector shifts\n");
5070 vec_dest = vect_create_destination_var (scalar_dest, vectype);
5071 new_temp = new_phi_result;
5072 for (elt_offset = nelements / 2;
5073 elt_offset >= 1;
5074 elt_offset /= 2)
5076 calc_vec_perm_mask_for_shift (mode, elt_offset, sel);
5077 tree mask = vect_gen_perm_mask_any (vectype, sel);
5078 epilog_stmt = gimple_build_assign (vec_dest, VEC_PERM_EXPR,
5079 new_temp, zero_vec, mask);
5080 new_name = make_ssa_name (vec_dest, epilog_stmt);
5081 gimple_assign_set_lhs (epilog_stmt, new_name);
5082 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5084 epilog_stmt = gimple_build_assign (vec_dest, code, new_name,
5085 new_temp);
5086 new_temp = make_ssa_name (vec_dest, epilog_stmt);
5087 gimple_assign_set_lhs (epilog_stmt, new_temp);
5088 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5091 /* 2.4 Extract the final scalar result. Create:
5092 s_out3 = extract_field <v_out2, bitpos> */
5094 if (dump_enabled_p ())
5095 dump_printf_loc (MSG_NOTE, vect_location,
5096 "extract scalar result\n");
5098 rhs = build3 (BIT_FIELD_REF, scalar_type, new_temp,
5099 bitsize, bitsize_zero_node);
5100 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
5101 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
5102 gimple_assign_set_lhs (epilog_stmt, new_temp);
5103 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5104 scalar_results.safe_push (new_temp);
5106 else
5108 /* Case 3: Create:
5109 s = extract_field <v_out2, 0>
5110 for (offset = element_size;
5111 offset < vector_size;
5112 offset += element_size)
5114 Create: s' = extract_field <v_out2, offset>
5115 Create: s = op <s, s'> // For non SLP cases
5116 } */
5118 if (dump_enabled_p ())
5119 dump_printf_loc (MSG_NOTE, vect_location,
5120 "Reduce using scalar code.\n");
5122 vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype));
5123 FOR_EACH_VEC_ELT (new_phis, i, new_phi)
5125 int bit_offset;
5126 if (gimple_code (new_phi) == GIMPLE_PHI)
5127 vec_temp = PHI_RESULT (new_phi);
5128 else
5129 vec_temp = gimple_assign_lhs (new_phi);
5130 tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp, bitsize,
5131 bitsize_zero_node);
5132 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
5133 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
5134 gimple_assign_set_lhs (epilog_stmt, new_temp);
5135 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5137 /* In SLP we don't need to apply the reduction operation, so we just
5138 collect s' values in SCALAR_RESULTS. */
5139 if (slp_reduc)
5140 scalar_results.safe_push (new_temp);
5142 for (bit_offset = element_bitsize;
5143 bit_offset < vec_size_in_bits;
5144 bit_offset += element_bitsize)
5146 tree bitpos = bitsize_int (bit_offset);
5147 tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp,
5148 bitsize, bitpos);
5150 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
5151 new_name = make_ssa_name (new_scalar_dest, epilog_stmt);
5152 gimple_assign_set_lhs (epilog_stmt, new_name);
5153 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5155 if (slp_reduc)
5157 /* In SLP we don't need to apply the reduction operation, so
5158 we just collect s' values in SCALAR_RESULTS. */
5159 new_temp = new_name;
5160 scalar_results.safe_push (new_name);
5162 else
5164 epilog_stmt = gimple_build_assign (new_scalar_dest, code,
5165 new_name, new_temp);
5166 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
5167 gimple_assign_set_lhs (epilog_stmt, new_temp);
5168 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5173 /* The only case where we need to reduce scalar results in SLP is
5174 unrolling. If the size of SCALAR_RESULTS is greater than
5175 GROUP_SIZE, we reduce them by combining elements modulo
5176 GROUP_SIZE. */
5177 if (slp_reduc)
5179 tree res, first_res, new_res;
5180 gimple *new_stmt;
5182 /* Reduce multiple scalar results in case of SLP unrolling. */
5183 for (j = group_size; scalar_results.iterate (j, &res);
5184 j++)
5186 first_res = scalar_results[j % group_size];
5187 new_stmt = gimple_build_assign (new_scalar_dest, code,
5188 first_res, res);
5189 new_res = make_ssa_name (new_scalar_dest, new_stmt);
5190 gimple_assign_set_lhs (new_stmt, new_res);
5191 gsi_insert_before (&exit_gsi, new_stmt, GSI_SAME_STMT);
5192 scalar_results[j % group_size] = new_res;
5195 else
5196 /* Not SLP - we have one scalar to keep in SCALAR_RESULTS. */
5197 scalar_results.safe_push (new_temp);
5200 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
5201 == INTEGER_INDUC_COND_REDUCTION)
5203 /* Earlier we set the initial value to be zero. Check the result
5204 and if it is zero then replace with the original initial
5205 value. */
5206 tree zero = build_zero_cst (scalar_type);
5207 tree zcompare = build2 (EQ_EXPR, boolean_type_node, new_temp, zero);
5209 tree tmp = make_ssa_name (new_scalar_dest);
5210 epilog_stmt = gimple_build_assign (tmp, COND_EXPR, zcompare,
5211 initial_def, new_temp);
5212 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5213 scalar_results[0] = tmp;
5217 vect_finalize_reduction:
5219 if (double_reduc)
5220 loop = loop->inner;
5222 /* 2.5 Adjust the final result by the initial value of the reduction
5223 variable. (When such adjustment is not needed, then
5224 'adjustment_def' is zero). For example, if code is PLUS we create:
5225 new_temp = loop_exit_def + adjustment_def */
5227 if (adjustment_def)
5229 gcc_assert (!slp_reduc);
5230 if (nested_in_vect_loop)
5232 new_phi = new_phis[0];
5233 gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) == VECTOR_TYPE);
5234 expr = build2 (code, vectype, PHI_RESULT (new_phi), adjustment_def);
5235 new_dest = vect_create_destination_var (scalar_dest, vectype);
5237 else
5239 new_temp = scalar_results[0];
5240 gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) != VECTOR_TYPE);
5241 expr = build2 (code, scalar_type, new_temp, adjustment_def);
5242 new_dest = vect_create_destination_var (scalar_dest, scalar_type);
5245 epilog_stmt = gimple_build_assign (new_dest, expr);
5246 new_temp = make_ssa_name (new_dest, epilog_stmt);
5247 gimple_assign_set_lhs (epilog_stmt, new_temp);
5248 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5249 if (nested_in_vect_loop)
5251 set_vinfo_for_stmt (epilog_stmt,
5252 new_stmt_vec_info (epilog_stmt, loop_vinfo));
5253 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (epilog_stmt)) =
5254 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (new_phi));
5256 if (!double_reduc)
5257 scalar_results.quick_push (new_temp);
5258 else
5259 scalar_results[0] = new_temp;
5261 else
5262 scalar_results[0] = new_temp;
5264 new_phis[0] = epilog_stmt;
5267 /* 2.6 Handle the loop-exit phis. Replace the uses of scalar loop-exit
5268 phis with new adjusted scalar results, i.e., replace use <s_out0>
5269 with use <s_out4>.
5271 Transform:
5272 loop_exit:
5273 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
5274 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
5275 v_out2 = reduce <v_out1>
5276 s_out3 = extract_field <v_out2, 0>
5277 s_out4 = adjust_result <s_out3>
5278 use <s_out0>
5279 use <s_out0>
5281 into:
5283 loop_exit:
5284 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
5285 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
5286 v_out2 = reduce <v_out1>
5287 s_out3 = extract_field <v_out2, 0>
5288 s_out4 = adjust_result <s_out3>
5289 use <s_out4>
5290 use <s_out4> */
5293 /* In an SLP reduction chain we reduce vector results into one vector if
5294 necessary, hence we set GROUP_SIZE to 1 here. SCALAR_DEST is the LHS of
5295 the last stmt in the reduction chain, since we are looking for the loop
5296 exit phi node. */
5297 if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
5299 gimple *dest_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[group_size - 1];
5300 /* Handle reduction patterns. */
5301 if (STMT_VINFO_RELATED_STMT (vinfo_for_stmt (dest_stmt)))
5302 dest_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (dest_stmt));
5304 scalar_dest = gimple_assign_lhs (dest_stmt);
5305 group_size = 1;
5308 /* In SLP we may have several statements in NEW_PHIS and REDUCTION_PHIS (in
5309 case GROUP_SIZE is greater than the vectorization factor). Therefore, we
5310 need to match SCALAR_RESULTS with corresponding statements. The first
5311 (GROUP_SIZE / number of new vector stmts) scalar results correspond to
5312 the first vector stmt, etc.
5313 (RATIO is equal to (GROUP_SIZE / number of new vector stmts)). */
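/* E.g. with GROUP_SIZE == 4 and two new vector stmts, RATIO is 2: scalar
   results 0 and 1 are matched with the first vector stmt, and results 2
   and 3 with the second. */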
5314 if (group_size > new_phis.length ())
5316 ratio = group_size / new_phis.length ();
5317 gcc_assert (!(group_size % new_phis.length ()));
5319 else
5320 ratio = 1;
5322 for (k = 0; k < group_size; k++)
5324 if (k % ratio == 0)
5326 epilog_stmt = new_phis[k / ratio];
5327 reduction_phi = reduction_phis[k / ratio];
5328 if (double_reduc)
5329 inner_phi = inner_phis[k / ratio];
5332 if (slp_reduc)
5334 gimple *current_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[k];
5336 orig_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (current_stmt));
5337 /* SLP statements can't participate in patterns. */
5338 gcc_assert (!orig_stmt);
5339 scalar_dest = gimple_assign_lhs (current_stmt);
5342 phis.create (3);
5343 /* Find the loop-closed-use at the loop exit of the original scalar
5344 result. (The reduction result is expected to have two immediate uses -
5345 one at the latch block, and one at the loop exit). */
5346 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
5347 if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p)))
5348 && !is_gimple_debug (USE_STMT (use_p)))
5349 phis.safe_push (USE_STMT (use_p));
5351 /* While we expect to have found an exit_phi because of loop-closed-ssa
5352 form, we can end up without one if the scalar cycle is dead. */
5354 FOR_EACH_VEC_ELT (phis, i, exit_phi)
5356 if (outer_loop)
5358 stmt_vec_info exit_phi_vinfo = vinfo_for_stmt (exit_phi);
5359 gphi *vect_phi;
5361 /* FORNOW. Currently not supporting the case that an inner-loop
5362 reduction is not used in the outer-loop (but only outside the
5363 outer-loop), unless it is a double reduction. */
5364 gcc_assert ((STMT_VINFO_RELEVANT_P (exit_phi_vinfo)
5365 && !STMT_VINFO_LIVE_P (exit_phi_vinfo))
5366 || double_reduc);
5368 if (double_reduc)
5369 STMT_VINFO_VEC_STMT (exit_phi_vinfo) = inner_phi;
5370 else
5371 STMT_VINFO_VEC_STMT (exit_phi_vinfo) = epilog_stmt;
5372 if (!double_reduc
5373 || STMT_VINFO_DEF_TYPE (exit_phi_vinfo)
5374 != vect_double_reduction_def)
5375 continue;
5377 /* Handle double reduction:
5379 stmt1: s1 = phi <s0, s2> - double reduction phi (outer loop)
5380 stmt2: s3 = phi <s1, s4> - (regular) reduc phi (inner loop)
5381 stmt3: s4 = use (s3) - (regular) reduc stmt (inner loop)
5382 stmt4: s2 = phi <s4> - double reduction stmt (outer loop)
5384 At that point the regular reduction (stmt2 and stmt3) is
5385 already vectorized, as well as the exit phi node, stmt4.
5386 Here we vectorize the phi node of double reduction, stmt1, and
5387 update all relevant statements. */
5389 /* Go through all the uses of s2 to find double reduction phi
5390 node, i.e., stmt1 above. */
5391 orig_name = PHI_RESULT (exit_phi);
5392 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
5394 stmt_vec_info use_stmt_vinfo;
5395 stmt_vec_info new_phi_vinfo;
5396 tree vect_phi_init, preheader_arg, vect_phi_res, init_def;
5397 basic_block bb = gimple_bb (use_stmt);
5398 gimple *use;
5400 /* Check that USE_STMT is really a double reduction phi
5401 node. */
5402 if (gimple_code (use_stmt) != GIMPLE_PHI
5403 || gimple_phi_num_args (use_stmt) != 2
5404 || bb->loop_father != outer_loop)
5405 continue;
5406 use_stmt_vinfo = vinfo_for_stmt (use_stmt);
5407 if (!use_stmt_vinfo
5408 || STMT_VINFO_DEF_TYPE (use_stmt_vinfo)
5409 != vect_double_reduction_def)
5410 continue;
5412 /* Create vector phi node for double reduction:
5413 vs1 = phi <vs0, vs2>
5414 vs1 was created previously in this function by a call to
5415 vect_get_vec_def_for_operand and is stored in
5416 vec_initial_def;
5417 vs2 is defined by INNER_PHI, the vectorized EXIT_PHI;
5418 vs0 is created here. */
5420 /* Create vector phi node. */
5421 vect_phi = create_phi_node (vec_initial_def, bb);
5422 new_phi_vinfo = new_stmt_vec_info (vect_phi,
5423 loop_vec_info_for_loop (outer_loop));
5424 set_vinfo_for_stmt (vect_phi, new_phi_vinfo);
5426 /* Create vs0 - initial def of the double reduction phi. */
5427 preheader_arg = PHI_ARG_DEF_FROM_EDGE (use_stmt,
5428 loop_preheader_edge (outer_loop));
5429 init_def = get_initial_def_for_reduction (stmt,
5430 preheader_arg, NULL);
5431 vect_phi_init = vect_init_vector (use_stmt, init_def,
5432 vectype, NULL);
5434 /* Update phi node arguments with vs0 and vs2. */
5435 add_phi_arg (vect_phi, vect_phi_init,
5436 loop_preheader_edge (outer_loop),
5437 UNKNOWN_LOCATION);
5438 add_phi_arg (vect_phi, PHI_RESULT (inner_phi),
5439 loop_latch_edge (outer_loop), UNKNOWN_LOCATION);
5440 if (dump_enabled_p ())
5442 dump_printf_loc (MSG_NOTE, vect_location,
5443 "created double reduction phi node: ");
5444 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vect_phi, 0);
5447 vect_phi_res = PHI_RESULT (vect_phi);
5449 /* Replace the use, i.e., set the correct vs1 in the regular
5450 reduction phi node. FORNOW, NCOPIES is always 1, so the
5451 loop is redundant. */
5452 use = reduction_phi;
5453 for (j = 0; j < ncopies; j++)
5455 edge pr_edge = loop_preheader_edge (loop);
5456 SET_PHI_ARG_DEF (use, pr_edge->dest_idx, vect_phi_res);
5457 use = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (use));
5463 phis.release ();
5464 if (nested_in_vect_loop)
5466 if (double_reduc)
5467 loop = outer_loop;
5468 else
5469 continue;
5472 phis.create (3);
5473 /* Find the loop-closed-use at the loop exit of the original scalar
5474 result. (The reduction result is expected to have two immediate uses,
5475 one at the latch block, and one at the loop exit). For double
5476 reductions we are looking for exit phis of the outer loop. */
5477 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
5479 if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p))))
5481 if (!is_gimple_debug (USE_STMT (use_p)))
5482 phis.safe_push (USE_STMT (use_p));
5484 else
5486 if (double_reduc && gimple_code (USE_STMT (use_p)) == GIMPLE_PHI)
5488 tree phi_res = PHI_RESULT (USE_STMT (use_p));
5490 FOR_EACH_IMM_USE_FAST (phi_use_p, phi_imm_iter, phi_res)
5492 if (!flow_bb_inside_loop_p (loop,
5493 gimple_bb (USE_STMT (phi_use_p)))
5494 && !is_gimple_debug (USE_STMT (phi_use_p)))
5495 phis.safe_push (USE_STMT (phi_use_p));
5501 FOR_EACH_VEC_ELT (phis, i, exit_phi)
5503 /* Replace the uses: */
5504 orig_name = PHI_RESULT (exit_phi);
5505 scalar_result = scalar_results[k];
5506 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
5507 FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
5508 SET_USE (use_p, scalar_result);
5511 phis.release ();
5516 /* Function is_nonwrapping_integer_induction.
5518 Check if STMT (which is part of loop LOOP) describes an induction
5519 that both increments and does not overflow. */
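/* For example (illustrative numbers): an unsigned char induction with
   base 0 and step 2 in a loop bounded by 200 iterations can reach
   0 + 2 * 200 == 400, which needs 9 bits and thus wraps the 8-bit
   type, so the check below fails.  For types whose overflow is
   undefined the check trivially succeeds.  */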
5521 static bool
5522 is_nonwrapping_integer_induction (gimple *stmt, struct loop *loop)
5524 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
5525 tree base = STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo);
5526 tree step = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo);
5527 tree lhs_type = TREE_TYPE (gimple_phi_result (stmt));
5528 widest_int ni, max_loop_value, lhs_max;
5529 bool overflow = false;
5531 /* Make sure the loop is integer based. */
5532 if (TREE_CODE (base) != INTEGER_CST
5533 || TREE_CODE (step) != INTEGER_CST)
5534 return false;
5536 /* Check that the induction increments. */
5537 if (tree_int_cst_sgn (step) == -1)
5538 return false;
5540 /* Check that the max size of the loop will not wrap. */
5542 if (TYPE_OVERFLOW_UNDEFINED (lhs_type))
5543 return true;
5545 if (! max_stmt_executions (loop, &ni))
5546 return false;
5548 max_loop_value = wi::mul (wi::to_widest (step), ni, TYPE_SIGN (lhs_type),
5549 &overflow);
5550 if (overflow)
5551 return false;
5553 max_loop_value = wi::add (wi::to_widest (base), max_loop_value,
5554 TYPE_SIGN (lhs_type), &overflow);
5555 if (overflow)
5556 return false;
5558 return (wi::min_precision (max_loop_value, TYPE_SIGN (lhs_type))
5559 <= TYPE_PRECISION (lhs_type));
5562 /* Function vectorizable_reduction.
5564 Check if STMT performs a reduction operation that can be vectorized.
5565 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
5566 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
5567 Return FALSE if not a vectorizable STMT, TRUE otherwise.
5569 This function also handles reduction idioms (patterns) that have been
5570 recognized in advance during vect_pattern_recog. In this case, STMT may be
5571 of this form:
5572 X = pattern_expr (arg0, arg1, ..., X)
5573 and its STMT_VINFO_RELATED_STMT points to the last stmt in the original
5574 sequence that had been detected and replaced by the pattern-stmt (STMT).
5576 This function also handles reduction of condition expressions, for example:
5577 for (int i = 0; i < N; i++)
5578 if (a[i] < value)
5579 last = a[i];
5580 This is handled by vectorizing the loop and creating an additional vector
5581 containing the loop indexes for which "a[i] < value" was true. In the
5582 function epilogue this is reduced to a single max value and then used to
5583 index into the vector of results.
5585 In some cases of reduction patterns, the type of the reduction variable X is
5586 different than the type of the other arguments of STMT.
5587 In such cases, the vectype that is used when transforming STMT into a vector
5588 stmt is different than the vectype that is used to determine the
5589 vectorization factor, because it consists of a different number of elements
5590 than the actual number of elements that are being operated upon in parallel.
5592 For example, consider an accumulation of shorts into an int accumulator.
5593 On some targets it's possible to vectorize this pattern operating on 8
5594 shorts at a time (hence, the vectype for purposes of determining the
5595 vectorization factor should be V8HI); on the other hand, the vectype that
5596 is used to create the vector form is actually V4SI (the type of the result).
5598 Upon entry to this function, STMT_VINFO_VECTYPE records the vectype that
5599 indicates the actual level of parallelism (V8HI in the example), so
5600 that the right vectorization factor is derived. This vectype
5601 corresponds to the type of arguments to the reduction stmt, and should *NOT*
5602 be used to create the vectorized stmt. The right vectype for the vectorized
5603 stmt is obtained from the type of the result X:
5604 get_vectype_for_scalar_type (TREE_TYPE (X))
5606 This means that, contrary to "regular" reductions (or "regular" stmts in
5607 general), the following equation:
5608 STMT_VINFO_VECTYPE == get_vectype_for_scalar_type (TREE_TYPE (X))
5609 does *NOT* necessarily hold for reduction patterns. */
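/* As an illustration (hypothetical source, not taken from this file), the
   short-into-int accumulation mentioned above could come from:

     short in[N]; int sum = 0;
     for (i = 0; i < N; i++)
       sum += in[i];

   Here STMT_VINFO_VECTYPE is V8HI (eight shorts per vector iteration,
   which fixes the vectorization factor), while the vectorized widen-sum
   stmt itself is created with the V4SI type obtained from
   get_vectype_for_scalar_type (TREE_TYPE (X)).  */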
5611 bool
5612 vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
5613 gimple **vec_stmt, slp_tree slp_node,
5614 slp_instance slp_node_instance)
5616 tree vec_dest;
5617 tree scalar_dest;
5618 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5619 tree vectype_out = STMT_VINFO_VECTYPE (stmt_info);
5620 tree vectype_in = NULL_TREE;
5621 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5622 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
5623 enum tree_code code, orig_code, epilog_reduc_code;
5624 machine_mode vec_mode;
5625 int op_type;
5626 optab optab, reduc_optab;
5627 tree new_temp = NULL_TREE;
5628 gimple *def_stmt;
5629 enum vect_def_type dt, cond_reduc_dt = vect_unknown_def_type;
5630 tree scalar_type;
5631 bool is_simple_use;
5632 gimple *orig_stmt;
5633 stmt_vec_info orig_stmt_info = NULL;
5634 int i;
5635 int ncopies;
5636 int epilog_copies;
5637 stmt_vec_info prev_stmt_info, prev_phi_info;
5638 bool single_defuse_cycle = false;
5639 gimple *new_stmt = NULL;
5640 int j;
5641 tree ops[3];
5642 enum vect_def_type dts[3];
5643 bool nested_cycle = false, found_nested_cycle_def = false;
5644 bool double_reduc = false;
5645 basic_block def_bb;
5646 struct loop * def_stmt_loop, *outer_loop = NULL;
5647 tree def_arg;
5648 gimple *def_arg_stmt;
5649 auto_vec<tree> vec_oprnds0;
5650 auto_vec<tree> vec_oprnds1;
5651 auto_vec<tree> vec_oprnds2;
5652 auto_vec<tree> vect_defs;
5653 auto_vec<gimple *> phis;
5654 int vec_num;
5655 tree def0, tem;
5656 bool first_p = true;
5657 tree cr_index_scalar_type = NULL_TREE, cr_index_vector_type = NULL_TREE;
5658 tree cond_reduc_val = NULL_TREE;
5660 /* Make sure it was already recognized as a reduction computation. */
5661 if (STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmt)) != vect_reduction_def
5662 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmt)) != vect_nested_cycle)
5663 return false;
5665 if (nested_in_vect_loop_p (loop, stmt))
5667 outer_loop = loop;
5668 loop = loop->inner;
5669 nested_cycle = true;
5672 /* In case of reduction chain we switch to the first stmt in the chain, but
5673 we don't update STMT_INFO, since only the last stmt is marked as reduction
5674 and has reduction properties. */
5675 if (GROUP_FIRST_ELEMENT (stmt_info)
5676 && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
5678 stmt = GROUP_FIRST_ELEMENT (stmt_info);
5679 first_p = false;
5682 if (gimple_code (stmt) == GIMPLE_PHI)
5684 /* Analysis is fully done on the reduction stmt invocation. */
5685 if (! vec_stmt)
5687 if (slp_node)
5688 slp_node_instance->reduc_phis = slp_node;
5690 STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
5691 return true;
5694 gimple *reduc_stmt = STMT_VINFO_REDUC_DEF (stmt_info);
5695 if (STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (reduc_stmt)))
5696 reduc_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (reduc_stmt));
5698 gcc_assert (is_gimple_assign (reduc_stmt));
5699 for (unsigned k = 1; k < gimple_num_ops (reduc_stmt); ++k)
5701 tree op = gimple_op (reduc_stmt, k);
5702 if (op == gimple_phi_result (stmt))
5703 continue;
5704 if (k == 1
5705 && gimple_assign_rhs_code (reduc_stmt) == COND_EXPR)
5706 continue;
5707 tem = get_vectype_for_scalar_type (TREE_TYPE (op));
5708 if (! vectype_in
5709 || TYPE_VECTOR_SUBPARTS (tem) < TYPE_VECTOR_SUBPARTS (vectype_in))
5710 vectype_in = tem;
5711 break;
5713 gcc_assert (vectype_in);
5715 if (slp_node)
5716 ncopies = 1;
5717 else
5718 ncopies = (LOOP_VINFO_VECT_FACTOR (loop_vinfo)
5719 / TYPE_VECTOR_SUBPARTS (vectype_in));
5721 use_operand_p use_p;
5722 gimple *use_stmt;
5723 if (ncopies > 1
5724 && (STMT_VINFO_RELEVANT (vinfo_for_stmt (reduc_stmt))
5725 <= vect_used_only_live)
5726 && single_imm_use (gimple_phi_result (stmt), &use_p, &use_stmt)
5727 && (use_stmt == reduc_stmt
5728 || (STMT_VINFO_RELATED_STMT (vinfo_for_stmt (use_stmt))
5729 == reduc_stmt)))
5730 single_defuse_cycle = true;
5732 /* Create the destination vector */
5733 scalar_dest = gimple_assign_lhs (reduc_stmt);
5734 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
5736 if (slp_node)
5737 /* The size vect_schedule_slp_instance computes is off for us. */
5738 vec_num = ((LOOP_VINFO_VECT_FACTOR (loop_vinfo)
5739 * SLP_TREE_SCALAR_STMTS (slp_node).length ())
5740 / TYPE_VECTOR_SUBPARTS (vectype_in));
5741 else
5742 vec_num = 1;
5744 /* Generate the reduction PHIs upfront. */
5745 prev_phi_info = NULL;
5746 for (j = 0; j < ncopies; j++)
5748 if (j == 0 || !single_defuse_cycle)
5750 for (i = 0; i < vec_num; i++)
5752 /* Create the reduction-phi that defines the reduction
5753 operand. */
5754 gimple *new_phi = create_phi_node (vec_dest, loop->header);
5755 set_vinfo_for_stmt (new_phi,
5756 new_stmt_vec_info (new_phi, loop_vinfo));
5758 if (slp_node)
5759 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_phi);
5760 else
5762 if (j == 0)
5763 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_phi;
5764 else
5765 STMT_VINFO_RELATED_STMT (prev_phi_info) = new_phi;
5766 prev_phi_info = vinfo_for_stmt (new_phi);
5772 return true;
5775 /* 1. Is vectorizable reduction? */
5776 /* Not supportable if the reduction variable is used in the loop, unless
5777 it's a reduction chain. */
5778 if (STMT_VINFO_RELEVANT (stmt_info) > vect_used_in_outer
5779 && !GROUP_FIRST_ELEMENT (stmt_info))
5780 return false;
5782 /* Reductions that are not used even in an enclosing outer-loop
5783 are expected to be "live" (used out of the loop). */
5784 if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_scope
5785 && !STMT_VINFO_LIVE_P (stmt_info))
5786 return false;
5788 /* 2. Has this been recognized as a reduction pattern?
5790 Check if STMT represents a pattern that has been recognized
5791 in earlier analysis stages. For stmts that represent a pattern,
5792 the STMT_VINFO_RELATED_STMT field records the last stmt in
5793 the original sequence that constitutes the pattern. */
5795 orig_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt));
5796 if (orig_stmt)
5798 orig_stmt_info = vinfo_for_stmt (orig_stmt);
5799 gcc_assert (STMT_VINFO_IN_PATTERN_P (orig_stmt_info));
5800 gcc_assert (!STMT_VINFO_IN_PATTERN_P (stmt_info));
5803 /* 3. Check the operands of the operation. The first operands are defined
5804 inside the loop body. The last operand is the reduction variable,
5805 which is defined by the loop-header-phi. */
5807 gcc_assert (is_gimple_assign (stmt));
5809 /* Flatten RHS. */
5810 switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
5812 case GIMPLE_BINARY_RHS:
5813 code = gimple_assign_rhs_code (stmt);
5814 op_type = TREE_CODE_LENGTH (code);
5815 gcc_assert (op_type == binary_op);
5816 ops[0] = gimple_assign_rhs1 (stmt);
5817 ops[1] = gimple_assign_rhs2 (stmt);
5818 break;
5820 case GIMPLE_TERNARY_RHS:
5821 code = gimple_assign_rhs_code (stmt);
5822 op_type = TREE_CODE_LENGTH (code);
5823 gcc_assert (op_type == ternary_op);
5824 ops[0] = gimple_assign_rhs1 (stmt);
5825 ops[1] = gimple_assign_rhs2 (stmt);
5826 ops[2] = gimple_assign_rhs3 (stmt);
5827 break;
5829 case GIMPLE_UNARY_RHS:
5830 return false;
5832 default:
5833 gcc_unreachable ();
5836 if (code == COND_EXPR && slp_node)
5837 return false;
5839 scalar_dest = gimple_assign_lhs (stmt);
5840 scalar_type = TREE_TYPE (scalar_dest);
5841 if (!POINTER_TYPE_P (scalar_type) && !INTEGRAL_TYPE_P (scalar_type)
5842 && !SCALAR_FLOAT_TYPE_P (scalar_type))
5843 return false;
5845 /* Do not try to vectorize bit-precision reductions. */
5846 if ((TYPE_PRECISION (scalar_type)
5847 != GET_MODE_PRECISION (TYPE_MODE (scalar_type))))
5848 return false;
5850 /* All uses but the last are expected to be defined in the loop.
5851 The last use is the reduction variable. In case of nested cycle this
5852 assumption is not true: we use reduc_index to record the index of the
5853 reduction variable. */
5854 gimple *reduc_def_stmt = NULL;
5855 int reduc_index = -1;
5856 for (i = 0; i < op_type; i++)
5858 /* The condition of COND_EXPR is checked in vectorizable_condition(). */
5859 if (i == 0 && code == COND_EXPR)
5860 continue;
5862 is_simple_use = vect_is_simple_use (ops[i], loop_vinfo,
5863 &def_stmt, &dts[i], &tem);
5864 dt = dts[i];
5865 gcc_assert (is_simple_use);
5866 if (dt == vect_reduction_def)
5868 reduc_def_stmt = def_stmt;
5869 reduc_index = i;
5870 continue;
5872 else
5874 if (!vectype_in)
5875 vectype_in = tem;
5878 if (dt != vect_internal_def
5879 && dt != vect_external_def
5880 && dt != vect_constant_def
5881 && dt != vect_induction_def
5882 && !(dt == vect_nested_cycle && nested_cycle))
5883 return false;
5885 if (dt == vect_nested_cycle)
5887 found_nested_cycle_def = true;
5888 reduc_def_stmt = def_stmt;
5889 reduc_index = i;
5892 if (i == 1 && code == COND_EXPR)
5894 /* Record how value of COND_EXPR is defined. */
5895 if (dt == vect_constant_def)
5897 cond_reduc_dt = dt;
5898 cond_reduc_val = ops[i];
5900 if (dt == vect_induction_def && def_stmt != NULL
5901 && is_nonwrapping_integer_induction (def_stmt, loop))
5902 cond_reduc_dt = dt;
5906 if (!vectype_in)
5907 vectype_in = vectype_out;
5909 /* When vectorizing a reduction chain w/o SLP the reduction PHI is not
5910 directly used in stmt. */
5911 if (reduc_index == -1)
5913 if (orig_stmt)
5914 reduc_def_stmt = STMT_VINFO_REDUC_DEF (orig_stmt_info);
5915 else
5916 reduc_def_stmt = STMT_VINFO_REDUC_DEF (stmt_info);
5919 if (! reduc_def_stmt || gimple_code (reduc_def_stmt) != GIMPLE_PHI)
5920 return false;
5922 if (!(reduc_index == -1
5923 || dts[reduc_index] == vect_reduction_def
5924 || dts[reduc_index] == vect_nested_cycle
5925 || ((dts[reduc_index] == vect_internal_def
5926 || dts[reduc_index] == vect_external_def
5927 || dts[reduc_index] == vect_constant_def
5928 || dts[reduc_index] == vect_induction_def)
5929 && nested_cycle && found_nested_cycle_def)))
5931 /* For pattern recognized stmts, orig_stmt might be a reduction,
5932 but some helper statements for the pattern might not, or
5933 might be COND_EXPRs with reduction uses in the condition. */
5934 gcc_assert (orig_stmt);
5935 return false;
5938 stmt_vec_info reduc_def_info = vinfo_for_stmt (reduc_def_stmt);
5939 enum vect_reduction_type v_reduc_type
5940 = STMT_VINFO_REDUC_TYPE (reduc_def_info);
5941 gimple *tmp = STMT_VINFO_REDUC_DEF (reduc_def_info);
5943 STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) = v_reduc_type;
5944 /* If we have a condition reduction, see if we can simplify it further. */
5945 if (v_reduc_type == COND_REDUCTION)
5947 if (cond_reduc_dt == vect_induction_def)
5949 if (dump_enabled_p ())
5950 dump_printf_loc (MSG_NOTE, vect_location,
5951 "condition expression based on "
5952 "integer induction.\n");
5953 STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
5954 = INTEGER_INDUC_COND_REDUCTION;
5957 /* Loop peeling modifies the initial value of the reduction PHI, which
5958 makes the reduction stmt that is transformed differ from the
5959 original stmt analyzed. We need to record the reduction code for
5960 a CONST_COND_REDUCTION type reduction at the analysis stage, so that
5961 it can be used directly at the transform stage. */
5962 if (STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info) == MAX_EXPR
5963 || STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info) == MIN_EXPR)
5965 /* Also set the reduction type to CONST_COND_REDUCTION. */
5966 gcc_assert (cond_reduc_dt == vect_constant_def);
5967 STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) = CONST_COND_REDUCTION;
5969 else if (cond_reduc_dt == vect_constant_def)
5971 enum vect_def_type cond_initial_dt;
5972 gimple *def_stmt = SSA_NAME_DEF_STMT (ops[reduc_index]);
5973 tree cond_initial_val
5974 = PHI_ARG_DEF_FROM_EDGE (def_stmt, loop_preheader_edge (loop));
5976 gcc_assert (cond_reduc_val != NULL_TREE);
5977 vect_is_simple_use (cond_initial_val, loop_vinfo,
5978 &def_stmt, &cond_initial_dt);
5979 if (cond_initial_dt == vect_constant_def
5980 && types_compatible_p (TREE_TYPE (cond_initial_val),
5981 TREE_TYPE (cond_reduc_val)))
5983 tree e = fold_binary (LE_EXPR, boolean_type_node,
5984 cond_initial_val, cond_reduc_val);
5985 if (e && (integer_onep (e) || integer_zerop (e)))
5987 if (dump_enabled_p ())
5988 dump_printf_loc (MSG_NOTE, vect_location,
5989 "condition expression based on "
5990 "compile time constant.\n");
5991 /* Record reduction code at analysis stage. */
5992 STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info)
5993 = integer_onep (e) ? MAX_EXPR : MIN_EXPR;
5994 STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
5995 = CONST_COND_REDUCTION;
6001 if (orig_stmt)
6002 gcc_assert (tmp == orig_stmt
6003 || GROUP_FIRST_ELEMENT (vinfo_for_stmt (tmp)) == orig_stmt);
6004 else
6005 /* We changed STMT to be the first stmt in reduction chain, hence we
6006 check that in this case the first element in the chain is STMT. */
6007 gcc_assert (stmt == tmp
6008 || GROUP_FIRST_ELEMENT (vinfo_for_stmt (tmp)) == stmt);
6010 if (STMT_VINFO_LIVE_P (vinfo_for_stmt (reduc_def_stmt)))
6011 return false;
6013 if (slp_node)
6014 ncopies = 1;
6015 else
6016 ncopies = (LOOP_VINFO_VECT_FACTOR (loop_vinfo)
6017 / TYPE_VECTOR_SUBPARTS (vectype_in));
6019 gcc_assert (ncopies >= 1);
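/* E.g. (illustrative): with a vectorization factor of 8 and a V4SI
   vectype_in this gives ncopies == 2, i.e. two vector stmts are
   generated for the reduction per vectorized iteration.  */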
6021 vec_mode = TYPE_MODE (vectype_in);
6023 if (code == COND_EXPR)
6025 /* Only call during the analysis stage, otherwise we'll lose
6026 STMT_VINFO_TYPE. */
6027 if (!vec_stmt && !vectorizable_condition (stmt, gsi, NULL,
6028 ops[reduc_index], 0, NULL))
6030 if (dump_enabled_p ())
6031 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6032 "unsupported condition in reduction\n");
6033 return false;
6036 else
6038 /* 4. Supportable by target? */
6040 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR
6041 || code == LROTATE_EXPR || code == RROTATE_EXPR)
6043 /* Shifts and rotates are only supported by vectorizable_shifts,
6044 not vectorizable_reduction. */
6045 if (dump_enabled_p ())
6046 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6047 "unsupported shift or rotation.\n");
6048 return false;
6051 /* 4.1. check support for the operation in the loop */
6052 optab = optab_for_tree_code (code, vectype_in, optab_default);
6053 if (!optab)
6055 if (dump_enabled_p ())
6056 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6057 "no optab.\n");
6059 return false;
6062 if (optab_handler (optab, vec_mode) == CODE_FOR_nothing)
6064 if (dump_enabled_p ())
6065 dump_printf (MSG_NOTE, "op not supported by target.\n");
6067 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
6068 || LOOP_VINFO_VECT_FACTOR (loop_vinfo)
6069 < vect_min_worthwhile_factor (code))
6070 return false;
6072 if (dump_enabled_p ())
6073 dump_printf (MSG_NOTE, "proceeding using word mode.\n");
6076 /* Worthwhile without SIMD support? */
6077 if (!VECTOR_MODE_P (TYPE_MODE (vectype_in))
6078 && LOOP_VINFO_VECT_FACTOR (loop_vinfo)
6079 < vect_min_worthwhile_factor (code))
6081 if (dump_enabled_p ())
6082 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6083 "not worthwhile without SIMD support.\n");
6085 return false;
6089 /* 4.2. Check support for the epilog operation.
6091 If STMT represents a reduction pattern, then the type of the
6092 reduction variable may be different than the type of the rest
6093 of the arguments. For example, consider the case of accumulation
6094 of shorts into an int accumulator. The original code:
6095 S1: int_a = (int) short_a;
6096 orig_stmt-> S2: int_acc = plus <int_a ,int_acc>;
6098 was replaced with:
6099 STMT: int_acc = widen_sum <short_a, int_acc>
6101 This means that:
6102 1. The tree-code that is used to create the vector operation in the
6103 epilog code (that reduces the partial results) is not the
6104 tree-code of STMT, but is rather the tree-code of the original
6105 stmt from the pattern that STMT is replacing. I.e., in the example
6106 above we want to use 'widen_sum' in the loop, but 'plus' in the
6107 epilog.
6108 2. The type (mode) we use to check available target support
6109 for the vector operation to be created in the *epilog*, is
6110 determined by the type of the reduction variable (in the example
6111 above we'd check this: optab_handler (plus_optab, vect_int_mode)).
6112 However the type (mode) we use to check available target support
6113 for the vector operation to be created *inside the loop*, is
6114 determined by the type of the other arguments to STMT (in the
6115 example we'd check this: optab_handler (widen_sum_optab,
6116 vect_short_mode)).
6118 This is contrary to "regular" reductions, in which the types of all
6119 the arguments are the same as the type of the reduction variable.
6120 For "regular" reductions we can therefore use the same vector type
6121 (and also the same tree-code) when generating the epilog code and
6122 when generating the code inside the loop. */
6124 if (orig_stmt)
6126 /* This is a reduction pattern: get the vectype from the type of the
6127 reduction variable, and get the tree-code from orig_stmt. */
6128 gcc_assert (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
6129 == TREE_CODE_REDUCTION);
6130 orig_code = gimple_assign_rhs_code (orig_stmt);
6131 gcc_assert (vectype_out);
6132 vec_mode = TYPE_MODE (vectype_out);
6134 else
6136 /* Regular reduction: the same vectype and tree-code that are used for
6137 the vector code inside the loop can be used for the epilog code. */
6138 orig_code = code;
6140 if (code == MINUS_EXPR)
6141 orig_code = PLUS_EXPR;
6143 /* For simple condition reductions, replace with the actual expression
6144 we want to base our reduction around. */
6145 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == CONST_COND_REDUCTION)
6147 orig_code = STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info);
6148 gcc_assert (orig_code == MAX_EXPR || orig_code == MIN_EXPR);
6150 else if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
6151 == INTEGER_INDUC_COND_REDUCTION)
6152 orig_code = MAX_EXPR;
6155 if (nested_cycle)
6157 def_bb = gimple_bb (reduc_def_stmt);
6158 def_stmt_loop = def_bb->loop_father;
6159 def_arg = PHI_ARG_DEF_FROM_EDGE (reduc_def_stmt,
6160 loop_preheader_edge (def_stmt_loop));
6161 if (TREE_CODE (def_arg) == SSA_NAME
6162 && (def_arg_stmt = SSA_NAME_DEF_STMT (def_arg))
6163 && gimple_code (def_arg_stmt) == GIMPLE_PHI
6164 && flow_bb_inside_loop_p (outer_loop, gimple_bb (def_arg_stmt))
6165 && vinfo_for_stmt (def_arg_stmt)
6166 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_arg_stmt))
6167 == vect_double_reduction_def)
6168 double_reduc = true;
6171 epilog_reduc_code = ERROR_MARK;
6173 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) != COND_REDUCTION)
6175 if (reduction_code_for_scalar_code (orig_code, &epilog_reduc_code))
6177 reduc_optab = optab_for_tree_code (epilog_reduc_code, vectype_out,
6178 optab_default);
6179 if (!reduc_optab)
6181 if (dump_enabled_p ())
6182 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6183 "no optab for reduction.\n");
6185 epilog_reduc_code = ERROR_MARK;
6187 else if (optab_handler (reduc_optab, vec_mode) == CODE_FOR_nothing)
6189 if (dump_enabled_p ())
6190 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6191 "reduc op not supported by target.\n");
6193 epilog_reduc_code = ERROR_MARK;
6196 else
6198 if (!nested_cycle || double_reduc)
6200 if (dump_enabled_p ())
6201 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6202 "no reduc code for scalar code.\n");
6204 return false;
6208 else
6210 int scalar_precision = GET_MODE_PRECISION (TYPE_MODE (scalar_type));
6211 cr_index_scalar_type = make_unsigned_type (scalar_precision);
6212 cr_index_vector_type = build_vector_type
6213 (cr_index_scalar_type, TYPE_VECTOR_SUBPARTS (vectype_out));
6215 optab = optab_for_tree_code (REDUC_MAX_EXPR, cr_index_vector_type,
6216 optab_default);
6217 if (optab_handler (optab, TYPE_MODE (cr_index_vector_type))
6218 != CODE_FOR_nothing)
6219 epilog_reduc_code = REDUC_MAX_EXPR;
6222 if ((double_reduc
6223 || STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) != TREE_CODE_REDUCTION)
6224 && ncopies > 1)
6226 if (dump_enabled_p ())
6227 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6228 "multiple types in double reduction or condition "
6229 "reduction.\n");
6230 return false;
6233 /* In case of widening multiplication by a constant, we update the type
6234 of the constant to be the type of the other operand. We check that the
6235 constant fits the type in the pattern recognition pass. */
6236 if (code == DOT_PROD_EXPR
6237 && !types_compatible_p (TREE_TYPE (ops[0]), TREE_TYPE (ops[1])))
6239 if (TREE_CODE (ops[0]) == INTEGER_CST)
6240 ops[0] = fold_convert (TREE_TYPE (ops[1]), ops[0]);
6241 else if (TREE_CODE (ops[1]) == INTEGER_CST)
6242 ops[1] = fold_convert (TREE_TYPE (ops[0]), ops[1]);
6243 else
6245 if (dump_enabled_p ())
6246 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6247 "invalid types in dot-prod\n");
6249 return false;
6253 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION)
6255 widest_int ni;
6257 if (! max_loop_iterations (loop, &ni))
6259 if (dump_enabled_p ())
6260 dump_printf_loc (MSG_NOTE, vect_location,
6261 "loop count not known, cannot create cond "
6262 "reduction.\n");
6263 return false;
6265 /* Convert backedges to iterations. */
6266 ni += 1;
6268 /* The additional index will be the same type as the condition. Check
6269 that the loop count fits into this type less one (because we use up
6270 the zero slot for the case in which there are no matches). */
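/* E.g. (illustrative numbers): for a short typed reduction the index
   type is a 16-bit unsigned type, so a loop that may run for 65535 or
   more iterations is rejected here.  */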
6271 tree max_index = TYPE_MAX_VALUE (cr_index_scalar_type);
6272 if (wi::geu_p (ni, wi::to_widest (max_index)))
6274 if (dump_enabled_p ())
6275 dump_printf_loc (MSG_NOTE, vect_location,
6276 "loop size is greater than data size.\n");
6277 return false;
6281 /* In case the vectorization factor (VF) is bigger than the number
6282 of elements that we can fit in a vectype (nunits), we have to generate
6283 more than one vector stmt, i.e., we need to "unroll" the
6284 vector stmt by a factor VF/nunits. For more details see documentation
6285 in vectorizable_operation. */
6287 /* If the reduction is used in an outer loop we need to generate
6288 VF intermediate results, like so (e.g. for ncopies=2):
6289 r0 = phi (init, r0)
6290 r1 = phi (init, r1)
6291 r0 = x0 + r0;
6292 r1 = x1 + r1;
6293 (i.e. we generate VF results in 2 registers).
6294 In this case we have a separate def-use cycle for each copy, and therefore
6295 for each copy we get the vector def for the reduction variable from the
6296 respective phi node created for this copy.
6298 Otherwise (the reduction is unused in the loop nest), we can combine
6299 together intermediate results, like so (e.g. for ncopies=2):
6300 r = phi (init, r)
6301 r = x0 + r;
6302 r = x1 + r;
6303 (i.e. we generate VF/2 results in a single register).
6304 In this case for each copy we get the vector def for the reduction variable
6305 from the vectorized reduction operation generated in the previous iteration.
6307 This only works when we see both the reduction PHI and its only consumer
6308 in vectorizable_reduction and there are no intermediate stmts
6309 participating. */
6310 use_operand_p use_p;
6311 gimple *use_stmt;
6312 if (ncopies > 1
6313 && (STMT_VINFO_RELEVANT (stmt_info) <= vect_used_only_live)
6314 && single_imm_use (gimple_phi_result (reduc_def_stmt), &use_p, &use_stmt)
6315 && (use_stmt == stmt
6316 || STMT_VINFO_RELATED_STMT (vinfo_for_stmt (use_stmt)) == stmt))
6318 single_defuse_cycle = true;
6319 epilog_copies = 1;
6321 else
6322 epilog_copies = ncopies;
6324 /* If the reduction stmt is one of the patterns that have lane
6325 reduction embedded, we cannot handle the case of ! single_defuse_cycle. */
6326 if ((ncopies > 1
6327 && ! single_defuse_cycle)
6328 && (code == DOT_PROD_EXPR
6329 || code == WIDEN_SUM_EXPR
6330 || code == SAD_EXPR))
6332 if (dump_enabled_p ())
6333 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6334 "multi def-use cycle not possible for lane-reducing "
6335 "reduction operation\n");
6336 return false;
6339 if (!vec_stmt) /* transformation not required. */
6341 if (first_p)
6342 vect_model_reduction_cost (stmt_info, epilog_reduc_code, ncopies);
6343 STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
6344 return true;
6347 /* Transform. */
6349 if (dump_enabled_p ())
6350 dump_printf_loc (MSG_NOTE, vect_location, "transform reduction.\n");
6352 /* FORNOW: Multiple types are not supported for condition. */
6353 if (code == COND_EXPR)
6354 gcc_assert (ncopies == 1);
6356 /* Create the destination vector */
6357 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
6359 prev_stmt_info = NULL;
6360 prev_phi_info = NULL;
6361 if (slp_node)
6362 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
6363 else
6365 vec_num = 1;
6366 vec_oprnds0.create (1);
6367 vec_oprnds1.create (1);
6368 if (op_type == ternary_op)
6369 vec_oprnds2.create (1);
6372 phis.create (vec_num);
6373 vect_defs.create (vec_num);
6374 if (!slp_node)
6375 vect_defs.quick_push (NULL_TREE);
6377 if (slp_node)
6378 phis.splice (SLP_TREE_VEC_STMTS (slp_node_instance->reduc_phis));
6379 else
6380 phis.quick_push (STMT_VINFO_VEC_STMT (vinfo_for_stmt (reduc_def_stmt)));
6382 for (j = 0; j < ncopies; j++)
6384 if (code == COND_EXPR)
6386 gcc_assert (!slp_node);
6387 vectorizable_condition (stmt, gsi, vec_stmt,
6388 PHI_RESULT (phis[0]),
6389 reduc_index, NULL);
6390 /* Multiple types are not supported for condition. */
6391 break;
6394 /* Handle uses. */
6395 if (j == 0)
6397 if (slp_node)
6399 /* Get vec defs for all the operands except the reduction index,
6400 ensuring the ordering of the ops in the vector is kept. */
6401 auto_vec<tree, 3> slp_ops;
6402 auto_vec<vec<tree>, 3> vec_defs;
6404 slp_ops.quick_push (ops[0]);
6405 slp_ops.quick_push (ops[1]);
6406 if (op_type == ternary_op)
6407 slp_ops.quick_push (ops[2]);
6409 vect_get_slp_defs (slp_ops, slp_node, &vec_defs);
6411 vec_oprnds0.safe_splice (vec_defs[0]);
6412 vec_defs[0].release ();
6413 vec_oprnds1.safe_splice (vec_defs[1]);
6414 vec_defs[1].release ();
6415 if (op_type == ternary_op)
6417 vec_oprnds2.safe_splice (vec_defs[2]);
6418 vec_defs[2].release ();
6421 else
6423 vec_oprnds0.quick_push
6424 (vect_get_vec_def_for_operand (ops[0], stmt));
6425 vec_oprnds1.quick_push
6426 (vect_get_vec_def_for_operand (ops[1], stmt));
6427 if (op_type == ternary_op)
6428 vec_oprnds2.quick_push
6429 (vect_get_vec_def_for_operand (ops[2], stmt));
6432 else
6434 if (!slp_node)
6436 gcc_assert (reduc_index != -1 || ! single_defuse_cycle);
6438 if (single_defuse_cycle && reduc_index == 0)
6439 vec_oprnds0[0] = gimple_assign_lhs (new_stmt);
6440 else
6441 vec_oprnds0[0]
6442 = vect_get_vec_def_for_stmt_copy (dts[0], vec_oprnds0[0]);
6443 if (single_defuse_cycle && reduc_index == 1)
6444 vec_oprnds1[0] = gimple_assign_lhs (new_stmt);
6445 else
6446 vec_oprnds1[0]
6447 = vect_get_vec_def_for_stmt_copy (dts[1], vec_oprnds1[0]);
6448 if (op_type == ternary_op)
6450 if (single_defuse_cycle && reduc_index == 2)
6451 vec_oprnds2[0] = gimple_assign_lhs (new_stmt);
6452 else
6453 vec_oprnds2[0]
6454 = vect_get_vec_def_for_stmt_copy (dts[2], vec_oprnds2[0]);
6459 FOR_EACH_VEC_ELT (vec_oprnds0, i, def0)
6461 tree vop[3] = { def0, vec_oprnds1[i], NULL_TREE };
6462 if (op_type == ternary_op)
6463 vop[2] = vec_oprnds2[i];
6465 new_temp = make_ssa_name (vec_dest, new_stmt);
6466 new_stmt = gimple_build_assign (new_temp, code,
6467 vop[0], vop[1], vop[2]);
6468 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6470 if (slp_node)
6472 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
6473 vect_defs.quick_push (new_temp);
6475 else
6476 vect_defs[0] = new_temp;
6479 if (slp_node)
6480 continue;
6482 if (j == 0)
6483 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
6484 else
6485 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
6487 prev_stmt_info = vinfo_for_stmt (new_stmt);
6490 /* Finalize the reduction-phi (set its arguments) and create the
6491 epilog reduction code. */
6492 if ((!single_defuse_cycle || code == COND_EXPR) && !slp_node)
6493 vect_defs[0] = gimple_assign_lhs (*vec_stmt);
6495 vect_create_epilog_for_reduction (vect_defs, stmt, reduc_def_stmt,
6496 epilog_copies,
6497 epilog_reduc_code, phis,
6498 double_reduc, slp_node, slp_node_instance);
6500 return true;
6503 /* Function vect_min_worthwhile_factor.
6505 For a loop where we could vectorize the operation indicated by CODE,
6506 return the minimum vectorization factor that makes it worthwhile
6507 to use generic vectors. */
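/* E.g., with the table below a PLUS_EXPR reduction carried out in
   generic word-mode vectors is only considered profitable when the
   vectorization factor is at least 4, whereas the bitwise operations
   already pay off at a factor of 2.  */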
6509 vect_min_worthwhile_factor (enum tree_code code)
6511 switch (code)
6513 case PLUS_EXPR:
6514 case MINUS_EXPR:
6515 case NEGATE_EXPR:
6516 return 4;
6518 case BIT_AND_EXPR:
6519 case BIT_IOR_EXPR:
6520 case BIT_XOR_EXPR:
6521 case BIT_NOT_EXPR:
6522 return 2;
6524 default:
6525 return INT_MAX;
6530 /* Function vectorizable_induction
6532 Check if PHI performs an induction computation that can be vectorized.
6533 If VEC_STMT is also passed, vectorize the induction PHI: create a vectorized
6534 phi to replace it, put it in VEC_STMT, and add it to the same basic block.
6535 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
6537 bool
6538 vectorizable_induction (gimple *phi,
6539 gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
6540 gimple **vec_stmt, slp_tree slp_node)
6542 stmt_vec_info stmt_info = vinfo_for_stmt (phi);
6543 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
6544 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
6545 unsigned ncopies;
6546 bool nested_in_vect_loop = false;
6547 struct loop *iv_loop;
6548 tree vec_def;
6549 edge pe = loop_preheader_edge (loop);
6550 basic_block new_bb;
6551 tree new_vec, vec_init, vec_step, t;
6552 tree new_name;
6553 gimple *new_stmt;
6554 gphi *induction_phi;
6555 tree induc_def, vec_dest;
6556 tree init_expr, step_expr;
6557 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
6558 unsigned i;
6559 tree expr;
6560 gimple_seq stmts;
6561 imm_use_iterator imm_iter;
6562 use_operand_p use_p;
6563 gimple *exit_phi;
6564 edge latch_e;
6565 tree loop_arg;
6566 gimple_stmt_iterator si;
6567 basic_block bb = gimple_bb (phi);
6569 if (gimple_code (phi) != GIMPLE_PHI)
6570 return false;
6572 if (!STMT_VINFO_RELEVANT_P (stmt_info))
6573 return false;
6575 /* Make sure it was recognized as induction computation. */
6576 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def)
6577 return false;
6579 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
6580 unsigned nunits = TYPE_VECTOR_SUBPARTS (vectype);
6582 if (slp_node)
6583 ncopies = 1;
6584 else
6585 ncopies = vf / nunits;
6586 gcc_assert (ncopies >= 1);
6588 /* FORNOW. These restrictions should be relaxed. */
6589 if (nested_in_vect_loop_p (loop, phi))
6591 imm_use_iterator imm_iter;
6592 use_operand_p use_p;
6593 gimple *exit_phi;
6594 edge latch_e;
6595 tree loop_arg;
6597 if (ncopies > 1)
6599 if (dump_enabled_p ())
6600 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6601 "multiple types in nested loop.\n");
6602 return false;
6605 /* FORNOW: outer loop induction with SLP not supported. */
6606 if (STMT_SLP_TYPE (stmt_info))
6607 return false;
6609 exit_phi = NULL;
6610 latch_e = loop_latch_edge (loop->inner);
6611 loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
6612 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
6614 gimple *use_stmt = USE_STMT (use_p);
6615 if (is_gimple_debug (use_stmt))
6616 continue;
6618 if (!flow_bb_inside_loop_p (loop->inner, gimple_bb (use_stmt)))
6620 exit_phi = use_stmt;
6621 break;
6624 if (exit_phi)
6626 stmt_vec_info exit_phi_vinfo = vinfo_for_stmt (exit_phi);
6627 if (!(STMT_VINFO_RELEVANT_P (exit_phi_vinfo)
6628 && !STMT_VINFO_LIVE_P (exit_phi_vinfo)))
6630 if (dump_enabled_p ())
6631 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6632 "inner-loop induction only used outside "
6633 "of the outer vectorized loop.\n");
6634 return false;
6638 nested_in_vect_loop = true;
6639 iv_loop = loop->inner;
6641 else
6642 iv_loop = loop;
6643 gcc_assert (iv_loop == (gimple_bb (phi))->loop_father);
6645 if (!vec_stmt) /* transformation not required. */
6647 STMT_VINFO_TYPE (stmt_info) = induc_vec_info_type;
6648 if (dump_enabled_p ())
6649 dump_printf_loc (MSG_NOTE, vect_location,
6650 "=== vectorizable_induction ===\n");
6651 vect_model_induction_cost (stmt_info, ncopies);
6652 return true;
6655 /* Transform. */
6657 /* Compute a vector variable, initialized with the first VF values of
6658 the induction variable. E.g., for an iv with IV_PHI='X' and
6659 evolution S, for a vector of 4 units, we want to compute:
6660 [X, X + S, X + 2*S, X + 3*S]. */
6662 if (dump_enabled_p ())
6663 dump_printf_loc (MSG_NOTE, vect_location, "transform induction phi.\n");
6665 latch_e = loop_latch_edge (iv_loop);
6666 loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
6668 step_expr = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_info);
6669 gcc_assert (step_expr != NULL_TREE);
6671 pe = loop_preheader_edge (iv_loop);
6672 init_expr = PHI_ARG_DEF_FROM_EDGE (phi,
6673 loop_preheader_edge (iv_loop));
6675 /* Convert the step to the desired type. */
6676 stmts = NULL;
6677 step_expr = gimple_convert (&stmts, TREE_TYPE (vectype), step_expr);
6678 if (stmts)
6680 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
6681 gcc_assert (!new_bb);
6684 /* Find the first insertion point in the BB. */
6685 si = gsi_after_labels (bb);
6687 /* For SLP induction we have to generate several IVs. For example,
6688 with group size 3 we need [i, i, i, i + S] [i + S, i + S, i + 2*S, i + 2*S]
6689 [i + 2*S, i + 3*S, i + 3*S, i + 3*S]. The step is the same uniform
6690 [VF*S, VF*S, VF*S, VF*S] for all. */
6691 if (slp_node)
6693 /* Convert the init to the desired type. */
6694 stmts = NULL;
6695 init_expr = gimple_convert (&stmts, TREE_TYPE (vectype), init_expr);
6696 if (stmts)
6698 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
6699 gcc_assert (!new_bb);
6702 /* Generate [VF*S, VF*S, ... ]. */
6703 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
6705 expr = build_int_cst (integer_type_node, vf);
6706 expr = fold_convert (TREE_TYPE (step_expr), expr);
6708 else
6709 expr = build_int_cst (TREE_TYPE (step_expr), vf);
6710 new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
6711 expr, step_expr);
6712 if (! CONSTANT_CLASS_P (new_name))
6713 new_name = vect_init_vector (phi, new_name,
6714 TREE_TYPE (step_expr), NULL);
6715 new_vec = build_vector_from_val (vectype, new_name);
6716 vec_step = vect_init_vector (phi, new_vec, vectype, NULL);
6718 /* Now generate the IVs. */
6719 unsigned group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();
6720 unsigned nvects = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
6721 unsigned elts = nunits * nvects;
6722 unsigned nivs = least_common_multiple (group_size, nunits) / nunits;
6723 gcc_assert (elts % group_size == 0);
6724 tree elt = init_expr;
6725 unsigned ivn;
6726 for (ivn = 0; ivn < nivs; ++ivn)
6728 tree *elts = XALLOCAVEC (tree, nunits);
6729 bool constant_p = true;
6730 for (unsigned eltn = 0; eltn < nunits; ++eltn)
6732 if (ivn*nunits + eltn >= group_size
6733 && (ivn*nunits + eltn) % group_size == 0)
6735 stmts = NULL;
6736 elt = gimple_build (&stmts, PLUS_EXPR, TREE_TYPE (elt),
6737 elt, step_expr);
6738 if (stmts)
6740 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
6741 gcc_assert (!new_bb);
6744 if (! CONSTANT_CLASS_P (elt))
6745 constant_p = false;
6746 elts[eltn] = elt;
6748 if (constant_p)
6749 new_vec = build_vector (vectype, elts);
6750 else
6752 vec<constructor_elt, va_gc> *v;
6753 vec_alloc (v, nunits);
6754 for (i = 0; i < nunits; ++i)
6755 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, elts[i]);
6756 new_vec = build_constructor (vectype, v);
6758 vec_init = vect_init_vector (phi, new_vec, vectype, NULL);
6760 /* Create the induction-phi that defines the induction-operand. */
6761 vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_");
6762 induction_phi = create_phi_node (vec_dest, iv_loop->header);
6763 set_vinfo_for_stmt (induction_phi,
6764 new_stmt_vec_info (induction_phi, loop_vinfo));
6765 induc_def = PHI_RESULT (induction_phi);
6767 /* Create the iv update inside the loop */
6768 vec_def = make_ssa_name (vec_dest);
6769 new_stmt = gimple_build_assign (vec_def, PLUS_EXPR, induc_def, vec_step);
6770 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
6771 set_vinfo_for_stmt (new_stmt, new_stmt_vec_info (new_stmt, loop_vinfo));
6773 /* Set the arguments of the phi node: */
6774 add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION);
6775 add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
6776 UNKNOWN_LOCATION);
6778 SLP_TREE_VEC_STMTS (slp_node).quick_push (induction_phi);
6781 /* Re-use IVs when we can. */
6782 if (ivn < nvects)
6784 unsigned vfp
6785 = least_common_multiple (group_size, nunits) / group_size;
6786 /* Generate [VF'*S, VF'*S, ... ]. */
6787 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
6789 expr = build_int_cst (integer_type_node, vfp);
6790 expr = fold_convert (TREE_TYPE (step_expr), expr);
6792 else
6793 expr = build_int_cst (TREE_TYPE (step_expr), vfp);
6794 new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
6795 expr, step_expr);
6796 if (! CONSTANT_CLASS_P (new_name))
6797 new_name = vect_init_vector (phi, new_name,
6798 TREE_TYPE (step_expr), NULL);
6799 new_vec = build_vector_from_val (vectype, new_name);
6800 vec_step = vect_init_vector (phi, new_vec, vectype, NULL);
6801 for (; ivn < nvects; ++ivn)
6803 gimple *iv = SLP_TREE_VEC_STMTS (slp_node)[ivn - nivs];
6804 tree def;
6805 if (gimple_code (iv) == GIMPLE_PHI)
6806 def = gimple_phi_result (iv);
6807 else
6808 def = gimple_assign_lhs (iv);
6809 new_stmt = gimple_build_assign (make_ssa_name (vectype),
6810 PLUS_EXPR,
6811 def, vec_step);
6812 if (gimple_code (iv) == GIMPLE_PHI)
6813 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
6814 else
6816 gimple_stmt_iterator tgsi = gsi_for_stmt (iv);
6817 gsi_insert_after (&tgsi, new_stmt, GSI_CONTINUE_LINKING);
6819 set_vinfo_for_stmt (new_stmt,
6820 new_stmt_vec_info (new_stmt, loop_vinfo));
6821 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
6825 return true;
6828 /* Create the vector that holds the initial_value of the induction. */
6829 if (nested_in_vect_loop)
6831 /* iv_loop is nested in the loop to be vectorized. init_expr had already
6832 been created during vectorization of previous stmts. We obtain it
6833 from the STMT_VINFO_VEC_STMT of the defining stmt. */
6834 vec_init = vect_get_vec_def_for_operand (init_expr, phi);
6835 /* If the initial value is not of proper type, convert it. */
6836 if (!useless_type_conversion_p (vectype, TREE_TYPE (vec_init)))
6838 new_stmt
6839 = gimple_build_assign (vect_get_new_ssa_name (vectype,
6840 vect_simple_var,
6841 "vec_iv_"),
6842 VIEW_CONVERT_EXPR,
6843 build1 (VIEW_CONVERT_EXPR, vectype,
6844 vec_init));
6845 vec_init = gimple_assign_lhs (new_stmt);
6846 new_bb = gsi_insert_on_edge_immediate (loop_preheader_edge (iv_loop),
6847 new_stmt);
6848 gcc_assert (!new_bb);
6849 set_vinfo_for_stmt (new_stmt,
6850 new_stmt_vec_info (new_stmt, loop_vinfo));
6853 else
6855 vec<constructor_elt, va_gc> *v;
6857 /* iv_loop is the loop to be vectorized. Create:
6858 vec_init = [X, X+S, X+2*S, X+3*S] (S = step_expr, X = init_expr) */
6859 stmts = NULL;
6860 new_name = gimple_convert (&stmts, TREE_TYPE (vectype), init_expr);
6862 vec_alloc (v, nunits);
6863 bool constant_p = is_gimple_min_invariant (new_name);
6864 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, new_name);
6865 for (i = 1; i < nunits; i++)
6867 /* Create: new_name_i = new_name + step_expr */
6868 new_name = gimple_build (&stmts, PLUS_EXPR, TREE_TYPE (new_name),
6869 new_name, step_expr);
6870 if (!is_gimple_min_invariant (new_name))
6871 constant_p = false;
6872 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, new_name);
6874 if (stmts)
6876 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
6877 gcc_assert (!new_bb);
6880 /* Create a vector from [new_name_0, new_name_1, ..., new_name_nunits-1] */
6881 if (constant_p)
6882 new_vec = build_vector_from_ctor (vectype, v);
6883 else
6884 new_vec = build_constructor (vectype, v);
6885 vec_init = vect_init_vector (phi, new_vec, vectype, NULL);
6889 /* Create the vector that holds the step of the induction. */
6890 if (nested_in_vect_loop)
6891 /* iv_loop is nested in the loop to be vectorized. Generate:
6892 vec_step = [S, S, S, S] */
6893 new_name = step_expr;
6894 else
6896 /* iv_loop is the loop to be vectorized. Generate:
6897 vec_step = [VF*S, VF*S, VF*S, VF*S] */
6898 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
6900 expr = build_int_cst (integer_type_node, vf);
6901 expr = fold_convert (TREE_TYPE (step_expr), expr);
6903 else
6904 expr = build_int_cst (TREE_TYPE (step_expr), vf);
6905 new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
6906 expr, step_expr);
6907 if (TREE_CODE (step_expr) == SSA_NAME)
6908 new_name = vect_init_vector (phi, new_name,
6909 TREE_TYPE (step_expr), NULL);
6912 t = unshare_expr (new_name);
6913 gcc_assert (CONSTANT_CLASS_P (new_name)
6914 || TREE_CODE (new_name) == SSA_NAME);
6915 new_vec = build_vector_from_val (vectype, t);
6916 vec_step = vect_init_vector (phi, new_vec, vectype, NULL);
6919 /* Create the following def-use cycle:
6920 loop prolog:
6921 vec_init = ...
6922 vec_step = ...
6923 loop:
6924 vec_iv = PHI <vec_init, vec_loop>
6926 STMT
6928 vec_loop = vec_iv + vec_step; */
6930 /* Create the induction-phi that defines the induction-operand. */
6931 vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_");
6932 induction_phi = create_phi_node (vec_dest, iv_loop->header);
6933 set_vinfo_for_stmt (induction_phi,
6934 new_stmt_vec_info (induction_phi, loop_vinfo));
6935 induc_def = PHI_RESULT (induction_phi);
6937 /* Create the iv update inside the loop */
6938 vec_def = make_ssa_name (vec_dest);
6939 new_stmt = gimple_build_assign (vec_def, PLUS_EXPR, induc_def, vec_step);
6940 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
6941 set_vinfo_for_stmt (new_stmt, new_stmt_vec_info (new_stmt, loop_vinfo));
6943 /* Set the arguments of the phi node: */
6944 add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION);
6945 add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
6946 UNKNOWN_LOCATION);
6948 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = induction_phi;
6950 /* In case the vectorization factor (VF) is bigger than the number
6951 of elements that we can fit in a vectype (nunits), we have to generate
6952 more than one vector stmt, i.e., we need to "unroll" the
6953 vector stmt by a factor VF/nunits. For more details see documentation
6954 in vectorizable_operation. */
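/* E.g. (illustrative): with VF == 8 and nunits == 4 we get ncopies == 2;
   the second copy is obtained below by adding [4*S, 4*S, 4*S, 4*S] to
   the first, so together they cover X, X + S, ..., X + 7*S.  */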
6956 if (ncopies > 1)
6958 stmt_vec_info prev_stmt_vinfo;
6959 /* FORNOW. This restriction should be relaxed. */
6960 gcc_assert (!nested_in_vect_loop);
6962 /* Create the vector that holds the step of the induction. */
6963 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
6965 expr = build_int_cst (integer_type_node, nunits);
6966 expr = fold_convert (TREE_TYPE (step_expr), expr);
6968 else
6969 expr = build_int_cst (TREE_TYPE (step_expr), nunits);
6970 new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
6971 expr, step_expr);
6972 if (TREE_CODE (step_expr) == SSA_NAME)
6973 new_name = vect_init_vector (phi, new_name,
6974 TREE_TYPE (step_expr), NULL);
6975 t = unshare_expr (new_name);
6976 gcc_assert (CONSTANT_CLASS_P (new_name)
6977 || TREE_CODE (new_name) == SSA_NAME);
6978 new_vec = build_vector_from_val (vectype, t);
6979 vec_step = vect_init_vector (phi, new_vec, vectype, NULL);
6981 vec_def = induc_def;
6982 prev_stmt_vinfo = vinfo_for_stmt (induction_phi);
6983 for (i = 1; i < ncopies; i++)
6985 /* vec_i = vec_prev + vec_step */
6986 new_stmt = gimple_build_assign (vec_dest, PLUS_EXPR,
6987 vec_def, vec_step);
6988 vec_def = make_ssa_name (vec_dest, new_stmt);
6989 gimple_assign_set_lhs (new_stmt, vec_def);
6991 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
6992 set_vinfo_for_stmt (new_stmt,
6993 new_stmt_vec_info (new_stmt, loop_vinfo));
6994 STMT_VINFO_RELATED_STMT (prev_stmt_vinfo) = new_stmt;
6995 prev_stmt_vinfo = vinfo_for_stmt (new_stmt);
6999 if (nested_in_vect_loop)
7001 /* Find the loop-closed exit-phi of the induction, and record
7002 the final vector of induction results: */
7003 exit_phi = NULL;
7004 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
7006 gimple *use_stmt = USE_STMT (use_p);
7007 if (is_gimple_debug (use_stmt))
7008 continue;
7010 if (!flow_bb_inside_loop_p (iv_loop, gimple_bb (use_stmt)))
7012 exit_phi = use_stmt;
7013 break;
7016 if (exit_phi)
7018 stmt_vec_info stmt_vinfo = vinfo_for_stmt (exit_phi);
7019 /* FORNOW. Currently not supporting the case that an inner-loop induction
7020 is not used in the outer-loop (i.e. only outside the outer-loop). */
7021 gcc_assert (STMT_VINFO_RELEVANT_P (stmt_vinfo)
7022 && !STMT_VINFO_LIVE_P (stmt_vinfo));
7024 STMT_VINFO_VEC_STMT (stmt_vinfo) = new_stmt;
7025 if (dump_enabled_p ())
7027 dump_printf_loc (MSG_NOTE, vect_location,
7028 "vector of inductions after inner-loop:");
7029 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
7035 if (dump_enabled_p ())
7037 dump_printf_loc (MSG_NOTE, vect_location,
7038 "transform induction: created def-use cycle: ");
7039 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, induction_phi, 0);
7040 dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
7041 SSA_NAME_DEF_STMT (vec_def), 0);
7044 return true;
7047 /* Function vectorizable_live_operation.
7049 STMT computes a value that is used outside the loop. Check if
7050 it can be supported. */
7052 bool
7053 vectorizable_live_operation (gimple *stmt,
7054 gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
7055 slp_tree slp_node, int slp_index,
7056 gimple **vec_stmt)
7058 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
7059 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
7060 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
7061 imm_use_iterator imm_iter;
7062 tree lhs, lhs_type, bitsize, vec_bitsize;
7063 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
7064 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
7065 int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
7066 gimple *use_stmt;
7067 auto_vec<tree> vec_oprnds;
7069 gcc_assert (STMT_VINFO_LIVE_P (stmt_info));
7071 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def)
7072 return false;
7074 /* FORNOW. CHECKME. */
7075 if (nested_in_vect_loop_p (loop, stmt))
7076 return false;
7078 /* If STMT is not relevant, is a simple assignment, and its inputs are
7079 invariant, then it can remain in place, unvectorized. The original last
7080 scalar value that it computes will be used. */
7081 if (!STMT_VINFO_RELEVANT_P (stmt_info))
7083 gcc_assert (is_simple_and_all_uses_invariant (stmt, loop_vinfo));
7084 if (dump_enabled_p ())
7085 dump_printf_loc (MSG_NOTE, vect_location,
7086 "statement is simple and uses invariant. Leaving in "
7087 "place.\n");
7088 return true;
7091 if (!vec_stmt)
7092 /* No transformation required. */
7093 return true;
7095 /* If stmt has a related stmt, then use that for getting the lhs. */
7096 if (is_pattern_stmt_p (stmt_info))
7097 stmt = STMT_VINFO_RELATED_STMT (stmt_info);
7099 lhs = (is_a <gphi *> (stmt)) ? gimple_phi_result (stmt)
7100 : gimple_get_lhs (stmt);
7101 lhs_type = TREE_TYPE (lhs);
7103 bitsize = TYPE_SIZE (TREE_TYPE (vectype));
7104 vec_bitsize = TYPE_SIZE (vectype);
7106 /* Get the vectorized lhs of STMT and the lane to use (counted in bits). */
7107 tree vec_lhs, bitstart;
7108 if (slp_node)
7110 gcc_assert (slp_index >= 0);
7112 int num_scalar = SLP_TREE_SCALAR_STMTS (slp_node).length ();
7113 int num_vec = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
7115 /* Get the last occurrence of the scalar index from the concatenation of
7116 all the slp vectors. Calculate which slp vector it is and the index
7117 within. */
7118 int pos = (num_vec * nunits) - num_scalar + slp_index;
7119 int vec_entry = pos / nunits;
7120 int vec_index = pos % nunits;
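/* E.g. (illustrative numbers): with two scalar stmts, nunits == 4 and
   one vector stmt, the lanes hold [s0, s1, s0, s1]; for slp_index == 1
   this gives pos == 3, vec_entry == 0 and vec_index == 3, i.e. the
   last occurrence of s1.  */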
7122 /* Get the correct slp vectorized stmt. */
7123 vec_lhs = gimple_get_lhs (SLP_TREE_VEC_STMTS (slp_node)[vec_entry]);
7125 /* Get entry to use. */
7126 bitstart = build_int_cst (unsigned_type_node, vec_index);
7127 bitstart = int_const_binop (MULT_EXPR, bitsize, bitstart);
7129 else
7131 enum vect_def_type dt = STMT_VINFO_DEF_TYPE (stmt_info);
7132 vec_lhs = vect_get_vec_def_for_operand_1 (stmt, dt);
7134 /* For multiple copies, get the last copy. */
7135 for (int i = 1; i < ncopies; ++i)
7136 vec_lhs = vect_get_vec_def_for_stmt_copy (vect_unknown_def_type,
7137 vec_lhs);
7139 /* Get the last lane in the vector. */
7140 bitstart = int_const_binop (MINUS_EXPR, vec_bitsize, bitsize);
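/* E.g. for a V4SI vector, vec_bitsize is 128 and bitsize is 32, so
   bitstart 96 selects the last lane.  */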
7143 /* Create a new vectorized stmt for the uses of STMT and insert it
7144 outside the loop. */
7145 gimple_seq stmts = NULL;
7146 tree bftype = TREE_TYPE (vectype);
7147 if (VECTOR_BOOLEAN_TYPE_P (vectype))
7148 bftype = build_nonstandard_integer_type (tree_to_uhwi (bitsize), 1);
7149 tree new_tree = build3 (BIT_FIELD_REF, bftype, vec_lhs, bitsize, bitstart);
7150 new_tree = force_gimple_operand (fold_convert (lhs_type, new_tree), &stmts,
7151 true, NULL_TREE);
7152 if (stmts)
7153 gsi_insert_seq_on_edge_immediate (single_exit (loop), stmts);
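/* Roughly, the statements materialized on the loop's single exit edge
   look like (SSA names are illustrative):
     _1 = BIT_FIELD_REF <vec_lhs, bitsize, bitstart>;
     _2 = (lhs_type) _1;
   and _2 is what replaces the out-of-loop uses of LHS below.  */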
7155 /* Replace the uses of lhs with the newly computed result. If the use stmt
7156 is a single-argument PHI, just replace all uses of the PHI result. This is
7157 necessary because the lcssa PHI defining lhs may appear before the newly inserted stmt. */
7158 use_operand_p use_p;
7159 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, lhs)
7160 if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))
7161 && !is_gimple_debug (use_stmt))
7163 if (gimple_code (use_stmt) == GIMPLE_PHI
7164 && gimple_phi_num_args (use_stmt) == 1)
7166 replace_uses_by (gimple_phi_result (use_stmt), new_tree);
7168 else
7170 FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
7171 SET_USE (use_p, new_tree);
7173 update_stmt (use_stmt);
7176 return true;
7179 /* Kill any debug uses outside LOOP of SSA names defined in STMT. */
7181 static void
7182 vect_loop_kill_debug_uses (struct loop *loop, gimple *stmt)
7184 ssa_op_iter op_iter;
7185 imm_use_iterator imm_iter;
7186 def_operand_p def_p;
7187 gimple *ustmt;
7189 FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
7191 FOR_EACH_IMM_USE_STMT (ustmt, imm_iter, DEF_FROM_PTR (def_p))
7193 basic_block bb;
7195 if (!is_gimple_debug (ustmt))
7196 continue;
7198 bb = gimple_bb (ustmt);
7200 if (!flow_bb_inside_loop_p (loop, bb))
7202 if (gimple_debug_bind_p (ustmt))
7204 if (dump_enabled_p ())
7205 dump_printf_loc (MSG_NOTE, vect_location,
7206 "killing debug use\n");
7208 gimple_debug_bind_reset_value (ustmt);
7209 update_stmt (ustmt);
7211 else
7212 gcc_unreachable ();
7218 /* Given the loop represented by LOOP_VINFO, return true if the computation
7219 of LOOP_VINFO_NITERS (= LOOP_VINFO_NITERSM1 + 1) doesn't overflow, false
7220 otherwise. */
7222 static bool
7223 loop_niters_no_overflow (loop_vec_info loop_vinfo)
7225 /* Constant case. */
7226 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
7228 tree cst_niters = LOOP_VINFO_NITERS (loop_vinfo);
7229 tree cst_nitersm1 = LOOP_VINFO_NITERSM1 (loop_vinfo);
7231 gcc_assert (TREE_CODE (cst_niters) == INTEGER_CST);
7232 gcc_assert (TREE_CODE (cst_nitersm1) == INTEGER_CST);
7233 if (wi::to_widest (cst_nitersm1) < wi::to_widest (cst_niters))
7234 return true;
7237 widest_int max;
7238 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
7239 /* Check the upper bound of loop niters. */
7240 if (get_max_loop_iterations (loop, &max))
7242 tree type = TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo));
7243 signop sgn = TYPE_SIGN (type);
7244 widest_int type_max = widest_int::from (wi::max_value (type), sgn);
7245 if (max < type_max)
7246 return true;
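/* Otherwise the recorded bound does not rule out NITERSM1 being the
   maximum value of its type, in which case NITERSM1 + 1 would wrap to
   zero, so be conservative.  */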
7248 return false;
7251 /* Scale the profiling counters of LOOP, which is vectorized by factor VF,
7252 according to the estimated number of iterations of the vectorized loop. */
7254 static void
7255 scale_profile_for_vect_loop (struct loop *loop, unsigned vf)
7257 edge preheader = loop_preheader_edge (loop);
7258 /* Reduce loop iterations by the vectorization factor. */
7259 gcov_type new_est_niter = niter_for_unrolled_loop (loop, vf);
7260 profile_count freq_h = loop->header->count, freq_e = preheader->count;
7262 /* Use frequency only if counts are zero. */
7263 if (!(freq_h > 0) && !(freq_e > 0))
7265 freq_h = profile_count::from_gcov_type (loop->header->frequency);
7266 freq_e = profile_count::from_gcov_type (EDGE_FREQUENCY (preheader));
7268 if (freq_h > 0)
7270 profile_probability p;
7272 /* Avoid dropping loop body profile counter to 0 because of zero count
7273 in loop's preheader. */
7274 if (!(freq_e > profile_count::from_gcov_type (1)))
7275 freq_e = profile_count::from_gcov_type (1);
7276 p = freq_e.apply_scale (new_est_niter + 1, 1).probability_in (freq_h);
7277 scale_loop_frequencies (loop, p);
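/* After scaling, the loop header count becomes roughly
   preheader_count * (new_est_niter + 1), matching the reduced iteration
   estimate of the vectorized loop.  */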
7280 basic_block exit_bb = single_pred (loop->latch);
7281 edge exit_e = single_exit (loop);
7282 exit_e->count = loop_preheader_edge (loop)->count;
7283 exit_e->probability = profile_probability::always ()
7284 .apply_scale (1, new_est_niter + 1);
7286 edge exit_l = single_pred_edge (loop->latch);
7287 profile_probability prob = exit_l->probability;
7288 exit_l->probability = exit_e->probability.invert ();
7289 exit_l->count = exit_bb->count - exit_e->count;
7290 if (prob.initialized_p () && exit_l->probability.initialized_p ())
7291 scale_bbs_frequencies (&loop->latch, 1, exit_l->probability / prob);
7294 /* Function vect_transform_loop.
7296 The analysis phase has determined that the loop is vectorizable.
7297 Vectorize the loop - create vectorized stmts to replace the scalar
7298 stmts in the loop, and update the loop exit condition.
7299 Returns the scalar epilogue loop, if any. */
7301 struct loop *
7302 vect_transform_loop (loop_vec_info loop_vinfo)
7304 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
7305 struct loop *epilogue = NULL;
7306 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
7307 int nbbs = loop->num_nodes;
7308 int i;
7309 tree niters_vector = NULL;
7310 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
7311 bool grouped_store;
7312 bool slp_scheduled = false;
7313 gimple *stmt, *pattern_stmt;
7314 gimple_seq pattern_def_seq = NULL;
7315 gimple_stmt_iterator pattern_def_si = gsi_none ();
7316 bool transform_pattern_stmt = false;
7317 bool check_profitability = false;
7318 int th;
7320 if (dump_enabled_p ())
7321 dump_printf_loc (MSG_NOTE, vect_location, "=== vec_transform_loop ===\n");
7323 /* Use the more conservative vectorization threshold. If the number
7324 of iterations is constant assume the cost check has been performed
7325 by our caller. If the threshold makes all loops profitable that
7326 run at least the vectorization factor number of times, checking
7327 is pointless, too. */
7328 th = LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo);
7329 if (th >= LOOP_VINFO_VECT_FACTOR (loop_vinfo)
7330 && !LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
7332 if (dump_enabled_p ())
7333 dump_printf_loc (MSG_NOTE, vect_location,
7334 "Profitability threshold is %d loop iterations.\n",
7335 th);
7336 check_profitability = true;
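/* E.g. with vf == 4 and a cost-model threshold of 7 iterations, the
   runtime check is still needed: loops running 4, 5 or 6 iterations
   would enter the vectorized code but fall below the profitability
   threshold.  */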
7339 /* Make sure there exists a single-predecessor exit bb. Do this before
7340 versioning. */
7341 edge e = single_exit (loop);
7342 if (! single_pred_p (e->dest))
7344 split_loop_exit_edge (e);
7345 if (dump_enabled_p ())
7346 dump_printf (MSG_NOTE, "split exit edge\n");
7349 /* Version the loop first, if required, so the profitability check
7350 comes first. */
7352 if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
7354 vect_loop_versioning (loop_vinfo, th, check_profitability);
7355 check_profitability = false;
7358 /* Make sure there exists a single-predecessor exit bb also on the
7359 scalar loop copy. Do this after versioning but before peeling so the
7360 CFG structure is fine for both the scalar and the if-converted loop,
7361 and slpeel_duplicate_current_defs_from_edges sees matched
7362 loop-closed PHI nodes on the exit. */
7363 if (LOOP_VINFO_SCALAR_LOOP (loop_vinfo))
7365 e = single_exit (LOOP_VINFO_SCALAR_LOOP (loop_vinfo));
7366 if (! single_pred_p (e->dest))
7368 split_loop_exit_edge (e);
7369 if (dump_enabled_p ())
7370 dump_printf (MSG_NOTE, "split exit edge of scalar loop\n");
7374 tree niters = vect_build_loop_niters (loop_vinfo);
7375 LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = niters;
7376 tree nitersm1 = unshare_expr (LOOP_VINFO_NITERSM1 (loop_vinfo));
7377 bool niters_no_overflow = loop_niters_no_overflow (loop_vinfo);
7378 epilogue = vect_do_peeling (loop_vinfo, niters, nitersm1, &niters_vector, th,
7379 check_profitability, niters_no_overflow);
7380 if (niters_vector == NULL_TREE)
7382 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
7383 niters_vector
7384 = build_int_cst (TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo)),
7385 LOOP_VINFO_INT_NITERS (loop_vinfo) / vf);
7386 else
7387 vect_gen_vector_loop_niters (loop_vinfo, niters, &niters_vector,
7388 niters_no_overflow);
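/* E.g. a loop with 100 known iterations and vf == 4 gets a constant
   niters_vector of 25; otherwise an expression that essentially computes
   NITERS / vf is generated.  */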
7391 /* 1) Make sure the loop header has exactly two entries
7392 2) Make sure we have a preheader basic block. */
7394 gcc_assert (EDGE_COUNT (loop->header->preds) == 2);
7396 split_edge (loop_preheader_edge (loop));
7398 /* FORNOW: the vectorizer supports only loops whose body consists
7399 of one basic block (header + empty latch). When the vectorizer
7400 supports more involved loop forms, the order in which the BBs are
7401 traversed needs to be reconsidered. */
7403 for (i = 0; i < nbbs; i++)
7405 basic_block bb = bbs[i];
7406 stmt_vec_info stmt_info;
7408 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
7409 gsi_next (&si))
7411 gphi *phi = si.phi ();
7412 if (dump_enabled_p ())
7414 dump_printf_loc (MSG_NOTE, vect_location,
7415 "------>vectorizing phi: ");
7416 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
7418 stmt_info = vinfo_for_stmt (phi);
7419 if (!stmt_info)
7420 continue;
7422 if (MAY_HAVE_DEBUG_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
7423 vect_loop_kill_debug_uses (loop, phi);
7425 if (!STMT_VINFO_RELEVANT_P (stmt_info)
7426 && !STMT_VINFO_LIVE_P (stmt_info))
7427 continue;
7429 if (STMT_VINFO_VECTYPE (stmt_info)
7430 && (TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info))
7431 != (unsigned HOST_WIDE_INT) vf)
7432 && dump_enabled_p ())
7433 dump_printf_loc (MSG_NOTE, vect_location, "multiple-types.\n");
7435 if ((STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def
7436 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def
7437 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle)
7438 && ! PURE_SLP_STMT (stmt_info))
7440 if (dump_enabled_p ())
7441 dump_printf_loc (MSG_NOTE, vect_location, "transform phi.\n");
7442 vect_transform_stmt (phi, NULL, NULL, NULL, NULL);
7446 pattern_stmt = NULL;
7447 for (gimple_stmt_iterator si = gsi_start_bb (bb);
7448 !gsi_end_p (si) || transform_pattern_stmt;)
7450 bool is_store;
7452 if (transform_pattern_stmt)
7453 stmt = pattern_stmt;
7454 else
7456 stmt = gsi_stmt (si);
7457 /* During vectorization remove existing clobber stmts. */
7458 if (gimple_clobber_p (stmt))
7460 unlink_stmt_vdef (stmt);
7461 gsi_remove (&si, true);
7462 release_defs (stmt);
7463 continue;
7467 if (dump_enabled_p ())
7469 dump_printf_loc (MSG_NOTE, vect_location,
7470 "------>vectorizing statement: ");
7471 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
7474 stmt_info = vinfo_for_stmt (stmt);
7476 /* vector stmts created in the outer-loop during vectorization of
7477 stmts in an inner-loop may not have a stmt_info, and do not
7478 need to be vectorized. */
7479 if (!stmt_info)
7481 gsi_next (&si);
7482 continue;
7485 if (MAY_HAVE_DEBUG_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
7486 vect_loop_kill_debug_uses (loop, stmt);
7488 if (!STMT_VINFO_RELEVANT_P (stmt_info)
7489 && !STMT_VINFO_LIVE_P (stmt_info))
7491 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
7492 && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info))
7493 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
7494 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
7496 stmt = pattern_stmt;
7497 stmt_info = vinfo_for_stmt (stmt);
7499 else
7501 gsi_next (&si);
7502 continue;
7505 else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
7506 && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info))
7507 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
7508 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
7509 transform_pattern_stmt = true;
7511 /* If the pattern statement has def stmts, vectorize them too. */
7512 if (is_pattern_stmt_p (stmt_info))
7514 if (pattern_def_seq == NULL)
7516 pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
7517 pattern_def_si = gsi_start (pattern_def_seq);
7519 else if (!gsi_end_p (pattern_def_si))
7520 gsi_next (&pattern_def_si);
7521 if (pattern_def_seq != NULL)
7523 gimple *pattern_def_stmt = NULL;
7524 stmt_vec_info pattern_def_stmt_info = NULL;
7526 while (!gsi_end_p (pattern_def_si))
7528 pattern_def_stmt = gsi_stmt (pattern_def_si);
7529 pattern_def_stmt_info
7530 = vinfo_for_stmt (pattern_def_stmt);
7531 if (STMT_VINFO_RELEVANT_P (pattern_def_stmt_info)
7532 || STMT_VINFO_LIVE_P (pattern_def_stmt_info))
7533 break;
7534 gsi_next (&pattern_def_si);
7537 if (!gsi_end_p (pattern_def_si))
7539 if (dump_enabled_p ())
7541 dump_printf_loc (MSG_NOTE, vect_location,
7542 "==> vectorizing pattern def "
7543 "stmt: ");
7544 dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
7545 pattern_def_stmt, 0);
7548 stmt = pattern_def_stmt;
7549 stmt_info = pattern_def_stmt_info;
7551 else
7553 pattern_def_si = gsi_none ();
7554 transform_pattern_stmt = false;
7557 else
7558 transform_pattern_stmt = false;
7561 if (STMT_VINFO_VECTYPE (stmt_info))
7563 unsigned int nunits
7564 = (unsigned int)
7565 TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
7566 if (!STMT_SLP_TYPE (stmt_info)
7567 && nunits != (unsigned int) vf
7568 && dump_enabled_p ())
7569 /* For SLP, VF is set according to the unrolling factor, and not
7570 to the vector size, hence for SLP this print is not valid. */
7571 dump_printf_loc (MSG_NOTE, vect_location, "multiple-types.\n");
7574 /* SLP. Schedule all the SLP instances when the first SLP stmt is
7575 reached. */
7576 if (STMT_SLP_TYPE (stmt_info))
7578 if (!slp_scheduled)
7580 slp_scheduled = true;
7582 if (dump_enabled_p ())
7583 dump_printf_loc (MSG_NOTE, vect_location,
7584 "=== scheduling SLP instances ===\n");
7586 vect_schedule_slp (loop_vinfo);
7589 /* Hybrid SLP stmts must be vectorized in addition to SLP. */
7590 if (!vinfo_for_stmt (stmt) || PURE_SLP_STMT (stmt_info))
7592 if (!transform_pattern_stmt && gsi_end_p (pattern_def_si))
7594 pattern_def_seq = NULL;
7595 gsi_next (&si);
7597 continue;
7601 /* -------- vectorize statement ------------ */
7602 if (dump_enabled_p ())
7603 dump_printf_loc (MSG_NOTE, vect_location, "transform statement.\n");
7605 grouped_store = false;
7606 is_store = vect_transform_stmt (stmt, &si, &grouped_store, NULL, NULL);
7607 if (is_store)
7609 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
7611 /* Interleaving. If IS_STORE is TRUE, the vectorization of the
7612 interleaving chain was completed - free all the stores in
7613 the chain. */
7614 gsi_next (&si);
7615 vect_remove_stores (GROUP_FIRST_ELEMENT (stmt_info));
7617 else
7619 /* Free the attached stmt_vec_info and remove the stmt. */
7620 gimple *store = gsi_stmt (si);
7621 free_stmt_vec_info (store);
7622 unlink_stmt_vdef (store);
7623 gsi_remove (&si, true);
7624 release_defs (store);
7627 /* Stores can only appear at the end of pattern statements. */
7628 gcc_assert (!transform_pattern_stmt);
7629 pattern_def_seq = NULL;
7631 else if (!transform_pattern_stmt && gsi_end_p (pattern_def_si))
7633 pattern_def_seq = NULL;
7634 gsi_next (&si);
7636 } /* stmts in BB */
7637 } /* BBs in loop */
7639 slpeel_make_loop_iterate_ntimes (loop, niters_vector);
7641 scale_profile_for_vect_loop (loop, vf);
7643 /* The minimum number of iterations performed by the epilogue. This
7644 is 1 when peeling for gaps because we always need a final scalar
7645 iteration. */
7646 int min_epilogue_iters = LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) ? 1 : 0;
7647 /* +1 to convert latch counts to loop iteration counts,
7648 -min_epilogue_iters to remove iterations that cannot be performed
7649 by the vector code. */
7650 int bias = 1 - min_epilogue_iters;
7651 /* In these calculations the "- 1" converts loop iteration counts
7652 back to latch counts. */
7653 if (loop->any_upper_bound)
7654 loop->nb_iterations_upper_bound
7655 = wi::udiv_floor (loop->nb_iterations_upper_bound + bias, vf) - 1;
7656 if (loop->any_likely_upper_bound)
7657 loop->nb_iterations_likely_upper_bound
7658 = wi::udiv_floor (loop->nb_iterations_likely_upper_bound + bias, vf) - 1;
7659 if (loop->any_estimate)
7660 loop->nb_iterations_estimate
7661 = wi::udiv_floor (loop->nb_iterations_estimate + bias, vf) - 1;
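/* For instance, with vf == 4, no peeling for gaps (bias == 1) and a
   previous upper bound of 17 latch iterations (18 loop iterations), the
   vector loop is bounded by floor (18 / 4) == 4 iterations, i.e. 3 latch
   executions.  */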
7663 if (dump_enabled_p ())
7665 if (!LOOP_VINFO_EPILOGUE_P (loop_vinfo))
7667 dump_printf_loc (MSG_NOTE, vect_location,
7668 "LOOP VECTORIZED\n");
7669 if (loop->inner)
7670 dump_printf_loc (MSG_NOTE, vect_location,
7671 "OUTER LOOP VECTORIZED\n");
7672 dump_printf (MSG_NOTE, "\n");
7674 else
7675 dump_printf_loc (MSG_NOTE, vect_location,
7676 "LOOP EPILOGUE VECTORIZED (VS=%d)\n",
7677 current_vector_size);
7680 /* Free SLP instances here because otherwise stmt reference counting
7681 won't work. */
7682 slp_instance instance;
7683 FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), i, instance)
7684 vect_free_slp_instance (instance);
7685 LOOP_VINFO_SLP_INSTANCES (loop_vinfo).release ();
7686 /* Clear the safelen field since its value is invalid after vectorization:
7687 the vectorized loop can have loop-carried dependencies. */
7688 loop->safelen = 0;
7690 /* Don't vectorize the epilogue of an epilogue loop. */
7691 if (LOOP_VINFO_EPILOGUE_P (loop_vinfo))
7692 epilogue = NULL;
7694 if (epilogue)
7696 unsigned int vector_sizes
7697 = targetm.vectorize.autovectorize_vector_sizes ();
7698 vector_sizes &= current_vector_size - 1;
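/* VECTOR_SIZES is a bitmask of the vector sizes (in bytes) the target
   supports; masking with current_vector_size - 1 keeps only the sizes
   smaller than the one just used, e.g. 32 | 16 becomes 16 when
   current_vector_size is 32.  */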
7700 if (!PARAM_VALUE (PARAM_VECT_EPILOGUES_NOMASK))
7701 epilogue = NULL;
7702 else if (!vector_sizes)
7703 epilogue = NULL;
7704 else if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
7705 && LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) >= 0)
7707 int smallest_vec_size = 1 << ctz_hwi (vector_sizes);
7708 int ratio = current_vector_size / smallest_vec_size;
7709 int eiters = LOOP_VINFO_INT_NITERS (loop_vinfo)
7710 - LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
7711 eiters = eiters % vf;
7713 epilogue->nb_iterations_upper_bound = eiters - 1;
7715 if (eiters < vf / ratio)
7716 epilogue = NULL;
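/* E.g. with 103 known iterations, 3 iterations peeled for alignment,
   vf == 8, current_vector_size == 32 and a smallest supported size of 16
   (ratio == 2): eiters == 100 % 8 == 4, which is not below vf / ratio == 4,
   so the epilogue is kept for vectorization with a smaller vector size.  */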
7720 if (epilogue)
7722 epilogue->force_vectorize = loop->force_vectorize;
7723 epilogue->safelen = loop->safelen;
7724 epilogue->dont_vectorize = false;
7726 /* We may need to if-convert epilogue to vectorize it. */
7727 if (LOOP_VINFO_SCALAR_LOOP (loop_vinfo))
7728 tree_if_conversion (epilogue);
7731 return epilogue;
7734 /* The code below tries to perform a simple optimization - revert
7735 if-conversion for masked stores: if the mask of a store is zero, do not
7736 perform the store and, if possible, skip the producers of the stored values too.
7737 For example,
7738 for (i=0; i<n; i++)
7739 if (c[i])
7741 p1[i] += 1;
7742 p2[i] = p3[i] +2;
7744 this transformation will produce the following semi-hammock:
7746 if (!mask__ifc__42.18_165 == { 0, 0, 0, 0, 0, 0, 0, 0 })
7748 vect__11.19_170 = MASK_LOAD (vectp_p1.20_168, 0B, mask__ifc__42.18_165);
7749 vect__12.22_172 = vect__11.19_170 + vect_cst__171;
7750 MASK_STORE (vectp_p1.23_175, 0B, mask__ifc__42.18_165, vect__12.22_172);
7751 vect__18.25_182 = MASK_LOAD (vectp_p3.26_180, 0B, mask__ifc__42.18_165);
7752 vect__19.28_184 = vect__18.25_182 + vect_cst__183;
7753 MASK_STORE (vectp_p2.29_187, 0B, mask__ifc__42.18_165, vect__19.28_184);
7757 void
7758 optimize_mask_stores (struct loop *loop)
7760 basic_block *bbs = get_loop_body (loop);
7761 unsigned nbbs = loop->num_nodes;
7762 unsigned i;
7763 basic_block bb;
7764 struct loop *bb_loop;
7765 gimple_stmt_iterator gsi;
7766 gimple *stmt;
7767 auto_vec<gimple *> worklist;
7769 vect_location = find_loop_location (loop);
7770 /* Pick up all masked stores in the loop, if any. */
7771 for (i = 0; i < nbbs; i++)
7773 bb = bbs[i];
7774 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
7775 gsi_next (&gsi))
7777 stmt = gsi_stmt (gsi);
7778 if (gimple_call_internal_p (stmt, IFN_MASK_STORE))
7779 worklist.safe_push (stmt);
7783 free (bbs);
7784 if (worklist.is_empty ())
7785 return;
7787 /* Loop has masked stores. */
7788 while (!worklist.is_empty ())
7790 gimple *last, *last_store;
7791 edge e, efalse;
7792 tree mask;
7793 basic_block store_bb, join_bb;
7794 gimple_stmt_iterator gsi_to;
7795 tree vdef, new_vdef;
7796 gphi *phi;
7797 tree vectype;
7798 tree zero;
7800 last = worklist.pop ();
7801 mask = gimple_call_arg (last, 2);
7802 bb = gimple_bb (last);
7803 /* Create then_bb and the if-then structure in the CFG; then_bb belongs to
7804 the same loop as if_bb. That loop can differ from LOOP when a
7805 two-level loop nest is vectorized and the mask_store belongs to the inner
7806 one. */
7807 e = split_block (bb, last);
7808 bb_loop = bb->loop_father;
7809 gcc_assert (loop == bb_loop || flow_loop_nested_p (loop, bb_loop));
7810 join_bb = e->dest;
7811 store_bb = create_empty_bb (bb);
7812 add_bb_to_loop (store_bb, bb_loop);
7813 e->flags = EDGE_TRUE_VALUE;
7814 efalse = make_edge (bb, store_bb, EDGE_FALSE_VALUE);
7815 /* Put STORE_BB to likely part. */
7816 efalse->probability = profile_probability::unlikely ();
7817 store_bb->frequency = PROB_ALWAYS - EDGE_FREQUENCY (efalse);
7818 make_single_succ_edge (store_bb, join_bb, EDGE_FALLTHRU);
7819 if (dom_info_available_p (CDI_DOMINATORS))
7820 set_immediate_dominator (CDI_DOMINATORS, store_bb, bb);
7821 if (dump_enabled_p ())
7822 dump_printf_loc (MSG_NOTE, vect_location,
7823 "Create new block %d to sink mask stores.",
7824 store_bb->index);
7825 /* Create vector comparison with boolean result. */
7826 vectype = TREE_TYPE (mask);
7827 zero = build_zero_cst (vectype);
7828 stmt = gimple_build_cond (EQ_EXPR, mask, zero, NULL_TREE, NULL_TREE);
7829 gsi = gsi_last_bb (bb);
7830 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
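/* The true edge E (mask all zero) bypasses STORE_BB and goes straight to
   JOIN_BB; the false edge EFALSE enters STORE_BB, where the masked store
   will actually be performed.  */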
7831 /* Create new PHI node for vdef of the last masked store:
7832 .MEM_2 = VDEF <.MEM_1>
7833 will be converted to
7834 .MEM_3 = VDEF <.MEM_1>
7835 and new PHI node will be created in join bb
7836 .MEM_2 = PHI <.MEM_1, .MEM_3>
7838 vdef = gimple_vdef (last);
7839 new_vdef = make_ssa_name (gimple_vop (cfun), last);
7840 gimple_set_vdef (last, new_vdef);
7841 phi = create_phi_node (vdef, join_bb);
7842 add_phi_arg (phi, new_vdef, EDGE_SUCC (store_bb, 0), UNKNOWN_LOCATION);
7844 /* Put all masked stores with the same mask into STORE_BB if possible. */
7845 while (true)
7847 gimple_stmt_iterator gsi_from;
7848 gimple *stmt1 = NULL;
7850 /* Move masked store to STORE_BB. */
7851 last_store = last;
7852 gsi = gsi_for_stmt (last);
7853 gsi_from = gsi;
7854 /* Shift GSI to the previous stmt for further traversal. */
7855 gsi_prev (&gsi);
7856 gsi_to = gsi_start_bb (store_bb);
7857 gsi_move_before (&gsi_from, &gsi_to);
7859 /* Set GSI_TO to the start of the now non-empty STORE_BB. */
7859 gsi_to = gsi_start_bb (store_bb);
7860 if (dump_enabled_p ())
7862 dump_printf_loc (MSG_NOTE, vect_location,
7863 "Move stmt to created bb\n");
7864 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, last, 0);
7866 /* Move all stored value producers if possible. */
7867 while (!gsi_end_p (gsi))
7869 tree lhs;
7870 imm_use_iterator imm_iter;
7871 use_operand_p use_p;
7872 bool res;
7874 /* Skip debug statements. */
7875 if (is_gimple_debug (gsi_stmt (gsi)))
7877 gsi_prev (&gsi);
7878 continue;
7880 stmt1 = gsi_stmt (gsi);
7881 /* Do not consider statements writing to memory or having a
7882 volatile operand. */
7883 if (gimple_vdef (stmt1)
7884 || gimple_has_volatile_ops (stmt1))
7885 break;
7886 gsi_from = gsi;
7887 gsi_prev (&gsi);
7888 lhs = gimple_get_lhs (stmt1);
7889 if (!lhs)
7890 break;
7892 /* LHS of vectorized stmt must be SSA_NAME. */
7893 if (TREE_CODE (lhs) != SSA_NAME)
7894 break;
7896 if (!VECTOR_TYPE_P (TREE_TYPE (lhs)))
7898 /* Remove dead scalar statement. */
7899 if (has_zero_uses (lhs))
7901 gsi_remove (&gsi_from, true);
7902 continue;
7906 /* Check that LHS does not have uses outside of STORE_BB. */
7907 res = true;
7908 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
7910 gimple *use_stmt;
7911 use_stmt = USE_STMT (use_p);
7912 if (is_gimple_debug (use_stmt))
7913 continue;
7914 if (gimple_bb (use_stmt) != store_bb)
7916 res = false;
7917 break;
7920 if (!res)
7921 break;
7923 if (gimple_vuse (stmt1)
7924 && gimple_vuse (stmt1) != gimple_vuse (last_store))
7925 break;
7927 /* Can move STMT1 to STORE_BB. */
7928 if (dump_enabled_p ())
7930 dump_printf_loc (MSG_NOTE, vect_location,
7931 "Move stmt to created bb\n");
7932 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt1, 0);
7934 gsi_move_before (&gsi_from, &gsi_to);
7935 /* Shift GSI_TO for further insertion. */
7936 gsi_prev (&gsi_to);
7938 /* Put other masked stores with the same mask into STORE_BB. */
7939 if (worklist.is_empty ()
7940 || gimple_call_arg (worklist.last (), 2) != mask
7941 || worklist.last () != stmt1)
7942 break;
7943 last = worklist.pop ();
7945 add_phi_arg (phi, gimple_vuse (last_store), e, UNKNOWN_LOCATION);