gcc/tree-vect-loop.c
1 /* Loop Vectorization
2 Copyright (C) 2003-2018 Free Software Foundation, Inc.
3 Contributed by Dorit Naishlos <dorit@il.ibm.com> and
4 Ira Rosen <irar@il.ibm.com>
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "backend.h"
26 #include "target.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "gimple.h"
30 #include "cfghooks.h"
31 #include "tree-pass.h"
32 #include "ssa.h"
33 #include "optabs-tree.h"
34 #include "diagnostic-core.h"
35 #include "fold-const.h"
36 #include "stor-layout.h"
37 #include "cfganal.h"
38 #include "gimplify.h"
39 #include "gimple-iterator.h"
40 #include "gimplify-me.h"
41 #include "tree-ssa-loop-ivopts.h"
42 #include "tree-ssa-loop-manip.h"
43 #include "tree-ssa-loop-niter.h"
44 #include "tree-ssa-loop.h"
45 #include "cfgloop.h"
46 #include "params.h"
47 #include "tree-scalar-evolution.h"
48 #include "tree-vectorizer.h"
49 #include "gimple-fold.h"
50 #include "cgraph.h"
51 #include "tree-cfg.h"
52 #include "tree-if-conv.h"
53 #include "internal-fn.h"
54 #include "tree-vector-builder.h"
55 #include "vec-perm-indices.h"
56 #include "tree-eh.h"
58 /* Loop Vectorization Pass.
60 This pass tries to vectorize loops.
62 For example, the vectorizer transforms the following simple loop:
64 short a[N]; short b[N]; short c[N]; int i;
66 for (i=0; i<N; i++){
67 a[i] = b[i] + c[i];
 70 as if it were manually vectorized by rewriting the source code into:
72 typedef int __attribute__((mode(V8HI))) v8hi;
73 short a[N]; short b[N]; short c[N]; int i;
74 v8hi *pa = (v8hi*)a, *pb = (v8hi*)b, *pc = (v8hi*)c;
75 v8hi va, vb, vc;
77 for (i=0; i<N/8; i++){
78 vb = pb[i];
79 vc = pc[i];
80 va = vb + vc;
81 pa[i] = va;
84 The main entry to this pass is vectorize_loops(), in which
85 the vectorizer applies a set of analyses on a given set of loops,
86 followed by the actual vectorization transformation for the loops that
87 had successfully passed the analysis phase.
88 Throughout this pass we make a distinction between two types of
89 data: scalars (which are represented by SSA_NAMES), and memory references
90 ("data-refs"). These two types of data require different handling both
91 during analysis and transformation. The types of data-refs that the
 92 vectorizer currently supports are ARRAY_REFS whose base is an array DECL
93 (not a pointer), and INDIRECT_REFS through pointers; both array and pointer
94 accesses are required to have a simple (consecutive) access pattern.
96 Analysis phase:
97 ===============
98 The driver for the analysis phase is vect_analyze_loop().
99 It applies a set of analyses, some of which rely on the scalar evolution
100 analyzer (scev) developed by Sebastian Pop.
102 During the analysis phase the vectorizer records some information
103 per stmt in a "stmt_vec_info" struct which is attached to each stmt in the
104 loop, as well as general information about the loop as a whole, which is
105 recorded in a "loop_vec_info" struct attached to each loop.
107 Transformation phase:
108 =====================
109 The loop transformation phase scans all the stmts in the loop, and
110 creates a vector stmt (or a sequence of stmts) for each scalar stmt S in
111 the loop that needs to be vectorized. It inserts the vector code sequence
112 just before the scalar stmt S, and records a pointer to the vector code
113 in STMT_VINFO_VEC_STMT (stmt_info) (stmt_info is the stmt_vec_info struct
114 attached to S). This pointer will be used for the vectorization of following
115 stmts which use the def of stmt S. Stmt S is removed if it writes to memory;
116 otherwise, we rely on dead code elimination for removing it.
118 For example, say stmt S1 was vectorized into stmt VS1:
120 VS1: vb = px[i];
121 S1: b = x[i]; STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
122 S2: a = b;
124 To vectorize stmt S2, the vectorizer first finds the stmt that defines
125 the operand 'b' (S1), and gets the relevant vector def 'vb' from the
126 vector stmt VS1 pointed to by STMT_VINFO_VEC_STMT (stmt_info (S1)). The
127 resulting sequence would be:
129 VS1: vb = px[i];
130 S1: b = x[i]; STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
131 VS2: va = vb;
132 S2: a = b; STMT_VINFO_VEC_STMT (stmt_info (S2)) = VS2
 134 Operands that are not SSA_NAMEs are data-refs that appear in
135 load/store operations (like 'x[i]' in S1), and are handled differently.
137 Target modeling:
138 =================
139 Currently the only target specific information that is used is the
140 size of the vector (in bytes) - "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD".
 141 Targets that can support different sizes of vectors will, for now, need
142 to specify one value for "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD". More
143 flexibility will be added in the future.
 145 Since we only vectorize operations whose vector form can be
146 expressed using existing tree codes, to verify that an operation is
147 supported, the vectorizer checks the relevant optab at the relevant
 148 machine_mode (e.g., optab_handler (add_optab, V8HImode)). If
149 the value found is CODE_FOR_nothing, then there's no target support, and
150 we can't vectorize the stmt.
152 For additional information on this project see:
153 http://gcc.gnu.org/projects/tree-ssa/vectorization.html
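/* For illustration only: a kernel of the shape discussed above,

     #define N 256
     short a[N], b[N], c[N];

     void
     add_arrays (void)
     {
       for (int i = 0; i < N; i++)
         a[i] = b[i] + c[i];
     }

   compiled with -O3 -fdump-tree-vect-details (or -fopt-info-vec) shows the
   analysis and transformation steps implemented in this file; the function
   name above is arbitrary.  */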
156 static void vect_estimate_min_profitable_iters (loop_vec_info, int *, int *);
158 /* Subroutine of vect_determine_vf_for_stmt that handles only one
159 statement. VECTYPE_MAYBE_SET_P is true if STMT_VINFO_VECTYPE
160 may already be set for general statements (not just data refs). */
162 static bool
163 vect_determine_vf_for_stmt_1 (stmt_vec_info stmt_info,
164 bool vectype_maybe_set_p,
165 poly_uint64 *vf,
166 vec<stmt_vec_info > *mask_producers)
168 gimple *stmt = stmt_info->stmt;
170 if ((!STMT_VINFO_RELEVANT_P (stmt_info)
171 && !STMT_VINFO_LIVE_P (stmt_info))
172 || gimple_clobber_p (stmt))
174 if (dump_enabled_p ())
175 dump_printf_loc (MSG_NOTE, vect_location, "skip.\n");
176 return true;
179 tree stmt_vectype, nunits_vectype;
180 if (!vect_get_vector_types_for_stmt (stmt_info, &stmt_vectype,
181 &nunits_vectype))
182 return false;
184 if (stmt_vectype)
186 if (STMT_VINFO_VECTYPE (stmt_info))
187 /* The only case when a vectype had been already set is for stmts
188 that contain a data ref, or for "pattern-stmts" (stmts generated
189 by the vectorizer to represent/replace a certain idiom). */
190 gcc_assert ((STMT_VINFO_DATA_REF (stmt_info)
191 || vectype_maybe_set_p)
192 && STMT_VINFO_VECTYPE (stmt_info) == stmt_vectype);
193 else if (stmt_vectype == boolean_type_node)
194 mask_producers->safe_push (stmt_info);
195 else
196 STMT_VINFO_VECTYPE (stmt_info) = stmt_vectype;
199 if (nunits_vectype)
200 vect_update_max_nunits (vf, nunits_vectype);
202 return true;
205 /* Subroutine of vect_determine_vectorization_factor. Set the vector
206 types of STMT_INFO and all attached pattern statements and update
207 the vectorization factor VF accordingly. If some of the statements
208 produce a mask result whose vector type can only be calculated later,
209 add them to MASK_PRODUCERS. Return true on success or false if
210 something prevented vectorization. */
212 static bool
213 vect_determine_vf_for_stmt (stmt_vec_info stmt_info, poly_uint64 *vf,
214 vec<stmt_vec_info > *mask_producers)
216 vec_info *vinfo = stmt_info->vinfo;
217 if (dump_enabled_p ())
219 dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: ");
220 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0);
222 if (!vect_determine_vf_for_stmt_1 (stmt_info, false, vf, mask_producers))
223 return false;
225 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
226 && STMT_VINFO_RELATED_STMT (stmt_info))
228 gimple *pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
229 stmt_info = STMT_VINFO_RELATED_STMT (stmt_info);
231 /* If a pattern statement has def stmts, analyze them too. */
232 for (gimple_stmt_iterator si = gsi_start (pattern_def_seq);
233 !gsi_end_p (si); gsi_next (&si))
235 stmt_vec_info def_stmt_info = vinfo->lookup_stmt (gsi_stmt (si));
236 if (dump_enabled_p ())
238 dump_printf_loc (MSG_NOTE, vect_location,
239 "==> examining pattern def stmt: ");
240 dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
241 def_stmt_info->stmt, 0);
243 if (!vect_determine_vf_for_stmt_1 (def_stmt_info, true,
244 vf, mask_producers))
245 return false;
248 if (dump_enabled_p ())
250 dump_printf_loc (MSG_NOTE, vect_location,
251 "==> examining pattern statement: ");
252 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0);
254 if (!vect_determine_vf_for_stmt_1 (stmt_info, true, vf, mask_producers))
255 return false;
258 return true;
261 /* Function vect_determine_vectorization_factor
263 Determine the vectorization factor (VF). VF is the number of data elements
264 that are operated upon in parallel in a single iteration of the vectorized
 265 loop. For example, when vectorizing a loop that operates on 4-byte elements,
 266 on a target with a vector size (VS) of 16 bytes, the VF is set to 4, since 4
267 elements can fit in a single vector register.
269 We currently support vectorization of loops in which all types operated upon
270 are of the same size. Therefore this function currently sets VF according to
271 the size of the types operated upon, and fails if there are multiple sizes
272 in the loop.
274 VF is also the factor by which the loop iterations are strip-mined, e.g.:
275 original loop:
276 for (i=0; i<N; i++){
277 a[i] = b[i] + c[i];
280 vectorized loop:
281 for (i=0; i<N; i+=VF){
282 a[i:VF] = b[i:VF] + c[i:VF];
286 static bool
287 vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
289 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
290 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
291 unsigned nbbs = loop->num_nodes;
292 poly_uint64 vectorization_factor = 1;
293 tree scalar_type = NULL_TREE;
294 gphi *phi;
295 tree vectype;
296 stmt_vec_info stmt_info;
297 unsigned i;
298 auto_vec<stmt_vec_info> mask_producers;
300 DUMP_VECT_SCOPE ("vect_determine_vectorization_factor");
302 for (i = 0; i < nbbs; i++)
304 basic_block bb = bbs[i];
306 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
307 gsi_next (&si))
309 phi = si.phi ();
310 stmt_info = loop_vinfo->lookup_stmt (phi);
311 if (dump_enabled_p ())
313 dump_printf_loc (MSG_NOTE, vect_location, "==> examining phi: ");
314 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
317 gcc_assert (stmt_info);
319 if (STMT_VINFO_RELEVANT_P (stmt_info)
320 || STMT_VINFO_LIVE_P (stmt_info))
322 gcc_assert (!STMT_VINFO_VECTYPE (stmt_info));
323 scalar_type = TREE_TYPE (PHI_RESULT (phi));
325 if (dump_enabled_p ())
327 dump_printf_loc (MSG_NOTE, vect_location,
328 "get vectype for scalar type: ");
329 dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
330 dump_printf (MSG_NOTE, "\n");
333 vectype = get_vectype_for_scalar_type (scalar_type);
334 if (!vectype)
336 if (dump_enabled_p ())
338 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
339 "not vectorized: unsupported "
340 "data-type ");
341 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
342 scalar_type);
343 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
345 return false;
347 STMT_VINFO_VECTYPE (stmt_info) = vectype;
349 if (dump_enabled_p ())
351 dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
352 dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
353 dump_printf (MSG_NOTE, "\n");
356 if (dump_enabled_p ())
358 dump_printf_loc (MSG_NOTE, vect_location, "nunits = ");
359 dump_dec (MSG_NOTE, TYPE_VECTOR_SUBPARTS (vectype));
360 dump_printf (MSG_NOTE, "\n");
363 vect_update_max_nunits (&vectorization_factor, vectype);
367 for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
368 gsi_next (&si))
370 stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
371 if (!vect_determine_vf_for_stmt (stmt_info, &vectorization_factor,
372 &mask_producers))
373 return false;
377 /* TODO: Analyze cost. Decide if worth while to vectorize. */
378 if (dump_enabled_p ())
380 dump_printf_loc (MSG_NOTE, vect_location, "vectorization factor = ");
381 dump_dec (MSG_NOTE, vectorization_factor);
382 dump_printf (MSG_NOTE, "\n");
385 if (known_le (vectorization_factor, 1U))
387 if (dump_enabled_p ())
388 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
389 "not vectorized: unsupported data-type\n");
390 return false;
392 LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;
394 for (i = 0; i < mask_producers.length (); i++)
396 stmt_info = mask_producers[i];
397 tree mask_type = vect_get_mask_type_for_stmt (stmt_info);
398 if (!mask_type)
399 return false;
400 STMT_VINFO_VECTYPE (stmt_info) = mask_type;
403 return true;
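/* As an illustration of the MASK_PRODUCERS handling above: for a statement
   such as

     _1 = a[i] < b[i];

   the scalar result is a boolean, so no meaningful vector type can be
   derived from the scalar type alone.  Such statements are therefore queued
   in MASK_PRODUCERS during the walk above and only receive their
   STMT_VINFO_VECTYPE from vect_get_mask_type_for_stmt once the
   vectorization factor has been settled.  */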
407 /* Function vect_is_simple_iv_evolution.
 409 FORNOW: A simple evolution of an induction variable in the loop is
410 considered a polynomial evolution. */
412 static bool
413 vect_is_simple_iv_evolution (unsigned loop_nb, tree access_fn, tree * init,
414 tree * step)
416 tree init_expr;
417 tree step_expr;
418 tree evolution_part = evolution_part_in_loop_num (access_fn, loop_nb);
419 basic_block bb;
421 /* When there is no evolution in this loop, the evolution function
422 is not "simple". */
423 if (evolution_part == NULL_TREE)
424 return false;
426 /* When the evolution is a polynomial of degree >= 2
427 the evolution function is not "simple". */
428 if (tree_is_chrec (evolution_part))
429 return false;
431 step_expr = evolution_part;
432 init_expr = unshare_expr (initial_condition_in_loop_num (access_fn, loop_nb));
434 if (dump_enabled_p ())
436 dump_printf_loc (MSG_NOTE, vect_location, "step: ");
437 dump_generic_expr (MSG_NOTE, TDF_SLIM, step_expr);
438 dump_printf (MSG_NOTE, ", init: ");
439 dump_generic_expr (MSG_NOTE, TDF_SLIM, init_expr);
440 dump_printf (MSG_NOTE, "\n");
443 *init = init_expr;
444 *step = step_expr;
446 if (TREE_CODE (step_expr) != INTEGER_CST
447 && (TREE_CODE (step_expr) != SSA_NAME
448 || ((bb = gimple_bb (SSA_NAME_DEF_STMT (step_expr)))
449 && flow_bb_inside_loop_p (get_loop (cfun, loop_nb), bb))
450 || (!INTEGRAL_TYPE_P (TREE_TYPE (step_expr))
451 && (!SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr))
452 || !flag_associative_math)))
453 && (TREE_CODE (step_expr) != REAL_CST
454 || !flag_associative_math))
456 if (dump_enabled_p ())
457 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
458 "step unknown.\n");
459 return false;
462 return true;
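/* Two illustrative cases for the test above, in SCEV's {base, +, step}_n
   notation.  A pointer bumped by a loop-invariant amount,

     for (i = 0; i < n; i++)
       p = p + 4;

   has access function {p_0, +, 4}_1; the evolution part is the constant 4,
   so the evolution is "simple".  An accumulation of the IV itself,

     for (i = 0; i < n; i++)
       s = s + i;

   has access function {s_0, +, {0, +, 1}_1}_1; the evolution part is itself
   a CHREC (degree >= 2), so tree_is_chrec is true and the function
   rejects it.  */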
465 /* Return true if PHI, described by STMT_INFO, is the inner PHI in
466 what we are assuming is a double reduction. For example, given
467 a structure like this:
469 outer1:
470 x_1 = PHI <x_4(outer2), ...>;
473 inner:
474 x_2 = PHI <x_1(outer1), ...>;
476 x_3 = ...;
479 outer2:
480 x_4 = PHI <x_3(inner)>;
483 outer loop analysis would treat x_1 as a double reduction phi and
484 this function would then return true for x_2. */
486 static bool
487 vect_inner_phi_in_double_reduction_p (stmt_vec_info stmt_info, gphi *phi)
489 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
490 use_operand_p use_p;
491 ssa_op_iter op_iter;
492 FOR_EACH_PHI_ARG (use_p, phi, op_iter, SSA_OP_USE)
493 if (stmt_vec_info def_info = loop_vinfo->lookup_def (USE_FROM_PTR (use_p)))
494 if (STMT_VINFO_DEF_TYPE (def_info) == vect_double_reduction_def)
495 return true;
496 return false;
499 /* Function vect_analyze_scalar_cycles_1.
501 Examine the cross iteration def-use cycles of scalar variables
502 in LOOP. LOOP_VINFO represents the loop that is now being
503 considered for vectorization (can be LOOP, or an outer-loop
504 enclosing LOOP). */
506 static void
507 vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
509 basic_block bb = loop->header;
510 tree init, step;
511 auto_vec<stmt_vec_info, 64> worklist;
512 gphi_iterator gsi;
513 bool double_reduc;
515 DUMP_VECT_SCOPE ("vect_analyze_scalar_cycles");
517 /* First - identify all inductions. Reduction detection assumes that all the
518 inductions have been identified, therefore, this order must not be
519 changed. */
520 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
522 gphi *phi = gsi.phi ();
523 tree access_fn = NULL;
524 tree def = PHI_RESULT (phi);
525 stmt_vec_info stmt_vinfo = loop_vinfo->lookup_stmt (phi);
527 if (dump_enabled_p ())
529 dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: ");
530 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
533 /* Skip virtual phi's. The data dependences that are associated with
534 virtual defs/uses (i.e., memory accesses) are analyzed elsewhere. */
535 if (virtual_operand_p (def))
536 continue;
538 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_unknown_def_type;
540 /* Analyze the evolution function. */
541 access_fn = analyze_scalar_evolution (loop, def);
542 if (access_fn)
544 STRIP_NOPS (access_fn);
545 if (dump_enabled_p ())
547 dump_printf_loc (MSG_NOTE, vect_location,
548 "Access function of PHI: ");
549 dump_generic_expr (MSG_NOTE, TDF_SLIM, access_fn);
550 dump_printf (MSG_NOTE, "\n");
552 STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo)
553 = initial_condition_in_loop_num (access_fn, loop->num);
554 STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo)
555 = evolution_part_in_loop_num (access_fn, loop->num);
558 if (!access_fn
559 || vect_inner_phi_in_double_reduction_p (stmt_vinfo, phi)
560 || !vect_is_simple_iv_evolution (loop->num, access_fn, &init, &step)
561 || (LOOP_VINFO_LOOP (loop_vinfo) != loop
562 && TREE_CODE (step) != INTEGER_CST))
564 worklist.safe_push (stmt_vinfo);
565 continue;
568 gcc_assert (STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo)
569 != NULL_TREE);
570 gcc_assert (STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo) != NULL_TREE);
572 if (dump_enabled_p ())
573 dump_printf_loc (MSG_NOTE, vect_location, "Detected induction.\n");
574 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_induction_def;
578 /* Second - identify all reductions and nested cycles. */
579 while (worklist.length () > 0)
581 stmt_vec_info stmt_vinfo = worklist.pop ();
582 gphi *phi = as_a <gphi *> (stmt_vinfo->stmt);
583 tree def = PHI_RESULT (phi);
585 if (dump_enabled_p ())
587 dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: ");
588 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
591 gcc_assert (!virtual_operand_p (def)
592 && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_unknown_def_type);
594 stmt_vec_info reduc_stmt_info
595 = vect_force_simple_reduction (loop_vinfo, stmt_vinfo,
596 &double_reduc, false);
597 if (reduc_stmt_info)
599 if (double_reduc)
601 if (dump_enabled_p ())
602 dump_printf_loc (MSG_NOTE, vect_location,
603 "Detected double reduction.\n");
605 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_double_reduction_def;
606 STMT_VINFO_DEF_TYPE (reduc_stmt_info)
607 = vect_double_reduction_def;
609 else
611 if (loop != LOOP_VINFO_LOOP (loop_vinfo))
613 if (dump_enabled_p ())
614 dump_printf_loc (MSG_NOTE, vect_location,
615 "Detected vectorizable nested cycle.\n");
617 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_nested_cycle;
618 STMT_VINFO_DEF_TYPE (reduc_stmt_info) = vect_nested_cycle;
620 else
622 if (dump_enabled_p ())
623 dump_printf_loc (MSG_NOTE, vect_location,
624 "Detected reduction.\n");
626 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_reduction_def;
627 STMT_VINFO_DEF_TYPE (reduc_stmt_info) = vect_reduction_def;
628 /* Store the reduction cycles for possible vectorization in
629 loop-aware SLP if it was not detected as reduction
630 chain. */
631 if (! REDUC_GROUP_FIRST_ELEMENT (reduc_stmt_info))
632 LOOP_VINFO_REDUCTIONS (loop_vinfo).safe_push
633 (reduc_stmt_info);
637 else
638 if (dump_enabled_p ())
639 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
640 "Unknown def-use cycle pattern.\n");
645 /* Function vect_analyze_scalar_cycles.
647 Examine the cross iteration def-use cycles of scalar variables, by
648 analyzing the loop-header PHIs of scalar variables. Classify each
649 cycle as one of the following: invariant, induction, reduction, unknown.
 650 We do that for the loop represented by LOOP_VINFO, and also for its
 651 inner-loop, if it exists.
652 Examples for scalar cycles:
654 Example1: reduction:
656 loop1:
657 for (i=0; i<N; i++)
658 sum += a[i];
660 Example2: induction:
662 loop2:
663 for (i=0; i<N; i++)
664 a[i] = i; */
666 static void
667 vect_analyze_scalar_cycles (loop_vec_info loop_vinfo)
669 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
671 vect_analyze_scalar_cycles_1 (loop_vinfo, loop);
673 /* When vectorizing an outer-loop, the inner-loop is executed sequentially.
 674 Reductions in such an inner-loop therefore have different properties than
675 the reductions in the nest that gets vectorized:
676 1. When vectorized, they are executed in the same order as in the original
677 scalar loop, so we can't change the order of computation when
678 vectorizing them.
679 2. FIXME: Inner-loop reductions can be used in the inner-loop, so the
680 current checks are too strict. */
682 if (loop->inner)
683 vect_analyze_scalar_cycles_1 (loop_vinfo, loop->inner);
686 /* Transfer group and reduction information from STMT_INFO to its
687 pattern stmt. */
689 static void
690 vect_fixup_reduc_chain (stmt_vec_info stmt_info)
692 stmt_vec_info firstp = STMT_VINFO_RELATED_STMT (stmt_info);
693 stmt_vec_info stmtp;
694 gcc_assert (!REDUC_GROUP_FIRST_ELEMENT (firstp)
695 && REDUC_GROUP_FIRST_ELEMENT (stmt_info));
696 REDUC_GROUP_SIZE (firstp) = REDUC_GROUP_SIZE (stmt_info);
699 stmtp = STMT_VINFO_RELATED_STMT (stmt_info);
700 REDUC_GROUP_FIRST_ELEMENT (stmtp) = firstp;
701 stmt_info = REDUC_GROUP_NEXT_ELEMENT (stmt_info);
702 if (stmt_info)
703 REDUC_GROUP_NEXT_ELEMENT (stmtp)
704 = STMT_VINFO_RELATED_STMT (stmt_info);
706 while (stmt_info);
707 STMT_VINFO_DEF_TYPE (stmtp) = vect_reduction_def;
710 /* Fixup scalar cycles that now have their stmts detected as patterns. */
712 static void
713 vect_fixup_scalar_cycles_with_patterns (loop_vec_info loop_vinfo)
715 stmt_vec_info first;
716 unsigned i;
718 FOR_EACH_VEC_ELT (LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo), i, first)
719 if (STMT_VINFO_IN_PATTERN_P (first))
721 stmt_vec_info next = REDUC_GROUP_NEXT_ELEMENT (first);
722 while (next)
724 if (! STMT_VINFO_IN_PATTERN_P (next))
725 break;
726 next = REDUC_GROUP_NEXT_ELEMENT (next);
 728 /* If not all stmts in the chain are patterns, try to handle
729 the chain without patterns. */
730 if (! next)
732 vect_fixup_reduc_chain (first);
733 LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo)[i]
734 = STMT_VINFO_RELATED_STMT (first);
739 /* Function vect_get_loop_niters.
 741 Determine how many times the loop is executed and place it
742 in NUMBER_OF_ITERATIONS. Place the number of latch iterations
743 in NUMBER_OF_ITERATIONSM1. Place the condition under which the
744 niter information holds in ASSUMPTIONS.
746 Return the loop exit condition. */
749 static gcond *
750 vect_get_loop_niters (struct loop *loop, tree *assumptions,
751 tree *number_of_iterations, tree *number_of_iterationsm1)
753 edge exit = single_exit (loop);
754 struct tree_niter_desc niter_desc;
755 tree niter_assumptions, niter, may_be_zero;
756 gcond *cond = get_loop_exit_condition (loop);
758 *assumptions = boolean_true_node;
759 *number_of_iterationsm1 = chrec_dont_know;
760 *number_of_iterations = chrec_dont_know;
761 DUMP_VECT_SCOPE ("get_loop_niters");
763 if (!exit)
764 return cond;
766 niter = chrec_dont_know;
767 may_be_zero = NULL_TREE;
768 niter_assumptions = boolean_true_node;
769 if (!number_of_iterations_exit_assumptions (loop, exit, &niter_desc, NULL)
770 || chrec_contains_undetermined (niter_desc.niter))
771 return cond;
773 niter_assumptions = niter_desc.assumptions;
774 may_be_zero = niter_desc.may_be_zero;
775 niter = niter_desc.niter;
777 if (may_be_zero && integer_zerop (may_be_zero))
778 may_be_zero = NULL_TREE;
780 if (may_be_zero)
782 if (COMPARISON_CLASS_P (may_be_zero))
 784 /* Try to combine may_be_zero with assumptions; this can simplify
785 computation of niter expression. */
786 if (niter_assumptions && !integer_nonzerop (niter_assumptions))
787 niter_assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
788 niter_assumptions,
789 fold_build1 (TRUTH_NOT_EXPR,
790 boolean_type_node,
791 may_be_zero));
792 else
793 niter = fold_build3 (COND_EXPR, TREE_TYPE (niter), may_be_zero,
794 build_int_cst (TREE_TYPE (niter), 0),
795 rewrite_to_non_trapping_overflow (niter));
797 may_be_zero = NULL_TREE;
799 else if (integer_nonzerop (may_be_zero))
801 *number_of_iterationsm1 = build_int_cst (TREE_TYPE (niter), 0);
802 *number_of_iterations = build_int_cst (TREE_TYPE (niter), 1);
803 return cond;
805 else
806 return cond;
809 *assumptions = niter_assumptions;
810 *number_of_iterationsm1 = niter;
812 /* We want the number of loop header executions which is the number
813 of latch executions plus one.
814 ??? For UINT_MAX latch executions this number overflows to zero
815 for loops like do { n++; } while (n != 0); */
816 if (niter && !chrec_contains_undetermined (niter))
817 niter = fold_build2 (PLUS_EXPR, TREE_TYPE (niter), unshare_expr (niter),
818 build_int_cst (TREE_TYPE (niter), 1));
819 *number_of_iterations = niter;
821 return cond;
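/* A worked example of the bookkeeping above: for a counted loop of the form

     for (i = 0; i < n; i++)
       ...;

   niter analysis reports the number of latch executions (n - 1), possibly
   guarded by a MAY_BE_ZERO condition covering n == 0.  The code above then
   stores n - 1 in *NUMBER_OF_ITERATIONSM1 and n - 1 + 1, i.e. the number of
   header executions, in *NUMBER_OF_ITERATIONS, after folding MAY_BE_ZERO
   either into *ASSUMPTIONS or into the niter expression.  */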
824 /* Function bb_in_loop_p
826 Used as predicate for dfs order traversal of the loop bbs. */
828 static bool
829 bb_in_loop_p (const_basic_block bb, const void *data)
831 const struct loop *const loop = (const struct loop *)data;
832 if (flow_bb_inside_loop_p (loop, bb))
833 return true;
834 return false;
838 /* Create and initialize a new loop_vec_info struct for LOOP_IN, as well as
839 stmt_vec_info structs for all the stmts in LOOP_IN. */
841 _loop_vec_info::_loop_vec_info (struct loop *loop_in, vec_info_shared *shared)
842 : vec_info (vec_info::loop, init_cost (loop_in), shared),
843 loop (loop_in),
844 bbs (XCNEWVEC (basic_block, loop->num_nodes)),
845 num_itersm1 (NULL_TREE),
846 num_iters (NULL_TREE),
847 num_iters_unchanged (NULL_TREE),
848 num_iters_assumptions (NULL_TREE),
849 th (0),
850 versioning_threshold (0),
851 vectorization_factor (0),
852 max_vectorization_factor (0),
853 mask_skip_niters (NULL_TREE),
854 mask_compare_type (NULL_TREE),
855 unaligned_dr (NULL),
856 peeling_for_alignment (0),
857 ptr_mask (0),
858 ivexpr_map (NULL),
859 slp_unrolling_factor (1),
860 single_scalar_iteration_cost (0),
861 vectorizable (false),
862 can_fully_mask_p (true),
863 fully_masked_p (false),
864 peeling_for_gaps (false),
865 peeling_for_niter (false),
866 operands_swapped (false),
867 no_data_dependencies (false),
868 has_mask_store (false),
869 scalar_loop (NULL),
870 orig_loop_info (NULL)
872 /* CHECKME: We want to visit all BBs before their successors (except for
873 latch blocks, for which this assertion wouldn't hold). In the simple
 874 case of the loop forms we allow, a dfs order of the BBs would be the same
875 as reversed postorder traversal, so we are safe. */
877 unsigned int nbbs = dfs_enumerate_from (loop->header, 0, bb_in_loop_p,
878 bbs, loop->num_nodes, loop);
879 gcc_assert (nbbs == loop->num_nodes);
881 for (unsigned int i = 0; i < nbbs; i++)
883 basic_block bb = bbs[i];
884 gimple_stmt_iterator si;
886 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
888 gimple *phi = gsi_stmt (si);
889 gimple_set_uid (phi, 0);
890 add_stmt (phi);
893 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
895 gimple *stmt = gsi_stmt (si);
896 gimple_set_uid (stmt, 0);
897 add_stmt (stmt);
902 /* Free all levels of MASKS. */
904 void
905 release_vec_loop_masks (vec_loop_masks *masks)
907 rgroup_masks *rgm;
908 unsigned int i;
909 FOR_EACH_VEC_ELT (*masks, i, rgm)
910 rgm->masks.release ();
911 masks->release ();
914 /* Free all memory used by the _loop_vec_info, as well as all the
915 stmt_vec_info structs of all the stmts in the loop. */
917 _loop_vec_info::~_loop_vec_info ()
919 int nbbs;
920 gimple_stmt_iterator si;
921 int j;
923 nbbs = loop->num_nodes;
924 for (j = 0; j < nbbs; j++)
926 basic_block bb = bbs[j];
927 for (si = gsi_start_bb (bb); !gsi_end_p (si); )
929 gimple *stmt = gsi_stmt (si);
931 /* We may have broken canonical form by moving a constant
932 into RHS1 of a commutative op. Fix such occurrences. */
933 if (operands_swapped && is_gimple_assign (stmt))
935 enum tree_code code = gimple_assign_rhs_code (stmt);
937 if ((code == PLUS_EXPR
938 || code == POINTER_PLUS_EXPR
939 || code == MULT_EXPR)
940 && CONSTANT_CLASS_P (gimple_assign_rhs1 (stmt)))
941 swap_ssa_operands (stmt,
942 gimple_assign_rhs1_ptr (stmt),
943 gimple_assign_rhs2_ptr (stmt));
944 else if (code == COND_EXPR
945 && CONSTANT_CLASS_P (gimple_assign_rhs2 (stmt)))
947 tree cond_expr = gimple_assign_rhs1 (stmt);
948 enum tree_code cond_code = TREE_CODE (cond_expr);
950 if (TREE_CODE_CLASS (cond_code) == tcc_comparison)
952 bool honor_nans = HONOR_NANS (TREE_OPERAND (cond_expr,
953 0));
954 cond_code = invert_tree_comparison (cond_code,
955 honor_nans);
956 if (cond_code != ERROR_MARK)
958 TREE_SET_CODE (cond_expr, cond_code);
959 swap_ssa_operands (stmt,
960 gimple_assign_rhs2_ptr (stmt),
961 gimple_assign_rhs3_ptr (stmt));
966 gsi_next (&si);
970 free (bbs);
972 release_vec_loop_masks (&masks);
973 delete ivexpr_map;
975 loop->aux = NULL;
978 /* Return an invariant or register for EXPR and emit necessary
979 computations in the LOOP_VINFO loop preheader. */
981 tree
982 cse_and_gimplify_to_preheader (loop_vec_info loop_vinfo, tree expr)
984 if (is_gimple_reg (expr)
985 || is_gimple_min_invariant (expr))
986 return expr;
988 if (! loop_vinfo->ivexpr_map)
989 loop_vinfo->ivexpr_map = new hash_map<tree_operand_hash, tree>;
990 tree &cached = loop_vinfo->ivexpr_map->get_or_insert (expr);
991 if (! cached)
993 gimple_seq stmts = NULL;
994 cached = force_gimple_operand (unshare_expr (expr),
995 &stmts, true, NULL_TREE);
996 if (stmts)
998 edge e = loop_preheader_edge (LOOP_VINFO_LOOP (loop_vinfo));
999 gsi_insert_seq_on_edge_immediate (e, stmts);
1002 return cached;
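/* For example (SSA names invented): if several callers need the invariant
   expression n_10 * 4, the first call gimplifies it on the preheader edge as

     _25 = n_10 * 4;

   and caches _25 in IVEXPR_MAP, so later calls with an operand-equal
   expression reuse the same SSA name instead of emitting the computation
   again.  */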
1005 /* Return true if we can use CMP_TYPE as the comparison type to produce
1006 all masks required to mask LOOP_VINFO. */
1008 static bool
1009 can_produce_all_loop_masks_p (loop_vec_info loop_vinfo, tree cmp_type)
1011 rgroup_masks *rgm;
1012 unsigned int i;
1013 FOR_EACH_VEC_ELT (LOOP_VINFO_MASKS (loop_vinfo), i, rgm)
1014 if (rgm->mask_type != NULL_TREE
1015 && !direct_internal_fn_supported_p (IFN_WHILE_ULT,
1016 cmp_type, rgm->mask_type,
1017 OPTIMIZE_FOR_SPEED))
1018 return false;
1019 return true;
1022 /* Calculate the maximum number of scalars per iteration for every
1023 rgroup in LOOP_VINFO. */
1025 static unsigned int
1026 vect_get_max_nscalars_per_iter (loop_vec_info loop_vinfo)
1028 unsigned int res = 1;
1029 unsigned int i;
1030 rgroup_masks *rgm;
1031 FOR_EACH_VEC_ELT (LOOP_VINFO_MASKS (loop_vinfo), i, rgm)
1032 res = MAX (res, rgm->max_nscalars_per_iter);
1033 return res;
1036 /* Each statement in LOOP_VINFO can be masked where necessary. Check
1037 whether we can actually generate the masks required. Return true if so,
1038 storing the type of the scalar IV in LOOP_VINFO_MASK_COMPARE_TYPE. */
1040 static bool
1041 vect_verify_full_masking (loop_vec_info loop_vinfo)
1043 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1044 unsigned int min_ni_width;
1046 /* Use a normal loop if there are no statements that need masking.
1047 This only happens in rare degenerate cases: it means that the loop
1048 has no loads, no stores, and no live-out values. */
1049 if (LOOP_VINFO_MASKS (loop_vinfo).is_empty ())
1050 return false;
1052 /* Get the maximum number of iterations that is representable
1053 in the counter type. */
1054 tree ni_type = TREE_TYPE (LOOP_VINFO_NITERSM1 (loop_vinfo));
1055 widest_int max_ni = wi::to_widest (TYPE_MAX_VALUE (ni_type)) + 1;
1057 /* Get a more refined estimate for the number of iterations. */
1058 widest_int max_back_edges;
1059 if (max_loop_iterations (loop, &max_back_edges))
1060 max_ni = wi::smin (max_ni, max_back_edges + 1);
1062 /* Account for rgroup masks, in which each bit is replicated N times. */
1063 max_ni *= vect_get_max_nscalars_per_iter (loop_vinfo);
1065 /* Work out how many bits we need to represent the limit. */
1066 min_ni_width = wi::min_precision (max_ni, UNSIGNED);
1068 /* Find a scalar mode for which WHILE_ULT is supported. */
1069 opt_scalar_int_mode cmp_mode_iter;
1070 tree cmp_type = NULL_TREE;
1071 FOR_EACH_MODE_IN_CLASS (cmp_mode_iter, MODE_INT)
1073 unsigned int cmp_bits = GET_MODE_BITSIZE (cmp_mode_iter.require ());
1074 if (cmp_bits >= min_ni_width
1075 && targetm.scalar_mode_supported_p (cmp_mode_iter.require ()))
1077 tree this_type = build_nonstandard_integer_type (cmp_bits, true);
1078 if (this_type
1079 && can_produce_all_loop_masks_p (loop_vinfo, this_type))
1081 /* Although we could stop as soon as we find a valid mode,
1082 it's often better to continue until we hit Pmode, since the
1083 operands to the WHILE are more likely to be reusable in
1084 address calculations. */
1085 cmp_type = this_type;
1086 if (cmp_bits >= GET_MODE_BITSIZE (Pmode))
1087 break;
1092 if (!cmp_type)
1093 return false;
1095 LOOP_VINFO_MASK_COMPARE_TYPE (loop_vinfo) = cmp_type;
1096 return true;
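/* Worked example of the width computation above, with invented numbers: if
   the type of NITERSM1 only bounds the iteration count by 2^32 but
   max_loop_iterations proves at most 1000 latch executions, MAX_NI becomes
   1001; scaling by an rgroup with 2 scalars per iteration gives 2002, which
   needs 11 bits.  Any integer mode of at least 11 bits whose WHILE_ULT
   variant covers all recorded mask types is then acceptable, and the loop
   keeps widening the choice up to Pmode so that the comparison operands are
   more likely to be reusable in address calculations.  */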
1099 /* Calculate the cost of one scalar iteration of the loop. */
1100 static void
1101 vect_compute_single_scalar_iteration_cost (loop_vec_info loop_vinfo)
1103 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1104 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
1105 int nbbs = loop->num_nodes, factor;
1106 int innerloop_iters, i;
1108 /* Gather costs for statements in the scalar loop. */
1110 /* FORNOW. */
1111 innerloop_iters = 1;
1112 if (loop->inner)
1113 innerloop_iters = 50; /* FIXME */
1115 for (i = 0; i < nbbs; i++)
1117 gimple_stmt_iterator si;
1118 basic_block bb = bbs[i];
1120 if (bb->loop_father == loop->inner)
1121 factor = innerloop_iters;
1122 else
1123 factor = 1;
1125 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
1127 gimple *stmt = gsi_stmt (si);
1128 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (stmt);
1130 if (!is_gimple_assign (stmt) && !is_gimple_call (stmt))
1131 continue;
1133 /* Skip stmts that are not vectorized inside the loop. */
1134 if (stmt_info
1135 && !STMT_VINFO_RELEVANT_P (stmt_info)
1136 && (!STMT_VINFO_LIVE_P (stmt_info)
1137 || !VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
1138 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
1139 continue;
1141 vect_cost_for_stmt kind;
1142 if (STMT_VINFO_DATA_REF (stmt_info))
1144 if (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)))
1145 kind = scalar_load;
1146 else
1147 kind = scalar_store;
1149 else
1150 kind = scalar_stmt;
1152 record_stmt_cost (&LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
1153 factor, kind, stmt_info, 0, vect_prologue);
1157 /* Now accumulate cost. */
1158 void *target_cost_data = init_cost (loop);
1159 stmt_info_for_cost *si;
1160 int j;
1161 FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
1162 j, si)
1163 (void) add_stmt_cost (target_cost_data, si->count,
1164 si->kind, si->stmt_info, si->misalign,
1165 vect_body);
1166 unsigned dummy, body_cost = 0;
1167 finish_cost (target_cost_data, &dummy, &body_cost, &dummy);
1168 destroy_cost_data (target_cost_data);
1169 LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST (loop_vinfo) = body_cost;
1173 /* Function vect_analyze_loop_form_1.
1175 Verify that certain CFG restrictions hold, including:
1176 - the loop has a pre-header
1177 - the loop has a single entry and exit
1178 - the loop exit condition is simple enough
 1179 - the number of iterations can be analyzed, i.e., a countable loop. The
1180 niter could be analyzed under some assumptions. */
1182 bool
1183 vect_analyze_loop_form_1 (struct loop *loop, gcond **loop_cond,
1184 tree *assumptions, tree *number_of_iterationsm1,
1185 tree *number_of_iterations, gcond **inner_loop_cond)
1187 DUMP_VECT_SCOPE ("vect_analyze_loop_form");
1189 /* Different restrictions apply when we are considering an inner-most loop,
1190 vs. an outer (nested) loop.
1191 (FORNOW. May want to relax some of these restrictions in the future). */
1193 if (!loop->inner)
1195 /* Inner-most loop. We currently require that the number of BBs is
1196 exactly 2 (the header and latch). Vectorizable inner-most loops
1197 look like this:
1199 (pre-header)
1201 header <--------+
1202 | | |
1203 | +--> latch --+
1205 (exit-bb) */
1207 if (loop->num_nodes != 2)
1209 if (dump_enabled_p ())
1210 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1211 "not vectorized: control flow in loop.\n");
1212 return false;
1215 if (empty_block_p (loop->header))
1217 if (dump_enabled_p ())
1218 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1219 "not vectorized: empty loop.\n");
1220 return false;
1223 else
1225 struct loop *innerloop = loop->inner;
1226 edge entryedge;
1228 /* Nested loop. We currently require that the loop is doubly-nested,
1229 contains a single inner loop, and the number of BBs is exactly 5.
1230 Vectorizable outer-loops look like this:
1232 (pre-header)
1234 header <---+
1236 inner-loop |
1238 tail ------+
1240 (exit-bb)
1242 The inner-loop has the properties expected of inner-most loops
1243 as described above. */
1245 if ((loop->inner)->inner || (loop->inner)->next)
1247 if (dump_enabled_p ())
1248 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1249 "not vectorized: multiple nested loops.\n");
1250 return false;
1253 if (loop->num_nodes != 5)
1255 if (dump_enabled_p ())
1256 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1257 "not vectorized: control flow in loop.\n");
1258 return false;
1261 entryedge = loop_preheader_edge (innerloop);
1262 if (entryedge->src != loop->header
1263 || !single_exit (innerloop)
1264 || single_exit (innerloop)->dest != EDGE_PRED (loop->latch, 0)->src)
1266 if (dump_enabled_p ())
1267 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1268 "not vectorized: unsupported outerloop form.\n");
1269 return false;
1272 /* Analyze the inner-loop. */
1273 tree inner_niterm1, inner_niter, inner_assumptions;
1274 if (! vect_analyze_loop_form_1 (loop->inner, inner_loop_cond,
1275 &inner_assumptions, &inner_niterm1,
1276 &inner_niter, NULL)
1277 /* Don't support analyzing niter under assumptions for inner
1278 loop. */
1279 || !integer_onep (inner_assumptions))
1281 if (dump_enabled_p ())
1282 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1283 "not vectorized: Bad inner loop.\n");
1284 return false;
1287 if (!expr_invariant_in_loop_p (loop, inner_niter))
1289 if (dump_enabled_p ())
1290 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1291 "not vectorized: inner-loop count not"
1292 " invariant.\n");
1293 return false;
1296 if (dump_enabled_p ())
1297 dump_printf_loc (MSG_NOTE, vect_location,
1298 "Considering outer-loop vectorization.\n");
1301 if (!single_exit (loop)
1302 || EDGE_COUNT (loop->header->preds) != 2)
1304 if (dump_enabled_p ())
1306 if (!single_exit (loop))
1307 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1308 "not vectorized: multiple exits.\n");
1309 else if (EDGE_COUNT (loop->header->preds) != 2)
1310 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1311 "not vectorized: too many incoming edges.\n");
1313 return false;
 1316 /* We assume that the loop exit condition is at the end of the loop, i.e.,
1317 that the loop is represented as a do-while (with a proper if-guard
1318 before the loop if needed), where the loop header contains all the
1319 executable statements, and the latch is empty. */
1320 if (!empty_block_p (loop->latch)
1321 || !gimple_seq_empty_p (phi_nodes (loop->latch)))
1323 if (dump_enabled_p ())
1324 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1325 "not vectorized: latch block not empty.\n");
1326 return false;
1329 /* Make sure the exit is not abnormal. */
1330 edge e = single_exit (loop);
1331 if (e->flags & EDGE_ABNORMAL)
1333 if (dump_enabled_p ())
1334 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1335 "not vectorized: abnormal loop exit edge.\n");
1336 return false;
1339 *loop_cond = vect_get_loop_niters (loop, assumptions, number_of_iterations,
1340 number_of_iterationsm1);
1341 if (!*loop_cond)
1343 if (dump_enabled_p ())
1344 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1345 "not vectorized: complicated exit condition.\n");
1346 return false;
1349 if (integer_zerop (*assumptions)
1350 || !*number_of_iterations
1351 || chrec_contains_undetermined (*number_of_iterations))
1353 if (dump_enabled_p ())
1354 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1355 "not vectorized: number of iterations cannot be "
1356 "computed.\n");
1357 return false;
1360 if (integer_zerop (*number_of_iterations))
1362 if (dump_enabled_p ())
1363 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1364 "not vectorized: number of iterations = 0.\n");
1365 return false;
1368 return true;
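/* Illustrative loop shapes for the checks above: an inner-most loop whose
   body still contains an if/else that was not if-converted has more than
   the two basic blocks we allow and is rejected with "control flow in
   loop"; a loop containing a break has two exits and is rejected with
   "multiple exits"; the accepted shape is a single-exit loop in do-while
   form whose latch is empty and whose exit test ends the header.  */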
1371 /* Analyze LOOP form and return a loop_vec_info if it is of suitable form. */
1373 loop_vec_info
1374 vect_analyze_loop_form (struct loop *loop, vec_info_shared *shared)
1376 tree assumptions, number_of_iterations, number_of_iterationsm1;
1377 gcond *loop_cond, *inner_loop_cond = NULL;
1379 if (! vect_analyze_loop_form_1 (loop, &loop_cond,
1380 &assumptions, &number_of_iterationsm1,
1381 &number_of_iterations, &inner_loop_cond))
1382 return NULL;
1384 loop_vec_info loop_vinfo = new _loop_vec_info (loop, shared);
1385 LOOP_VINFO_NITERSM1 (loop_vinfo) = number_of_iterationsm1;
1386 LOOP_VINFO_NITERS (loop_vinfo) = number_of_iterations;
1387 LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = number_of_iterations;
1388 if (!integer_onep (assumptions))
 1390 /* We consider vectorizing this loop by versioning it under
1391 some assumptions. In order to do this, we need to clear
1392 existing information computed by scev and niter analyzer. */
1393 scev_reset_htab ();
1394 free_numbers_of_iterations_estimates (loop);
1395 /* Also set flag for this loop so that following scev and niter
1396 analysis are done under the assumptions. */
1397 loop_constraint_set (loop, LOOP_C_FINITE);
1398 /* Also record the assumptions for versioning. */
1399 LOOP_VINFO_NITERS_ASSUMPTIONS (loop_vinfo) = assumptions;
1402 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
1404 if (dump_enabled_p ())
1406 dump_printf_loc (MSG_NOTE, vect_location,
1407 "Symbolic number of iterations is ");
1408 dump_generic_expr (MSG_NOTE, TDF_DETAILS, number_of_iterations);
1409 dump_printf (MSG_NOTE, "\n");
1413 stmt_vec_info loop_cond_info = loop_vinfo->lookup_stmt (loop_cond);
1414 STMT_VINFO_TYPE (loop_cond_info) = loop_exit_ctrl_vec_info_type;
1415 if (inner_loop_cond)
1417 stmt_vec_info inner_loop_cond_info
1418 = loop_vinfo->lookup_stmt (inner_loop_cond);
1419 STMT_VINFO_TYPE (inner_loop_cond_info) = loop_exit_ctrl_vec_info_type;
1422 gcc_assert (!loop->aux);
1423 loop->aux = loop_vinfo;
1424 return loop_vinfo;
 1429 /* Scan the loop stmts and, depending on whether there are any (non-)SLP
 1430 statements, update the vectorization factor. */
1432 static void
1433 vect_update_vf_for_slp (loop_vec_info loop_vinfo)
1435 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1436 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
1437 int nbbs = loop->num_nodes;
1438 poly_uint64 vectorization_factor;
1439 int i;
1441 DUMP_VECT_SCOPE ("vect_update_vf_for_slp");
1443 vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1444 gcc_assert (known_ne (vectorization_factor, 0U));
1446 /* If all the stmts in the loop can be SLPed, we perform only SLP, and
1447 vectorization factor of the loop is the unrolling factor required by
 1448 the SLP instances. If that unrolling factor is 1, we say that we
 1449 perform pure SLP on the loop; cross iteration parallelism is not
1450 exploited. */
1451 bool only_slp_in_loop = true;
1452 for (i = 0; i < nbbs; i++)
1454 basic_block bb = bbs[i];
1455 for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
1456 gsi_next (&si))
1458 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
1459 stmt_info = vect_stmt_to_vectorize (stmt_info);
1460 if ((STMT_VINFO_RELEVANT_P (stmt_info)
1461 || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
1462 && !PURE_SLP_STMT (stmt_info))
1463 /* STMT needs both SLP and loop-based vectorization. */
1464 only_slp_in_loop = false;
1468 if (only_slp_in_loop)
1470 dump_printf_loc (MSG_NOTE, vect_location,
1471 "Loop contains only SLP stmts\n");
1472 vectorization_factor = LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo);
1474 else
1476 dump_printf_loc (MSG_NOTE, vect_location,
1477 "Loop contains SLP and non-SLP stmts\n");
1478 /* Both the vectorization factor and unroll factor have the form
1479 current_vector_size * X for some rational X, so they must have
1480 a common multiple. */
1481 vectorization_factor
1482 = force_common_multiple (vectorization_factor,
1483 LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo));
1486 LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;
1487 if (dump_enabled_p ())
1489 dump_printf_loc (MSG_NOTE, vect_location,
1490 "Updating vectorization factor to ");
1491 dump_dec (MSG_NOTE, vectorization_factor);
1492 dump_printf (MSG_NOTE, ".\n");
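/* For instance (numbers for illustration only): if loop-based analysis
   chose a vectorization factor of 4 and the SLP instances need an unrolling
   factor of 2, force_common_multiple keeps the factor at 4; with an SLP
   unrolling factor of 8 it is raised to 8.  If every relevant statement is
   covered by SLP, the SLP unrolling factor is used directly, and an
   unrolling factor of 1 means pure SLP with no cross-iteration
   parallelism.  */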
1496 /* Return true if STMT_INFO describes a double reduction phi and if
1497 the other phi in the reduction is also relevant for vectorization.
1498 This rejects cases such as:
1500 outer1:
1501 x_1 = PHI <x_3(outer2), ...>;
1504 inner:
1505 x_2 = ...;
1508 outer2:
1509 x_3 = PHI <x_2(inner)>;
1511 if nothing in x_2 or elsewhere makes x_1 relevant. */
1513 static bool
1514 vect_active_double_reduction_p (stmt_vec_info stmt_info)
1516 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_double_reduction_def)
1517 return false;
1519 return STMT_VINFO_RELEVANT_P (STMT_VINFO_REDUC_DEF (stmt_info));
1522 /* Function vect_analyze_loop_operations.
1524 Scan the loop stmts and make sure they are all vectorizable. */
1526 static bool
1527 vect_analyze_loop_operations (loop_vec_info loop_vinfo)
1529 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1530 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
1531 int nbbs = loop->num_nodes;
1532 int i;
1533 stmt_vec_info stmt_info;
1534 bool need_to_vectorize = false;
1535 bool ok;
1537 DUMP_VECT_SCOPE ("vect_analyze_loop_operations");
1539 stmt_vector_for_cost cost_vec;
1540 cost_vec.create (2);
1542 for (i = 0; i < nbbs; i++)
1544 basic_block bb = bbs[i];
1546 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
1547 gsi_next (&si))
1549 gphi *phi = si.phi ();
1550 ok = true;
1552 stmt_info = loop_vinfo->lookup_stmt (phi);
1553 if (dump_enabled_p ())
1555 dump_printf_loc (MSG_NOTE, vect_location, "examining phi: ");
1556 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
1558 if (virtual_operand_p (gimple_phi_result (phi)))
1559 continue;
1561 /* Inner-loop loop-closed exit phi in outer-loop vectorization
1562 (i.e., a phi in the tail of the outer-loop). */
1563 if (! is_loop_header_bb_p (bb))
 1565 /* FORNOW: we currently don't support the case that these phis
 1566 are not used in the outer loop (unless it is a double reduction,
 1567 i.e., this phi is vect_reduction_def), because this case
 1568 requires us to actually do something here. */
1569 if (STMT_VINFO_LIVE_P (stmt_info)
1570 && !vect_active_double_reduction_p (stmt_info))
1572 if (dump_enabled_p ())
1573 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1574 "Unsupported loop-closed phi in "
1575 "outer-loop.\n");
1576 return false;
1579 /* If PHI is used in the outer loop, we check that its operand
1580 is defined in the inner loop. */
1581 if (STMT_VINFO_RELEVANT_P (stmt_info))
1583 tree phi_op;
1585 if (gimple_phi_num_args (phi) != 1)
1586 return false;
1588 phi_op = PHI_ARG_DEF (phi, 0);
1589 stmt_vec_info op_def_info = loop_vinfo->lookup_def (phi_op);
1590 if (!op_def_info)
1591 return false;
1593 if (STMT_VINFO_RELEVANT (op_def_info) != vect_used_in_outer
1594 && (STMT_VINFO_RELEVANT (op_def_info)
1595 != vect_used_in_outer_by_reduction))
1596 return false;
1599 continue;
1602 gcc_assert (stmt_info);
1604 if ((STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope
1605 || STMT_VINFO_LIVE_P (stmt_info))
1606 && STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def)
1608 /* A scalar-dependence cycle that we don't support. */
1609 if (dump_enabled_p ())
1610 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1611 "not vectorized: scalar dependence cycle.\n");
1612 return false;
1615 if (STMT_VINFO_RELEVANT_P (stmt_info))
1617 need_to_vectorize = true;
1618 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def
1619 && ! PURE_SLP_STMT (stmt_info))
1620 ok = vectorizable_induction (stmt_info, NULL, NULL, NULL,
1621 &cost_vec);
1622 else if ((STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def
1623 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle)
1624 && ! PURE_SLP_STMT (stmt_info))
1625 ok = vectorizable_reduction (stmt_info, NULL, NULL, NULL, NULL,
1626 &cost_vec);
1629 /* SLP PHIs are tested by vect_slp_analyze_node_operations. */
1630 if (ok
1631 && STMT_VINFO_LIVE_P (stmt_info)
1632 && !PURE_SLP_STMT (stmt_info))
1633 ok = vectorizable_live_operation (stmt_info, NULL, NULL, -1, NULL,
1634 &cost_vec);
1636 if (!ok)
1638 if (dump_enabled_p ())
1640 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1641 "not vectorized: relevant phi not "
1642 "supported: ");
1643 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, phi, 0);
1645 return false;
1649 for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
1650 gsi_next (&si))
1652 gimple *stmt = gsi_stmt (si);
1653 if (!gimple_clobber_p (stmt)
1654 && !vect_analyze_stmt (loop_vinfo->lookup_stmt (stmt),
1655 &need_to_vectorize,
1656 NULL, NULL, &cost_vec))
1657 return false;
1659 } /* bbs */
1661 add_stmt_costs (loop_vinfo->target_cost_data, &cost_vec);
1662 cost_vec.release ();
1664 /* All operations in the loop are either irrelevant (deal with loop
1665 control, or dead), or only used outside the loop and can be moved
1666 out of the loop (e.g. invariants, inductions). The loop can be
1667 optimized away by scalar optimizations. We're better off not
1668 touching this loop. */
1669 if (!need_to_vectorize)
1671 if (dump_enabled_p ())
1672 dump_printf_loc (MSG_NOTE, vect_location,
1673 "All the computation can be taken out of the loop.\n");
1674 if (dump_enabled_p ())
1675 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1676 "not vectorized: redundant loop. no profit to "
1677 "vectorize.\n");
1678 return false;
1681 return true;
1684 /* Analyze the cost of the loop described by LOOP_VINFO. Decide if it
1685 is worthwhile to vectorize. Return 1 if definitely yes, 0 if
1686 definitely no, or -1 if it's worth retrying. */
1688 static int
1689 vect_analyze_loop_costing (loop_vec_info loop_vinfo)
1691 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1692 unsigned int assumed_vf = vect_vf_for_cost (loop_vinfo);
1694 /* Only fully-masked loops can have iteration counts less than the
1695 vectorization factor. */
1696 if (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
1698 HOST_WIDE_INT max_niter;
1700 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
1701 max_niter = LOOP_VINFO_INT_NITERS (loop_vinfo);
1702 else
1703 max_niter = max_stmt_executions_int (loop);
1705 if (max_niter != -1
1706 && (unsigned HOST_WIDE_INT) max_niter < assumed_vf)
1708 if (dump_enabled_p ())
1709 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1710 "not vectorized: iteration count smaller than "
1711 "vectorization factor.\n");
1712 return 0;
1716 int min_profitable_iters, min_profitable_estimate;
1717 vect_estimate_min_profitable_iters (loop_vinfo, &min_profitable_iters,
1718 &min_profitable_estimate);
1720 if (min_profitable_iters < 0)
1722 if (dump_enabled_p ())
1723 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1724 "not vectorized: vectorization not profitable.\n");
1725 if (dump_enabled_p ())
1726 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1727 "not vectorized: vector version will never be "
1728 "profitable.\n");
1729 return -1;
1732 int min_scalar_loop_bound = (PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND)
1733 * assumed_vf);
 1735 /* Use the cost model only if it is more conservative than the user-specified
1736 threshold. */
1737 unsigned int th = (unsigned) MAX (min_scalar_loop_bound,
1738 min_profitable_iters);
1740 LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo) = th;
1742 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
1743 && LOOP_VINFO_INT_NITERS (loop_vinfo) < th)
1745 if (dump_enabled_p ())
1746 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1747 "not vectorized: vectorization not profitable.\n");
1748 if (dump_enabled_p ())
1749 dump_printf_loc (MSG_NOTE, vect_location,
1750 "not vectorized: iteration count smaller than user "
1751 "specified loop bound parameter or minimum profitable "
1752 "iterations (whichever is more conservative).\n");
1753 return 0;
1756 HOST_WIDE_INT estimated_niter = estimated_stmt_executions_int (loop);
1757 if (estimated_niter == -1)
1758 estimated_niter = likely_max_stmt_executions_int (loop);
1759 if (estimated_niter != -1
1760 && ((unsigned HOST_WIDE_INT) estimated_niter
1761 < MAX (th, (unsigned) min_profitable_estimate)))
1763 if (dump_enabled_p ())
1764 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1765 "not vectorized: estimated iteration count too "
1766 "small.\n");
1767 if (dump_enabled_p ())
1768 dump_printf_loc (MSG_NOTE, vect_location,
1769 "not vectorized: estimated iteration count smaller "
1770 "than specified loop bound parameter or minimum "
1771 "profitable iterations (whichever is more "
1772 "conservative).\n");
1773 return -1;
1776 return 1;
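/* A worked example with invented numbers: suppose ASSUMED_VF is 4, the
   target cost model reports MIN_PROFITABLE_ITERS == 11 and
   MIN_PROFITABLE_ESTIMATE == 16, and --param min-vect-loop-bound yields a
   scalar bound of 0.  Then TH = MAX (0 * 4, 11) == 11; a loop known to run
   8 < 11 iterations is rejected for good (return 0), while a loop merely
   estimated to run 12 < MAX (11, 16) iterations returns -1, so the caller
   may retry, e.g. with a different vector size.  */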
1779 static bool
1780 vect_get_datarefs_in_loop (loop_p loop, basic_block *bbs,
1781 vec<data_reference_p> *datarefs,
1782 unsigned int *n_stmts)
1784 *n_stmts = 0;
1785 for (unsigned i = 0; i < loop->num_nodes; i++)
1786 for (gimple_stmt_iterator gsi = gsi_start_bb (bbs[i]);
1787 !gsi_end_p (gsi); gsi_next (&gsi))
1789 gimple *stmt = gsi_stmt (gsi);
1790 if (is_gimple_debug (stmt))
1791 continue;
1792 ++(*n_stmts);
1793 if (!vect_find_stmt_data_reference (loop, stmt, datarefs))
1795 if (is_gimple_call (stmt) && loop->safelen)
1797 tree fndecl = gimple_call_fndecl (stmt), op;
1798 if (fndecl != NULL_TREE)
1800 cgraph_node *node = cgraph_node::get (fndecl);
1801 if (node != NULL && node->simd_clones != NULL)
1803 unsigned int j, n = gimple_call_num_args (stmt);
1804 for (j = 0; j < n; j++)
1806 op = gimple_call_arg (stmt, j);
1807 if (DECL_P (op)
1808 || (REFERENCE_CLASS_P (op)
1809 && get_base_address (op)))
1810 break;
1812 op = gimple_call_lhs (stmt);
1813 /* Ignore #pragma omp declare simd functions
1814 if they don't have data references in the
1815 call stmt itself. */
1816 if (j == n
1817 && !(op
1818 && (DECL_P (op)
1819 || (REFERENCE_CLASS_P (op)
1820 && get_base_address (op)))))
1821 continue;
1825 return false;
 1827 /* If dependence analysis will give up due to the limit on the
 1828 number of datarefs, stop here and fail fatally. */
1829 if (datarefs->length ()
1830 > (unsigned)PARAM_VALUE (PARAM_LOOP_MAX_DATAREFS_FOR_DATADEPS))
1831 return false;
1833 return true;
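/* The simd-clone special case above covers calls like

     #pragma omp declare simd
     extern int foo (int);

     #pragma omp simd
     for (i = 0; i < n; i++)
       a[i] = foo (b[i]);

   where the GIMPLE call itself has no analyzable data reference: if the
   loop has a nonzero safelen, the callee has simd clones, and neither the
   arguments nor the lhs are memory references, the call is skipped here
   instead of making data-reference analysis fail.  */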
1836 /* Function vect_analyze_loop_2.
1838 Apply a set of analyses on LOOP, and create a loop_vec_info struct
1839 for it. The different analyses will record information in the
1840 loop_vec_info struct. */
1841 static bool
1842 vect_analyze_loop_2 (loop_vec_info loop_vinfo, bool &fatal, unsigned *n_stmts)
1844 bool ok;
1845 int res;
1846 unsigned int max_vf = MAX_VECTORIZATION_FACTOR;
1847 poly_uint64 min_vf = 2;
1849 /* The first group of checks is independent of the vector size. */
1850 fatal = true;
1852 /* Find all data references in the loop (which correspond to vdefs/vuses)
1853 and analyze their evolution in the loop. */
1855 loop_p loop = LOOP_VINFO_LOOP (loop_vinfo);
1857 /* Gather the data references and count stmts in the loop. */
1858 if (!LOOP_VINFO_DATAREFS (loop_vinfo).exists ())
1860 if (!vect_get_datarefs_in_loop (loop, LOOP_VINFO_BBS (loop_vinfo),
1861 &LOOP_VINFO_DATAREFS (loop_vinfo),
1862 n_stmts))
1864 if (dump_enabled_p ())
1865 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1866 "not vectorized: loop contains function "
1867 "calls or data references that cannot "
1868 "be analyzed\n");
1869 return false;
1871 loop_vinfo->shared->save_datarefs ();
1873 else
1874 loop_vinfo->shared->check_datarefs ();
1876 /* Analyze the data references and also adjust the minimal
1877 vectorization factor according to the loads and stores. */
1879 ok = vect_analyze_data_refs (loop_vinfo, &min_vf);
1880 if (!ok)
1882 if (dump_enabled_p ())
1883 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1884 "bad data references.\n");
1885 return false;
1888 /* Classify all cross-iteration scalar data-flow cycles.
1889 Cross-iteration cycles caused by virtual phis are analyzed separately. */
1890 vect_analyze_scalar_cycles (loop_vinfo);
1892 vect_pattern_recog (loop_vinfo);
1894 vect_fixup_scalar_cycles_with_patterns (loop_vinfo);
1896 /* Analyze the access patterns of the data-refs in the loop (consecutive,
1897 complex, etc.). FORNOW: Only handle consecutive access pattern. */
1899 ok = vect_analyze_data_ref_accesses (loop_vinfo);
1900 if (!ok)
1902 if (dump_enabled_p ())
1903 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1904 "bad data access.\n");
1905 return false;
1908 /* Data-flow analysis to detect stmts that do not need to be vectorized. */
1910 ok = vect_mark_stmts_to_be_vectorized (loop_vinfo);
1911 if (!ok)
1913 if (dump_enabled_p ())
1914 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1915 "unexpected pattern.\n");
1916 return false;
1919 /* The rest of the analysis below depends on the vector size in some way, so from here on failures are no longer fatal: retrying with a different vector size may still succeed. */
1920 fatal = false;
1922 /* Analyze data dependences between the data-refs in the loop
1923 and adjust the maximum vectorization factor according to
1924 the dependences.
1925 FORNOW: fail at the first data dependence that we encounter. */
1927 ok = vect_analyze_data_ref_dependences (loop_vinfo, &max_vf);
1928 if (!ok
1929 || (max_vf != MAX_VECTORIZATION_FACTOR
1930 && maybe_lt (max_vf, min_vf)))
1932 if (dump_enabled_p ())
1933 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1934 "bad data dependence.\n");
1935 return false;
1937 LOOP_VINFO_MAX_VECT_FACTOR (loop_vinfo) = max_vf;
1939 ok = vect_determine_vectorization_factor (loop_vinfo);
1940 if (!ok)
1942 if (dump_enabled_p ())
1943 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1944 "can't determine vectorization factor.\n");
1945 return false;
1947 if (max_vf != MAX_VECTORIZATION_FACTOR
1948 && maybe_lt (max_vf, LOOP_VINFO_VECT_FACTOR (loop_vinfo)))
1950 if (dump_enabled_p ())
1951 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1952 "bad data dependence.\n");
1953 return false;
1956 /* Compute the scalar iteration cost. */
1957 vect_compute_single_scalar_iteration_cost (loop_vinfo);
1959 poly_uint64 saved_vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1960 unsigned th;
1962 /* Check the SLP opportunities in the loop, analyze and build SLP trees. */
1963 ok = vect_analyze_slp (loop_vinfo, *n_stmts);
1964 if (!ok)
1965 return false;
1967 /* If there are any SLP instances mark them as pure_slp. */
1968 bool slp = vect_make_slp_decision (loop_vinfo);
1969 if (slp)
1971 /* Find stmts that need to be both vectorized and SLPed. */
1972 vect_detect_hybrid_slp (loop_vinfo);
1974 /* Update the vectorization factor based on the SLP decision. */
1975 vect_update_vf_for_slp (loop_vinfo);
1978 bool saved_can_fully_mask_p = LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo);
1980 /* We don't expect to have to roll back to anything other than an empty
1981 set of rgroups. */
1982 gcc_assert (LOOP_VINFO_MASKS (loop_vinfo).is_empty ());
1984 /* This is the point where we can re-start analysis with SLP forced off. */
1985 start_over:
1987 /* Now the vectorization factor is final. */
1988 poly_uint64 vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1989 gcc_assert (known_ne (vectorization_factor, 0U));
1991 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) && dump_enabled_p ())
1993 dump_printf_loc (MSG_NOTE, vect_location,
1994 "vectorization_factor = ");
1995 dump_dec (MSG_NOTE, vectorization_factor);
1996 dump_printf (MSG_NOTE, ", niters = %wd\n",
1997 LOOP_VINFO_INT_NITERS (loop_vinfo));
2000 HOST_WIDE_INT max_niter
2001 = likely_max_stmt_executions_int (LOOP_VINFO_LOOP (loop_vinfo));
2003 /* Analyze the alignment of the data-refs in the loop.
2004 Fail if a data reference is found that cannot be vectorized. */
2006 ok = vect_analyze_data_refs_alignment (loop_vinfo);
2007 if (!ok)
2009 if (dump_enabled_p ())
2010 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2011 "bad data alignment.\n");
2012 return false;
2015 /* Prune the list of ddrs to be tested at run-time by versioning for alias.
2016 It is important to call pruning after vect_analyze_data_ref_accesses,
2017 since we use grouping information gathered by interleaving analysis. */
2018 ok = vect_prune_runtime_alias_test_list (loop_vinfo);
2019 if (!ok)
2020 return false;
2022 /* Do not invoke vect_enhance_data_refs_alignment for epilogue
2023 vectorization. */
2024 if (!LOOP_VINFO_EPILOGUE_P (loop_vinfo))
2026 /* This pass will decide on using loop versioning and/or loop peeling in
2027 order to enhance the alignment of data references in the loop. */
2028 ok = vect_enhance_data_refs_alignment (loop_vinfo);
2029 if (!ok)
2031 if (dump_enabled_p ())
2032 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2033 "bad data alignment.\n");
2034 return false;
2038 if (slp)
2040 /* Analyze operations in the SLP instances. Note this may
2041 remove unsupported SLP instances which makes the above
2042 SLP kind detection invalid. */
2043 unsigned old_size = LOOP_VINFO_SLP_INSTANCES (loop_vinfo).length ();
2044 vect_slp_analyze_operations (loop_vinfo);
2045 if (LOOP_VINFO_SLP_INSTANCES (loop_vinfo).length () != old_size)
2046 goto again;
2049 /* Scan all the remaining operations in the loop that are not subject
2050 to SLP and make sure they are vectorizable. */
2051 ok = vect_analyze_loop_operations (loop_vinfo);
2052 if (!ok)
2054 if (dump_enabled_p ())
2055 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2056 "bad operation or unsupported loop bound.\n");
2057 return false;
2060 /* Decide whether to use a fully-masked loop for this vectorization
2061 factor. */
2062 LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
2063 = (LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo)
2064 && vect_verify_full_masking (loop_vinfo));
2065 if (dump_enabled_p ())
2067 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
2068 dump_printf_loc (MSG_NOTE, vect_location,
2069 "using a fully-masked loop.\n");
2070 else
2071 dump_printf_loc (MSG_NOTE, vect_location,
2072 "not using a fully-masked loop.\n");
2075 /* If epilog loop is required because of data accesses with gaps,
2076 one additional iteration needs to be peeled. Check if there are
2077 enough iterations for vectorization. */
2078 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
2079 && LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
2080 && !LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
2082 poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2083 tree scalar_niters = LOOP_VINFO_NITERSM1 (loop_vinfo);
2085 if (known_lt (wi::to_widest (scalar_niters), vf))
2087 if (dump_enabled_p ())
2088 dump_printf_loc (MSG_NOTE, vect_location,
2089 "loop has no enough iterations to support"
2090 " peeling for gaps.\n");
2091 return false;
2095 /* Check that the costings of the loop make vectorizing worthwhile. */
2096 res = vect_analyze_loop_costing (loop_vinfo);
2097 if (res < 0)
2098 goto again;
2099 if (!res)
2101 if (dump_enabled_p ())
2102 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2103 "Loop costings not worthwhile.\n");
2104 return false;
2107 /* Decide whether we need to create an epilogue loop to handle
2108 remaining scalar iterations. */
2109 th = LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo);
2111 unsigned HOST_WIDE_INT const_vf;
2112 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
2113 /* The main loop handles all iterations. */
2114 LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = false;
2115 else if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
2116 && LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) > 0)
2118 if (!multiple_p (LOOP_VINFO_INT_NITERS (loop_vinfo)
2119 - LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo),
2120 LOOP_VINFO_VECT_FACTOR (loop_vinfo)))
2121 LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = true;
2123 else if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo)
2124 || !LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&const_vf)
2125 || ((tree_ctz (LOOP_VINFO_NITERS (loop_vinfo))
2126 < (unsigned) exact_log2 (const_vf))
2127 /* In case of versioning, check if the maximum number of
2128 iterations is greater than th. If they are identical,
2129 the epilogue is unnecessary. */
2130 && (!LOOP_REQUIRES_VERSIONING (loop_vinfo)
2131 || ((unsigned HOST_WIDE_INT) max_niter
2132 > (th / const_vf) * const_vf))))
2133 LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = true;
2135 /* If an epilogue loop is required make sure we can create one. */
2136 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
2137 || LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo))
2139 if (dump_enabled_p ())
2140 dump_printf_loc (MSG_NOTE, vect_location, "epilog loop required\n");
2141 if (!vect_can_advance_ivs_p (loop_vinfo)
2142 || !slpeel_can_duplicate_loop_p (LOOP_VINFO_LOOP (loop_vinfo),
2143 single_exit (LOOP_VINFO_LOOP
2144 (loop_vinfo))))
2146 if (dump_enabled_p ())
2147 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2148 "not vectorized: can't create required "
2149 "epilog loop\n");
2150 goto again;
2154 /* During peeling, we need to check if the number of loop iterations is
2155 enough for both the peeled prolog loop and the vector loop. This check
2156 can be merged with the threshold check of loop versioning, so
2157 increase the threshold for this case if necessary. */
2158 if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
2160 poly_uint64 niters_th = 0;
2162 if (!vect_use_loop_mask_for_alignment_p (loop_vinfo))
2164 /* Niters for peeled prolog loop. */
2165 if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0)
2167 dr_vec_info *dr_info = LOOP_VINFO_UNALIGNED_DR (loop_vinfo);
2168 tree vectype = STMT_VINFO_VECTYPE (dr_info->stmt);
2169 niters_th += TYPE_VECTOR_SUBPARTS (vectype) - 1;
2171 else
2172 niters_th += LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
2175 /* Niters for at least one iteration of vectorized loop. */
2176 if (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
2177 niters_th += LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2178 /* One additional iteration because of peeling for gaps. */
2179 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo))
2180 niters_th += 1;
2181 LOOP_VINFO_VERSIONING_THRESHOLD (loop_vinfo) = niters_th;
2184 gcc_assert (known_eq (vectorization_factor,
2185 LOOP_VINFO_VECT_FACTOR (loop_vinfo)));
2187 /* Ok to vectorize! */
2188 return true;
2190 again:
2191 /* Try again with SLP forced off but if we didn't do any SLP there is
2192 no point in re-trying. */
2193 if (!slp)
2194 return false;
2196 /* If there are reduction chains re-trying will fail anyway. */
2197 if (! LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo).is_empty ())
2198 return false;
2200 /* Likewise if the grouped loads or stores in the SLP cannot be handled
2201 via interleaving or lane instructions. */
2202 slp_instance instance;
2203 slp_tree node;
2204 unsigned i, j;
2205 FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), i, instance)
2207 stmt_vec_info vinfo;
2208 vinfo = SLP_TREE_SCALAR_STMTS (SLP_INSTANCE_TREE (instance))[0];
2209 if (! STMT_VINFO_GROUPED_ACCESS (vinfo))
2210 continue;
2211 vinfo = DR_GROUP_FIRST_ELEMENT (vinfo);
2212 unsigned int size = DR_GROUP_SIZE (vinfo);
2213 tree vectype = STMT_VINFO_VECTYPE (vinfo);
2214 if (! vect_store_lanes_supported (vectype, size, false)
2215 && ! known_eq (TYPE_VECTOR_SUBPARTS (vectype), 1U)
2216 && ! vect_grouped_store_supported (vectype, size))
2217 return false;
2218 FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (instance), j, node)
2220 vinfo = SLP_TREE_SCALAR_STMTS (node)[0];
2221 vinfo = DR_GROUP_FIRST_ELEMENT (vinfo);
2222 bool single_element_p = !DR_GROUP_NEXT_ELEMENT (vinfo);
2223 size = DR_GROUP_SIZE (vinfo);
2224 vectype = STMT_VINFO_VECTYPE (vinfo);
2225 if (! vect_load_lanes_supported (vectype, size, false)
2226 && ! vect_grouped_load_supported (vectype, single_element_p,
2227 size))
2228 return false;
2232 if (dump_enabled_p ())
2233 dump_printf_loc (MSG_NOTE, vect_location,
2234 "re-trying with SLP disabled\n");
2236 /* Roll back state appropriately. No SLP this time. */
2237 slp = false;
2238 /* Restore the vectorization factor as it would be without SLP. */
2239 LOOP_VINFO_VECT_FACTOR (loop_vinfo) = saved_vectorization_factor;
2240 /* Free the SLP instances. */
2241 FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), j, instance)
2242 vect_free_slp_instance (instance, false);
2243 LOOP_VINFO_SLP_INSTANCES (loop_vinfo).release ();
2244 /* Reset SLP type to loop_vect on all stmts. */
2245 for (i = 0; i < LOOP_VINFO_LOOP (loop_vinfo)->num_nodes; ++i)
2247 basic_block bb = LOOP_VINFO_BBS (loop_vinfo)[i];
2248 for (gimple_stmt_iterator si = gsi_start_phis (bb);
2249 !gsi_end_p (si); gsi_next (&si))
2251 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
2252 STMT_SLP_TYPE (stmt_info) = loop_vect;
2254 for (gimple_stmt_iterator si = gsi_start_bb (bb);
2255 !gsi_end_p (si); gsi_next (&si))
2257 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
2258 STMT_SLP_TYPE (stmt_info) = loop_vect;
2259 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
2261 gimple *pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
2262 stmt_info = STMT_VINFO_RELATED_STMT (stmt_info);
2263 STMT_SLP_TYPE (stmt_info) = loop_vect;
2264 for (gimple_stmt_iterator pi = gsi_start (pattern_def_seq);
2265 !gsi_end_p (pi); gsi_next (&pi))
2266 STMT_SLP_TYPE (loop_vinfo->lookup_stmt (gsi_stmt (pi)))
2267 = loop_vect;
2271 /* Free optimized alias test DDRs. */
2272 LOOP_VINFO_LOWER_BOUNDS (loop_vinfo).truncate (0);
2273 LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo).release ();
2274 LOOP_VINFO_CHECK_UNEQUAL_ADDRS (loop_vinfo).release ();
2275 /* Reset target cost data. */
2276 destroy_cost_data (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo));
2277 LOOP_VINFO_TARGET_COST_DATA (loop_vinfo)
2278 = init_cost (LOOP_VINFO_LOOP (loop_vinfo));
2279 /* Reset accumulated rgroup information. */
2280 release_vec_loop_masks (&LOOP_VINFO_MASKS (loop_vinfo));
2281 /* Reset assorted flags. */
2282 LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = false;
2283 LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = false;
2284 LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo) = 0;
2285 LOOP_VINFO_VERSIONING_THRESHOLD (loop_vinfo) = 0;
2286 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = saved_can_fully_mask_p;
2288 goto start_over;
2291 /* Function vect_analyze_loop.
2293 Apply a set of analyses on LOOP, and create a loop_vec_info struct
2294 for it. The different analyses will record information in the
2295 loop_vec_info struct. If ORIG_LOOP_VINFO is not NULL, the epilogue must
2296 be vectorized. */
2297 loop_vec_info
2298 vect_analyze_loop (struct loop *loop, loop_vec_info orig_loop_vinfo,
2299 vec_info_shared *shared)
2301 loop_vec_info loop_vinfo;
2302 auto_vector_sizes vector_sizes;
2304 /* Autodetect first vector size we try. */
2305 current_vector_size = 0;
2306 targetm.vectorize.autovectorize_vector_sizes (&vector_sizes);
2307 unsigned int next_size = 0;
2309 DUMP_VECT_SCOPE ("analyze_loop_nest");
2311 if (loop_outer (loop)
2312 && loop_vec_info_for_loop (loop_outer (loop))
2313 && LOOP_VINFO_VECTORIZABLE_P (loop_vec_info_for_loop (loop_outer (loop))))
2315 if (dump_enabled_p ())
2316 dump_printf_loc (MSG_NOTE, vect_location,
2317 "outer-loop already vectorized.\n");
2318 return NULL;
2321 if (!find_loop_nest (loop, &shared->loop_nest))
2323 if (dump_enabled_p ())
2324 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2325 "not vectorized: loop nest containing two "
2326 "or more consecutive inner loops cannot be "
2327 "vectorized\n");
2328 return NULL;
2331 unsigned n_stmts = 0;
2332 poly_uint64 autodetected_vector_size = 0;
2333 while (1)
2335 /* Check the CFG characteristics of the loop (nesting, entry/exit). */
2336 loop_vinfo = vect_analyze_loop_form (loop, shared);
2337 if (!loop_vinfo)
2339 if (dump_enabled_p ())
2340 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2341 "bad loop form.\n");
2342 return NULL;
2345 bool fatal = false;
2347 if (orig_loop_vinfo)
2348 LOOP_VINFO_ORIG_LOOP_INFO (loop_vinfo) = orig_loop_vinfo;
2350 if (vect_analyze_loop_2 (loop_vinfo, fatal, &n_stmts))
2352 LOOP_VINFO_VECTORIZABLE_P (loop_vinfo) = 1;
2354 return loop_vinfo;
2357 delete loop_vinfo;
2359 if (next_size == 0)
2360 autodetected_vector_size = current_vector_size;
2362 if (next_size < vector_sizes.length ()
2363 && known_eq (vector_sizes[next_size], autodetected_vector_size))
2364 next_size += 1;
2366 if (fatal
2367 || next_size == vector_sizes.length ()
2368 || known_eq (current_vector_size, 0U))
2369 return NULL;
2371 /* Try the next biggest vector size. */
2372 current_vector_size = vector_sizes[next_size++];
2373 if (dump_enabled_p ())
2375 dump_printf_loc (MSG_NOTE, vect_location,
2376 "***** Re-trying analysis with "
2377 "vector size ");
2378 dump_dec (MSG_NOTE, current_vector_size);
2379 dump_printf (MSG_NOTE, "\n");
2384 /* Return true if there is an in-order reduction function for CODE, storing
2385 it in *REDUC_FN if so. */
2387 static bool
2388 fold_left_reduction_fn (tree_code code, internal_fn *reduc_fn)
2390 switch (code)
2392 case PLUS_EXPR:
2393 *reduc_fn = IFN_FOLD_LEFT_PLUS;
2394 return true;
2396 default:
2397 return false;
2401 /* Function reduction_fn_for_scalar_code
2403 Input:
2404 CODE - tree_code of a reduction operation.
2406 Output:
2407 REDUC_FN - the corresponding internal function to be used to reduce the
2408 vector of partial results into a single scalar result, or IFN_LAST
2409 if the operation is a supported reduction operation, but does not have
2410 such an internal function.
2412 Return FALSE if CODE currently cannot be vectorized as a reduction. */
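/* Illustrative example (sketch only, not taken from the sources): a scalar
   max reduction such as

     int m = a[0];
     for (int i = 1; i < n; i++)
       m = a[i] > m ? a[i] : m;

   corresponds to MAX_EXPR, for which this function returns IFN_REDUC_MAX:
   the vector loop keeps a per-lane running maximum and the internal
   function reduces the final vector of partial maxima to one scalar.  */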
2414 static bool
2415 reduction_fn_for_scalar_code (enum tree_code code, internal_fn *reduc_fn)
2417 switch (code)
2419 case MAX_EXPR:
2420 *reduc_fn = IFN_REDUC_MAX;
2421 return true;
2423 case MIN_EXPR:
2424 *reduc_fn = IFN_REDUC_MIN;
2425 return true;
2427 case PLUS_EXPR:
2428 *reduc_fn = IFN_REDUC_PLUS;
2429 return true;
2431 case BIT_AND_EXPR:
2432 *reduc_fn = IFN_REDUC_AND;
2433 return true;
2435 case BIT_IOR_EXPR:
2436 *reduc_fn = IFN_REDUC_IOR;
2437 return true;
2439 case BIT_XOR_EXPR:
2440 *reduc_fn = IFN_REDUC_XOR;
2441 return true;
2443 case MULT_EXPR:
2444 case MINUS_EXPR:
2445 *reduc_fn = IFN_LAST;
2446 return true;
2448 default:
2449 return false;
2453 /* If there is a neutral value X such that SLP reduction NODE would not
2454 be affected by the introduction of additional X elements, return that X,
2455 otherwise return null. CODE is the code of the reduction. REDUC_CHAIN
2456 is true if the SLP statements perform a single reduction, false if each
2457 statement performs an independent reduction. */
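/* Worked example (illustrative only): for a PLUS_EXPR reduction any extra
   lanes can be filled with the neutral value 0 without changing the final
   sum; for MULT_EXPR the filler is 1 and for BIT_AND_EXPR it is all-ones.
   For MIN_EXPR/MAX_EXPR only a reduction chain has a safe filler, namely
   the chain's single initial value, as the code below spells out.  */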
2459 static tree
2460 neutral_op_for_slp_reduction (slp_tree slp_node, tree_code code,
2461 bool reduc_chain)
2463 vec<stmt_vec_info> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
2464 stmt_vec_info stmt_vinfo = stmts[0];
2465 tree vector_type = STMT_VINFO_VECTYPE (stmt_vinfo);
2466 tree scalar_type = TREE_TYPE (vector_type);
2467 struct loop *loop = gimple_bb (stmt_vinfo->stmt)->loop_father;
2468 gcc_assert (loop);
2470 switch (code)
2472 case WIDEN_SUM_EXPR:
2473 case DOT_PROD_EXPR:
2474 case SAD_EXPR:
2475 case PLUS_EXPR:
2476 case MINUS_EXPR:
2477 case BIT_IOR_EXPR:
2478 case BIT_XOR_EXPR:
2479 return build_zero_cst (scalar_type);
2481 case MULT_EXPR:
2482 return build_one_cst (scalar_type);
2484 case BIT_AND_EXPR:
2485 return build_all_ones_cst (scalar_type);
2487 case MAX_EXPR:
2488 case MIN_EXPR:
2489 /* For MIN/MAX the initial values are neutral. A reduction chain
2490 has only a single initial value, so that value is neutral for
2491 all statements. */
2492 if (reduc_chain)
2493 return PHI_ARG_DEF_FROM_EDGE (stmt_vinfo->stmt,
2494 loop_preheader_edge (loop));
2495 return NULL_TREE;
2497 default:
2498 return NULL_TREE;
2502 /* Error reporting helper for vect_is_simple_reduction below. GIMPLE statement
2503 STMT is printed with a message MSG. */
2505 static void
2506 report_vect_op (dump_flags_t msg_type, gimple *stmt, const char *msg)
2508 dump_printf_loc (msg_type, vect_location, "%s", msg);
2509 dump_gimple_stmt (msg_type, TDF_SLIM, stmt, 0);
2512 /* DEF_STMT_INFO occurs in a loop that contains a potential reduction
2513 operation. Return true if the results of DEF_STMT_INFO are something
2514 that can be accumulated by such a reduction. */
2516 static bool
2517 vect_valid_reduction_input_p (stmt_vec_info def_stmt_info)
2519 return (is_gimple_assign (def_stmt_info->stmt)
2520 || is_gimple_call (def_stmt_info->stmt)
2521 || STMT_VINFO_DEF_TYPE (def_stmt_info) == vect_induction_def
2522 || (gimple_code (def_stmt_info->stmt) == GIMPLE_PHI
2523 && STMT_VINFO_DEF_TYPE (def_stmt_info) == vect_internal_def
2524 && !is_loop_header_bb_p (gimple_bb (def_stmt_info->stmt))));
2527 /* Detect SLP reduction of the form:
2529 #a1 = phi <a5, a0>
2530 a2 = operation (a1)
2531 a3 = operation (a2)
2532 a4 = operation (a3)
2533 a5 = operation (a4)
2535 #a = phi <a5>
2537 PHI is the reduction phi node (#a1 = phi <a5, a0> above)
2538 FIRST_STMT is the first reduction stmt in the chain
2539 (a2 = operation (a1)).
2541 Return TRUE if a reduction chain was detected. */
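/* Illustrative source-level example (assumed, not from the testsuite): a
   manually unrolled accumulation like

     int sum = 0;
     for (int i = 0; i < n; i++)
       {
         sum += a[2 * i];
         sum += a[2 * i + 1];
       }

   has exactly this shape: each statement feeds the next through the
   accumulator and only the last value reaches the loop-exit phi, so a
   reduction chain of size two is recorded.  */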
2543 static bool
2544 vect_is_slp_reduction (loop_vec_info loop_info, gimple *phi,
2545 gimple *first_stmt)
2547 struct loop *loop = (gimple_bb (phi))->loop_father;
2548 struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
2549 enum tree_code code;
2550 gimple *loop_use_stmt = NULL;
2551 stmt_vec_info use_stmt_info, current_stmt_info = NULL;
2552 tree lhs;
2553 imm_use_iterator imm_iter;
2554 use_operand_p use_p;
2555 int nloop_uses, size = 0, n_out_of_loop_uses;
2556 bool found = false;
2558 if (loop != vect_loop)
2559 return false;
2561 lhs = PHI_RESULT (phi);
2562 code = gimple_assign_rhs_code (first_stmt);
2563 while (1)
2565 nloop_uses = 0;
2566 n_out_of_loop_uses = 0;
2567 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
2569 gimple *use_stmt = USE_STMT (use_p);
2570 if (is_gimple_debug (use_stmt))
2571 continue;
2573 /* Check if we got back to the reduction phi. */
2574 if (use_stmt == phi)
2576 loop_use_stmt = use_stmt;
2577 found = true;
2578 break;
2581 if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
2583 loop_use_stmt = use_stmt;
2584 nloop_uses++;
2586 else
2587 n_out_of_loop_uses++;
2589 /* There can be either a single use in the loop or two uses in
2590 phi nodes. */
2591 if (nloop_uses > 1 || (n_out_of_loop_uses && nloop_uses))
2592 return false;
2595 if (found)
2596 break;
2598 /* We reached a statement with no loop uses. */
2599 if (nloop_uses == 0)
2600 return false;
2602 /* This is a loop exit phi, and we haven't reached the reduction phi. */
2603 if (gimple_code (loop_use_stmt) == GIMPLE_PHI)
2604 return false;
2606 if (!is_gimple_assign (loop_use_stmt)
2607 || code != gimple_assign_rhs_code (loop_use_stmt)
2608 || !flow_bb_inside_loop_p (loop, gimple_bb (loop_use_stmt)))
2609 return false;
2611 /* Insert USE_STMT into reduction chain. */
2612 use_stmt_info = loop_info->lookup_stmt (loop_use_stmt);
2613 if (current_stmt_info)
2615 REDUC_GROUP_NEXT_ELEMENT (current_stmt_info) = use_stmt_info;
2616 REDUC_GROUP_FIRST_ELEMENT (use_stmt_info)
2617 = REDUC_GROUP_FIRST_ELEMENT (current_stmt_info);
2619 else
2620 REDUC_GROUP_FIRST_ELEMENT (use_stmt_info) = use_stmt_info;
2622 lhs = gimple_assign_lhs (loop_use_stmt);
2623 current_stmt_info = use_stmt_info;
2624 size++;
2627 if (!found || loop_use_stmt != phi || size < 2)
2628 return false;
2630 /* Swap the operands, if needed, to make the reduction operand be the second
2631 operand. */
2632 lhs = PHI_RESULT (phi);
2633 stmt_vec_info next_stmt_info = REDUC_GROUP_FIRST_ELEMENT (current_stmt_info);
2634 while (next_stmt_info)
2636 gassign *next_stmt = as_a <gassign *> (next_stmt_info->stmt);
2637 if (gimple_assign_rhs2 (next_stmt) == lhs)
2639 tree op = gimple_assign_rhs1 (next_stmt);
2640 stmt_vec_info def_stmt_info = loop_info->lookup_def (op);
2642 /* Check that the other def is either defined in the loop
2643 ("vect_internal_def"), or it's an induction (defined by a
2644 loop-header phi-node). */
2645 if (def_stmt_info
2646 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt_info->stmt))
2647 && vect_valid_reduction_input_p (def_stmt_info))
2649 lhs = gimple_assign_lhs (next_stmt);
2650 next_stmt_info = REDUC_GROUP_NEXT_ELEMENT (next_stmt_info);
2651 continue;
2654 return false;
2656 else
2658 tree op = gimple_assign_rhs2 (next_stmt);
2659 stmt_vec_info def_stmt_info = loop_info->lookup_def (op);
2661 /* Check that the other def is either defined in the loop
2662 ("vect_internal_def"), or it's an induction (defined by a
2663 loop-header phi-node). */
2664 if (def_stmt_info
2665 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt_info->stmt))
2666 && vect_valid_reduction_input_p (def_stmt_info))
2668 if (dump_enabled_p ())
2670 dump_printf_loc (MSG_NOTE, vect_location, "swapping oprnds: ");
2671 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, next_stmt, 0);
2674 swap_ssa_operands (next_stmt,
2675 gimple_assign_rhs1_ptr (next_stmt),
2676 gimple_assign_rhs2_ptr (next_stmt));
2677 update_stmt (next_stmt);
2679 if (CONSTANT_CLASS_P (gimple_assign_rhs1 (next_stmt)))
2680 LOOP_VINFO_OPERANDS_SWAPPED (loop_info) = true;
2682 else
2683 return false;
2686 lhs = gimple_assign_lhs (next_stmt);
2687 next_stmt_info = REDUC_GROUP_NEXT_ELEMENT (next_stmt_info);
2690 /* Save the chain for further analysis in SLP detection. */
2691 stmt_vec_info first_stmt_info
2692 = REDUC_GROUP_FIRST_ELEMENT (current_stmt_info);
2693 LOOP_VINFO_REDUCTION_CHAINS (loop_info).safe_push (first_stmt_info);
2694 REDUC_GROUP_SIZE (first_stmt_info) = size;
2696 return true;
2699 /* Return true if we need an in-order reduction for operation CODE
2700 on type TYPE. NEED_WRAPPING_INTEGRAL_OVERFLOW is true if integer
2701 overflow must wrap. */
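/* Illustrative example (assumes no -fassociative-math/-ffast-math): a
   float accumulation such as

     float s = 0.0f;
     for (int i = 0; i < n; i++)
       s += x[i];

   must keep the original left-to-right evaluation order because
   floating-point addition is not associative, so the caller is told to
   use an in-order (fold-left) reduction instead of a reassociating one.  */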
2703 static bool
2704 needs_fold_left_reduction_p (tree type, tree_code code,
2705 bool need_wrapping_integral_overflow)
2707 /* CHECKME: check for !flag_finite_math_only too? */
2708 if (SCALAR_FLOAT_TYPE_P (type))
2709 switch (code)
2711 case MIN_EXPR:
2712 case MAX_EXPR:
2713 return false;
2715 default:
2716 return !flag_associative_math;
2719 if (INTEGRAL_TYPE_P (type))
2721 if (!operation_no_trapping_overflow (type, code))
2722 return true;
2723 if (need_wrapping_integral_overflow
2724 && !TYPE_OVERFLOW_WRAPS (type)
2725 && operation_can_overflow (code))
2726 return true;
2727 return false;
2730 if (SAT_FIXED_POINT_TYPE_P (type))
2731 return true;
2733 return false;
2736 /* Return true if the reduction PHI in LOOP with latch arg LOOP_ARG and
2737 reduction operation CODE has a handled computation expression. */
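/* Illustrative example (sketch only): for

     for (int i = 0; i < n; i++)
       s = (s + a[i]) + b[i];

   the path from the latch definition of s back to the reduction phi is
   two PLUS_EXPR statements whose intermediate value has a single use, so
   the path is accepted; a MINUS_EXPR may appear on a PLUS_EXPR path as
   long as the running value does not end up negated each iteration.  */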
2739 bool
2740 check_reduction_path (dump_user_location_t loc, loop_p loop, gphi *phi,
2741 tree loop_arg, enum tree_code code)
2743 auto_vec<std::pair<ssa_op_iter, use_operand_p> > path;
2744 auto_bitmap visited;
2745 tree lookfor = PHI_RESULT (phi);
2746 ssa_op_iter curri;
2747 use_operand_p curr = op_iter_init_phiuse (&curri, phi, SSA_OP_USE);
2748 while (USE_FROM_PTR (curr) != loop_arg)
2749 curr = op_iter_next_use (&curri);
2750 curri.i = curri.numops;
2753 path.safe_push (std::make_pair (curri, curr));
2754 tree use = USE_FROM_PTR (curr);
2755 if (use == lookfor)
2756 break;
2757 gimple *def = SSA_NAME_DEF_STMT (use);
2758 if (gimple_nop_p (def)
2759 || ! flow_bb_inside_loop_p (loop, gimple_bb (def)))
2761 pop:
2764 std::pair<ssa_op_iter, use_operand_p> x = path.pop ();
2765 curri = x.first;
2766 curr = x.second;
2768 curr = op_iter_next_use (&curri);
2769 /* Skip already visited or non-SSA operands (from iterating
2770 over PHI args). */
2771 while (curr != NULL_USE_OPERAND_P
2772 && (TREE_CODE (USE_FROM_PTR (curr)) != SSA_NAME
2773 || ! bitmap_set_bit (visited,
2774 SSA_NAME_VERSION
2775 (USE_FROM_PTR (curr)))));
2777 while (curr == NULL_USE_OPERAND_P && ! path.is_empty ());
2778 if (curr == NULL_USE_OPERAND_P)
2779 break;
2781 else
2783 if (gimple_code (def) == GIMPLE_PHI)
2784 curr = op_iter_init_phiuse (&curri, as_a <gphi *>(def), SSA_OP_USE);
2785 else
2786 curr = op_iter_init_use (&curri, def, SSA_OP_USE);
2787 while (curr != NULL_USE_OPERAND_P
2788 && (TREE_CODE (USE_FROM_PTR (curr)) != SSA_NAME
2789 || ! bitmap_set_bit (visited,
2790 SSA_NAME_VERSION
2791 (USE_FROM_PTR (curr)))))
2792 curr = op_iter_next_use (&curri);
2793 if (curr == NULL_USE_OPERAND_P)
2794 goto pop;
2797 while (1);
2798 if (dump_file && (dump_flags & TDF_DETAILS))
2800 dump_printf_loc (MSG_NOTE, loc, "reduction path: ");
2801 unsigned i;
2802 std::pair<ssa_op_iter, use_operand_p> *x;
2803 FOR_EACH_VEC_ELT (path, i, x)
2805 dump_generic_expr (MSG_NOTE, TDF_SLIM, USE_FROM_PTR (x->second));
2806 dump_printf (MSG_NOTE, " ");
2808 dump_printf (MSG_NOTE, "\n");
2811 /* Check whether the reduction path detected is valid. */
2812 bool fail = path.length () == 0;
2813 bool neg = false;
2814 for (unsigned i = 1; i < path.length (); ++i)
2816 gimple *use_stmt = USE_STMT (path[i].second);
2817 tree op = USE_FROM_PTR (path[i].second);
2818 if (! has_single_use (op)
2819 || ! is_gimple_assign (use_stmt))
2821 fail = true;
2822 break;
2824 if (gimple_assign_rhs_code (use_stmt) != code)
2826 if (code == PLUS_EXPR
2827 && gimple_assign_rhs_code (use_stmt) == MINUS_EXPR)
2829 /* Track whether we negate the reduction value each iteration. */
2830 if (gimple_assign_rhs2 (use_stmt) == op)
2831 neg = ! neg;
2833 else
2835 fail = true;
2836 break;
2840 return ! fail && ! neg;
2844 /* Function vect_is_simple_reduction
2846 (1) Detect a cross-iteration def-use cycle that represents a simple
2847 reduction computation. We look for the following pattern:
2849 loop_header:
2850 a1 = phi < a0, a2 >
2851 a3 = ...
2852 a2 = operation (a3, a1)
2856 a3 = ...
2857 loop_header:
2858 a1 = phi < a0, a2 >
2859 a2 = operation (a3, a1)
2861 such that:
2862 1. operation is commutative and associative and it is safe to
2863 change the order of the computation
2864 2. no uses for a2 in the loop (a2 is used out of the loop)
2865 3. no uses of a1 in the loop besides the reduction operation
2866 4. no uses of a1 outside the loop.
2868 Conditions 1,4 are tested here.
2869 Conditions 2,3 are tested in vect_mark_stmts_to_be_vectorized.
2871 (2) Detect a cross-iteration def-use cycle in nested loops, i.e.,
2872 nested cycles.
2874 (3) Detect cycles of phi nodes in outer-loop vectorization, i.e., double
2875 reductions:
2877 a1 = phi < a0, a2 >
2878 inner loop (def of a3)
2879 a2 = phi < a3 >
2881 (4) Detect condition expressions, i.e.:
2882 for (int i = 0; i < N; i++)
2883 if (a[i] < val)
2884 ret_val = a[i];
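/* Illustrative source-level forms of the cases above (sketch only).
   Case (1), a plain sum reduction:

     int sum = 0;
     for (int i = 0; i < n; i++)
       sum += a[i];

   In gimple the accumulator becomes the loop-header phi a1 and the update
   statement becomes a2.  Case (4) is the ret_val loop quoted above, where
   the reduction is carried by a COND_EXPR rather than an arithmetic op.  */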
2888 static stmt_vec_info
2889 vect_is_simple_reduction (loop_vec_info loop_info, stmt_vec_info phi_info,
2890 bool *double_reduc,
2891 bool need_wrapping_integral_overflow,
2892 enum vect_reduction_type *v_reduc_type)
2894 gphi *phi = as_a <gphi *> (phi_info->stmt);
2895 struct loop *loop = (gimple_bb (phi))->loop_father;
2896 struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
2897 gimple *phi_use_stmt = NULL;
2898 enum tree_code orig_code, code;
2899 tree op1, op2, op3 = NULL_TREE, op4 = NULL_TREE;
2900 tree type;
2901 int nloop_uses;
2902 tree name;
2903 imm_use_iterator imm_iter;
2904 use_operand_p use_p;
2905 bool phi_def;
2907 *double_reduc = false;
2908 *v_reduc_type = TREE_CODE_REDUCTION;
2910 tree phi_name = PHI_RESULT (phi);
2911 /* ??? If there are no uses of the PHI result the inner loop reduction
2912 won't be detected as possibly double-reduction by vectorizable_reduction
2913 because that tries to walk the PHI arg from the preheader edge which
2914 can be constant. See PR60382. */
2915 if (has_zero_uses (phi_name))
2916 return NULL;
2917 nloop_uses = 0;
2918 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, phi_name)
2920 gimple *use_stmt = USE_STMT (use_p);
2921 if (is_gimple_debug (use_stmt))
2922 continue;
2924 if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
2926 if (dump_enabled_p ())
2927 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2928 "intermediate value used outside loop.\n");
2930 return NULL;
2933 nloop_uses++;
2934 if (nloop_uses > 1)
2936 if (dump_enabled_p ())
2937 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2938 "reduction value used in loop.\n");
2939 return NULL;
2942 phi_use_stmt = use_stmt;
2945 edge latch_e = loop_latch_edge (loop);
2946 tree loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
2947 if (TREE_CODE (loop_arg) != SSA_NAME)
2949 if (dump_enabled_p ())
2951 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2952 "reduction: not ssa_name: ");
2953 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, loop_arg);
2954 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
2956 return NULL;
2959 stmt_vec_info def_stmt_info = loop_info->lookup_def (loop_arg);
2960 if (!def_stmt_info
2961 || !flow_bb_inside_loop_p (loop, gimple_bb (def_stmt_info->stmt)))
2962 return NULL;
2964 if (gassign *def_stmt = dyn_cast <gassign *> (def_stmt_info->stmt))
2966 name = gimple_assign_lhs (def_stmt);
2967 phi_def = false;
2969 else if (gphi *def_stmt = dyn_cast <gphi *> (def_stmt_info->stmt))
2971 name = PHI_RESULT (def_stmt);
2972 phi_def = true;
2974 else
2976 if (dump_enabled_p ())
2978 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2979 "reduction: unhandled reduction operation: ");
2980 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
2981 def_stmt_info->stmt, 0);
2983 return NULL;
2986 nloop_uses = 0;
2987 auto_vec<gphi *, 3> lcphis;
2988 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
2990 gimple *use_stmt = USE_STMT (use_p);
2991 if (is_gimple_debug (use_stmt))
2992 continue;
2993 if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
2994 nloop_uses++;
2995 else
2996 /* We can have more than one loop-closed PHI. */
2997 lcphis.safe_push (as_a <gphi *> (use_stmt));
2998 if (nloop_uses > 1)
3000 if (dump_enabled_p ())
3001 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3002 "reduction used in loop.\n");
3003 return NULL;
3007 /* If DEF_STMT is a phi node itself, we expect it to have a single argument
3008 defined in the inner loop. */
3009 if (phi_def)
3011 gphi *def_stmt = as_a <gphi *> (def_stmt_info->stmt);
3012 op1 = PHI_ARG_DEF (def_stmt, 0);
3014 if (gimple_phi_num_args (def_stmt) != 1
3015 || TREE_CODE (op1) != SSA_NAME)
3017 if (dump_enabled_p ())
3018 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3019 "unsupported phi node definition.\n");
3021 return NULL;
3024 gimple *def1 = SSA_NAME_DEF_STMT (op1);
3025 if (gimple_bb (def1)
3026 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
3027 && loop->inner
3028 && flow_bb_inside_loop_p (loop->inner, gimple_bb (def1))
3029 && is_gimple_assign (def1)
3030 && is_a <gphi *> (phi_use_stmt)
3031 && flow_bb_inside_loop_p (loop->inner, gimple_bb (phi_use_stmt)))
3033 if (dump_enabled_p ())
3034 report_vect_op (MSG_NOTE, def_stmt,
3035 "detected double reduction: ");
3037 *double_reduc = true;
3038 return def_stmt_info;
3041 return NULL;
3044 /* If we are vectorizing an inner reduction, we execute it in the
3045 original order only when we are not dealing with a double
3046 reduction. */
3047 bool check_reduction = true;
3048 if (flow_loop_nested_p (vect_loop, loop))
3050 gphi *lcphi;
3051 unsigned i;
3052 check_reduction = false;
3053 FOR_EACH_VEC_ELT (lcphis, i, lcphi)
3054 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, gimple_phi_result (lcphi))
3056 gimple *use_stmt = USE_STMT (use_p);
3057 if (is_gimple_debug (use_stmt))
3058 continue;
3059 if (! flow_bb_inside_loop_p (vect_loop, gimple_bb (use_stmt)))
3060 check_reduction = true;
3064 gassign *def_stmt = as_a <gassign *> (def_stmt_info->stmt);
3065 bool nested_in_vect_loop = flow_loop_nested_p (vect_loop, loop);
3066 code = orig_code = gimple_assign_rhs_code (def_stmt);
3068 /* We can handle "res -= x[i]", which is non-associative, by
3069 simply rewriting it into "res += -x[i]". Avoid changing the
3070 gimple instruction for the first simple tests and only do this
3071 if we're allowed to change code at all. */
3072 if (code == MINUS_EXPR && gimple_assign_rhs2 (def_stmt) != phi_name)
3073 code = PLUS_EXPR;
3075 if (code == COND_EXPR)
3077 if (! nested_in_vect_loop)
3078 *v_reduc_type = COND_REDUCTION;
3080 op3 = gimple_assign_rhs1 (def_stmt);
3081 if (COMPARISON_CLASS_P (op3))
3083 op4 = TREE_OPERAND (op3, 1);
3084 op3 = TREE_OPERAND (op3, 0);
3086 if (op3 == phi_name || op4 == phi_name)
3088 if (dump_enabled_p ())
3089 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
3090 "reduction: condition depends on previous"
3091 " iteration: ");
3092 return NULL;
3095 op1 = gimple_assign_rhs2 (def_stmt);
3096 op2 = gimple_assign_rhs3 (def_stmt);
3098 else if (!commutative_tree_code (code) || !associative_tree_code (code))
3100 if (dump_enabled_p ())
3101 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
3102 "reduction: not commutative/associative: ");
3103 return NULL;
3105 else if (get_gimple_rhs_class (code) == GIMPLE_BINARY_RHS)
3107 op1 = gimple_assign_rhs1 (def_stmt);
3108 op2 = gimple_assign_rhs2 (def_stmt);
3110 else
3112 if (dump_enabled_p ())
3113 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
3114 "reduction: not handled operation: ");
3115 return NULL;
3118 if (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op2) != SSA_NAME)
3120 if (dump_enabled_p ())
3121 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
3122 "reduction: both uses not ssa_names: ");
3124 return NULL;
3127 type = TREE_TYPE (gimple_assign_lhs (def_stmt));
3128 if ((TREE_CODE (op1) == SSA_NAME
3129 && !types_compatible_p (type,TREE_TYPE (op1)))
3130 || (TREE_CODE (op2) == SSA_NAME
3131 && !types_compatible_p (type, TREE_TYPE (op2)))
3132 || (op3 && TREE_CODE (op3) == SSA_NAME
3133 && !types_compatible_p (type, TREE_TYPE (op3)))
3134 || (op4 && TREE_CODE (op4) == SSA_NAME
3135 && !types_compatible_p (type, TREE_TYPE (op4))))
3137 if (dump_enabled_p ())
3139 dump_printf_loc (MSG_NOTE, vect_location,
3140 "reduction: multiple types: operation type: ");
3141 dump_generic_expr (MSG_NOTE, TDF_SLIM, type);
3142 dump_printf (MSG_NOTE, ", operands types: ");
3143 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3144 TREE_TYPE (op1));
3145 dump_printf (MSG_NOTE, ",");
3146 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3147 TREE_TYPE (op2));
3148 if (op3)
3150 dump_printf (MSG_NOTE, ",");
3151 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3152 TREE_TYPE (op3));
3155 if (op4)
3157 dump_printf (MSG_NOTE, ",");
3158 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3159 TREE_TYPE (op4));
3161 dump_printf (MSG_NOTE, "\n");
3164 return NULL;
3167 /* Check whether it's ok to change the order of the computation.
3168 Generally, when vectorizing a reduction we change the order of the
3169 computation. This may change the behavior of the program in some
3170 cases, so we need to check that this is ok. One exception is when
3171 vectorizing an outer-loop: the inner-loop is executed sequentially,
3172 and therefore vectorizing reductions in the inner-loop during
3173 outer-loop vectorization is safe. */
3174 if (check_reduction
3175 && *v_reduc_type == TREE_CODE_REDUCTION
3176 && needs_fold_left_reduction_p (type, code,
3177 need_wrapping_integral_overflow))
3178 *v_reduc_type = FOLD_LEFT_REDUCTION;
3180 /* Reduction is safe. We're dealing with one of the following:
3181 1) integer arithmetic and no trapv
3182 2) floating point arithmetic, and special flags permit this optimization
3183 3) nested cycle (i.e., outer loop vectorization). */
3184 stmt_vec_info def1_info = loop_info->lookup_def (op1);
3185 stmt_vec_info def2_info = loop_info->lookup_def (op2);
3186 if (code != COND_EXPR && !def1_info && !def2_info)
3188 if (dump_enabled_p ())
3189 report_vect_op (MSG_NOTE, def_stmt, "reduction: no defs for operands: ");
3190 return NULL;
3193 /* Check that one def is the reduction def, defined by PHI,
3194 the other def is either defined in the loop ("vect_internal_def"),
3195 or it's an induction (defined by a loop-header phi-node). */
3197 if (def2_info
3198 && def2_info->stmt == phi
3199 && (code == COND_EXPR
3200 || !def1_info
3201 || !flow_bb_inside_loop_p (loop, gimple_bb (def1_info->stmt))
3202 || vect_valid_reduction_input_p (def1_info)))
3204 if (dump_enabled_p ())
3205 report_vect_op (MSG_NOTE, def_stmt, "detected reduction: ");
3206 return def_stmt_info;
3209 if (def1_info
3210 && def1_info->stmt == phi
3211 && (code == COND_EXPR
3212 || !def2_info
3213 || !flow_bb_inside_loop_p (loop, gimple_bb (def2_info->stmt))
3214 || vect_valid_reduction_input_p (def2_info)))
3216 if (! nested_in_vect_loop && orig_code != MINUS_EXPR)
3218 /* Check if we can swap operands (just for simplicity - so that
3219 the rest of the code can assume that the reduction variable
3220 is always the last (second) argument). */
3221 if (code == COND_EXPR)
3223 /* Swap cond_expr by inverting the condition. */
3224 tree cond_expr = gimple_assign_rhs1 (def_stmt);
3225 enum tree_code invert_code = ERROR_MARK;
3226 enum tree_code cond_code = TREE_CODE (cond_expr);
3228 if (TREE_CODE_CLASS (cond_code) == tcc_comparison)
3230 bool honor_nans = HONOR_NANS (TREE_OPERAND (cond_expr, 0));
3231 invert_code = invert_tree_comparison (cond_code, honor_nans);
3233 if (invert_code != ERROR_MARK)
3235 TREE_SET_CODE (cond_expr, invert_code);
3236 swap_ssa_operands (def_stmt,
3237 gimple_assign_rhs2_ptr (def_stmt),
3238 gimple_assign_rhs3_ptr (def_stmt));
3240 else
3242 if (dump_enabled_p ())
3243 report_vect_op (MSG_NOTE, def_stmt,
3244 "detected reduction: cannot swap operands "
3245 "for cond_expr");
3246 return NULL;
3249 else
3250 swap_ssa_operands (def_stmt, gimple_assign_rhs1_ptr (def_stmt),
3251 gimple_assign_rhs2_ptr (def_stmt));
3253 if (dump_enabled_p ())
3254 report_vect_op (MSG_NOTE, def_stmt,
3255 "detected reduction: need to swap operands: ");
3257 if (CONSTANT_CLASS_P (gimple_assign_rhs1 (def_stmt)))
3258 LOOP_VINFO_OPERANDS_SWAPPED (loop_info) = true;
3260 else
3262 if (dump_enabled_p ())
3263 report_vect_op (MSG_NOTE, def_stmt, "detected reduction: ");
3266 return def_stmt_info;
3269 /* Try to find SLP reduction chain. */
3270 if (! nested_in_vect_loop
3271 && code != COND_EXPR
3272 && orig_code != MINUS_EXPR
3273 && vect_is_slp_reduction (loop_info, phi, def_stmt))
3275 if (dump_enabled_p ())
3276 report_vect_op (MSG_NOTE, def_stmt,
3277 "reduction: detected reduction chain: ");
3279 return def_stmt_info;
3282 /* Dissolve any group possibly half-built by vect_is_slp_reduction. */
3283 stmt_vec_info first = REDUC_GROUP_FIRST_ELEMENT (def_stmt_info);
3284 while (first)
3286 stmt_vec_info next = REDUC_GROUP_NEXT_ELEMENT (first);
3287 REDUC_GROUP_FIRST_ELEMENT (first) = NULL;
3288 REDUC_GROUP_NEXT_ELEMENT (first) = NULL;
3289 first = next;
3292 /* Look for the expression computing loop_arg from loop PHI result. */
3293 if (check_reduction_path (vect_location, loop, phi, loop_arg, code))
3294 return def_stmt_info;
3296 if (dump_enabled_p ())
3298 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
3299 "reduction: unknown pattern: ");
3302 return NULL;
3305 /* Wrapper around vect_is_simple_reduction, which will modify code
3306 in-place if it enables detection of more reductions. Arguments
3307 as there. */
3309 stmt_vec_info
3310 vect_force_simple_reduction (loop_vec_info loop_info, stmt_vec_info phi_info,
3311 bool *double_reduc,
3312 bool need_wrapping_integral_overflow)
3314 enum vect_reduction_type v_reduc_type;
3315 stmt_vec_info def_info
3316 = vect_is_simple_reduction (loop_info, phi_info, double_reduc,
3317 need_wrapping_integral_overflow,
3318 &v_reduc_type);
3319 if (def_info)
3321 STMT_VINFO_REDUC_TYPE (phi_info) = v_reduc_type;
3322 STMT_VINFO_REDUC_DEF (phi_info) = def_info;
3323 STMT_VINFO_REDUC_TYPE (def_info) = v_reduc_type;
3324 STMT_VINFO_REDUC_DEF (def_info) = phi_info;
3326 return def_info;
3329 /* Calculate cost of peeling the loop PEEL_ITERS_PROLOGUE times. */
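/* Worked example (numbers are illustrative): with NITERS = 100,
   PEEL_ITERS_PROLOGUE = 3 and an assumed vectorization factor of 8, the
   epilogue gets (100 - 3) % 8 = 1 scalar iteration, and the prologue and
   epilogue costs are the recorded scalar single-iteration costs scaled by
   3 and 1 respectively.  */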
3331 vect_get_known_peeling_cost (loop_vec_info loop_vinfo, int peel_iters_prologue,
3332 int *peel_iters_epilogue,
3333 stmt_vector_for_cost *scalar_cost_vec,
3334 stmt_vector_for_cost *prologue_cost_vec,
3335 stmt_vector_for_cost *epilogue_cost_vec)
3337 int retval = 0;
3338 int assumed_vf = vect_vf_for_cost (loop_vinfo);
3340 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
3342 *peel_iters_epilogue = assumed_vf / 2;
3343 if (dump_enabled_p ())
3344 dump_printf_loc (MSG_NOTE, vect_location,
3345 "cost model: epilogue peel iters set to vf/2 "
3346 "because loop iterations are unknown .\n");
3348 /* If peeled iterations are known but number of scalar loop
3349 iterations are unknown, count a taken branch per peeled loop. */
3350 retval = record_stmt_cost (prologue_cost_vec, 1, cond_branch_taken,
3351 NULL, 0, vect_prologue);
3352 retval = record_stmt_cost (prologue_cost_vec, 1, cond_branch_taken,
3353 NULL, 0, vect_epilogue);
3355 else
3357 int niters = LOOP_VINFO_INT_NITERS (loop_vinfo);
3358 peel_iters_prologue = niters < peel_iters_prologue ?
3359 niters : peel_iters_prologue;
3360 *peel_iters_epilogue = (niters - peel_iters_prologue) % assumed_vf;
3361 /* If we need to peel for gaps but no epilogue peeling would otherwise
3362 be required, we have to peel VF iterations. */
3363 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) && !*peel_iters_epilogue)
3364 *peel_iters_epilogue = assumed_vf;
3367 stmt_info_for_cost *si;
3368 int j;
3369 if (peel_iters_prologue)
3370 FOR_EACH_VEC_ELT (*scalar_cost_vec, j, si)
3371 retval += record_stmt_cost (prologue_cost_vec,
3372 si->count * peel_iters_prologue,
3373 si->kind, si->stmt_info, si->misalign,
3374 vect_prologue);
3375 if (*peel_iters_epilogue)
3376 FOR_EACH_VEC_ELT (*scalar_cost_vec, j, si)
3377 retval += record_stmt_cost (epilogue_cost_vec,
3378 si->count * *peel_iters_epilogue,
3379 si->kind, si->stmt_info, si->misalign,
3380 vect_epilogue);
3382 return retval;
3385 /* Function vect_estimate_min_profitable_iters
3387 Return the number of iterations required for the vector version of the
3388 loop to be profitable relative to the cost of the scalar version of the
3389 loop.
3391 *RET_MIN_PROFITABLE_NITERS is a cost model profitability threshold
3392 of iterations for vectorization. A value of -1 means loop vectorization
3393 is not profitable. This returned value may be used for a dynamic
3394 profitability check.
3396 *RET_MIN_PROFITABLE_ESTIMATE is a profitability threshold to be used
3397 for static check against estimated number of iterations. */
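/* Usage sketch (illustrative numbers): if this function computes a
   runtime threshold of 12 and a static estimate threshold of 16, the
   transform guards the vector loop with a runtime check along the lines
   of "niters >= 12", while the analysis additionally refuses to vectorize
   when the compile-time estimated iteration count is below 16 (see the
   estimated_niter check in vect_analyze_loop_costing above).  */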
3399 static void
3400 vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo,
3401 int *ret_min_profitable_niters,
3402 int *ret_min_profitable_estimate)
3404 int min_profitable_iters;
3405 int min_profitable_estimate;
3406 int peel_iters_prologue;
3407 int peel_iters_epilogue;
3408 unsigned vec_inside_cost = 0;
3409 int vec_outside_cost = 0;
3410 unsigned vec_prologue_cost = 0;
3411 unsigned vec_epilogue_cost = 0;
3412 int scalar_single_iter_cost = 0;
3413 int scalar_outside_cost = 0;
3414 int assumed_vf = vect_vf_for_cost (loop_vinfo);
3415 int npeel = LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
3416 void *target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
3418 /* Cost model disabled. */
3419 if (unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
3421 dump_printf_loc (MSG_NOTE, vect_location, "cost model disabled.\n");
3422 *ret_min_profitable_niters = 0;
3423 *ret_min_profitable_estimate = 0;
3424 return;
3427 /* Requires loop versioning tests to handle misalignment. */
3428 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
3430 /* FIXME: Make cost depend on complexity of individual check. */
3431 unsigned len = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).length ();
3432 (void) add_stmt_cost (target_cost_data, len, vector_stmt, NULL, 0,
3433 vect_prologue);
3434 dump_printf (MSG_NOTE,
3435 "cost model: Adding cost of checks for loop "
3436 "versioning to treat misalignment.\n");
3439 /* Requires loop versioning with alias checks. */
3440 if (LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
3442 /* FIXME: Make cost depend on complexity of individual check. */
3443 unsigned len = LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo).length ();
3444 (void) add_stmt_cost (target_cost_data, len, vector_stmt, NULL, 0,
3445 vect_prologue);
3446 len = LOOP_VINFO_CHECK_UNEQUAL_ADDRS (loop_vinfo).length ();
3447 if (len)
3448 /* Count LEN - 1 ANDs and LEN comparisons. */
3449 (void) add_stmt_cost (target_cost_data, len * 2 - 1, scalar_stmt,
3450 NULL, 0, vect_prologue);
3451 len = LOOP_VINFO_LOWER_BOUNDS (loop_vinfo).length ();
3452 if (len)
3454 /* Count LEN - 1 ANDs and LEN comparisons. */
3455 unsigned int nstmts = len * 2 - 1;
3456 /* +1 for each bias that needs adding. */
3457 for (unsigned int i = 0; i < len; ++i)
3458 if (!LOOP_VINFO_LOWER_BOUNDS (loop_vinfo)[i].unsigned_p)
3459 nstmts += 1;
3460 (void) add_stmt_cost (target_cost_data, nstmts, scalar_stmt,
3461 NULL, 0, vect_prologue);
3463 dump_printf (MSG_NOTE,
3464 "cost model: Adding cost of checks for loop "
3465 "versioning aliasing.\n");
3468 /* Requires loop versioning with niter checks. */
3469 if (LOOP_REQUIRES_VERSIONING_FOR_NITERS (loop_vinfo))
3471 /* FIXME: Make cost depend on complexity of individual check. */
3472 (void) add_stmt_cost (target_cost_data, 1, vector_stmt, NULL, 0,
3473 vect_prologue);
3474 dump_printf (MSG_NOTE,
3475 "cost model: Adding cost of checks for loop "
3476 "versioning niters.\n");
3479 if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
3480 (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken, NULL, 0,
3481 vect_prologue);
3483 /* Count statements in scalar loop. Using this as scalar cost for a single
3484 iteration for now.
3486 TODO: Add outer loop support.
3488 TODO: Consider assigning different costs to different scalar
3489 statements. */
3491 scalar_single_iter_cost
3492 = LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST (loop_vinfo);
3494 /* Add additional cost for the peeled instructions in prologue and epilogue
3495 loop. (For fully-masked loops there will be no peeling.)
3497 FORNOW: If we don't know the value of peel_iters for prologue or epilogue
3498 at compile-time - we assume it's vf/2 (the worst would be vf-1).
3500 TODO: Build an expression that represents peel_iters for prologue and
3501 epilogue to be used in a run-time test. */
3503 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
3505 peel_iters_prologue = 0;
3506 peel_iters_epilogue = 0;
3508 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo))
3510 /* We need to peel exactly one iteration. */
3511 peel_iters_epilogue += 1;
3512 stmt_info_for_cost *si;
3513 int j;
3514 FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
3515 j, si)
3516 (void) add_stmt_cost (target_cost_data, si->count,
3517 si->kind, si->stmt_info, si->misalign,
3518 vect_epilogue);
3521 else if (npeel < 0)
3523 peel_iters_prologue = assumed_vf / 2;
3524 dump_printf (MSG_NOTE, "cost model: "
3525 "prologue peel iters set to vf/2.\n");
3527 /* If peeling for alignment is unknown, the loop bound of the main loop
3528 becomes unknown. */
3529 peel_iters_epilogue = assumed_vf / 2;
3530 dump_printf (MSG_NOTE, "cost model: "
3531 "epilogue peel iters set to vf/2 because "
3532 "peeling for alignment is unknown.\n");
3534 /* If peeled iterations are unknown, count a taken branch and a not taken
3535 branch per peeled loop. Even if scalar loop iterations are known,
3536 vector iterations are not known since peeled prologue iterations are
3537 not known. Hence guards remain the same. */
3538 (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken,
3539 NULL, 0, vect_prologue);
3540 (void) add_stmt_cost (target_cost_data, 1, cond_branch_not_taken,
3541 NULL, 0, vect_prologue);
3542 (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken,
3543 NULL, 0, vect_epilogue);
3544 (void) add_stmt_cost (target_cost_data, 1, cond_branch_not_taken,
3545 NULL, 0, vect_epilogue);
3546 stmt_info_for_cost *si;
3547 int j;
3548 FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo), j, si)
3550 (void) add_stmt_cost (target_cost_data,
3551 si->count * peel_iters_prologue,
3552 si->kind, si->stmt_info, si->misalign,
3553 vect_prologue);
3554 (void) add_stmt_cost (target_cost_data,
3555 si->count * peel_iters_epilogue,
3556 si->kind, si->stmt_info, si->misalign,
3557 vect_epilogue);
3560 else
3562 stmt_vector_for_cost prologue_cost_vec, epilogue_cost_vec;
3563 stmt_info_for_cost *si;
3564 int j;
3565 void *data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
3567 prologue_cost_vec.create (2);
3568 epilogue_cost_vec.create (2);
3569 peel_iters_prologue = npeel;
3571 (void) vect_get_known_peeling_cost (loop_vinfo, peel_iters_prologue,
3572 &peel_iters_epilogue,
3573 &LOOP_VINFO_SCALAR_ITERATION_COST
3574 (loop_vinfo),
3575 &prologue_cost_vec,
3576 &epilogue_cost_vec);
3578 FOR_EACH_VEC_ELT (prologue_cost_vec, j, si)
3579 (void) add_stmt_cost (data, si->count, si->kind, si->stmt_info,
3580 si->misalign, vect_prologue);
3582 FOR_EACH_VEC_ELT (epilogue_cost_vec, j, si)
3583 (void) add_stmt_cost (data, si->count, si->kind, si->stmt_info,
3584 si->misalign, vect_epilogue);
3586 prologue_cost_vec.release ();
3587 epilogue_cost_vec.release ();
3590 /* FORNOW: The scalar outside cost is incremented in one of the
3591 following ways:
3593 1. The vectorizer checks for alignment and aliasing and generates
3594 a condition that allows dynamic vectorization. A cost model
3595 check is ANDED with the versioning condition. Hence scalar code
3596 path now has the added cost of the versioning check.
3598 if (cost > th & versioning_check)
3599 jmp to vector code
3601 Hence the run-time scalar cost is incremented by a not-taken branch cost.
3603 2. The vectorizer then checks if a prologue is required. If the
3604 cost model check was not done before during versioning, it has to
3605 be done before the prologue check.
3607 if (cost <= th)
3608 prologue = scalar_iters
3609 if (prologue == 0)
3610 jmp to vector code
3611 else
3612 execute prologue
3613 if (prologue == num_iters)
3614 go to exit
3616 Hence the run-time scalar cost is incremented by a taken branch,
3617 plus a not-taken branch, plus a taken branch cost.
3619 3. The vectorizer then checks if an epilogue is required. If the
3620 cost model check was not done before during prologue check, it
3621 has to be done with the epilogue check.
3623 if (prologue == 0)
3624 jmp to vector code
3625 else
3626 execute prologue
3627 if (prologue == num_iters)
3628 go to exit
3629 vector code:
3630 if ((cost <= th) | (scalar_iters-prologue-epilogue == 0))
3631 jmp to epilogue
3633 Hence the run-time scalar cost should be incremented by 2 taken
3634 branches.
3636 TODO: The back end may order the BBs differently and reverse
3637 conditions/branch directions. Change the estimates below to
3638 something more reasonable. */
3640 /* If the number of iterations is known and we do not do versioning, we can
3641 decide whether to vectorize at compile time. Hence the scalar version
3642 does not carry cost model guard costs. */
3643 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
3644 || LOOP_REQUIRES_VERSIONING (loop_vinfo))
3646 /* Cost model check occurs at versioning. */
3647 if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
3648 scalar_outside_cost += vect_get_stmt_cost (cond_branch_not_taken);
3649 else
3651 /* Cost model check occurs at prologue generation. */
3652 if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0)
3653 scalar_outside_cost += 2 * vect_get_stmt_cost (cond_branch_taken)
3654 + vect_get_stmt_cost (cond_branch_not_taken);
3655 /* Cost model check occurs at epilogue generation. */
3656 else
3657 scalar_outside_cost += 2 * vect_get_stmt_cost (cond_branch_taken);
3661 /* Complete the target-specific cost calculations. */
3662 finish_cost (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo), &vec_prologue_cost,
3663 &vec_inside_cost, &vec_epilogue_cost);
3665 vec_outside_cost = (int)(vec_prologue_cost + vec_epilogue_cost);
3667 if (dump_enabled_p ())
3669 dump_printf_loc (MSG_NOTE, vect_location, "Cost model analysis: \n");
3670 dump_printf (MSG_NOTE, " Vector inside of loop cost: %d\n",
3671 vec_inside_cost);
3672 dump_printf (MSG_NOTE, " Vector prologue cost: %d\n",
3673 vec_prologue_cost);
3674 dump_printf (MSG_NOTE, " Vector epilogue cost: %d\n",
3675 vec_epilogue_cost);
3676 dump_printf (MSG_NOTE, " Scalar iteration cost: %d\n",
3677 scalar_single_iter_cost);
3678 dump_printf (MSG_NOTE, " Scalar outside cost: %d\n",
3679 scalar_outside_cost);
3680 dump_printf (MSG_NOTE, " Vector outside cost: %d\n",
3681 vec_outside_cost);
3682 dump_printf (MSG_NOTE, " prologue iterations: %d\n",
3683 peel_iters_prologue);
3684 dump_printf (MSG_NOTE, " epilogue iterations: %d\n",
3685 peel_iters_epilogue);
3688 /* Calculate number of iterations required to make the vector version
3689 profitable, relative to the loop bodies only. The following condition
3690 must hold true:
3691 SIC * niters + SOC > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC
3692 where
3693 SIC = scalar iteration cost, VIC = vector iteration cost,
3694 VOC = vector outside cost, VF = vectorization factor,
3695 PL_ITERS = prologue iterations, EP_ITERS= epilogue iterations
3696 SOC = scalar outside cost for run time cost model check. */
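/* Editorial note: rearranging the condition above (multiply both sides by VF
   and collect the niters terms, treating the division by VF as exact) gives

     niters * (SIC * VF - VIC) > (VOC - SOC) * VF - VIC * (PL_ITERS + EP_ITERS)

   which is exactly what the code below evaluates: the right-hand side is the
   dividend, SIC * VF - VIC the divisor, and the final compare-and-increment
   bumps the result when the integer division rounded it down. The divisor is
   only positive when a vector iteration is cheaper than VF scalar iterations;
   the else branch handles the opposite case. */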
3698 if ((scalar_single_iter_cost * assumed_vf) > (int) vec_inside_cost)
3700 min_profitable_iters = ((vec_outside_cost - scalar_outside_cost)
3701 * assumed_vf
3702 - vec_inside_cost * peel_iters_prologue
3703 - vec_inside_cost * peel_iters_epilogue);
3704 if (min_profitable_iters <= 0)
3705 min_profitable_iters = 0;
3706 else
3708 min_profitable_iters /= ((scalar_single_iter_cost * assumed_vf)
3709 - vec_inside_cost);
3711 if ((scalar_single_iter_cost * assumed_vf * min_profitable_iters)
3712 <= (((int) vec_inside_cost * min_profitable_iters)
3713 + (((int) vec_outside_cost - scalar_outside_cost)
3714 * assumed_vf)))
3715 min_profitable_iters++;
3718 /* vector version will never be profitable. */
3719 else
3721 if (LOOP_VINFO_LOOP (loop_vinfo)->force_vectorize)
3722 warning_at (vect_location.get_location_t (), OPT_Wopenmp_simd,
3723 "vectorization did not happen for a simd loop");
3725 if (dump_enabled_p ())
3726 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3727 "cost model: the vector iteration cost = %d "
3728 "divided by the scalar iteration cost = %d "
3729 "is greater or equal to the vectorization factor = %d"
3730 ".\n",
3731 vec_inside_cost, scalar_single_iter_cost, assumed_vf);
3732 *ret_min_profitable_niters = -1;
3733 *ret_min_profitable_estimate = -1;
3734 return;
3737 dump_printf (MSG_NOTE,
3738 " Calculated minimum iters for profitability: %d\n",
3739 min_profitable_iters);
3741 if (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
3742 && min_profitable_iters < (assumed_vf + peel_iters_prologue))
3743 /* We want the vectorized loop to execute at least once. */
3744 min_profitable_iters = assumed_vf + peel_iters_prologue;
3746 if (dump_enabled_p ())
3747 dump_printf_loc (MSG_NOTE, vect_location,
3748 " Runtime profitability threshold = %d\n",
3749 min_profitable_iters);
3751 *ret_min_profitable_niters = min_profitable_iters;
3753 /* Calculate number of iterations required to make the vector version
3754 profitable, relative to the loop bodies only.
3756 The non-vectorized variant costs SIC * niters and it must win over the
3757 vector variant on the expected loop trip count. The following condition must hold true:
3758 SIC * niters > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC + SOC */
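/* Editorial note: the same rearrangement as for the threshold above, but with
   SOC charged to the vector side (here the runtime check is an overhead of
   the vectorized path), gives

     niters * (SIC * VF - VIC) > (VOC + SOC) * VF - VIC * (PL_ITERS + EP_ITERS)

   which matches the expression computed below. */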
3760 if (vec_outside_cost <= 0)
3761 min_profitable_estimate = 0;
3762 else
3764 min_profitable_estimate = ((vec_outside_cost + scalar_outside_cost)
3765 * assumed_vf
3766 - vec_inside_cost * peel_iters_prologue
3767 - vec_inside_cost * peel_iters_epilogue)
3768 / ((scalar_single_iter_cost * assumed_vf)
3769 - vec_inside_cost);
3771 min_profitable_estimate = MAX (min_profitable_estimate, min_profitable_iters);
3772 if (dump_enabled_p ())
3773 dump_printf_loc (MSG_NOTE, vect_location,
3774 " Static estimate profitability threshold = %d\n",
3775 min_profitable_estimate);
3777 *ret_min_profitable_estimate = min_profitable_estimate;
3780 /* Writes into SEL a mask for a vec_perm, equivalent to a vec_shr by OFFSET
3781 vector elements (not bits) for a vector with NELT elements. */
3782 static void
3783 calc_vec_perm_mask_for_shift (unsigned int offset, unsigned int nelt,
3784 vec_perm_builder *sel)
3786 /* The encoding is a single stepped pattern. Any wrap-around is handled
3787 by vec_perm_indices. */
3788 sel->new_vector (nelt, 1, 3);
3789 for (unsigned int i = 0; i < 3; i++)
3790 sel->quick_push (i + offset);
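/* Editorial worked example: for OFFSET == 2 and NELT == 8 the three encoded
   elements are {2, 3, 4}; vec_perm_indices extends the single stepped pattern
   to the full selector {2, 3, 4, 5, 6, 7, 8, 9}. Indices 0..7 select lanes of
   the first permute input and 8..15 lanes of the second, so with a zero
   vector as the second input this permute shifts the vector down by two
   elements and fills the tail with zeros. */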
3793 /* Checks whether the target supports whole-vector shifts for vectors of mode
3794 MODE. This is the case if _either_ the platform handles vec_shr_optab, _or_
3795 it supports vec_perm_const with masks for all necessary shift amounts. */
3796 static bool
3797 have_whole_vector_shift (machine_mode mode)
3799 if (optab_handler (vec_shr_optab, mode) != CODE_FOR_nothing)
3800 return true;
3802 /* Variable-length vectors should be handled via the optab. */
3803 unsigned int nelt;
3804 if (!GET_MODE_NUNITS (mode).is_constant (&nelt))
3805 return false;
3807 vec_perm_builder sel;
3808 vec_perm_indices indices;
3809 for (unsigned int i = nelt / 2; i >= 1; i /= 2)
3811 calc_vec_perm_mask_for_shift (i, nelt, &sel);
3812 indices.new_vector (sel, 2, nelt);
3813 if (!can_vec_perm_const_p (mode, indices, false))
3814 return false;
3816 return true;
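/* Editorial sketch (plain C, not vectorizer code): the halving offsets
   NELT/2, NELT/4, ..., 1 checked above are exactly the shift amounts needed
   to reduce a whole vector in log2(NELT) shift-and-combine steps. A scalar
   model of that reduction, assuming NELT is a power of two (at most 64 here)
   and '+' as the reduction operation:

   static int
   shift_reduce_model (const int *lanes, unsigned int nelt)
   {
     int acc[64];
     for (unsigned int i = 0; i < nelt; i++)
       acc[i] = lanes[i];
     // Each step folds the upper half of the live lanes into the lower half.
     for (unsigned int off = nelt / 2; off >= 1; off /= 2)
       for (unsigned int i = 0; i < off; i++)
         acc[i] = acc[i] + acc[i + off];
     return acc[0];   // lane 0 now holds the full reduction
   }

   For example, shift_reduce_model ((int[]){1, 2, 3, 4, 5, 6, 7, 8}, 8)
   returns 36. */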
3819 /* TODO: Close dependency between vect_model_*_cost and vectorizable_*
3820 functions. Design better to avoid maintenance issues. */
3822 /* Function vect_model_reduction_cost.
3824 Models cost for a reduction operation, including the vector ops
3825 generated within the strip-mine loop, the initial definition before
3826 the loop, and the epilogue code that must be generated. */
3828 static void
3829 vect_model_reduction_cost (stmt_vec_info stmt_info, internal_fn reduc_fn,
3830 int ncopies, stmt_vector_for_cost *cost_vec)
3832 int prologue_cost = 0, epilogue_cost = 0, inside_cost;
3833 enum tree_code code;
3834 optab optab;
3835 tree vectype;
3836 machine_mode mode;
3837 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3838 struct loop *loop = NULL;
3840 if (loop_vinfo)
3841 loop = LOOP_VINFO_LOOP (loop_vinfo);
3843 /* Condition reductions generate two reductions in the loop. */
3844 vect_reduction_type reduction_type
3845 = STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info);
3846 if (reduction_type == COND_REDUCTION)
3847 ncopies *= 2;
3849 vectype = STMT_VINFO_VECTYPE (stmt_info);
3850 mode = TYPE_MODE (vectype);
3851 stmt_vec_info orig_stmt_info = vect_orig_stmt (stmt_info);
3853 code = gimple_assign_rhs_code (orig_stmt_info->stmt);
3855 if (reduction_type == EXTRACT_LAST_REDUCTION
3856 || reduction_type == FOLD_LEFT_REDUCTION)
3858 /* No extra instructions needed in the prologue. */
3859 prologue_cost = 0;
3861 if (reduction_type == EXTRACT_LAST_REDUCTION || reduc_fn != IFN_LAST)
3862 /* Count one reduction-like operation per vector. */
3863 inside_cost = record_stmt_cost (cost_vec, ncopies, vec_to_scalar,
3864 stmt_info, 0, vect_body);
3865 else
3867 /* Use NELEMENTS extracts and NELEMENTS scalar ops. */
3868 unsigned int nelements = ncopies * vect_nunits_for_cost (vectype);
3869 inside_cost = record_stmt_cost (cost_vec, nelements,
3870 vec_to_scalar, stmt_info, 0,
3871 vect_body);
3872 inside_cost += record_stmt_cost (cost_vec, nelements,
3873 scalar_stmt, stmt_info, 0,
3874 vect_body);
3877 else
3879 /* Add in cost for initial definition.
3880 For cond reduction we have four vectors: initial index, step,
3881 initial result of the data reduction, initial value of the index
3882 reduction. */
3883 int prologue_stmts = reduction_type == COND_REDUCTION ? 4 : 1;
3884 prologue_cost += record_stmt_cost (cost_vec, prologue_stmts,
3885 scalar_to_vec, stmt_info, 0,
3886 vect_prologue);
3888 /* Cost of reduction op inside loop. */
3889 inside_cost = record_stmt_cost (cost_vec, ncopies, vector_stmt,
3890 stmt_info, 0, vect_body);
3893 /* Determine cost of epilogue code.
3895 We have a reduction operator that will reduce the vector in one statement.
3896 Also requires scalar extract. */
3898 if (!loop || !nested_in_vect_loop_p (loop, orig_stmt_info))
3900 if (reduc_fn != IFN_LAST)
3902 if (reduction_type == COND_REDUCTION)
3904 /* An EQ stmt and a COND_EXPR stmt. */
3905 epilogue_cost += record_stmt_cost (cost_vec, 2,
3906 vector_stmt, stmt_info, 0,
3907 vect_epilogue);
3908 /* Reduction of the max index and a reduction of the found
3909 values. */
3910 epilogue_cost += record_stmt_cost (cost_vec, 2,
3911 vec_to_scalar, stmt_info, 0,
3912 vect_epilogue);
3913 /* A broadcast of the max value. */
3914 epilogue_cost += record_stmt_cost (cost_vec, 1,
3915 scalar_to_vec, stmt_info, 0,
3916 vect_epilogue);
3918 else
3920 epilogue_cost += record_stmt_cost (cost_vec, 1, vector_stmt,
3921 stmt_info, 0, vect_epilogue);
3922 epilogue_cost += record_stmt_cost (cost_vec, 1,
3923 vec_to_scalar, stmt_info, 0,
3924 vect_epilogue);
3927 else if (reduction_type == COND_REDUCTION)
3929 unsigned estimated_nunits = vect_nunits_for_cost (vectype);
3930 /* Extraction of scalar elements. */
3931 epilogue_cost += record_stmt_cost (cost_vec,
3932 2 * estimated_nunits,
3933 vec_to_scalar, stmt_info, 0,
3934 vect_epilogue);
3935 /* Scalar max reductions via COND_EXPR / MAX_EXPR. */
3936 epilogue_cost += record_stmt_cost (cost_vec,
3937 2 * estimated_nunits - 3,
3938 scalar_stmt, stmt_info, 0,
3939 vect_epilogue);
3941 else if (reduction_type == EXTRACT_LAST_REDUCTION
3942 || reduction_type == FOLD_LEFT_REDUCTION)
3943 /* No extra instructions needed in the epilogue. */
3945 else
3947 int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype));
3948 tree bitsize =
3949 TYPE_SIZE (TREE_TYPE (gimple_assign_lhs (orig_stmt_info->stmt)));
3950 int element_bitsize = tree_to_uhwi (bitsize);
3951 int nelements = vec_size_in_bits / element_bitsize;
3953 if (code == COND_EXPR)
3954 code = MAX_EXPR;
3956 optab = optab_for_tree_code (code, vectype, optab_default);
3958 /* We have a whole vector shift available. */
3959 if (optab != unknown_optab
3960 && VECTOR_MODE_P (mode)
3961 && optab_handler (optab, mode) != CODE_FOR_nothing
3962 && have_whole_vector_shift (mode))
3964 /* Final reduction via vector shifts and the reduction operator.
3965 Also requires scalar extract. */
3966 epilogue_cost += record_stmt_cost (cost_vec,
3967 exact_log2 (nelements) * 2,
3968 vector_stmt, stmt_info, 0,
3969 vect_epilogue);
3970 epilogue_cost += record_stmt_cost (cost_vec, 1,
3971 vec_to_scalar, stmt_info, 0,
3972 vect_epilogue);
3974 else
3975 /* Use extracts and reduction op for final reduction. For N
3976 elements, we have N extracts and N-1 reduction ops. */
3977 epilogue_cost += record_stmt_cost (cost_vec,
3978 nelements + nelements - 1,
3979 vector_stmt, stmt_info, 0,
3980 vect_epilogue);
3984 if (dump_enabled_p ())
3985 dump_printf (MSG_NOTE,
3986 "vect_model_reduction_cost: inside_cost = %d, "
3987 "prologue_cost = %d, epilogue_cost = %d .\n", inside_cost,
3988 prologue_cost, epilogue_cost);
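/* Editorial worked example: for a COND_REDUCTION with NCOPIES == 1 on entry,
   a target-supported REDUC_FN and no enclosing vector loop, the tallies above
   are: NCOPIES is doubled to 2; the prologue records 4 scalar_to_vec stmts
   (the initial index, the step, and the initial values of the data and index
   reductions); the body records 2 vector_stmt; and the epilogue records
   2 vector_stmt (the EQ and COND_EXPR), 2 vec_to_scalar (the two reductions)
   and 1 scalar_to_vec (the broadcast). How each kind is priced is entirely up
   to the target's cost hooks. */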
3992 /* Function vect_model_induction_cost.
3994 Models cost for induction operations. */
3996 static void
3997 vect_model_induction_cost (stmt_vec_info stmt_info, int ncopies,
3998 stmt_vector_for_cost *cost_vec)
4000 unsigned inside_cost, prologue_cost;
4002 if (PURE_SLP_STMT (stmt_info))
4003 return;
4005 /* loop cost for vec_loop. */
4006 inside_cost = record_stmt_cost (cost_vec, ncopies, vector_stmt,
4007 stmt_info, 0, vect_body);
4009 /* prologue cost for vec_init and vec_step. */
4010 prologue_cost = record_stmt_cost (cost_vec, 2, scalar_to_vec,
4011 stmt_info, 0, vect_prologue);
4013 if (dump_enabled_p ())
4014 dump_printf_loc (MSG_NOTE, vect_location,
4015 "vect_model_induction_cost: inside_cost = %d, "
4016 "prologue_cost = %d .\n", inside_cost, prologue_cost);
4021 /* Function get_initial_def_for_reduction
4023 Input:
4024 STMT_VINFO - a stmt that performs a reduction operation in the loop.
4025 INIT_VAL - the initial value of the reduction variable
4027 Output:
4028 ADJUSTMENT_DEF - a tree that holds a value to be added to the final result
4029 of the reduction (used for adjusting the epilog - see below).
4030 Return a vector variable, initialized according to the operation that
4031 STMT_VINFO performs. This vector will be used as the initial value
4032 of the vector of partial results.
4034 Option1 (adjust in epilog): Initialize the vector as follows:
4035 add/bit or/xor: [0,0,...,0,0]
4036 mult/bit and: [1,1,...,1,1]
4037 min/max/cond_expr: [init_val,init_val,..,init_val,init_val]
4038 and when necessary (e.g. add/mult case) let the caller know
4039 that it needs to adjust the result by init_val.
4041 Option2: Initialize the vector as follows:
4042 add/bit or/xor: [init_val,0,0,...,0]
4043 mult/bit and: [init_val,1,1,...,1]
4044 min/max/cond_expr: [init_val,init_val,...,init_val]
4045 and no adjustments are needed.
4047 For example, for the following code:
4049 s = init_val;
4050 for (i=0;i<n;i++)
4051 s = s + a[i];
4053 STMT_VINFO is 's = s + a[i]', and the reduction variable is 's'.
4054 For a vector of 4 units, we want to return either [0,0,0,init_val],
4055 or [0,0,0,0] and let the caller know that it needs to adjust
4056 the result at the end by 'init_val'.
4058 FORNOW, we use the 'adjust in epilog' scheme (Option1) when ADJUSTMENT_DEF
4059 is not NULL, because this way the initialization vector is simpler (same
4060 element in all entries), and Option2 otherwise.
4062 A cost model should help decide between these two schemes. */
4064 tree
4065 get_initial_def_for_reduction (stmt_vec_info stmt_vinfo, tree init_val,
4066 tree *adjustment_def)
4068 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
4069 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
4070 tree scalar_type = TREE_TYPE (init_val);
4071 tree vectype = get_vectype_for_scalar_type (scalar_type);
4072 enum tree_code code = gimple_assign_rhs_code (stmt_vinfo->stmt);
4073 tree def_for_init;
4074 tree init_def;
4075 REAL_VALUE_TYPE real_init_val = dconst0;
4076 int int_init_val = 0;
4077 gimple_seq stmts = NULL;
4079 gcc_assert (vectype);
4081 gcc_assert (POINTER_TYPE_P (scalar_type) || INTEGRAL_TYPE_P (scalar_type)
4082 || SCALAR_FLOAT_TYPE_P (scalar_type));
4084 gcc_assert (nested_in_vect_loop_p (loop, stmt_vinfo)
4085 || loop == (gimple_bb (stmt_vinfo->stmt))->loop_father);
4087 vect_reduction_type reduction_type
4088 = STMT_VINFO_VEC_REDUCTION_TYPE (stmt_vinfo);
4090 switch (code)
4092 case WIDEN_SUM_EXPR:
4093 case DOT_PROD_EXPR:
4094 case SAD_EXPR:
4095 case PLUS_EXPR:
4096 case MINUS_EXPR:
4097 case BIT_IOR_EXPR:
4098 case BIT_XOR_EXPR:
4099 case MULT_EXPR:
4100 case BIT_AND_EXPR:
4102 /* ADJUSTMENT_DEF is NULL when called from
4103 vect_create_epilog_for_reduction to vectorize double reduction. */
4104 if (adjustment_def)
4105 *adjustment_def = init_val;
4107 if (code == MULT_EXPR)
4109 real_init_val = dconst1;
4110 int_init_val = 1;
4113 if (code == BIT_AND_EXPR)
4114 int_init_val = -1;
4116 if (SCALAR_FLOAT_TYPE_P (scalar_type))
4117 def_for_init = build_real (scalar_type, real_init_val);
4118 else
4119 def_for_init = build_int_cst (scalar_type, int_init_val);
4121 if (adjustment_def)
4122 /* Option1: the first element is '0' or '1' as well. */
4123 init_def = gimple_build_vector_from_val (&stmts, vectype,
4124 def_for_init);
4125 else if (!TYPE_VECTOR_SUBPARTS (vectype).is_constant ())
4127 /* Option2 (variable length): the first element is INIT_VAL. */
4128 init_def = gimple_build_vector_from_val (&stmts, vectype,
4129 def_for_init);
4130 init_def = gimple_build (&stmts, CFN_VEC_SHL_INSERT,
4131 vectype, init_def, init_val);
4133 else
4135 /* Option2: the first element is INIT_VAL. */
4136 tree_vector_builder elts (vectype, 1, 2);
4137 elts.quick_push (init_val);
4138 elts.quick_push (def_for_init);
4139 init_def = gimple_build_vector (&stmts, &elts);
4142 break;
4144 case MIN_EXPR:
4145 case MAX_EXPR:
4146 case COND_EXPR:
4148 if (adjustment_def)
4150 *adjustment_def = NULL_TREE;
4151 if (reduction_type != COND_REDUCTION
4152 && reduction_type != EXTRACT_LAST_REDUCTION)
4154 init_def = vect_get_vec_def_for_operand (init_val, stmt_vinfo);
4155 break;
4158 init_val = gimple_convert (&stmts, TREE_TYPE (vectype), init_val);
4159 init_def = gimple_build_vector_from_val (&stmts, vectype, init_val);
4161 break;
4163 default:
4164 gcc_unreachable ();
4167 if (stmts)
4168 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
4169 return init_def;
4172 /* Get at the initial defs for the reduction PHIs in SLP_NODE.
4173 NUMBER_OF_VECTORS is the number of vector defs to create.
4174 If NEUTRAL_OP is nonnull, introducing extra elements of that
4175 value will not change the result. */
4177 static void
4178 get_initial_defs_for_reduction (slp_tree slp_node,
4179 vec<tree> *vec_oprnds,
4180 unsigned int number_of_vectors,
4181 bool reduc_chain, tree neutral_op)
4183 vec<stmt_vec_info> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
4184 stmt_vec_info stmt_vinfo = stmts[0];
4185 unsigned HOST_WIDE_INT nunits;
4186 unsigned j, number_of_places_left_in_vector;
4187 tree vector_type;
4188 tree vop;
4189 int group_size = stmts.length ();
4190 unsigned int vec_num, i;
4191 unsigned number_of_copies = 1;
4192 vec<tree> voprnds;
4193 voprnds.create (number_of_vectors);
4194 struct loop *loop;
4195 auto_vec<tree, 16> permute_results;
4197 vector_type = STMT_VINFO_VECTYPE (stmt_vinfo);
4199 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def);
4201 loop = (gimple_bb (stmt_vinfo->stmt))->loop_father;
4202 gcc_assert (loop);
4203 edge pe = loop_preheader_edge (loop);
4205 gcc_assert (!reduc_chain || neutral_op);
4207 /* NUMBER_OF_COPIES is the number of times we need to use the same values in
4208 created vectors. It is greater than 1 if unrolling is performed.
4210 For example, we have two scalar operands, s1 and s2 (e.g., group of
4211 strided accesses of size two), while NUNITS is four (i.e., four scalars
4212 of this type can be packed in a vector). The output vector will contain
4213 two copies of each scalar operand: {s1, s2, s1, s2}. (NUMBER_OF_COPIES
4214 will be 2).
4216 If REDUC_GROUP_SIZE > NUNITS, the scalars will be split into several
4217 vectors containing the operands.
4219 For example, NUNITS is four as before, and the group size is 8
4220 (s1, s2, ..., s8). We will create two vectors {s1, s2, s3, s4} and
4221 {s5, s6, s7, s8}. */
4223 if (!TYPE_VECTOR_SUBPARTS (vector_type).is_constant (&nunits))
4224 nunits = group_size;
4226 number_of_copies = nunits * number_of_vectors / group_size;
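/* Editorial note: plugging the two examples from the comment above into this
   formula: with GROUP_SIZE == 2, NUNITS == 4 and one vector to create,
   NUMBER_OF_COPIES == 4 * 1 / 2 == 2 (giving {s1, s2, s1, s2}); with
   GROUP_SIZE == 8, NUNITS == 4 and two vectors to create, it is
   4 * 2 / 8 == 1. */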
4228 number_of_places_left_in_vector = nunits;
4229 bool constant_p = true;
4230 tree_vector_builder elts (vector_type, nunits, 1);
4231 elts.quick_grow (nunits);
4232 for (j = 0; j < number_of_copies; j++)
4234 for (i = group_size - 1; stmts.iterate (i, &stmt_vinfo); i--)
4236 tree op;
4237 /* Get the def before the loop. In reduction chain we have only
4238 one initial value. */
4239 if ((j != (number_of_copies - 1)
4240 || (reduc_chain && i != 0))
4241 && neutral_op)
4242 op = neutral_op;
4243 else
4244 op = PHI_ARG_DEF_FROM_EDGE (stmt_vinfo->stmt, pe);
4246 /* Create 'vect_ = {op0,op1,...,opn}'. */
4247 number_of_places_left_in_vector--;
4248 elts[number_of_places_left_in_vector] = op;
4249 if (!CONSTANT_CLASS_P (op))
4250 constant_p = false;
4252 if (number_of_places_left_in_vector == 0)
4254 gimple_seq ctor_seq = NULL;
4255 tree init;
4256 if (constant_p && !neutral_op
4257 ? multiple_p (TYPE_VECTOR_SUBPARTS (vector_type), nunits)
4258 : known_eq (TYPE_VECTOR_SUBPARTS (vector_type), nunits))
4259 /* Build the vector directly from ELTS. */
4260 init = gimple_build_vector (&ctor_seq, &elts);
4261 else if (neutral_op)
4263 /* Build a vector of the neutral value and shift the
4264 other elements into place. */
4265 init = gimple_build_vector_from_val (&ctor_seq, vector_type,
4266 neutral_op);
4267 int k = nunits;
4268 while (k > 0 && elts[k - 1] == neutral_op)
4269 k -= 1;
4270 while (k > 0)
4272 k -= 1;
4273 init = gimple_build (&ctor_seq, CFN_VEC_SHL_INSERT,
4274 vector_type, init, elts[k]);
4277 else
4279 /* First time round, duplicate ELTS to fill the
4280 required number of vectors, then cherry pick the
4281 appropriate result for each iteration. */
4282 if (vec_oprnds->is_empty ())
4283 duplicate_and_interleave (&ctor_seq, vector_type, elts,
4284 number_of_vectors,
4285 permute_results);
4286 init = permute_results[number_of_vectors - j - 1];
4288 if (ctor_seq != NULL)
4289 gsi_insert_seq_on_edge_immediate (pe, ctor_seq);
4290 voprnds.quick_push (init);
4292 number_of_places_left_in_vector = nunits;
4293 elts.new_vector (vector_type, nunits, 1);
4294 elts.quick_grow (nunits);
4295 constant_p = true;
4300 /* Since the vectors are created in the reverse order, we should invert
4301 them. */
4302 vec_num = voprnds.length ();
4303 for (j = vec_num; j != 0; j--)
4305 vop = voprnds[j - 1];
4306 vec_oprnds->quick_push (vop);
4309 voprnds.release ();
4311 /* In case that VF is greater than the unrolling factor needed for the SLP
4312 group of stmts, NUMBER_OF_VECTORS to be created is greater than
4313 NUMBER_OF_SCALARS/NUNITS or NUNITS/NUMBER_OF_SCALARS, and hence we have
4314 to replicate the vectors. */
4315 tree neutral_vec = NULL;
4316 while (number_of_vectors > vec_oprnds->length ())
4318 if (neutral_op)
4320 if (!neutral_vec)
4322 gimple_seq ctor_seq = NULL;
4323 neutral_vec = gimple_build_vector_from_val
4324 (&ctor_seq, vector_type, neutral_op);
4325 if (ctor_seq != NULL)
4326 gsi_insert_seq_on_edge_immediate (pe, ctor_seq);
4328 vec_oprnds->quick_push (neutral_vec);
4330 else
4332 for (i = 0; vec_oprnds->iterate (i, &vop) && i < vec_num; i++)
4333 vec_oprnds->quick_push (vop);
4339 /* Function vect_create_epilog_for_reduction
4341 Create code at the loop-epilog to finalize the result of a reduction
4342 computation.
4344 VECT_DEFS is list of vector of partial results, i.e., the lhs's of vector
4345 reduction statements.
4346 STMT_INFO is the scalar reduction stmt that is being vectorized.
4347 NCOPIES is > 1 in case the vectorization factor (VF) is bigger than the
4348 number of elements that we can fit in a vectype (nunits). In this case
4349 we have to generate more than one vector stmt - i.e - we need to "unroll"
4350 the vector stmt by a factor VF/nunits. For more details see documentation
4351 in vectorizable_operation.
4352 REDUC_FN is the internal function for the epilog reduction.
4353 REDUCTION_PHIS is a list of the phi-nodes that carry the reduction
4354 computation.
4355 REDUC_INDEX is the index of the operand in the right hand side of the
4356 statement that is defined by REDUCTION_PHI.
4357 DOUBLE_REDUC is TRUE if double reduction phi nodes should be handled.
4358 SLP_NODE is an SLP node containing a group of reduction statements. The
4359 first one in this group is STMT_INFO.
4360 INDUC_VAL is for INTEGER_INDUC_COND_REDUCTION the value to use for the case
4361 when the COND_EXPR is never true in the loop. For MAX_EXPR, it needs to
4362 be smaller than any value of the IV in the loop, for MIN_EXPR larger than
4363 any value of the IV in the loop.
4364 INDUC_CODE is the code for epilog reduction if INTEGER_INDUC_COND_REDUCTION.
4365 NEUTRAL_OP is the value given by neutral_op_for_slp_reduction; it is
4366 null if this is not an SLP reduction.
4368 This function:
4369 1. Creates the reduction def-use cycles: sets the arguments for
4370 REDUCTION_PHIS:
4371 The loop-entry argument is the vectorized initial-value of the reduction.
4372 The loop-latch argument is taken from VECT_DEFS - the vector of partial
4373 sums.
4374 2. "Reduces" each vector of partial results VECT_DEFS into a single result,
4375 by calling the function specified by REDUC_FN if available, or by
4376 other means (whole-vector shifts or a scalar loop).
4377 The function also creates a new phi node at the loop exit to preserve
4378 loop-closed form, as illustrated below.
4380 The flow at the entry to this function:
4382 loop:
4383 vec_def = phi <null, null> # REDUCTION_PHI
4384 VECT_DEF = vector_stmt # vectorized form of STMT_INFO
4385 s_loop = scalar_stmt # (scalar) STMT_INFO
4386 loop_exit:
4387 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
4388 use <s_out0>
4389 use <s_out0>
4391 The above is transformed by this function into:
4393 loop:
4394 vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI
4395 VECT_DEF = vector_stmt # vectorized form of STMT_INFO
4396 s_loop = scalar_stmt # (scalar) STMT_INFO
4397 loop_exit:
4398 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
4399 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
4400 v_out2 = reduce <v_out1>
4401 s_out3 = extract_field <v_out2, 0>
4402 s_out4 = adjust_result <s_out3>
4403 use <s_out4>
4404 use <s_out4>
4407 static void
4408 vect_create_epilog_for_reduction (vec<tree> vect_defs,
4409 stmt_vec_info stmt_info,
4410 gimple *reduc_def_stmt,
4411 int ncopies, internal_fn reduc_fn,
4412 vec<stmt_vec_info> reduction_phis,
4413 bool double_reduc,
4414 slp_tree slp_node,
4415 slp_instance slp_node_instance,
4416 tree induc_val, enum tree_code induc_code,
4417 tree neutral_op)
4419 stmt_vec_info prev_phi_info;
4420 tree vectype;
4421 machine_mode mode;
4422 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4423 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo), *outer_loop = NULL;
4424 basic_block exit_bb;
4425 tree scalar_dest;
4426 tree scalar_type;
4427 gimple *new_phi = NULL, *phi;
4428 stmt_vec_info phi_info;
4429 gimple_stmt_iterator exit_gsi;
4430 tree vec_dest;
4431 tree new_temp = NULL_TREE, new_dest, new_name, new_scalar_dest;
4432 gimple *epilog_stmt = NULL;
4433 enum tree_code code = gimple_assign_rhs_code (stmt_info->stmt);
4434 gimple *exit_phi;
4435 tree bitsize;
4436 tree adjustment_def = NULL;
4437 tree vec_initial_def = NULL;
4438 tree expr, def, initial_def = NULL;
4439 tree orig_name, scalar_result;
4440 imm_use_iterator imm_iter, phi_imm_iter;
4441 use_operand_p use_p, phi_use_p;
4442 gimple *use_stmt;
4443 stmt_vec_info reduction_phi_info = NULL;
4444 bool nested_in_vect_loop = false;
4445 auto_vec<gimple *> new_phis;
4446 auto_vec<stmt_vec_info> inner_phis;
4447 int j, i;
4448 auto_vec<tree> scalar_results;
4449 unsigned int group_size = 1, k, ratio;
4450 auto_vec<tree> vec_initial_defs;
4451 auto_vec<gimple *> phis;
4452 bool slp_reduc = false;
4453 bool direct_slp_reduc;
4454 tree new_phi_result;
4455 stmt_vec_info inner_phi = NULL;
4456 tree induction_index = NULL_TREE;
4458 if (slp_node)
4459 group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();
4461 if (nested_in_vect_loop_p (loop, stmt_info))
4463 outer_loop = loop;
4464 loop = loop->inner;
4465 nested_in_vect_loop = true;
4466 gcc_assert (!slp_node);
4469 vectype = STMT_VINFO_VECTYPE (stmt_info);
4470 gcc_assert (vectype);
4471 mode = TYPE_MODE (vectype);
4473 /* 1. Create the reduction def-use cycle:
4474 Set the arguments of REDUCTION_PHIS, i.e., transform
4476 loop:
4477 vec_def = phi <null, null> # REDUCTION_PHI
4478 VECT_DEF = vector_stmt # vectorized form of STMT
4481 into:
4483 loop:
4484 vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI
4485 VECT_DEF = vector_stmt # vectorized form of STMT
4488 (in case of SLP, do it for all the phis). */
4490 /* Get the loop-entry arguments. */
4491 enum vect_def_type initial_def_dt = vect_unknown_def_type;
4492 if (slp_node)
4494 unsigned vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
4495 vec_initial_defs.reserve (vec_num);
4496 get_initial_defs_for_reduction (slp_node_instance->reduc_phis,
4497 &vec_initial_defs, vec_num,
4498 REDUC_GROUP_FIRST_ELEMENT (stmt_info),
4499 neutral_op);
4501 else
4503 /* Get at the scalar def before the loop, that defines the initial value
4504 of the reduction variable. */
4505 initial_def = PHI_ARG_DEF_FROM_EDGE (reduc_def_stmt,
4506 loop_preheader_edge (loop));
4507 /* Optimize: if initial_def is for REDUC_MAX smaller than the base
4508 and we can't use zero for induc_val, use initial_def. Similarly
4509 for REDUC_MIN and initial_def larger than the base. */
4510 if (TREE_CODE (initial_def) == INTEGER_CST
4511 && (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
4512 == INTEGER_INDUC_COND_REDUCTION)
4513 && !integer_zerop (induc_val)
4514 && ((induc_code == MAX_EXPR
4515 && tree_int_cst_lt (initial_def, induc_val))
4516 || (induc_code == MIN_EXPR
4517 && tree_int_cst_lt (induc_val, initial_def))))
4518 induc_val = initial_def;
4520 if (double_reduc)
4521 /* In case of double reduction we only create a vector variable
4522 to be put in the reduction phi node. The actual statement
4523 creation is done later in this function. */
4524 vec_initial_def = vect_create_destination_var (initial_def, vectype);
4525 else if (nested_in_vect_loop)
4527 /* Do not use an adjustment def as that case is not supported
4528 correctly if ncopies is not one. */
4529 vect_is_simple_use (initial_def, loop_vinfo, &initial_def_dt);
4530 vec_initial_def = vect_get_vec_def_for_operand (initial_def,
4531 stmt_info);
4533 else
4534 vec_initial_def
4535 = get_initial_def_for_reduction (stmt_info, initial_def,
4536 &adjustment_def);
4537 vec_initial_defs.create (1);
4538 vec_initial_defs.quick_push (vec_initial_def);
4541 /* Set phi nodes arguments. */
4542 FOR_EACH_VEC_ELT (reduction_phis, i, phi_info)
4544 tree vec_init_def = vec_initial_defs[i];
4545 tree def = vect_defs[i];
4546 for (j = 0; j < ncopies; j++)
4548 if (j != 0)
4550 phi_info = STMT_VINFO_RELATED_STMT (phi_info);
4551 if (nested_in_vect_loop)
4552 vec_init_def
4553 = vect_get_vec_def_for_stmt_copy (loop_vinfo, vec_init_def);
4556 /* Set the loop-entry arg of the reduction-phi. */
4558 gphi *phi = as_a <gphi *> (phi_info->stmt);
4559 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
4560 == INTEGER_INDUC_COND_REDUCTION)
4562 /* Initialise the reduction phi to zero. This prevents non-zero
4563 initial values from interfering with the reduction op. */
4564 gcc_assert (ncopies == 1);
4565 gcc_assert (i == 0);
4567 tree vec_init_def_type = TREE_TYPE (vec_init_def);
4568 tree induc_val_vec
4569 = build_vector_from_val (vec_init_def_type, induc_val);
4571 add_phi_arg (phi, induc_val_vec, loop_preheader_edge (loop),
4572 UNKNOWN_LOCATION);
4574 else
4575 add_phi_arg (phi, vec_init_def, loop_preheader_edge (loop),
4576 UNKNOWN_LOCATION);
4578 /* Set the loop-latch arg for the reduction-phi. */
4579 if (j > 0)
4580 def = vect_get_vec_def_for_stmt_copy (loop_vinfo, def);
4582 add_phi_arg (phi, def, loop_latch_edge (loop), UNKNOWN_LOCATION);
4584 if (dump_enabled_p ())
4586 dump_printf_loc (MSG_NOTE, vect_location,
4587 "transform reduction: created def-use cycle: ");
4588 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
4589 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, SSA_NAME_DEF_STMT (def), 0);
4594 /* For cond reductions we want to create a new vector (INDEX_COND_EXPR)
4595 which is updated with the current index of the loop for every match of
4596 the original loop's cond_expr (VEC_STMT). This results in a vector
4597 containing the last time the condition passed for that vector lane.
4598 The first match will be a 1 to allow 0 to be used for non-matching
4599 indexes. If there are no matches at all then the vector will be all
4600 zeroes. */
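/* Editorial worked trace: with a 4-lane vector the index IV takes the values
   {1, 2, 3, 4} in the first vector iteration and {5, 6, 7, 8} in the second.
   If the original condition holds only for scalar iterations 1 and 5
   (0-based), lane 1 is set to 2 in the first iteration and overwritten with
   6 in the second, so the final vector is {0, 6, 0, 0}: each lane records the
   1-based number of its last matching scalar iteration, or 0 if it never
   matched. */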
4601 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION)
4603 tree indx_before_incr, indx_after_incr;
4604 poly_uint64 nunits_out = TYPE_VECTOR_SUBPARTS (vectype);
4606 gimple *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info)->stmt;
4607 gcc_assert (gimple_assign_rhs_code (vec_stmt) == VEC_COND_EXPR);
4609 int scalar_precision
4610 = GET_MODE_PRECISION (SCALAR_TYPE_MODE (TREE_TYPE (vectype)));
4611 tree cr_index_scalar_type = make_unsigned_type (scalar_precision);
4612 tree cr_index_vector_type = build_vector_type
4613 (cr_index_scalar_type, TYPE_VECTOR_SUBPARTS (vectype));
4615 /* First we create a simple vector induction variable which starts
4616 with the values {1,2,3,...} (SERIES_VECT) and increments by the
4617 vector size (STEP). */
4619 /* Create a {1,2,3,...} vector. */
4620 tree series_vect = build_index_vector (cr_index_vector_type, 1, 1);
4622 /* Create a vector of the step value. */
4623 tree step = build_int_cst (cr_index_scalar_type, nunits_out);
4624 tree vec_step = build_vector_from_val (cr_index_vector_type, step);
4626 /* Create an induction variable. */
4627 gimple_stmt_iterator incr_gsi;
4628 bool insert_after;
4629 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
4630 create_iv (series_vect, vec_step, NULL_TREE, loop, &incr_gsi,
4631 insert_after, &indx_before_incr, &indx_after_incr);
4633 /* Next create a new phi node vector (NEW_PHI_TREE) which starts
4634 filled with zeros (VEC_ZERO). */
4636 /* Create a vector of 0s. */
4637 tree zero = build_zero_cst (cr_index_scalar_type);
4638 tree vec_zero = build_vector_from_val (cr_index_vector_type, zero);
4640 /* Create a vector phi node. */
4641 tree new_phi_tree = make_ssa_name (cr_index_vector_type);
4642 new_phi = create_phi_node (new_phi_tree, loop->header);
4643 loop_vinfo->add_stmt (new_phi);
4644 add_phi_arg (as_a <gphi *> (new_phi), vec_zero,
4645 loop_preheader_edge (loop), UNKNOWN_LOCATION);
4647 /* Now take the condition from the loops original cond_expr
4648 (VEC_STMT) and produce a new cond_expr (INDEX_COND_EXPR) which for
4649 every match uses values from the induction variable
4650 (INDEX_BEFORE_INCR) otherwise uses values from the phi node
4651 (NEW_PHI_TREE).
4652 Finally, we update the phi (NEW_PHI_TREE) to take the value of
4653 the new cond_expr (INDEX_COND_EXPR). */
4655 /* Duplicate the condition from vec_stmt. */
4656 tree ccompare = unshare_expr (gimple_assign_rhs1 (vec_stmt));
4658 /* Create a conditional, where the condition is taken from vec_stmt
4659 (CCOMPARE), then is the induction index (INDEX_BEFORE_INCR) and
4660 else is the phi (NEW_PHI_TREE). */
4661 tree index_cond_expr = build3 (VEC_COND_EXPR, cr_index_vector_type,
4662 ccompare, indx_before_incr,
4663 new_phi_tree);
4664 induction_index = make_ssa_name (cr_index_vector_type);
4665 gimple *index_condition = gimple_build_assign (induction_index,
4666 index_cond_expr);
4667 gsi_insert_before (&incr_gsi, index_condition, GSI_SAME_STMT);
4668 stmt_vec_info index_vec_info = loop_vinfo->add_stmt (index_condition);
4669 STMT_VINFO_VECTYPE (index_vec_info) = cr_index_vector_type;
4671 /* Update the phi with the vec cond. */
4672 add_phi_arg (as_a <gphi *> (new_phi), induction_index,
4673 loop_latch_edge (loop), UNKNOWN_LOCATION);
4676 /* 2. Create epilog code.
4677 The reduction epilog code operates across the elements of the vector
4678 of partial results computed by the vectorized loop.
4679 The reduction epilog code consists of:
4681 step 1: compute the scalar result in a vector (v_out2)
4682 step 2: extract the scalar result (s_out3) from the vector (v_out2)
4683 step 3: adjust the scalar result (s_out3) if needed.
4685 Step 1 can be accomplished using one the following three schemes:
4686 (scheme 1) using reduc_fn, if available.
4687 (scheme 2) using whole-vector shifts, if available.
4688 (scheme 3) using a scalar loop. In this case steps 1+2 above are
4689 combined.
4691 The overall epilog code looks like this:
4693 s_out0 = phi <s_loop> # original EXIT_PHI
4694 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
4695 v_out2 = reduce <v_out1> # step 1
4696 s_out3 = extract_field <v_out2, 0> # step 2
4697 s_out4 = adjust_result <s_out3> # step 3
4699 (step 3 is optional, and steps 1 and 2 may be combined).
4700 Lastly, the uses of s_out0 are replaced by s_out4. */
4703 /* 2.1 Create new loop-exit-phis to preserve loop-closed form:
4704 v_out1 = phi <VECT_DEF>
4705 Store them in NEW_PHIS. */
4707 exit_bb = single_exit (loop)->dest;
4708 prev_phi_info = NULL;
4709 new_phis.create (vect_defs.length ());
4710 FOR_EACH_VEC_ELT (vect_defs, i, def)
4712 for (j = 0; j < ncopies; j++)
4714 tree new_def = copy_ssa_name (def);
4715 phi = create_phi_node (new_def, exit_bb);
4716 stmt_vec_info phi_info = loop_vinfo->add_stmt (phi);
4717 if (j == 0)
4718 new_phis.quick_push (phi);
4719 else
4721 def = vect_get_vec_def_for_stmt_copy (loop_vinfo, def);
4722 STMT_VINFO_RELATED_STMT (prev_phi_info) = phi_info;
4725 SET_PHI_ARG_DEF (phi, single_exit (loop)->dest_idx, def);
4726 prev_phi_info = phi_info;
4730 /* The epilogue is created for the outer-loop, i.e., for the loop being
4731 vectorized. Create exit phis for the outer loop. */
4732 if (double_reduc)
4734 loop = outer_loop;
4735 exit_bb = single_exit (loop)->dest;
4736 inner_phis.create (vect_defs.length ());
4737 FOR_EACH_VEC_ELT (new_phis, i, phi)
4739 stmt_vec_info phi_info = loop_vinfo->lookup_stmt (phi);
4740 tree new_result = copy_ssa_name (PHI_RESULT (phi));
4741 gphi *outer_phi = create_phi_node (new_result, exit_bb);
4742 SET_PHI_ARG_DEF (outer_phi, single_exit (loop)->dest_idx,
4743 PHI_RESULT (phi));
4744 prev_phi_info = loop_vinfo->add_stmt (outer_phi);
4745 inner_phis.quick_push (phi_info);
4746 new_phis[i] = outer_phi;
4747 while (STMT_VINFO_RELATED_STMT (phi_info))
4749 phi_info = STMT_VINFO_RELATED_STMT (phi_info);
4750 new_result = copy_ssa_name (PHI_RESULT (phi_info->stmt));
4751 outer_phi = create_phi_node (new_result, exit_bb);
4752 SET_PHI_ARG_DEF (outer_phi, single_exit (loop)->dest_idx,
4753 PHI_RESULT (phi_info->stmt));
4754 stmt_vec_info outer_phi_info = loop_vinfo->add_stmt (outer_phi);
4755 STMT_VINFO_RELATED_STMT (prev_phi_info) = outer_phi_info;
4756 prev_phi_info = outer_phi_info;
4761 exit_gsi = gsi_after_labels (exit_bb);
4763 /* 2.2 Get the relevant tree-code to use in the epilog for schemes 2,3
4764 (i.e. when reduc_fn is not available) and in the final adjustment
4765 code (if needed). Also get the original scalar reduction variable as
4766 defined in the loop. In case STMT is a "pattern-stmt" (i.e. - it
4767 represents a reduction pattern), the tree-code and scalar-def are
4768 taken from the original stmt that the pattern-stmt (STMT) replaces.
4769 Otherwise (it is a regular reduction) - the tree-code and scalar-def
4770 are taken from STMT. */
4772 stmt_vec_info orig_stmt_info = vect_orig_stmt (stmt_info);
4773 if (orig_stmt_info != stmt_info)
4775 /* Reduction pattern */
4776 gcc_assert (STMT_VINFO_IN_PATTERN_P (orig_stmt_info));
4777 gcc_assert (STMT_VINFO_RELATED_STMT (orig_stmt_info) == stmt_info);
4780 code = gimple_assign_rhs_code (orig_stmt_info->stmt);
4781 /* For MINUS_EXPR the initial vector is [init_val,0,...,0], therefore,
4782 partial results are added and not subtracted. */
4783 if (code == MINUS_EXPR)
4784 code = PLUS_EXPR;
4786 scalar_dest = gimple_assign_lhs (orig_stmt_info->stmt);
4787 scalar_type = TREE_TYPE (scalar_dest);
4788 scalar_results.create (group_size);
4789 new_scalar_dest = vect_create_destination_var (scalar_dest, NULL);
4790 bitsize = TYPE_SIZE (scalar_type);
4792 /* In case this is a reduction in an inner-loop while vectorizing an outer
4793 loop - we don't need to extract a single scalar result at the end of the
4794 inner-loop (unless it is double reduction, i.e., the use of reduction is
4795 outside the outer-loop). The final vector of partial results will be used
4796 in the vectorized outer-loop, or reduced to a scalar result at the end of
4797 the outer-loop. */
4798 if (nested_in_vect_loop && !double_reduc)
4799 goto vect_finalize_reduction;
4801 /* SLP reduction without reduction chain, e.g.,
4802 # a1 = phi <a2, a0>
4803 # b1 = phi <b2, b0>
4804 a2 = operation (a1)
4805 b2 = operation (b1) */
4806 slp_reduc = (slp_node && !REDUC_GROUP_FIRST_ELEMENT (stmt_info));
4808 /* True if we should implement SLP_REDUC using native reduction operations
4809 instead of scalar operations. */
4810 direct_slp_reduc = (reduc_fn != IFN_LAST
4811 && slp_reduc
4812 && !TYPE_VECTOR_SUBPARTS (vectype).is_constant ());
4814 /* In case of reduction chain, e.g.,
4815 # a1 = phi <a3, a0>
4816 a2 = operation (a1)
4817 a3 = operation (a2),
4819 we may end up with more than one vector result. Here we reduce them to
4820 one vector. */
4821 if (REDUC_GROUP_FIRST_ELEMENT (stmt_info) || direct_slp_reduc)
4823 tree first_vect = PHI_RESULT (new_phis[0]);
4824 gassign *new_vec_stmt = NULL;
4825 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4826 for (k = 1; k < new_phis.length (); k++)
4828 gimple *next_phi = new_phis[k];
4829 tree second_vect = PHI_RESULT (next_phi);
4830 tree tem = make_ssa_name (vec_dest, new_vec_stmt);
4831 new_vec_stmt = gimple_build_assign (tem, code,
4832 first_vect, second_vect);
4833 gsi_insert_before (&exit_gsi, new_vec_stmt, GSI_SAME_STMT);
4834 first_vect = tem;
4837 new_phi_result = first_vect;
4838 if (new_vec_stmt)
4840 new_phis.truncate (0);
4841 new_phis.safe_push (new_vec_stmt);
4844 /* Likewise if we couldn't use a single defuse cycle. */
4845 else if (ncopies > 1)
4847 gcc_assert (new_phis.length () == 1);
4848 tree first_vect = PHI_RESULT (new_phis[0]);
4849 gassign *new_vec_stmt = NULL;
4850 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4851 stmt_vec_info next_phi_info = loop_vinfo->lookup_stmt (new_phis[0]);
4852 for (int k = 1; k < ncopies; ++k)
4854 next_phi_info = STMT_VINFO_RELATED_STMT (next_phi_info);
4855 tree second_vect = PHI_RESULT (next_phi_info->stmt);
4856 tree tem = make_ssa_name (vec_dest, new_vec_stmt);
4857 new_vec_stmt = gimple_build_assign (tem, code,
4858 first_vect, second_vect);
4859 gsi_insert_before (&exit_gsi, new_vec_stmt, GSI_SAME_STMT);
4860 first_vect = tem;
4862 new_phi_result = first_vect;
4863 new_phis.truncate (0);
4864 new_phis.safe_push (new_vec_stmt);
4866 else
4867 new_phi_result = PHI_RESULT (new_phis[0]);
4869 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION
4870 && reduc_fn != IFN_LAST)
4872 /* For condition reductions, we have a vector (NEW_PHI_RESULT) containing
4873 various data values where the condition matched and another vector
4874 (INDUCTION_INDEX) containing all the indexes of those matches. We
4875 need to extract the last matching index (which will be the index with
4876 highest value) and use this to index into the data vector.
4877 For the case where there were no matches, the data vector will contain
4878 all default values and the index vector will be all zeros. */
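/* Editorial note, continuing the worked trace given earlier: with index
   vector {0, 6, 0, 0} and data vector {d0, d1, d2, d3}, the IFN_REDUC_MAX
   over the indexes yields 6; broadcasting it and comparing gives the mask
   {false, true, false, false}; the VEC_COND_EXPR then produces {0, d1, 0, 0},
   and a second IFN_REDUC_MAX over its unsigned view extracts d1, since the
   zero lanes cannot exceed it. With no matches at all every index is 0, every
   lane compares equal to the max, and the final reduction returns the common
   default value. */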
4880 /* Get various versions of the type of the vector of indexes. */
4881 tree index_vec_type = TREE_TYPE (induction_index);
4882 gcc_checking_assert (TYPE_UNSIGNED (index_vec_type));
4883 tree index_scalar_type = TREE_TYPE (index_vec_type);
4884 tree index_vec_cmp_type = build_same_sized_truth_vector_type
4885 (index_vec_type);
4887 /* Get an unsigned integer version of the type of the data vector. */
4888 int scalar_precision
4889 = GET_MODE_PRECISION (SCALAR_TYPE_MODE (scalar_type));
4890 tree scalar_type_unsigned = make_unsigned_type (scalar_precision);
4891 tree vectype_unsigned = build_vector_type
4892 (scalar_type_unsigned, TYPE_VECTOR_SUBPARTS (vectype));
4894 /* First we need to create a vector (ZERO_VEC) of zeros and another
4895 vector (MAX_INDEX_VEC) filled with the last matching index, which we
4896 can create using a MAX reduction and then expanding.
4897 In the case where the loop never made any matches, the max index will
4898 be zero. */
4900 /* Vector of {0, 0, 0,...}. */
4901 tree zero_vec = make_ssa_name (vectype);
4902 tree zero_vec_rhs = build_zero_cst (vectype);
4903 gimple *zero_vec_stmt = gimple_build_assign (zero_vec, zero_vec_rhs);
4904 gsi_insert_before (&exit_gsi, zero_vec_stmt, GSI_SAME_STMT);
4906 /* Find maximum value from the vector of found indexes. */
4907 tree max_index = make_ssa_name (index_scalar_type);
4908 gcall *max_index_stmt = gimple_build_call_internal (IFN_REDUC_MAX,
4909 1, induction_index);
4910 gimple_call_set_lhs (max_index_stmt, max_index);
4911 gsi_insert_before (&exit_gsi, max_index_stmt, GSI_SAME_STMT);
4913 /* Vector of {max_index, max_index, max_index,...}. */
4914 tree max_index_vec = make_ssa_name (index_vec_type);
4915 tree max_index_vec_rhs = build_vector_from_val (index_vec_type,
4916 max_index);
4917 gimple *max_index_vec_stmt = gimple_build_assign (max_index_vec,
4918 max_index_vec_rhs);
4919 gsi_insert_before (&exit_gsi, max_index_vec_stmt, GSI_SAME_STMT);
4921 /* Next we compare the new vector (MAX_INDEX_VEC) full of max indexes
4922 with the vector (INDUCTION_INDEX) of found indexes, choosing values
4923 from the data vector (NEW_PHI_RESULT) for matches, 0 (ZERO_VEC)
4924 otherwise. Only one value should match, resulting in a vector
4925 (VEC_COND) with one data value and the rest zeros.
4926 In the case where the loop never made any matches, every index will
4927 match, resulting in a vector with all data values (which will all be
4928 the default value). */
4930 /* Compare the max index vector to the vector of found indexes to find
4931 the position of the max value. */
4932 tree vec_compare = make_ssa_name (index_vec_cmp_type);
4933 gimple *vec_compare_stmt = gimple_build_assign (vec_compare, EQ_EXPR,
4934 induction_index,
4935 max_index_vec);
4936 gsi_insert_before (&exit_gsi, vec_compare_stmt, GSI_SAME_STMT);
4938 /* Use the compare to choose either values from the data vector or
4939 zero. */
4940 tree vec_cond = make_ssa_name (vectype);
4941 gimple *vec_cond_stmt = gimple_build_assign (vec_cond, VEC_COND_EXPR,
4942 vec_compare, new_phi_result,
4943 zero_vec);
4944 gsi_insert_before (&exit_gsi, vec_cond_stmt, GSI_SAME_STMT);
4946 /* Finally we need to extract the data value from the vector (VEC_COND)
4947 into a scalar (MATCHED_DATA_REDUC). Logically we want to do a OR
4948 reduction, but because this doesn't exist, we can use a MAX reduction
4949 instead. The data value might be signed or a float so we need to cast
4950 it first.
4951 In the case where the loop never made any matches, the data values are
4952 all identical, and so will reduce down correctly. */
4954 /* Make the matched data values unsigned. */
4955 tree vec_cond_cast = make_ssa_name (vectype_unsigned);
4956 tree vec_cond_cast_rhs = build1 (VIEW_CONVERT_EXPR, vectype_unsigned,
4957 vec_cond);
4958 gimple *vec_cond_cast_stmt = gimple_build_assign (vec_cond_cast,
4959 VIEW_CONVERT_EXPR,
4960 vec_cond_cast_rhs);
4961 gsi_insert_before (&exit_gsi, vec_cond_cast_stmt, GSI_SAME_STMT);
4963 /* Reduce down to a scalar value. */
4964 tree data_reduc = make_ssa_name (scalar_type_unsigned);
4965 gcall *data_reduc_stmt = gimple_build_call_internal (IFN_REDUC_MAX,
4966 1, vec_cond_cast);
4967 gimple_call_set_lhs (data_reduc_stmt, data_reduc);
4968 gsi_insert_before (&exit_gsi, data_reduc_stmt, GSI_SAME_STMT);
4970 /* Convert the reduced value back to the result type and set as the
4971 result. */
4972 gimple_seq stmts = NULL;
4973 new_temp = gimple_build (&stmts, VIEW_CONVERT_EXPR, scalar_type,
4974 data_reduc);
4975 gsi_insert_seq_before (&exit_gsi, stmts, GSI_SAME_STMT);
4976 scalar_results.safe_push (new_temp);
4978 else if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION
4979 && reduc_fn == IFN_LAST)
4981 /* Condition reduction without supported IFN_REDUC_MAX. Generate
4982 idx = 0;
4983 idx_val = induction_index[0];
4984 val = data_reduc[0];
4985 for (idx = 0, val = init, i = 0; i < nelts; ++i)
4986 if (induction_index[i] > idx_val)
4987 val = data_reduc[i], idx_val = induction_index[i];
4988 return val; */
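/* Editorial sketch (plain C, not the generated GIMPLE): the element-by-element
   fallback described above, for a constant number of lanes. The code below
   open-codes the same loop with BIT_FIELD_REF extracts, a MAX_EXPR on the
   indexes and a COND_EXPR select on the data values.

   static int
   last_match_value_model (const unsigned int *induction_index,
                           const int *data, unsigned int nelts)
   {
     unsigned int idx_val = induction_index[0];
     int val = data[0];
     for (unsigned int i = 1; i < nelts; ++i)
       if (induction_index[i] > idx_val)
         {
           // A lane with a higher induction index matched later; it wins.
           idx_val = induction_index[i];
           val = data[i];
         }
     return val;
   }

   With induction_index == {0, 6, 0, 0} and data == {d0, d1, d2, d3} this
   returns d1, matching the trace given earlier for the REDUC_MAX path. */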
4990 tree data_eltype = TREE_TYPE (TREE_TYPE (new_phi_result));
4991 tree idx_eltype = TREE_TYPE (TREE_TYPE (induction_index));
4992 unsigned HOST_WIDE_INT el_size = tree_to_uhwi (TYPE_SIZE (idx_eltype));
4993 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (TREE_TYPE (induction_index));
4994 /* Enforced by vectorizable_reduction, which ensures we have target
4995 support before allowing a conditional reduction on variable-length
4996 vectors. */
4997 unsigned HOST_WIDE_INT v_size = el_size * nunits.to_constant ();
4998 tree idx_val = NULL_TREE, val = NULL_TREE;
4999 for (unsigned HOST_WIDE_INT off = 0; off < v_size; off += el_size)
5001 tree old_idx_val = idx_val;
5002 tree old_val = val;
5003 idx_val = make_ssa_name (idx_eltype);
5004 epilog_stmt = gimple_build_assign (idx_val, BIT_FIELD_REF,
5005 build3 (BIT_FIELD_REF, idx_eltype,
5006 induction_index,
5007 bitsize_int (el_size),
5008 bitsize_int (off)));
5009 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5010 val = make_ssa_name (data_eltype);
5011 epilog_stmt = gimple_build_assign (val, BIT_FIELD_REF,
5012 build3 (BIT_FIELD_REF,
5013 data_eltype,
5014 new_phi_result,
5015 bitsize_int (el_size),
5016 bitsize_int (off)));
5017 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5018 if (off != 0)
5020 tree new_idx_val = idx_val;
5021 tree new_val = val;
5022 if (off != v_size - el_size)
5024 new_idx_val = make_ssa_name (idx_eltype);
5025 epilog_stmt = gimple_build_assign (new_idx_val,
5026 MAX_EXPR, idx_val,
5027 old_idx_val);
5028 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5030 new_val = make_ssa_name (data_eltype);
5031 epilog_stmt = gimple_build_assign (new_val,
5032 COND_EXPR,
5033 build2 (GT_EXPR,
5034 boolean_type_node,
5035 idx_val,
5036 old_idx_val),
5037 val, old_val);
5038 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5039 idx_val = new_idx_val;
5040 val = new_val;
5043 /* Convert the reduced value back to the result type and set as the
5044 result. */
5045 gimple_seq stmts = NULL;
5046 val = gimple_convert (&stmts, scalar_type, val);
5047 gsi_insert_seq_before (&exit_gsi, stmts, GSI_SAME_STMT);
5048 scalar_results.safe_push (val);
5051 /* 2.3 Create the reduction code, using one of the three schemes described
5052 above. In SLP we simply need to extract all the elements from the
5053 vector (without reducing them), so we use scalar shifts. */
5054 else if (reduc_fn != IFN_LAST && !slp_reduc)
5056 tree tmp;
5057 tree vec_elem_type;
5059 /* Case 1: Create:
5060 v_out2 = reduc_expr <v_out1> */
5062 if (dump_enabled_p ())
5063 dump_printf_loc (MSG_NOTE, vect_location,
5064 "Reduce using direct vector reduction.\n");
5066 vec_elem_type = TREE_TYPE (TREE_TYPE (new_phi_result));
5067 if (!useless_type_conversion_p (scalar_type, vec_elem_type))
5069 tree tmp_dest
5070 = vect_create_destination_var (scalar_dest, vec_elem_type);
5071 epilog_stmt = gimple_build_call_internal (reduc_fn, 1,
5072 new_phi_result);
5073 gimple_set_lhs (epilog_stmt, tmp_dest);
5074 new_temp = make_ssa_name (tmp_dest, epilog_stmt);
5075 gimple_set_lhs (epilog_stmt, new_temp);
5076 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5078 epilog_stmt = gimple_build_assign (new_scalar_dest, NOP_EXPR,
5079 new_temp);
5081 else
5083 epilog_stmt = gimple_build_call_internal (reduc_fn, 1,
5084 new_phi_result);
5085 gimple_set_lhs (epilog_stmt, new_scalar_dest);
5088 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
5089 gimple_set_lhs (epilog_stmt, new_temp);
5090 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5092 if ((STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
5093 == INTEGER_INDUC_COND_REDUCTION)
5094 && !operand_equal_p (initial_def, induc_val, 0))
5096 /* Earlier we set the initial value to be a vector of induc_val
5097 values. Check the result and if it is induc_val then replace
5098 it with the original initial value, unless induc_val is
5099 the same as initial_def already. */
5100 tree zcompare = build2 (EQ_EXPR, boolean_type_node, new_temp,
5101 induc_val);
5103 tmp = make_ssa_name (new_scalar_dest);
5104 epilog_stmt = gimple_build_assign (tmp, COND_EXPR, zcompare,
5105 initial_def, new_temp);
5106 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5107 new_temp = tmp;
5110 scalar_results.safe_push (new_temp);
5112 else if (direct_slp_reduc)
5114 /* Here we create one vector for each of the REDUC_GROUP_SIZE results,
5115 with the elements for other SLP statements replaced with the
5116 neutral value. We can then do a normal reduction on each vector. */
5118 /* Enforced by vectorizable_reduction. */
5119 gcc_assert (new_phis.length () == 1);
5120 gcc_assert (pow2p_hwi (group_size));
5122 slp_tree orig_phis_slp_node = slp_node_instance->reduc_phis;
5123 vec<stmt_vec_info> orig_phis
5124 = SLP_TREE_SCALAR_STMTS (orig_phis_slp_node);
5125 gimple_seq seq = NULL;
5127 /* Build a vector {0, 1, 2, ...}, with the same number of elements
5128 and the same element size as VECTYPE. */
5129 tree index = build_index_vector (vectype, 0, 1);
5130 tree index_type = TREE_TYPE (index);
5131 tree index_elt_type = TREE_TYPE (index_type);
5132 tree mask_type = build_same_sized_truth_vector_type (index_type);
5134 /* Create a vector that, for each element, identifies which of
5135 the REDUC_GROUP_SIZE results should use it. */
5136 tree index_mask = build_int_cst (index_elt_type, group_size - 1);
5137 index = gimple_build (&seq, BIT_AND_EXPR, index_type, index,
5138 build_vector_from_val (index_type, index_mask));
5140 /* Get a neutral vector value. This is simply a splat of the neutral
5141 scalar value if we have one, otherwise the initial scalar value
5142 is itself a neutral value. */
5143 tree vector_identity = NULL_TREE;
5144 if (neutral_op)
5145 vector_identity = gimple_build_vector_from_val (&seq, vectype,
5146 neutral_op);
5147 for (unsigned int i = 0; i < group_size; ++i)
5149 /* If there's no universal neutral value, we can use the
5150 initial scalar value from the original PHI. This is used
5151 for MIN and MAX reduction, for example. */
5152 if (!neutral_op)
5154 tree scalar_value
5155 = PHI_ARG_DEF_FROM_EDGE (orig_phis[i]->stmt,
5156 loop_preheader_edge (loop));
5157 vector_identity = gimple_build_vector_from_val (&seq, vectype,
5158 scalar_value);
5161 /* Calculate the equivalent of:
5163 sel[j] = (index[j] == i);
5165 which selects the elements of NEW_PHI_RESULT that should
5166 be included in the result. */
5167 tree compare_val = build_int_cst (index_elt_type, i);
5168 compare_val = build_vector_from_val (index_type, compare_val);
5169 tree sel = gimple_build (&seq, EQ_EXPR, mask_type,
5170 index, compare_val);
5172 /* Calculate the equivalent of:
5174 vec = sel ? new_phi_result : vector_identity;
5176 VEC is now suitable for a full vector reduction. */
5177 tree vec = gimple_build (&seq, VEC_COND_EXPR, vectype,
5178 sel, new_phi_result, vector_identity);
5180 /* Do the reduction and convert it to the appropriate type. */
5181 tree scalar = gimple_build (&seq, as_combined_fn (reduc_fn),
5182 TREE_TYPE (vectype), vec);
5183 scalar = gimple_convert (&seq, scalar_type, scalar);
5184 scalar_results.safe_push (scalar);
5186 gsi_insert_seq_before (&exit_gsi, seq, GSI_SAME_STMT);
5188 else
5190 bool reduce_with_shift;
5191 tree vec_temp;
5193 /* COND reductions all do the final reduction with MAX_EXPR
5194 or MIN_EXPR. */
5195 if (code == COND_EXPR)
5197 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
5198 == INTEGER_INDUC_COND_REDUCTION)
5199 code = induc_code;
5200 else if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
5201 == CONST_COND_REDUCTION)
5202 code = STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info);
5203 else
5204 code = MAX_EXPR;
5207 /* See if the target wants to do the final (shift) reduction
5208 in a vector mode of smaller size and first reduce upper/lower
5209 halves against each other. */
5210 enum machine_mode mode1 = mode;
5211 tree vectype1 = vectype;
5212 unsigned sz = tree_to_uhwi (TYPE_SIZE_UNIT (vectype));
5213 unsigned sz1 = sz;
5214 if (!slp_reduc
5215 && (mode1 = targetm.vectorize.split_reduction (mode)) != mode)
5216 sz1 = GET_MODE_SIZE (mode1).to_constant ();
5218 vectype1 = get_vectype_for_scalar_type_and_size (scalar_type, sz1);
5219 reduce_with_shift = have_whole_vector_shift (mode1);
5220 if (!VECTOR_MODE_P (mode1))
5221 reduce_with_shift = false;
5222 else
5224 optab optab = optab_for_tree_code (code, vectype1, optab_default);
5225 if (optab_handler (optab, mode1) == CODE_FOR_nothing)
5226 reduce_with_shift = false;
5229 /* First reduce the vector to the desired vector size we should
5230 do shift reduction on by combining upper and lower halves. */
5231 new_temp = new_phi_result;
5232 while (sz > sz1)
5234 gcc_assert (!slp_reduc);
5235 sz /= 2;
5236 vectype1 = get_vectype_for_scalar_type_and_size (scalar_type, sz);
5238 /* The target has to make sure we support lowpart/highpart
5239 extraction, either via direct vector extract or through
5240 an integer mode punning. */
5241 tree dst1, dst2;
5242 if (convert_optab_handler (vec_extract_optab,
5243 TYPE_MODE (TREE_TYPE (new_temp)),
5244 TYPE_MODE (vectype1))
5245 != CODE_FOR_nothing)
5247 /* Extract sub-vectors directly once vec_extract becomes
5248 a conversion optab. */
5249 dst1 = make_ssa_name (vectype1);
5250 epilog_stmt
5251 = gimple_build_assign (dst1, BIT_FIELD_REF,
5252 build3 (BIT_FIELD_REF, vectype1,
5253 new_temp, TYPE_SIZE (vectype1),
5254 bitsize_int (0)));
5255 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5256 dst2 = make_ssa_name (vectype1);
5257 epilog_stmt
5258 = gimple_build_assign (dst2, BIT_FIELD_REF,
5259 build3 (BIT_FIELD_REF, vectype1,
5260 new_temp, TYPE_SIZE (vectype1),
5261 bitsize_int (sz * BITS_PER_UNIT)));
5262 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5264 else
5266 /* Extract via punning to appropriately sized integer mode
5267 vector. */
5268 tree eltype = build_nonstandard_integer_type (sz * BITS_PER_UNIT, 1);
5270 tree etype = build_vector_type (eltype, 2);
5271 gcc_assert (convert_optab_handler (vec_extract_optab,
5272 TYPE_MODE (etype),
5273 TYPE_MODE (eltype))
5274 != CODE_FOR_nothing);
5275 tree tem = make_ssa_name (etype);
5276 epilog_stmt = gimple_build_assign (tem, VIEW_CONVERT_EXPR,
5277 build1 (VIEW_CONVERT_EXPR,
5278 etype, new_temp));
5279 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5280 new_temp = tem;
5281 tem = make_ssa_name (eltype);
5282 epilog_stmt
5283 = gimple_build_assign (tem, BIT_FIELD_REF,
5284 build3 (BIT_FIELD_REF, eltype,
5285 new_temp, TYPE_SIZE (eltype),
5286 bitsize_int (0)));
5287 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5288 dst1 = make_ssa_name (vectype1);
5289 epilog_stmt = gimple_build_assign (dst1, VIEW_CONVERT_EXPR,
5290 build1 (VIEW_CONVERT_EXPR,
5291 vectype1, tem));
5292 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5293 tem = make_ssa_name (eltype);
5294 epilog_stmt
5295 = gimple_build_assign (tem, BIT_FIELD_REF,
5296 build3 (BIT_FIELD_REF, eltype,
5297 new_temp, TYPE_SIZE (eltype),
5298 bitsize_int (sz * BITS_PER_UNIT)));
5299 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5300 dst2 = make_ssa_name (vectype1);
5301 epilog_stmt = gimple_build_assign (dst2, VIEW_CONVERT_EXPR,
5302 build1 (VIEW_CONVERT_EXPR,
5303 vectype1, tem));
5304 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5307 new_temp = make_ssa_name (vectype1);
5308 epilog_stmt = gimple_build_assign (new_temp, code, dst1, dst2);
5309 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5312 if (reduce_with_shift && !slp_reduc)
5314 int element_bitsize = tree_to_uhwi (bitsize);
5315 /* Enforced by vectorizable_reduction, which disallows SLP reductions
5316 for variable-length vectors and also requires direct target support
5317 for loop reductions. */
5318 int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype1));
5319 int nelements = vec_size_in_bits / element_bitsize;
5320 vec_perm_builder sel;
5321 vec_perm_indices indices;
5323 int elt_offset;
5325 tree zero_vec = build_zero_cst (vectype1);
5326 /* Case 2: Create:
5327 for (offset = nelements/2; offset >= 1; offset/=2)
5329 Create: va' = vec_shift <va, offset>
5330 Create: va = vop <va, va'>
5331 } */
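/* For illustration only: with a 4-element vector {a,b,c,d} and a PLUS
   reduction, and zero padding coming from ZERO_VEC, this builds
     offset 2:  va' = {c, d, 0, 0}      va = {a+c, b+d, _, _}
     offset 1:  va' = {b+d, _, _, 0}    va = {a+b+c+d, _, _, _}
   so the complete reduction value ends up in element 0, from which the
   scalar result is extracted below.  */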
5333 tree rhs;
5335 if (dump_enabled_p ())
5336 dump_printf_loc (MSG_NOTE, vect_location,
5337 "Reduce using vector shifts\n");
5339 mode1 = TYPE_MODE (vectype1);
5340 vec_dest = vect_create_destination_var (scalar_dest, vectype1);
5341 for (elt_offset = nelements / 2;
5342 elt_offset >= 1;
5343 elt_offset /= 2)
5345 calc_vec_perm_mask_for_shift (elt_offset, nelements, &sel);
5346 indices.new_vector (sel, 2, nelements);
5347 tree mask = vect_gen_perm_mask_any (vectype1, indices);
5348 epilog_stmt = gimple_build_assign (vec_dest, VEC_PERM_EXPR,
5349 new_temp, zero_vec, mask);
5350 new_name = make_ssa_name (vec_dest, epilog_stmt);
5351 gimple_assign_set_lhs (epilog_stmt, new_name);
5352 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5354 epilog_stmt = gimple_build_assign (vec_dest, code, new_name,
5355 new_temp);
5356 new_temp = make_ssa_name (vec_dest, epilog_stmt);
5357 gimple_assign_set_lhs (epilog_stmt, new_temp);
5358 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5361 /* 2.4 Extract the final scalar result. Create:
5362 s_out3 = extract_field <v_out2, bitpos> */
5364 if (dump_enabled_p ())
5365 dump_printf_loc (MSG_NOTE, vect_location,
5366 "extract scalar result\n");
5368 rhs = build3 (BIT_FIELD_REF, scalar_type, new_temp,
5369 bitsize, bitsize_zero_node);
5370 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
5371 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
5372 gimple_assign_set_lhs (epilog_stmt, new_temp);
5373 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5374 scalar_results.safe_push (new_temp);
5376 else
5378 /* Case 3: Create:
5379 s = extract_field <v_out2, 0>
5380 for (offset = element_size;
5381 offset < vector_size;
5382 offset += element_size;)
5384 Create: s' = extract_field <v_out2, offset>
5385 Create: s = op <s, s'> // For non SLP cases
5386 } */
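/* For example, for a 4-element vector {a,b,c,d} and a PLUS reduction
   (non-SLP) this expands to
     s = a;  s = s + b;  s = s + c;  s = s + d;
   where each element is obtained through a BIT_FIELD_REF extraction.  */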
5388 if (dump_enabled_p ())
5389 dump_printf_loc (MSG_NOTE, vect_location,
5390 "Reduce using scalar code.\n");
5392 int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype1));
5393 int element_bitsize = tree_to_uhwi (bitsize);
5394 FOR_EACH_VEC_ELT (new_phis, i, new_phi)
5396 int bit_offset;
5397 if (gimple_code (new_phi) == GIMPLE_PHI)
5398 vec_temp = PHI_RESULT (new_phi);
5399 else
5400 vec_temp = gimple_assign_lhs (new_phi);
5401 tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp, bitsize,
5402 bitsize_zero_node);
5403 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
5404 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
5405 gimple_assign_set_lhs (epilog_stmt, new_temp);
5406 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5408 /* In SLP we don't need to apply reduction operation, so we just
5409 collect s' values in SCALAR_RESULTS. */
5410 if (slp_reduc)
5411 scalar_results.safe_push (new_temp);
5413 for (bit_offset = element_bitsize;
5414 bit_offset < vec_size_in_bits;
5415 bit_offset += element_bitsize)
5417 tree bitpos = bitsize_int (bit_offset);
5418 tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp,
5419 bitsize, bitpos);
5421 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
5422 new_name = make_ssa_name (new_scalar_dest, epilog_stmt);
5423 gimple_assign_set_lhs (epilog_stmt, new_name);
5424 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5426 if (slp_reduc)
5428 /* In SLP we don't need to apply reduction operation, so
5429 we just collect s' values in SCALAR_RESULTS. */
5430 new_temp = new_name;
5431 scalar_results.safe_push (new_name);
5433 else
5435 epilog_stmt = gimple_build_assign (new_scalar_dest, code,
5436 new_name, new_temp);
5437 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
5438 gimple_assign_set_lhs (epilog_stmt, new_temp);
5439 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5444 /* The only case where we need to reduce scalar results in SLP is
5445 unrolling. If the size of SCALAR_RESULTS is greater than
5446 REDUC_GROUP_SIZE, we reduce them combining elements modulo
5447 REDUC_GROUP_SIZE. */
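/* For example, with REDUC_GROUP_SIZE 2 and four scalar results
   {s0, s1, s2, s3} from an unrolled SLP reduction, the loop below
   produces s0 = op (s0, s2) and s1 = op (s1, s3), leaving one result
   per group member in the first REDUC_GROUP_SIZE slots.  */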
5448 if (slp_reduc)
5450 tree res, first_res, new_res;
5451 gimple *new_stmt;
5453 /* Reduce multiple scalar results in case of SLP unrolling. */
5454 for (j = group_size; scalar_results.iterate (j, &res);
5455 j++)
5457 first_res = scalar_results[j % group_size];
5458 new_stmt = gimple_build_assign (new_scalar_dest, code,
5459 first_res, res);
5460 new_res = make_ssa_name (new_scalar_dest, new_stmt);
5461 gimple_assign_set_lhs (new_stmt, new_res);
5462 gsi_insert_before (&exit_gsi, new_stmt, GSI_SAME_STMT);
5463 scalar_results[j % group_size] = new_res;
5466 else
5467 /* Not SLP - we have one scalar to keep in SCALAR_RESULTS. */
5468 scalar_results.safe_push (new_temp);
5471 if ((STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
5472 == INTEGER_INDUC_COND_REDUCTION)
5473 && !operand_equal_p (initial_def, induc_val, 0))
5475 /* Earlier we set the initial value to be a vector of induc_val
5476 values. Check the result and if it is induc_val then replace
5477 with the original initial value, unless induc_val is
5478 the same as initial_def already. */
5479 tree zcompare = build2 (EQ_EXPR, boolean_type_node, new_temp,
5480 induc_val);
5482 tree tmp = make_ssa_name (new_scalar_dest);
5483 epilog_stmt = gimple_build_assign (tmp, COND_EXPR, zcompare,
5484 initial_def, new_temp);
5485 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5486 scalar_results[0] = tmp;
5490 vect_finalize_reduction:
5492 if (double_reduc)
5493 loop = loop->inner;
5495 /* 2.5 Adjust the final result by the initial value of the reduction
5496 variable. (When such adjustment is not needed, then
5497 'adjustment_def' is zero). For example, if code is PLUS we create:
5498 new_temp = loop_exit_def + adjustment_def */
5500 if (adjustment_def)
5502 gcc_assert (!slp_reduc);
5503 if (nested_in_vect_loop)
5505 new_phi = new_phis[0];
5506 gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) == VECTOR_TYPE);
5507 expr = build2 (code, vectype, PHI_RESULT (new_phi), adjustment_def);
5508 new_dest = vect_create_destination_var (scalar_dest, vectype);
5510 else
5512 new_temp = scalar_results[0];
5513 gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) != VECTOR_TYPE);
5514 expr = build2 (code, scalar_type, new_temp, adjustment_def);
5515 new_dest = vect_create_destination_var (scalar_dest, scalar_type);
5518 epilog_stmt = gimple_build_assign (new_dest, expr);
5519 new_temp = make_ssa_name (new_dest, epilog_stmt);
5520 gimple_assign_set_lhs (epilog_stmt, new_temp);
5521 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5522 if (nested_in_vect_loop)
5524 stmt_vec_info epilog_stmt_info = loop_vinfo->add_stmt (epilog_stmt);
5525 STMT_VINFO_RELATED_STMT (epilog_stmt_info)
5526 = STMT_VINFO_RELATED_STMT (loop_vinfo->lookup_stmt (new_phi));
5528 if (!double_reduc)
5529 scalar_results.quick_push (new_temp);
5530 else
5531 scalar_results[0] = new_temp;
5533 else
5534 scalar_results[0] = new_temp;
5536 new_phis[0] = epilog_stmt;
5539 /* 2.6 Handle the loop-exit phis. Replace the uses of scalar loop-exit
5540 phis with new adjusted scalar results, i.e., replace use <s_out0>
5541 with use <s_out4>.
5543 Transform:
5544 loop_exit:
5545 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
5546 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
5547 v_out2 = reduce <v_out1>
5548 s_out3 = extract_field <v_out2, 0>
5549 s_out4 = adjust_result <s_out3>
5550 use <s_out0>
5551 use <s_out0>
5553 into:
5555 loop_exit:
5556 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
5557 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
5558 v_out2 = reduce <v_out1>
5559 s_out3 = extract_field <v_out2, 0>
5560 s_out4 = adjust_result <s_out3>
5561 use <s_out4>
5562 use <s_out4> */
5565 /* In an SLP reduction chain we reduce the vector results into one vector
5566 if necessary, hence we set REDUC_GROUP_SIZE to 1 here. SCALAR_DEST is the
5567 LHS of the last stmt in the reduction chain, since we are looking for
5568 the loop exit phi node. */
5569 if (REDUC_GROUP_FIRST_ELEMENT (stmt_info))
5571 stmt_vec_info dest_stmt_info
5572 = vect_orig_stmt (SLP_TREE_SCALAR_STMTS (slp_node)[group_size - 1]);
5573 scalar_dest = gimple_assign_lhs (dest_stmt_info->stmt);
5574 group_size = 1;
5577 /* In SLP we may have several statements in NEW_PHIS and REDUCTION_PHIS (in
5578 case that REDUC_GROUP_SIZE is greater than vectorization factor).
5579 Therefore, we need to match SCALAR_RESULTS with corresponding statements.
5580 The first (REDUC_GROUP_SIZE / number of new vector stmts) scalar results
5581 correspond to the first vector stmt, etc.
5582 (RATIO is equal to (REDUC_GROUP_SIZE / number of new vector stmts)). */
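/* For example, with REDUC_GROUP_SIZE 4 and two new vector statements the
   ratio is 2: scalar results 0 and 1 are matched with the first vector
   statement and reduction PHI, results 2 and 3 with the second.  */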
5583 if (group_size > new_phis.length ())
5585 ratio = group_size / new_phis.length ();
5586 gcc_assert (!(group_size % new_phis.length ()));
5588 else
5589 ratio = 1;
5591 stmt_vec_info epilog_stmt_info = NULL;
5592 for (k = 0; k < group_size; k++)
5594 if (k % ratio == 0)
5596 epilog_stmt_info = loop_vinfo->lookup_stmt (new_phis[k / ratio]);
5597 reduction_phi_info = reduction_phis[k / ratio];
5598 if (double_reduc)
5599 inner_phi = inner_phis[k / ratio];
5602 if (slp_reduc)
5604 stmt_vec_info scalar_stmt_info = SLP_TREE_SCALAR_STMTS (slp_node)[k];
5606 orig_stmt_info = STMT_VINFO_RELATED_STMT (scalar_stmt_info);
5607 /* SLP statements can't participate in patterns. */
5608 gcc_assert (!orig_stmt_info);
5609 scalar_dest = gimple_assign_lhs (scalar_stmt_info->stmt);
5612 phis.create (3);
5613 /* Find the loop-closed-use at the loop exit of the original scalar
5614 result. (The reduction result is expected to have two immediate uses -
5615 one at the latch block, and one at the loop exit). */
5616 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
5617 if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p)))
5618 && !is_gimple_debug (USE_STMT (use_p)))
5619 phis.safe_push (USE_STMT (use_p));
5621 /* While we expect to have found an exit_phi because of loop-closed-ssa
5622 form we can end up without one if the scalar cycle is dead. */
5624 FOR_EACH_VEC_ELT (phis, i, exit_phi)
5626 if (outer_loop)
5628 stmt_vec_info exit_phi_vinfo
5629 = loop_vinfo->lookup_stmt (exit_phi);
5630 gphi *vect_phi;
5632 /* FORNOW. Currently not supporting the case that an inner-loop
5633 reduction is not used in the outer-loop (but only outside the
5634 outer-loop), unless it is a double reduction. */
5635 gcc_assert ((STMT_VINFO_RELEVANT_P (exit_phi_vinfo)
5636 && !STMT_VINFO_LIVE_P (exit_phi_vinfo))
5637 || double_reduc);
5639 if (double_reduc)
5640 STMT_VINFO_VEC_STMT (exit_phi_vinfo) = inner_phi;
5641 else
5642 STMT_VINFO_VEC_STMT (exit_phi_vinfo) = epilog_stmt_info;
5643 if (!double_reduc
5644 || STMT_VINFO_DEF_TYPE (exit_phi_vinfo)
5645 != vect_double_reduction_def)
5646 continue;
5648 /* Handle double reduction:
5650 stmt1: s1 = phi <s0, s2> - double reduction phi (outer loop)
5651 stmt2: s3 = phi <s1, s4> - (regular) reduc phi (inner loop)
5652 stmt3: s4 = use (s3) - (regular) reduc stmt (inner loop)
5653 stmt4: s2 = phi <s4> - double reduction stmt (outer loop)
5655 At that point the regular reduction (stmt2 and stmt3) is
5656 already vectorized, as well as the exit phi node, stmt4.
5657 Here we vectorize the phi node of double reduction, stmt1, and
5658 update all relevant statements. */
5660 /* Go through all the uses of s2 to find double reduction phi
5661 node, i.e., stmt1 above. */
5662 orig_name = PHI_RESULT (exit_phi);
5663 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
5665 stmt_vec_info use_stmt_vinfo;
5666 tree vect_phi_init, preheader_arg, vect_phi_res;
5667 basic_block bb = gimple_bb (use_stmt);
5669 /* Check that USE_STMT is really a double reduction phi
5670 node. */
5671 if (gimple_code (use_stmt) != GIMPLE_PHI
5672 || gimple_phi_num_args (use_stmt) != 2
5673 || bb->loop_father != outer_loop)
5674 continue;
5675 use_stmt_vinfo = loop_vinfo->lookup_stmt (use_stmt);
5676 if (!use_stmt_vinfo
5677 || STMT_VINFO_DEF_TYPE (use_stmt_vinfo)
5678 != vect_double_reduction_def)
5679 continue;
5681 /* Create vector phi node for double reduction:
5682 vs1 = phi <vs0, vs2>
5683 vs1 was created previously in this function by a call to
5684 vect_get_vec_def_for_operand and is stored in
5685 vec_initial_def;
5686 vs2 is defined by INNER_PHI, the vectorized EXIT_PHI;
5687 vs0 is created here. */
5689 /* Create vector phi node. */
5690 vect_phi = create_phi_node (vec_initial_def, bb);
5691 loop_vec_info_for_loop (outer_loop)->add_stmt (vect_phi);
5693 /* Create vs0 - initial def of the double reduction phi. */
5694 preheader_arg = PHI_ARG_DEF_FROM_EDGE (use_stmt,
5695 loop_preheader_edge (outer_loop));
5696 vect_phi_init = get_initial_def_for_reduction
5697 (stmt_info, preheader_arg, NULL);
5699 /* Update phi node arguments with vs0 and vs2. */
5700 add_phi_arg (vect_phi, vect_phi_init,
5701 loop_preheader_edge (outer_loop),
5702 UNKNOWN_LOCATION);
5703 add_phi_arg (vect_phi, PHI_RESULT (inner_phi->stmt),
5704 loop_latch_edge (outer_loop), UNKNOWN_LOCATION);
5705 if (dump_enabled_p ())
5707 dump_printf_loc (MSG_NOTE, vect_location,
5708 "created double reduction phi node: ");
5709 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vect_phi, 0);
5712 vect_phi_res = PHI_RESULT (vect_phi);
5714 /* Replace the use, i.e., set the correct vs1 in the regular
5715 reduction phi node. FORNOW, NCOPIES is always 1, so the
5716 loop is redundant. */
5717 stmt_vec_info use_info = reduction_phi_info;
5718 for (j = 0; j < ncopies; j++)
5720 edge pr_edge = loop_preheader_edge (loop);
5721 SET_PHI_ARG_DEF (as_a <gphi *> (use_info->stmt),
5722 pr_edge->dest_idx, vect_phi_res);
5723 use_info = STMT_VINFO_RELATED_STMT (use_info);
5729 phis.release ();
5730 if (nested_in_vect_loop)
5732 if (double_reduc)
5733 loop = outer_loop;
5734 else
5735 continue;
5738 phis.create (3);
5739 /* Find the loop-closed-use at the loop exit of the original scalar
5740 result. (The reduction result is expected to have two immediate uses,
5741 one at the latch block, and one at the loop exit). For double
5742 reductions we are looking for exit phis of the outer loop. */
5743 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
5745 if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p))))
5747 if (!is_gimple_debug (USE_STMT (use_p)))
5748 phis.safe_push (USE_STMT (use_p));
5750 else
5752 if (double_reduc && gimple_code (USE_STMT (use_p)) == GIMPLE_PHI)
5754 tree phi_res = PHI_RESULT (USE_STMT (use_p));
5756 FOR_EACH_IMM_USE_FAST (phi_use_p, phi_imm_iter, phi_res)
5758 if (!flow_bb_inside_loop_p (loop,
5759 gimple_bb (USE_STMT (phi_use_p)))
5760 && !is_gimple_debug (USE_STMT (phi_use_p)))
5761 phis.safe_push (USE_STMT (phi_use_p));
5767 FOR_EACH_VEC_ELT (phis, i, exit_phi)
5769 /* Replace the uses: */
5770 orig_name = PHI_RESULT (exit_phi);
5771 scalar_result = scalar_results[k];
5772 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
5773 FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
5774 SET_USE (use_p, scalar_result);
5777 phis.release ();
5781 /* Return a vector of type VECTYPE that is equal to the vector select
5782 operation "MASK ? VEC : IDENTITY". Insert the select statements
5783 before GSI. */
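/* For example, in a fully-masked PLUS reduction IDENTITY is a zero
   vector, so inactive lanes are replaced by 0 and do not change the
   accumulated result.  */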
5785 static tree
5786 merge_with_identity (gimple_stmt_iterator *gsi, tree mask, tree vectype,
5787 tree vec, tree identity)
5789 tree cond = make_temp_ssa_name (vectype, NULL, "cond");
5790 gimple *new_stmt = gimple_build_assign (cond, VEC_COND_EXPR,
5791 mask, vec, identity);
5792 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
5793 return cond;
5796 /* Successively apply CODE to each element of VECTOR_RHS, in left-to-right
5797 order, starting with LHS. Insert the extraction statements before GSI and
5798 associate the new scalar SSA names with variable SCALAR_DEST.
5799 Return the SSA name for the result. */
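/* For instance, for a 4-element VECTOR_RHS and CODE PLUS_EXPR this
   generates the in-order chain
     lhs = (((lhs + v[0]) + v[1]) + v[2]) + v[3]
   which deliberately keeps the association of the original scalar loop.  */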
5801 static tree
5802 vect_expand_fold_left (gimple_stmt_iterator *gsi, tree scalar_dest,
5803 tree_code code, tree lhs, tree vector_rhs)
5805 tree vectype = TREE_TYPE (vector_rhs);
5806 tree scalar_type = TREE_TYPE (vectype);
5807 tree bitsize = TYPE_SIZE (scalar_type);
5808 unsigned HOST_WIDE_INT vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype));
5809 unsigned HOST_WIDE_INT element_bitsize = tree_to_uhwi (bitsize);
5811 for (unsigned HOST_WIDE_INT bit_offset = 0;
5812 bit_offset < vec_size_in_bits;
5813 bit_offset += element_bitsize)
5815 tree bitpos = bitsize_int (bit_offset);
5816 tree rhs = build3 (BIT_FIELD_REF, scalar_type, vector_rhs,
5817 bitsize, bitpos);
5819 gassign *stmt = gimple_build_assign (scalar_dest, rhs);
5820 rhs = make_ssa_name (scalar_dest, stmt);
5821 gimple_assign_set_lhs (stmt, rhs);
5822 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
5824 stmt = gimple_build_assign (scalar_dest, code, lhs, rhs);
5825 tree new_name = make_ssa_name (scalar_dest, stmt);
5826 gimple_assign_set_lhs (stmt, new_name);
5827 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
5828 lhs = new_name;
5830 return lhs;
5833 /* Perform an in-order reduction (FOLD_LEFT_REDUCTION). STMT_INFO is the
5834 statement that sets the live-out value. REDUC_DEF_STMT is the phi
5835 statement. CODE is the operation performed by STMT_INFO and OPS are
5836 its scalar operands. REDUC_INDEX is the index of the operand in
5837 OPS that is set by REDUC_DEF_STMT. REDUC_FN is the function that
5838 implements in-order reduction, or IFN_LAST if we should open-code it.
5839 VECTYPE_IN is the type of the vector input. MASKS specifies the masks
5840 that should be used to control the operation in a fully-masked loop. */
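/* As a sketch of the overall transformation: a scalar in-order reduction
   such as

     double r = init;
     for (i = 0; i < n; i++)
       r += a[i];

   is vectorized by folding each vector of loaded elements into the scalar
   accumulator in element order, either through REDUC_FN (e.g.
   IFN_FOLD_LEFT_PLUS for PLUS_EXPR) or through the open-coded expansion in
   vect_expand_fold_left, so no reassociation is introduced.  */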
5842 static bool
5843 vectorize_fold_left_reduction (stmt_vec_info stmt_info,
5844 gimple_stmt_iterator *gsi,
5845 stmt_vec_info *vec_stmt, slp_tree slp_node,
5846 gimple *reduc_def_stmt,
5847 tree_code code, internal_fn reduc_fn,
5848 tree ops[3], tree vectype_in,
5849 int reduc_index, vec_loop_masks *masks)
5851 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5852 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
5853 tree vectype_out = STMT_VINFO_VECTYPE (stmt_info);
5854 stmt_vec_info new_stmt_info = NULL;
5856 int ncopies;
5857 if (slp_node)
5858 ncopies = 1;
5859 else
5860 ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
5862 gcc_assert (!nested_in_vect_loop_p (loop, stmt_info));
5863 gcc_assert (ncopies == 1);
5864 gcc_assert (TREE_CODE_LENGTH (code) == binary_op);
5865 gcc_assert (reduc_index == (code == MINUS_EXPR ? 0 : 1));
5866 gcc_assert (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
5867 == FOLD_LEFT_REDUCTION);
5869 if (slp_node)
5870 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (vectype_out),
5871 TYPE_VECTOR_SUBPARTS (vectype_in)));
5873 tree op0 = ops[1 - reduc_index];
5875 int group_size = 1;
5876 stmt_vec_info scalar_dest_def_info;
5877 auto_vec<tree> vec_oprnds0;
5878 if (slp_node)
5880 vect_get_vec_defs (op0, NULL_TREE, stmt_info, &vec_oprnds0, NULL,
5881 slp_node);
5882 group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();
5883 scalar_dest_def_info = SLP_TREE_SCALAR_STMTS (slp_node)[group_size - 1];
5885 else
5887 tree loop_vec_def0 = vect_get_vec_def_for_operand (op0, stmt_info);
5888 vec_oprnds0.create (1);
5889 vec_oprnds0.quick_push (loop_vec_def0);
5890 scalar_dest_def_info = stmt_info;
5893 tree scalar_dest = gimple_assign_lhs (scalar_dest_def_info->stmt);
5894 tree scalar_type = TREE_TYPE (scalar_dest);
5895 tree reduc_var = gimple_phi_result (reduc_def_stmt);
5897 int vec_num = vec_oprnds0.length ();
5898 gcc_assert (vec_num == 1 || slp_node);
5899 tree vec_elem_type = TREE_TYPE (vectype_out);
5900 gcc_checking_assert (useless_type_conversion_p (scalar_type, vec_elem_type));
5902 tree vector_identity = NULL_TREE;
5903 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
5904 vector_identity = build_zero_cst (vectype_out);
5906 tree scalar_dest_var = vect_create_destination_var (scalar_dest, NULL);
5907 int i;
5908 tree def0;
5909 FOR_EACH_VEC_ELT (vec_oprnds0, i, def0)
5911 gimple *new_stmt;
5912 tree mask = NULL_TREE;
5913 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
5914 mask = vect_get_loop_mask (gsi, masks, vec_num, vectype_in, i);
5916 /* Handle MINUS by adding the negative. */
5917 if (reduc_fn != IFN_LAST && code == MINUS_EXPR)
5919 tree negated = make_ssa_name (vectype_out);
5920 new_stmt = gimple_build_assign (negated, NEGATE_EXPR, def0);
5921 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
5922 def0 = negated;
5925 if (mask)
5926 def0 = merge_with_identity (gsi, mask, vectype_out, def0,
5927 vector_identity);
5929 /* On the first iteration the input is simply the scalar phi
5930 result, and for subsequent iterations it is the output of
5931 the preceding operation. */
5932 if (reduc_fn != IFN_LAST)
5934 new_stmt = gimple_build_call_internal (reduc_fn, 2, reduc_var, def0);
5935 /* For chained SLP reductions the output of the previous reduction
5936 operation serves as the input of the next. For the final statement
5937 the output cannot be a temporary - we reuse the original
5938 scalar destination of the last statement. */
5939 if (i != vec_num - 1)
5941 gimple_set_lhs (new_stmt, scalar_dest_var);
5942 reduc_var = make_ssa_name (scalar_dest_var, new_stmt);
5943 gimple_set_lhs (new_stmt, reduc_var);
5946 else
5948 reduc_var = vect_expand_fold_left (gsi, scalar_dest_var, code,
5949 reduc_var, def0);
5950 new_stmt = SSA_NAME_DEF_STMT (reduc_var);
5951 /* Remove the statement, so that we can use the same code paths
5952 as for statements that we've just created. */
5953 gimple_stmt_iterator tmp_gsi = gsi_for_stmt (new_stmt);
5954 gsi_remove (&tmp_gsi, false);
5957 if (i == vec_num - 1)
5959 gimple_set_lhs (new_stmt, scalar_dest);
5960 new_stmt_info = vect_finish_replace_stmt (scalar_dest_def_info,
5961 new_stmt);
5963 else
5964 new_stmt_info = vect_finish_stmt_generation (scalar_dest_def_info,
5965 new_stmt, gsi);
5967 if (slp_node)
5968 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
5971 if (!slp_node)
5972 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
5974 return true;
5977 /* Function is_nonwrapping_integer_induction.
5979 Check if STMT_VINFO (which is part of loop LOOP) is an induction that
5980 both increments and does not cause overflow. */
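/* A worked example under wrapping (unsigned) semantics: for a 16-bit
   unsigned IV with base 0 and step 4 that executes at most 10000 times,
   the largest value is 0 + 4 * 10000 = 40000, which needs 16 bits and
   therefore fits; with at most 20000 executions the bound would be 80000,
   needing 17 bits, and the function returns false.  */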
5982 static bool
5983 is_nonwrapping_integer_induction (stmt_vec_info stmt_vinfo, struct loop *loop)
5985 gphi *phi = as_a <gphi *> (stmt_vinfo->stmt);
5986 tree base = STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo);
5987 tree step = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo);
5988 tree lhs_type = TREE_TYPE (gimple_phi_result (phi));
5989 widest_int ni, max_loop_value, lhs_max;
5990 wi::overflow_type overflow = wi::OVF_NONE;
5992 /* Make sure the loop is integer based. */
5993 if (TREE_CODE (base) != INTEGER_CST
5994 || TREE_CODE (step) != INTEGER_CST)
5995 return false;
5997 /* Check that the max size of the loop will not wrap. */
5999 if (TYPE_OVERFLOW_UNDEFINED (lhs_type))
6000 return true;
6002 if (! max_stmt_executions (loop, &ni))
6003 return false;
6005 max_loop_value = wi::mul (wi::to_widest (step), ni, TYPE_SIGN (lhs_type),
6006 &overflow);
6007 if (overflow)
6008 return false;
6010 max_loop_value = wi::add (wi::to_widest (base), max_loop_value,
6011 TYPE_SIGN (lhs_type), &overflow);
6012 if (overflow)
6013 return false;
6015 return (wi::min_precision (max_loop_value, TYPE_SIGN (lhs_type))
6016 <= TYPE_PRECISION (lhs_type));
6019 /* Function vectorizable_reduction.
6021 Check if STMT_INFO performs a reduction operation that can be vectorized.
6022 If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
6023 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
6024 Return true if STMT_INFO is vectorizable in this way.
6026 This function also handles reduction idioms (patterns) that have been
6027 recognized in advance during vect_pattern_recog. In this case, STMT_INFO
6028 may be of this form:
6029 X = pattern_expr (arg0, arg1, ..., X)
6030 and its STMT_VINFO_RELATED_STMT points to the last stmt in the original
6031 sequence that had been detected and replaced by the pattern-stmt
6032 (STMT_INFO).
6034 This function also handles reduction of condition expressions, for example:
6035 for (int i = 0; i < N; i++)
6036 if (a[i] < value)
6037 last = a[i];
6038 This is handled by vectorizing the loop and creating an additional vector
6039 containing the loop indexes for which "a[i] < value" was true. In the
6040 function epilogue this is reduced to a single max value and then used to
6041 index into the vector of results.
6043 In some cases of reduction patterns, the type of the reduction variable X is
6044 different than the type of the other arguments of STMT_INFO.
6045 In such cases, the vectype that is used when transforming STMT_INFO into
6046 a vector stmt is different than the vectype that is used to determine the
6047 vectorization factor, because it consists of a different number of elements
6048 than the actual number of elements that are being operated upon in parallel.
6050 For example, consider an accumulation of shorts into an int accumulator.
6051 On some targets it's possible to vectorize this pattern operating on 8
6052 shorts at a time (hence, the vectype for purposes of determining the
6053 vectorization factor should be V8HI); on the other hand, the vectype that
6054 is used to create the vector form is actually V4SI (the type of the result).
6056 Upon entry to this function, STMT_VINFO_VECTYPE records the vectype that
6057 indicates what is the actual level of parallelism (V8HI in the example), so
6058 that the right vectorization factor would be derived. This vectype
6059 corresponds to the type of arguments to the reduction stmt, and should *NOT*
6060 be used to create the vectorized stmt. The right vectype for the vectorized
6061 stmt is obtained from the type of the result X:
6062 get_vectype_for_scalar_type (TREE_TYPE (X))
6064 This means that, contrary to "regular" reductions (or "regular" stmts in
6065 general), the following equation:
6066 STMT_VINFO_VECTYPE == get_vectype_for_scalar_type (TREE_TYPE (X))
6067 does *NOT* necessarily hold for reduction patterns. */
6069 bool
6070 vectorizable_reduction (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
6071 stmt_vec_info *vec_stmt, slp_tree slp_node,
6072 slp_instance slp_node_instance,
6073 stmt_vector_for_cost *cost_vec)
6075 tree vec_dest;
6076 tree scalar_dest;
6077 tree vectype_out = STMT_VINFO_VECTYPE (stmt_info);
6078 tree vectype_in = NULL_TREE;
6079 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
6080 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
6081 enum tree_code code, orig_code;
6082 internal_fn reduc_fn;
6083 machine_mode vec_mode;
6084 int op_type;
6085 optab optab;
6086 tree new_temp = NULL_TREE;
6087 enum vect_def_type dt, cond_reduc_dt = vect_unknown_def_type;
6088 stmt_vec_info cond_stmt_vinfo = NULL;
6089 enum tree_code cond_reduc_op_code = ERROR_MARK;
6090 tree scalar_type;
6091 bool is_simple_use;
6092 int i;
6093 int ncopies;
6094 int epilog_copies;
6095 stmt_vec_info prev_stmt_info, prev_phi_info;
6096 bool single_defuse_cycle = false;
6097 stmt_vec_info new_stmt_info = NULL;
6098 int j;
6099 tree ops[3];
6100 enum vect_def_type dts[3];
6101 bool nested_cycle = false, found_nested_cycle_def = false;
6102 bool double_reduc = false;
6103 basic_block def_bb;
6104 struct loop * def_stmt_loop;
6105 tree def_arg;
6106 auto_vec<tree> vec_oprnds0;
6107 auto_vec<tree> vec_oprnds1;
6108 auto_vec<tree> vec_oprnds2;
6109 auto_vec<tree> vect_defs;
6110 auto_vec<stmt_vec_info> phis;
6111 int vec_num;
6112 tree def0, tem;
6113 tree cr_index_scalar_type = NULL_TREE, cr_index_vector_type = NULL_TREE;
6114 tree cond_reduc_val = NULL_TREE;
6116 /* Make sure it was already recognized as a reduction computation. */
6117 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_reduction_def
6118 && STMT_VINFO_DEF_TYPE (stmt_info) != vect_nested_cycle)
6119 return false;
6121 if (nested_in_vect_loop_p (loop, stmt_info))
6123 loop = loop->inner;
6124 nested_cycle = true;
6127 if (REDUC_GROUP_FIRST_ELEMENT (stmt_info))
6128 gcc_assert (slp_node
6129 && REDUC_GROUP_FIRST_ELEMENT (stmt_info) == stmt_info);
6131 if (gphi *phi = dyn_cast <gphi *> (stmt_info->stmt))
6133 tree phi_result = gimple_phi_result (phi);
6134 /* Analysis is fully done on the reduction stmt invocation. */
6135 if (! vec_stmt)
6137 if (slp_node)
6138 slp_node_instance->reduc_phis = slp_node;
6140 STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
6141 return true;
6144 if (STMT_VINFO_REDUC_TYPE (stmt_info) == FOLD_LEFT_REDUCTION)
6145 /* Leave the scalar phi in place. Note that checking
6146 STMT_VINFO_VEC_REDUCTION_TYPE (as below) only works
6147 for reductions involving a single statement. */
6148 return true;
6150 stmt_vec_info reduc_stmt_info = STMT_VINFO_REDUC_DEF (stmt_info);
6151 reduc_stmt_info = vect_stmt_to_vectorize (reduc_stmt_info);
6153 if (STMT_VINFO_VEC_REDUCTION_TYPE (reduc_stmt_info)
6154 == EXTRACT_LAST_REDUCTION)
6155 /* Leave the scalar phi in place. */
6156 return true;
6158 gassign *reduc_stmt = as_a <gassign *> (reduc_stmt_info->stmt);
6159 for (unsigned k = 1; k < gimple_num_ops (reduc_stmt); ++k)
6161 tree op = gimple_op (reduc_stmt, k);
6162 if (op == phi_result)
6163 continue;
6164 if (k == 1
6165 && gimple_assign_rhs_code (reduc_stmt) == COND_EXPR)
6166 continue;
6167 if (!vectype_in
6168 || (GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (vectype_in)))
6169 < GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (op)))))
6170 vectype_in = get_vectype_for_scalar_type (TREE_TYPE (op));
6171 break;
6173 gcc_assert (vectype_in);
6175 if (slp_node)
6176 ncopies = 1;
6177 else
6178 ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
6180 stmt_vec_info use_stmt_info;
6181 if (ncopies > 1
6182 && STMT_VINFO_RELEVANT (reduc_stmt_info) <= vect_used_only_live
6183 && (use_stmt_info = loop_vinfo->lookup_single_use (phi_result))
6184 && vect_stmt_to_vectorize (use_stmt_info) == reduc_stmt_info)
6185 single_defuse_cycle = true;
6187 /* Create the destination vector */
6188 scalar_dest = gimple_assign_lhs (reduc_stmt);
6189 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
6191 if (slp_node)
6192 /* The size vect_schedule_slp_instance computes is off for us. */
6193 vec_num = vect_get_num_vectors
6194 (LOOP_VINFO_VECT_FACTOR (loop_vinfo)
6195 * SLP_TREE_SCALAR_STMTS (slp_node).length (),
6196 vectype_in);
6197 else
6198 vec_num = 1;
6200 /* Generate the reduction PHIs upfront. */
6201 prev_phi_info = NULL;
6202 for (j = 0; j < ncopies; j++)
6204 if (j == 0 || !single_defuse_cycle)
6206 for (i = 0; i < vec_num; i++)
6208 /* Create the reduction-phi that defines the reduction
6209 operand. */
6210 gimple *new_phi = create_phi_node (vec_dest, loop->header);
6211 stmt_vec_info new_phi_info = loop_vinfo->add_stmt (new_phi);
6213 if (slp_node)
6214 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_phi_info);
6215 else
6217 if (j == 0)
6218 STMT_VINFO_VEC_STMT (stmt_info)
6219 = *vec_stmt = new_phi_info;
6220 else
6221 STMT_VINFO_RELATED_STMT (prev_phi_info) = new_phi_info;
6222 prev_phi_info = new_phi_info;
6228 return true;
6231 /* 1. Is vectorizable reduction? */
6232 /* Not supportable if the reduction variable is used in the loop, unless
6233 it's a reduction chain. */
6234 if (STMT_VINFO_RELEVANT (stmt_info) > vect_used_in_outer
6235 && !REDUC_GROUP_FIRST_ELEMENT (stmt_info))
6236 return false;
6238 /* Reductions that are not used even in an enclosing outer-loop,
6239 are expected to be "live" (used out of the loop). */
6240 if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_scope
6241 && !STMT_VINFO_LIVE_P (stmt_info))
6242 return false;
6244 /* 2. Has this been recognized as a reduction pattern?
6246 Check if STMT represents a pattern that has been recognized
6247 in earlier analysis stages. For stmts that represent a pattern,
6248 the STMT_VINFO_RELATED_STMT field records the last stmt in
6249 the original sequence that constitutes the pattern. */
6251 stmt_vec_info orig_stmt_info = STMT_VINFO_RELATED_STMT (stmt_info);
6252 if (orig_stmt_info)
6254 gcc_assert (STMT_VINFO_IN_PATTERN_P (orig_stmt_info));
6255 gcc_assert (!STMT_VINFO_IN_PATTERN_P (stmt_info));
6258 /* 3. Check the operands of the operation. The first operands are defined
6259 inside the loop body. The last operand is the reduction variable,
6260 which is defined by the loop-header-phi. */
6262 gassign *stmt = as_a <gassign *> (stmt_info->stmt);
6264 /* Flatten RHS. */
6265 switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
6267 case GIMPLE_BINARY_RHS:
6268 code = gimple_assign_rhs_code (stmt);
6269 op_type = TREE_CODE_LENGTH (code);
6270 gcc_assert (op_type == binary_op);
6271 ops[0] = gimple_assign_rhs1 (stmt);
6272 ops[1] = gimple_assign_rhs2 (stmt);
6273 break;
6275 case GIMPLE_TERNARY_RHS:
6276 code = gimple_assign_rhs_code (stmt);
6277 op_type = TREE_CODE_LENGTH (code);
6278 gcc_assert (op_type == ternary_op);
6279 ops[0] = gimple_assign_rhs1 (stmt);
6280 ops[1] = gimple_assign_rhs2 (stmt);
6281 ops[2] = gimple_assign_rhs3 (stmt);
6282 break;
6284 case GIMPLE_UNARY_RHS:
6285 return false;
6287 default:
6288 gcc_unreachable ();
6291 if (code == COND_EXPR && slp_node)
6292 return false;
6294 scalar_dest = gimple_assign_lhs (stmt);
6295 scalar_type = TREE_TYPE (scalar_dest);
6296 if (!POINTER_TYPE_P (scalar_type) && !INTEGRAL_TYPE_P (scalar_type)
6297 && !SCALAR_FLOAT_TYPE_P (scalar_type))
6298 return false;
6300 /* Do not try to vectorize bit-precision reductions. */
6301 if (!type_has_mode_precision_p (scalar_type))
6302 return false;
6304 /* All uses but the last are expected to be defined in the loop.
6305 The last use is the reduction variable. In case of nested cycle this
6306 assumption is not true: we use reduc_index to record the index of the
6307 reduction variable. */
6308 stmt_vec_info reduc_def_info = NULL;
6309 int reduc_index = -1;
6310 for (i = 0; i < op_type; i++)
6312 /* The condition of COND_EXPR is checked in vectorizable_condition(). */
6313 if (i == 0 && code == COND_EXPR)
6314 continue;
6316 stmt_vec_info def_stmt_info;
6317 is_simple_use = vect_is_simple_use (ops[i], loop_vinfo, &dts[i], &tem,
6318 &def_stmt_info);
6319 dt = dts[i];
6320 gcc_assert (is_simple_use);
6321 if (dt == vect_reduction_def)
6323 reduc_def_info = def_stmt_info;
6324 reduc_index = i;
6325 continue;
6327 else if (tem)
6329 /* To properly compute ncopies we are interested in the widest
6330 input type in case we're looking at a widening accumulation. */
6331 if (!vectype_in
6332 || (GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (vectype_in)))
6333 < GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (tem)))))
6334 vectype_in = tem;
6337 if (dt != vect_internal_def
6338 && dt != vect_external_def
6339 && dt != vect_constant_def
6340 && dt != vect_induction_def
6341 && !(dt == vect_nested_cycle && nested_cycle))
6342 return false;
6344 if (dt == vect_nested_cycle)
6346 found_nested_cycle_def = true;
6347 reduc_def_info = def_stmt_info;
6348 reduc_index = i;
6351 if (i == 1 && code == COND_EXPR)
6353 /* Record how value of COND_EXPR is defined. */
6354 if (dt == vect_constant_def)
6356 cond_reduc_dt = dt;
6357 cond_reduc_val = ops[i];
6359 if (dt == vect_induction_def
6360 && def_stmt_info
6361 && is_nonwrapping_integer_induction (def_stmt_info, loop))
6363 cond_reduc_dt = dt;
6364 cond_stmt_vinfo = def_stmt_info;
6369 if (!vectype_in)
6370 vectype_in = vectype_out;
6372 /* When vectorizing a reduction chain w/o SLP the reduction PHI is not
6373 directly used in stmt. */
6374 if (reduc_index == -1)
6376 if (STMT_VINFO_REDUC_TYPE (stmt_info) == FOLD_LEFT_REDUCTION)
6378 if (dump_enabled_p ())
6379 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6380 "in-order reduction chain without SLP.\n");
6381 return false;
6384 if (orig_stmt_info)
6385 reduc_def_info = STMT_VINFO_REDUC_DEF (orig_stmt_info);
6386 else
6387 reduc_def_info = STMT_VINFO_REDUC_DEF (stmt_info);
6390 if (! reduc_def_info)
6391 return false;
6393 gphi *reduc_def_phi = dyn_cast <gphi *> (reduc_def_info->stmt);
6394 if (!reduc_def_phi)
6395 return false;
6397 if (!(reduc_index == -1
6398 || dts[reduc_index] == vect_reduction_def
6399 || dts[reduc_index] == vect_nested_cycle
6400 || ((dts[reduc_index] == vect_internal_def
6401 || dts[reduc_index] == vect_external_def
6402 || dts[reduc_index] == vect_constant_def
6403 || dts[reduc_index] == vect_induction_def)
6404 && nested_cycle && found_nested_cycle_def)))
6406 /* For pattern recognized stmts, orig_stmt might be a reduction,
6407 but some helper statements for the pattern might not be, or
6408 might be COND_EXPRs with reduction uses in the condition. */
6409 gcc_assert (orig_stmt_info);
6410 return false;
6413 /* PHIs should not participate in patterns. */
6414 gcc_assert (!STMT_VINFO_RELATED_STMT (reduc_def_info));
6415 enum vect_reduction_type v_reduc_type
6416 = STMT_VINFO_REDUC_TYPE (reduc_def_info);
6417 stmt_vec_info tmp = STMT_VINFO_REDUC_DEF (reduc_def_info);
6419 STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) = v_reduc_type;
6420 /* If we have a condition reduction, see if we can simplify it further. */
6421 if (v_reduc_type == COND_REDUCTION)
6423 /* TODO: We can't yet handle reduction chains, since we need to treat
6424 each COND_EXPR in the chain specially, not just the last one.
6425 E.g. for:
6427 x_1 = PHI <x_3, ...>
6428 x_2 = a_2 ? ... : x_1;
6429 x_3 = a_3 ? ... : x_2;
6431 we're interested in the last element in x_3 for which a_2 || a_3
6432 is true, whereas the current reduction chain handling would
6433 vectorize x_2 as a normal VEC_COND_EXPR and only treat x_3
6434 as a reduction operation. */
6435 if (reduc_index == -1)
6437 if (dump_enabled_p ())
6438 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6439 "conditional reduction chains not supported\n");
6440 return false;
6443 /* vect_is_simple_reduction ensured that operand 2 is the
6444 loop-carried operand. */
6445 gcc_assert (reduc_index == 2);
6447 /* Loop peeling modifies initial value of reduction PHI, which
6448 makes the reduction stmt to be transformed different to the
6449 original stmt analyzed. We need to record reduction code for
6450 CONST_COND_REDUCTION type reduction at analyzing stage, thus
6451 it can be used directly at transform stage. */
6452 if (STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info) == MAX_EXPR
6453 || STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info) == MIN_EXPR)
6455 /* Also set the reduction type to CONST_COND_REDUCTION. */
6456 gcc_assert (cond_reduc_dt == vect_constant_def);
6457 STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) = CONST_COND_REDUCTION;
6459 else if (direct_internal_fn_supported_p (IFN_FOLD_EXTRACT_LAST,
6460 vectype_in, OPTIMIZE_FOR_SPEED))
6462 if (dump_enabled_p ())
6463 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6464 "optimizing condition reduction with"
6465 " FOLD_EXTRACT_LAST.\n");
6466 STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) = EXTRACT_LAST_REDUCTION;
6468 else if (cond_reduc_dt == vect_induction_def)
6470 tree base
6471 = STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (cond_stmt_vinfo);
6472 tree step = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (cond_stmt_vinfo);
6474 gcc_assert (TREE_CODE (base) == INTEGER_CST
6475 && TREE_CODE (step) == INTEGER_CST);
6476 cond_reduc_val = NULL_TREE;
6477 /* Find a suitable value: below base for MAX_EXPR, above base for
6478 MIN_EXPR; punt for now if base is the minimum value of the type
6479 for MAX_EXPR or the maximum value of the type for MIN_EXPR. */
6480 if (tree_int_cst_sgn (step) == -1)
6482 cond_reduc_op_code = MIN_EXPR;
6483 if (tree_int_cst_sgn (base) == -1)
6484 cond_reduc_val = build_int_cst (TREE_TYPE (base), 0);
6485 else if (tree_int_cst_lt (base,
6486 TYPE_MAX_VALUE (TREE_TYPE (base))))
6487 cond_reduc_val
6488 = int_const_binop (PLUS_EXPR, base, integer_one_node);
6490 else
6492 cond_reduc_op_code = MAX_EXPR;
6493 if (tree_int_cst_sgn (base) == 1)
6494 cond_reduc_val = build_int_cst (TREE_TYPE (base), 0);
6495 else if (tree_int_cst_lt (TYPE_MIN_VALUE (TREE_TYPE (base)),
6496 base))
6497 cond_reduc_val
6498 = int_const_binop (MINUS_EXPR, base, integer_one_node);
6500 if (cond_reduc_val)
6502 if (dump_enabled_p ())
6503 dump_printf_loc (MSG_NOTE, vect_location,
6504 "condition expression based on "
6505 "integer induction.\n");
6506 STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
6507 = INTEGER_INDUC_COND_REDUCTION;
6510 else if (cond_reduc_dt == vect_constant_def)
6512 enum vect_def_type cond_initial_dt;
6513 gimple *def_stmt = SSA_NAME_DEF_STMT (ops[reduc_index]);
6514 tree cond_initial_val
6515 = PHI_ARG_DEF_FROM_EDGE (def_stmt, loop_preheader_edge (loop));
6517 gcc_assert (cond_reduc_val != NULL_TREE);
6518 vect_is_simple_use (cond_initial_val, loop_vinfo, &cond_initial_dt);
6519 if (cond_initial_dt == vect_constant_def
6520 && types_compatible_p (TREE_TYPE (cond_initial_val),
6521 TREE_TYPE (cond_reduc_val)))
6523 tree e = fold_binary (LE_EXPR, boolean_type_node,
6524 cond_initial_val, cond_reduc_val);
6525 if (e && (integer_onep (e) || integer_zerop (e)))
6527 if (dump_enabled_p ())
6528 dump_printf_loc (MSG_NOTE, vect_location,
6529 "condition expression based on "
6530 "compile time constant.\n");
6531 /* Record reduction code at analysis stage. */
6532 STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info)
6533 = integer_onep (e) ? MAX_EXPR : MIN_EXPR;
6534 STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
6535 = CONST_COND_REDUCTION;
6541 if (orig_stmt_info)
6542 gcc_assert (tmp == orig_stmt_info
6543 || REDUC_GROUP_FIRST_ELEMENT (tmp) == orig_stmt_info);
6544 else
6545 /* We changed STMT to be the first stmt in reduction chain, hence we
6546 check that in this case the first element in the chain is STMT. */
6547 gcc_assert (tmp == stmt_info
6548 || REDUC_GROUP_FIRST_ELEMENT (tmp) == stmt_info);
6550 if (STMT_VINFO_LIVE_P (reduc_def_info))
6551 return false;
6553 if (slp_node)
6554 ncopies = 1;
6555 else
6556 ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
6558 gcc_assert (ncopies >= 1);
6560 vec_mode = TYPE_MODE (vectype_in);
6561 poly_uint64 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
6563 if (code == COND_EXPR)
6565 /* Only call during the analysis stage, otherwise we'll lose
6566 STMT_VINFO_TYPE. */
6567 if (!vec_stmt && !vectorizable_condition (stmt_info, gsi, NULL,
6568 ops[reduc_index], 0, NULL,
6569 cost_vec))
6571 if (dump_enabled_p ())
6572 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6573 "unsupported condition in reduction\n");
6574 return false;
6577 else
6579 /* 4. Supportable by target? */
6581 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR
6582 || code == LROTATE_EXPR || code == RROTATE_EXPR)
6584 /* Shifts and rotates are only supported by vectorizable_shifts,
6585 not vectorizable_reduction. */
6586 if (dump_enabled_p ())
6587 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6588 "unsupported shift or rotation.\n");
6589 return false;
6592 /* 4.1. check support for the operation in the loop */
6593 optab = optab_for_tree_code (code, vectype_in, optab_default);
6594 if (!optab)
6596 if (dump_enabled_p ())
6597 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6598 "no optab.\n");
6600 return false;
6603 if (optab_handler (optab, vec_mode) == CODE_FOR_nothing)
6605 if (dump_enabled_p ())
6606 dump_printf (MSG_NOTE, "op not supported by target.\n");
6608 if (maybe_ne (GET_MODE_SIZE (vec_mode), UNITS_PER_WORD)
6609 || !vect_worthwhile_without_simd_p (loop_vinfo, code))
6610 return false;
6612 if (dump_enabled_p ())
6613 dump_printf (MSG_NOTE, "proceeding using word mode.\n");
6616 /* Worthwhile without SIMD support? */
6617 if (!VECTOR_MODE_P (TYPE_MODE (vectype_in))
6618 && !vect_worthwhile_without_simd_p (loop_vinfo, code))
6620 if (dump_enabled_p ())
6621 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6622 "not worthwhile without SIMD support.\n");
6624 return false;
6628 /* 4.2. Check support for the epilog operation.
6630 If STMT represents a reduction pattern, then the type of the
6631 reduction variable may be different than the type of the rest
6632 of the arguments. For example, consider the case of accumulation
6633 of shorts into an int accumulator; The original code:
6634 S1: int_a = (int) short_a;
6635 orig_stmt-> S2: int_acc = plus <int_a ,int_acc>;
6637 was replaced with:
6638 STMT: int_acc = widen_sum <short_a, int_acc>
6640 This means that:
6641 1. The tree-code that is used to create the vector operation in the
6642 epilog code (that reduces the partial results) is not the
6643 tree-code of STMT, but is rather the tree-code of the original
6644 stmt from the pattern that STMT is replacing. I.e, in the example
6645 above we want to use 'widen_sum' in the loop, but 'plus' in the
6646 epilog.
6647 2. The type (mode) we use to check available target support
6648 for the vector operation to be created in the *epilog*, is
6649 determined by the type of the reduction variable (in the example
6650 above we'd check this: optab_handler (plus_optab, vect_int_mode)).
6651 However the type (mode) we use to check available target support
6652 for the vector operation to be created *inside the loop*, is
6653 determined by the type of the other arguments to STMT (in the
6654 example we'd check this: optab_handler (widen_sum_optab,
6655 vect_short_mode)).
6657 This is contrary to "regular" reductions, in which the types of all
6658 the arguments are the same as the type of the reduction variable.
6659 For "regular" reductions we can therefore use the same vector type
6660 (and also the same tree-code) when generating the epilog code and
6661 when generating the code inside the loop. */
6663 vect_reduction_type reduction_type
6664 = STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info);
6665 if (orig_stmt_info
6666 && (reduction_type == TREE_CODE_REDUCTION
6667 || reduction_type == FOLD_LEFT_REDUCTION))
6669 /* This is a reduction pattern: get the vectype from the type of the
6670 reduction variable, and get the tree-code from orig_stmt. */
6671 orig_code = gimple_assign_rhs_code (orig_stmt_info->stmt);
6672 gcc_assert (vectype_out);
6673 vec_mode = TYPE_MODE (vectype_out);
6675 else
6677 /* Regular reduction: the same vectype and tree-code as used for
6678 the vector code inside the loop can also be used for the epilog code. */
6679 orig_code = code;
6681 if (code == MINUS_EXPR)
6682 orig_code = PLUS_EXPR;
6684 /* For simple condition reductions, replace with the actual expression
6685 we want to base our reduction around. */
6686 if (reduction_type == CONST_COND_REDUCTION)
6688 orig_code = STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info);
6689 gcc_assert (orig_code == MAX_EXPR || orig_code == MIN_EXPR);
6691 else if (reduction_type == INTEGER_INDUC_COND_REDUCTION)
6692 orig_code = cond_reduc_op_code;
6695 if (nested_cycle)
6697 def_bb = gimple_bb (reduc_def_phi);
6698 def_stmt_loop = def_bb->loop_father;
6699 def_arg = PHI_ARG_DEF_FROM_EDGE (reduc_def_phi,
6700 loop_preheader_edge (def_stmt_loop));
6701 stmt_vec_info def_arg_stmt_info = loop_vinfo->lookup_def (def_arg);
6702 if (def_arg_stmt_info
6703 && (STMT_VINFO_DEF_TYPE (def_arg_stmt_info)
6704 == vect_double_reduction_def))
6705 double_reduc = true;
6708 reduc_fn = IFN_LAST;
6710 if (reduction_type == TREE_CODE_REDUCTION
6711 || reduction_type == FOLD_LEFT_REDUCTION
6712 || reduction_type == INTEGER_INDUC_COND_REDUCTION
6713 || reduction_type == CONST_COND_REDUCTION)
6715 if (reduction_type == FOLD_LEFT_REDUCTION
6716 ? fold_left_reduction_fn (orig_code, &reduc_fn)
6717 : reduction_fn_for_scalar_code (orig_code, &reduc_fn))
6719 if (reduc_fn != IFN_LAST
6720 && !direct_internal_fn_supported_p (reduc_fn, vectype_out,
6721 OPTIMIZE_FOR_SPEED))
6723 if (dump_enabled_p ())
6724 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6725 "reduc op not supported by target.\n");
6727 reduc_fn = IFN_LAST;
6730 else
6732 if (!nested_cycle || double_reduc)
6734 if (dump_enabled_p ())
6735 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6736 "no reduc code for scalar code.\n");
6738 return false;
6742 else if (reduction_type == COND_REDUCTION)
6744 int scalar_precision
6745 = GET_MODE_PRECISION (SCALAR_TYPE_MODE (scalar_type));
6746 cr_index_scalar_type = make_unsigned_type (scalar_precision);
6747 cr_index_vector_type = build_vector_type (cr_index_scalar_type,
6748 nunits_out);
6750 if (direct_internal_fn_supported_p (IFN_REDUC_MAX, cr_index_vector_type,
6751 OPTIMIZE_FOR_SPEED))
6752 reduc_fn = IFN_REDUC_MAX;
6755 if (reduction_type != EXTRACT_LAST_REDUCTION
6756 && (!nested_cycle || double_reduc)
6757 && reduc_fn == IFN_LAST
6758 && !nunits_out.is_constant ())
6760 if (dump_enabled_p ())
6761 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6762 "missing target support for reduction on"
6763 " variable-length vectors.\n");
6764 return false;
6767 if ((double_reduc || reduction_type != TREE_CODE_REDUCTION)
6768 && ncopies > 1)
6770 if (dump_enabled_p ())
6771 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6772 "multiple types in double reduction or condition "
6773 "reduction.\n");
6774 return false;
6777 /* For SLP reductions, see if there is a neutral value we can use. */
6778 tree neutral_op = NULL_TREE;
6779 if (slp_node)
6780 neutral_op = neutral_op_for_slp_reduction
6781 (slp_node_instance->reduc_phis, code,
6782 REDUC_GROUP_FIRST_ELEMENT (stmt_info) != NULL);
6784 if (double_reduc && reduction_type == FOLD_LEFT_REDUCTION)
6786 /* We can't support in-order reductions of code such as this:
6788 for (int i = 0; i < n1; ++i)
6789 for (int j = 0; j < n2; ++j)
6790 l += a[j];
6792 since GCC effectively transforms the loop when vectorizing:
6794 for (int i = 0; i < n1 / VF; ++i)
6795 for (int j = 0; j < n2; ++j)
6796 for (int k = 0; k < VF; ++k)
6797 l += a[j];
6799 which is a reassociation of the original operation. */
6800 if (dump_enabled_p ())
6801 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6802 "in-order double reduction not supported.\n");
6804 return false;
6807 if (reduction_type == FOLD_LEFT_REDUCTION
6808 && slp_node
6809 && !REDUC_GROUP_FIRST_ELEMENT (stmt_info))
6811 /* We cannot use in-order reductions in this case because there is
6812 an implicit reassociation of the operations involved. */
6813 if (dump_enabled_p ())
6814 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6815 "in-order unchained SLP reductions not supported.\n");
6816 return false;
6819 /* For double reductions, and for SLP reductions with a neutral value,
6820 we construct a variable-length initial vector by loading a vector
6821 full of the neutral value and then shift-and-inserting the start
6822 values into the low-numbered elements. */
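/* For instance, for a single summation with start value S and neutral
   value 0 this builds { S, 0, 0, ... } by applying VEC_SHL_INSERT to a
   vector of zeros, independently of the (possibly variable) vector
   length.  */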
6823 if ((double_reduc || neutral_op)
6824 && !nunits_out.is_constant ()
6825 && !direct_internal_fn_supported_p (IFN_VEC_SHL_INSERT,
6826 vectype_out, OPTIMIZE_FOR_SPEED))
6828 if (dump_enabled_p ())
6829 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6830 "reduction on variable-length vectors requires"
6831 " target support for a vector-shift-and-insert"
6832 " operation.\n");
6833 return false;
6836 /* Check extra constraints for variable-length unchained SLP reductions. */
6837 if (STMT_SLP_TYPE (stmt_info)
6838 && !REDUC_GROUP_FIRST_ELEMENT (stmt_info)
6839 && !nunits_out.is_constant ())
6841 /* We checked above that we could build the initial vector when
6842 there's a neutral element value. Check here for the case in
6843 which each SLP statement has its own initial value and in which
6844 that value needs to be repeated for every instance of the
6845 statement within the initial vector. */
6846 unsigned int group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();
6847 scalar_mode elt_mode = SCALAR_TYPE_MODE (TREE_TYPE (vectype_out));
6848 if (!neutral_op
6849 && !can_duplicate_and_interleave_p (group_size, elt_mode))
6851 if (dump_enabled_p ())
6852 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6853 "unsupported form of SLP reduction for"
6854 " variable-length vectors: cannot build"
6855 " initial vector.\n");
6856 return false;
6858 /* The epilogue code relies on the number of elements being a multiple
6859 of the group size. The duplicate-and-interleave approach to setting
6860 up the initial vector does too. */
6861 if (!multiple_p (nunits_out, group_size))
6863 if (dump_enabled_p ())
6864 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6865 "unsupported form of SLP reduction for"
6866 " variable-length vectors: the vector size"
6867 " is not a multiple of the number of results.\n");
6868 return false;
6872 /* In case of widening multiplication by a constant, we update the type
6873 of the constant to be the type of the other operand. We check that the
6874 constant fits the type in the pattern recognition pass. */
6875 if (code == DOT_PROD_EXPR
6876 && !types_compatible_p (TREE_TYPE (ops[0]), TREE_TYPE (ops[1])))
6878 if (TREE_CODE (ops[0]) == INTEGER_CST)
6879 ops[0] = fold_convert (TREE_TYPE (ops[1]), ops[0]);
6880 else if (TREE_CODE (ops[1]) == INTEGER_CST)
6881 ops[1] = fold_convert (TREE_TYPE (ops[0]), ops[1]);
6882 else
6884 if (dump_enabled_p ())
6885 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6886 "invalid types in dot-prod\n");
6888 return false;
6892 if (reduction_type == COND_REDUCTION)
6894 widest_int ni;
6896 if (! max_loop_iterations (loop, &ni))
6898 if (dump_enabled_p ())
6899 dump_printf_loc (MSG_NOTE, vect_location,
6900 "loop count not known, cannot create cond "
6901 "reduction.\n");
6902 return false;
6904 /* Convert backedges to iterations. */
6905 ni += 1;
6907 /* The additional index will be the same type as the condition. Check
6908 that the loop count fits into this type less one value (because we'll
6909 use up the zero slot for when there are no matches). */
6910 tree max_index = TYPE_MAX_VALUE (cr_index_scalar_type);
6911 if (wi::geu_p (ni, wi::to_widest (max_index)))
6913 if (dump_enabled_p ())
6914 dump_printf_loc (MSG_NOTE, vect_location,
6915 "loop size is greater than data size.\n");
6916 return false;
6920 /* In case the vectorization factor (VF) is bigger than the number
6921 of elements that we can fit in a vectype (nunits), we have to generate
6922 more than one vector stmt - i.e., we need to "unroll" the
6923 vector stmt by a factor VF/nunits. For more details see documentation
6924 in vectorizable_operation. */
6926 /* If the reduction is used in an outer loop we need to generate
6927 VF intermediate results, like so (e.g. for ncopies=2):
6928 r0 = phi (init, r0)
6929 r1 = phi (init, r1)
6930 r0 = x0 + r0;
6931 r1 = x1 + r1;
6932 (i.e. we generate VF results in 2 registers).
6933 In this case we have a separate def-use cycle for each copy, and therefore
6934 for each copy we get the vector def for the reduction variable from the
6935 respective phi node created for this copy.
6937 Otherwise (the reduction is unused in the loop nest), we can combine
6938 together intermediate results, like so (e.g. for ncopies=2):
6939 r = phi (init, r)
6940 r = x0 + r;
6941 r = x1 + r;
6942 (i.e. we generate VF/2 results in a single register).
6943 In this case for each copy we get the vector def for the reduction variable
6944 from the vectorized reduction operation generated in the previous iteration.
6946 This only works when we see both the reduction PHI and its only consumer
6947 in vectorizable_reduction and there are no intermediate stmts
6948 participating. */
6949 stmt_vec_info use_stmt_info;
6950 tree reduc_phi_result = gimple_phi_result (reduc_def_phi);
6951 if (ncopies > 1
6952 && (STMT_VINFO_RELEVANT (stmt_info) <= vect_used_only_live)
6953 && (use_stmt_info = loop_vinfo->lookup_single_use (reduc_phi_result))
6954 && vect_stmt_to_vectorize (use_stmt_info) == stmt_info)
6956 single_defuse_cycle = true;
6957 epilog_copies = 1;
6959 else
6960 epilog_copies = ncopies;
6962 /* If the reduction stmt is one of the patterns that have lane
6963 reduction embedded, we cannot handle the case of ! single_defuse_cycle. */
6964 if ((ncopies > 1
6965 && ! single_defuse_cycle)
6966 && (code == DOT_PROD_EXPR
6967 || code == WIDEN_SUM_EXPR
6968 || code == SAD_EXPR))
6970 if (dump_enabled_p ())
6971 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6972 "multi def-use cycle not possible for lane-reducing "
6973 "reduction operation\n");
6974 return false;
6977 if (slp_node)
6978 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
6979 else
6980 vec_num = 1;
6982 internal_fn cond_fn = get_conditional_internal_fn (code);
6983 vec_loop_masks *masks = &LOOP_VINFO_MASKS (loop_vinfo);
6985 if (!vec_stmt) /* transformation not required. */
6987 vect_model_reduction_cost (stmt_info, reduc_fn, ncopies, cost_vec);
6988 if (loop_vinfo && LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo))
6990 if (reduction_type != FOLD_LEFT_REDUCTION
6991 && (cond_fn == IFN_LAST
6992 || !direct_internal_fn_supported_p (cond_fn, vectype_in,
6993 OPTIMIZE_FOR_SPEED)))
6995 if (dump_enabled_p ())
6996 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6997 "can't use a fully-masked loop because no"
6998 " conditional operation is available.\n");
6999 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
7001 else if (reduc_index == -1)
7003 if (dump_enabled_p ())
7004 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7005 "can't use a fully-masked loop for chained"
7006 " reductions.\n");
7007 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
7009 else
7010 vect_record_loop_mask (loop_vinfo, masks, ncopies * vec_num,
7011 vectype_in);
7013 if (dump_enabled_p ()
7014 && reduction_type == FOLD_LEFT_REDUCTION)
7015 dump_printf_loc (MSG_NOTE, vect_location,
7016 "using an in-order (fold-left) reduction.\n");
7017 STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
7018 return true;
7021 /* Transform. */
7023 if (dump_enabled_p ())
7024 dump_printf_loc (MSG_NOTE, vect_location, "transform reduction.\n");
7026 /* FORNOW: Multiple types are not supported for condition. */
7027 if (code == COND_EXPR)
7028 gcc_assert (ncopies == 1);
7030 bool masked_loop_p = LOOP_VINFO_FULLY_MASKED_P (loop_vinfo);
7032 if (reduction_type == FOLD_LEFT_REDUCTION)
7033 return vectorize_fold_left_reduction
7034 (stmt_info, gsi, vec_stmt, slp_node, reduc_def_phi, code,
7035 reduc_fn, ops, vectype_in, reduc_index, masks);
7037 if (reduction_type == EXTRACT_LAST_REDUCTION)
7039 gcc_assert (!slp_node);
7040 return vectorizable_condition (stmt_info, gsi, vec_stmt,
7041 NULL, reduc_index, NULL, NULL);
7044 /* Create the destination vector */
7045 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
7047 prev_stmt_info = NULL;
7048 prev_phi_info = NULL;
7049 if (!slp_node)
7051 vec_oprnds0.create (1);
7052 vec_oprnds1.create (1);
7053 if (op_type == ternary_op)
7054 vec_oprnds2.create (1);
7057 phis.create (vec_num);
7058 vect_defs.create (vec_num);
7059 if (!slp_node)
7060 vect_defs.quick_push (NULL_TREE);
7062 if (slp_node)
7063 phis.splice (SLP_TREE_VEC_STMTS (slp_node_instance->reduc_phis));
7064 else
7065 phis.quick_push (STMT_VINFO_VEC_STMT (reduc_def_info));
7067 for (j = 0; j < ncopies; j++)
7069 if (code == COND_EXPR)
7071 gcc_assert (!slp_node);
7072 vectorizable_condition (stmt_info, gsi, vec_stmt,
7073 PHI_RESULT (phis[0]->stmt),
7074 reduc_index, NULL, NULL);
7075 /* Multiple types are not supported for condition. */
7076 break;
7079 /* Handle uses. */
7080 if (j == 0)
7082 if (slp_node)
7084 /* Get vec defs for all the operands except the reduction index,
7085 ensuring the ordering of the ops in the vector is kept. */
7086 auto_vec<tree, 3> slp_ops;
7087 auto_vec<vec<tree>, 3> vec_defs;
7089 slp_ops.quick_push (ops[0]);
7090 slp_ops.quick_push (ops[1]);
7091 if (op_type == ternary_op)
7092 slp_ops.quick_push (ops[2]);
7094 vect_get_slp_defs (slp_ops, slp_node, &vec_defs);
7096 vec_oprnds0.safe_splice (vec_defs[0]);
7097 vec_defs[0].release ();
7098 vec_oprnds1.safe_splice (vec_defs[1]);
7099 vec_defs[1].release ();
7100 if (op_type == ternary_op)
7102 vec_oprnds2.safe_splice (vec_defs[2]);
7103 vec_defs[2].release ();
7106 else
7108 vec_oprnds0.quick_push
7109 (vect_get_vec_def_for_operand (ops[0], stmt_info));
7110 vec_oprnds1.quick_push
7111 (vect_get_vec_def_for_operand (ops[1], stmt_info));
7112 if (op_type == ternary_op)
7113 vec_oprnds2.quick_push
7114 (vect_get_vec_def_for_operand (ops[2], stmt_info));
7117 else
7119 if (!slp_node)
7121 gcc_assert (reduc_index != -1 || ! single_defuse_cycle);
7123 if (single_defuse_cycle && reduc_index == 0)
7124 vec_oprnds0[0] = gimple_get_lhs (new_stmt_info->stmt);
7125 else
7126 vec_oprnds0[0]
7127 = vect_get_vec_def_for_stmt_copy (loop_vinfo,
7128 vec_oprnds0[0]);
7129 if (single_defuse_cycle && reduc_index == 1)
7130 vec_oprnds1[0] = gimple_get_lhs (new_stmt_info->stmt);
7131 else
7132 vec_oprnds1[0]
7133 = vect_get_vec_def_for_stmt_copy (loop_vinfo,
7134 vec_oprnds1[0]);
7135 if (op_type == ternary_op)
7137 if (single_defuse_cycle && reduc_index == 2)
7138 vec_oprnds2[0] = gimple_get_lhs (new_stmt_info->stmt);
7139 else
7140 vec_oprnds2[0]
7141 = vect_get_vec_def_for_stmt_copy (loop_vinfo,
7142 vec_oprnds2[0]);
7147 FOR_EACH_VEC_ELT (vec_oprnds0, i, def0)
7149 tree vop[3] = { def0, vec_oprnds1[i], NULL_TREE };
7150 if (masked_loop_p)
7152 /* Make sure that the reduction accumulator is vop[0]. */
7153 if (reduc_index == 1)
7155 gcc_assert (commutative_tree_code (code));
7156 std::swap (vop[0], vop[1]);
7158 tree mask = vect_get_loop_mask (gsi, masks, vec_num * ncopies,
7159 vectype_in, i * ncopies + j);
7160 gcall *call = gimple_build_call_internal (cond_fn, 4, mask,
7161 vop[0], vop[1],
7162 vop[0]);
7163 new_temp = make_ssa_name (vec_dest, call);
7164 gimple_call_set_lhs (call, new_temp);
7165 gimple_call_set_nothrow (call, true);
7166 new_stmt_info
7167 = vect_finish_stmt_generation (stmt_info, call, gsi);
7169 else
7171 if (op_type == ternary_op)
7172 vop[2] = vec_oprnds2[i];
7174 gassign *new_stmt = gimple_build_assign (vec_dest, code,
7175 vop[0], vop[1], vop[2]);
7176 new_temp = make_ssa_name (vec_dest, new_stmt);
7177 gimple_assign_set_lhs (new_stmt, new_temp);
7178 new_stmt_info
7179 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
7182 if (slp_node)
7184 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
7185 vect_defs.quick_push (new_temp);
7187 else
7188 vect_defs[0] = new_temp;
7191 if (slp_node)
7192 continue;
7194 if (j == 0)
7195 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
7196 else
7197 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
7199 prev_stmt_info = new_stmt_info;
7202 /* Finalize the reduction-phi (set its arguments) and create the
7203 epilog reduction code. */
7204 if ((!single_defuse_cycle || code == COND_EXPR) && !slp_node)
7205 vect_defs[0] = gimple_get_lhs ((*vec_stmt)->stmt);
7207 vect_create_epilog_for_reduction (vect_defs, stmt_info, reduc_def_phi,
7208 epilog_copies, reduc_fn, phis,
7209 double_reduc, slp_node, slp_node_instance,
7210 cond_reduc_val, cond_reduc_op_code,
7211 neutral_op);
7213 return true;
7216 /* Function vect_min_worthwhile_factor.
7218 For a loop where we could vectorize the operation indicated by CODE,
7219 return the minimum vectorization factor that makes it worthwhile
7220 to use generic vectors. */
7221 static unsigned int
7222 vect_min_worthwhile_factor (enum tree_code code)
7224 switch (code)
7226 case PLUS_EXPR:
7227 case MINUS_EXPR:
7228 case NEGATE_EXPR:
7229 return 4;
7231 case BIT_AND_EXPR:
7232 case BIT_IOR_EXPR:
7233 case BIT_XOR_EXPR:
7234 case BIT_NOT_EXPR:
7235 return 2;
7237 default:
7238 return INT_MAX;
7242 /* Return true if VINFO indicates we are doing loop vectorization and if
7243 it is worth decomposing CODE operations into scalar operations for
7244 that loop's vectorization factor. */
7246 bool
7247 vect_worthwhile_without_simd_p (vec_info *vinfo, tree_code code)
7249 loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
7250 unsigned HOST_WIDE_INT value;
7251 return (loop_vinfo
7252 && LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&value)
7253 && value >= vect_min_worthwhile_factor (code));
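/* Usage sketch (the vectorization factor is assumed for illustration):
   with a constant VF of 4, vect_worthwhile_without_simd_p returns true
   for PLUS_EXPR (4 >= 4) and BIT_AND_EXPR (4 >= 2); with a VF of 2 only
   the bitwise codes qualify. */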
7256 /* Function vectorizable_induction
7258 Check if STMT_INFO performs an induction computation that can be vectorized.
7259 If VEC_STMT is also passed, vectorize the induction PHI: create a vectorized
7260 phi to replace it, put it in VEC_STMT, and add it to the same basic block.
7261 Return true if STMT_INFO is vectorizable in this way. */
7263 bool
7264 vectorizable_induction (stmt_vec_info stmt_info,
7265 gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
7266 stmt_vec_info *vec_stmt, slp_tree slp_node,
7267 stmt_vector_for_cost *cost_vec)
7269 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
7270 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
7271 unsigned ncopies;
7272 bool nested_in_vect_loop = false;
7273 struct loop *iv_loop;
7274 tree vec_def;
7275 edge pe = loop_preheader_edge (loop);
7276 basic_block new_bb;
7277 tree new_vec, vec_init, vec_step, t;
7278 tree new_name;
7279 gimple *new_stmt;
7280 gphi *induction_phi;
7281 tree induc_def, vec_dest;
7282 tree init_expr, step_expr;
7283 poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
7284 unsigned i;
7285 tree expr;
7286 gimple_seq stmts;
7287 imm_use_iterator imm_iter;
7288 use_operand_p use_p;
7289 gimple *exit_phi;
7290 edge latch_e;
7291 tree loop_arg;
7292 gimple_stmt_iterator si;
7294 gphi *phi = dyn_cast <gphi *> (stmt_info->stmt);
7295 if (!phi)
7296 return false;
7298 if (!STMT_VINFO_RELEVANT_P (stmt_info))
7299 return false;
7301 /* Make sure it was recognized as induction computation. */
7302 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def)
7303 return false;
7305 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
7306 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
7308 if (slp_node)
7309 ncopies = 1;
7310 else
7311 ncopies = vect_get_num_copies (loop_vinfo, vectype);
7312 gcc_assert (ncopies >= 1);
7314 /* FORNOW. These restrictions should be relaxed. */
7315 if (nested_in_vect_loop_p (loop, stmt_info))
7317 imm_use_iterator imm_iter;
7318 use_operand_p use_p;
7319 gimple *exit_phi;
7320 edge latch_e;
7321 tree loop_arg;
7323 if (ncopies > 1)
7325 if (dump_enabled_p ())
7326 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7327 "multiple types in nested loop.\n");
7328 return false;
7331 /* FORNOW: outer loop induction with SLP not supported. */
7332 if (STMT_SLP_TYPE (stmt_info))
7333 return false;
7335 exit_phi = NULL;
7336 latch_e = loop_latch_edge (loop->inner);
7337 loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
7338 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
7340 gimple *use_stmt = USE_STMT (use_p);
7341 if (is_gimple_debug (use_stmt))
7342 continue;
7344 if (!flow_bb_inside_loop_p (loop->inner, gimple_bb (use_stmt)))
7346 exit_phi = use_stmt;
7347 break;
7350 if (exit_phi)
7352 stmt_vec_info exit_phi_vinfo = loop_vinfo->lookup_stmt (exit_phi);
7353 if (!(STMT_VINFO_RELEVANT_P (exit_phi_vinfo)
7354 && !STMT_VINFO_LIVE_P (exit_phi_vinfo)))
7356 if (dump_enabled_p ())
7357 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7358 "inner-loop induction only used outside "
7359 "of the outer vectorized loop.\n");
7360 return false;
7364 nested_in_vect_loop = true;
7365 iv_loop = loop->inner;
7367 else
7368 iv_loop = loop;
7369 gcc_assert (iv_loop == (gimple_bb (phi))->loop_father);
7371 if (slp_node && !nunits.is_constant ())
7373 /* The current SLP code creates the initial value element-by-element. */
7374 if (dump_enabled_p ())
7375 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7376 "SLP induction not supported for variable-length"
7377 " vectors.\n");
7378 return false;
7381 if (!vec_stmt) /* transformation not required. */
7383 STMT_VINFO_TYPE (stmt_info) = induc_vec_info_type;
7384 DUMP_VECT_SCOPE ("vectorizable_induction");
7385 vect_model_induction_cost (stmt_info, ncopies, cost_vec);
7386 return true;
7389 /* Transform. */
7391 /* Compute a vector variable, initialized with the first VF values of
7392 the induction variable. E.g., for an iv with IV_PHI='X' and
7393 evolution S, for a vector of 4 units, we want to compute:
7394 [X, X + S, X + 2*S, X + 3*S]. */
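/* Concretely (values assumed only for illustration): with X = 10,
   S = 3 and 4 units, the initial vector is [10, 13, 16, 19]; each
   vector iteration then advances it by [4*3, 4*3, 4*3, 4*3]. */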
7396 if (dump_enabled_p ())
7397 dump_printf_loc (MSG_NOTE, vect_location, "transform induction phi.\n");
7399 latch_e = loop_latch_edge (iv_loop);
7400 loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
7402 step_expr = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_info);
7403 gcc_assert (step_expr != NULL_TREE);
7405 pe = loop_preheader_edge (iv_loop);
7406 init_expr = PHI_ARG_DEF_FROM_EDGE (phi,
7407 loop_preheader_edge (iv_loop));
7409 stmts = NULL;
7410 if (!nested_in_vect_loop)
7412 /* Convert the initial value to the desired type. */
7413 tree new_type = TREE_TYPE (vectype);
7414 init_expr = gimple_convert (&stmts, new_type, init_expr);
7416 /* If we are using the loop mask to "peel" for alignment then we need
7417 to adjust the start value here. */
7418 tree skip_niters = LOOP_VINFO_MASK_SKIP_NITERS (loop_vinfo);
7419 if (skip_niters != NULL_TREE)
7421 if (FLOAT_TYPE_P (vectype))
7422 skip_niters = gimple_build (&stmts, FLOAT_EXPR, new_type,
7423 skip_niters);
7424 else
7425 skip_niters = gimple_convert (&stmts, new_type, skip_niters);
7426 tree skip_step = gimple_build (&stmts, MULT_EXPR, new_type,
7427 skip_niters, step_expr);
7428 init_expr = gimple_build (&stmts, MINUS_EXPR, new_type,
7429 init_expr, skip_step);
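/* A worked illustration (values assumed, not taken from the code):
   with step S = 1 and skip_niters = 2 the start value becomes X - 2,
   so the first vector iteration sees [X-2, X-1, X, X+1, ...] with the
   first two lanes masked off; the first active lane thus holds X, as
   it would after conventional peeling. */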
7433 /* Convert the step to the desired type. */
7434 step_expr = gimple_convert (&stmts, TREE_TYPE (vectype), step_expr);
7436 if (stmts)
7438 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
7439 gcc_assert (!new_bb);
7442 /* Find the first insertion point in the BB. */
7443 basic_block bb = gimple_bb (phi);
7444 si = gsi_after_labels (bb);
7446 /* For SLP induction we have to generate several IVs as for example
7447 with group size 3 we need [i, i, i, i + S] [i + S, i + S, i + 2*S, i + 2*S]
7448 [i + 2*S, i + 3*S, i + 3*S, i + 3*S]. The step is the same uniform
7449 [VF*S, VF*S, VF*S, VF*S] for all. */
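/* Worked numbers for the scheme above (assumed for illustration only):
   with group_size = 3, const_nunits = 4 and nvects = 3 we need
   elts = 12 lanes in total and nivs = lcm (3, 4) / 4 = 3 distinct
   initial vectors; any further vectors are obtained from earlier ones
   by adding lcm (3, 4) / 3 = 4 steps per lane (the re-use case below). */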
7450 if (slp_node)
7452 /* Enforced above. */
7453 unsigned int const_nunits = nunits.to_constant ();
7455 /* Generate [VF*S, VF*S, ... ]. */
7456 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
7458 expr = build_int_cst (integer_type_node, vf);
7459 expr = fold_convert (TREE_TYPE (step_expr), expr);
7461 else
7462 expr = build_int_cst (TREE_TYPE (step_expr), vf);
7463 new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
7464 expr, step_expr);
7465 if (! CONSTANT_CLASS_P (new_name))
7466 new_name = vect_init_vector (stmt_info, new_name,
7467 TREE_TYPE (step_expr), NULL);
7468 new_vec = build_vector_from_val (vectype, new_name);
7469 vec_step = vect_init_vector (stmt_info, new_vec, vectype, NULL);
7471 /* Now generate the IVs. */
7472 unsigned group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();
7473 unsigned nvects = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
7474 unsigned elts = const_nunits * nvects;
7475 unsigned nivs = least_common_multiple (group_size,
7476 const_nunits) / const_nunits;
7477 gcc_assert (elts % group_size == 0);
7478 tree elt = init_expr;
7479 unsigned ivn;
7480 for (ivn = 0; ivn < nivs; ++ivn)
7482 tree_vector_builder elts (vectype, const_nunits, 1);
7483 stmts = NULL;
7484 for (unsigned eltn = 0; eltn < const_nunits; ++eltn)
7486 if (ivn*const_nunits + eltn >= group_size
7487 && (ivn * const_nunits + eltn) % group_size == 0)
7488 elt = gimple_build (&stmts, PLUS_EXPR, TREE_TYPE (elt),
7489 elt, step_expr);
7490 elts.quick_push (elt);
7492 vec_init = gimple_build_vector (&stmts, &elts);
7493 if (stmts)
7495 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
7496 gcc_assert (!new_bb);
7499 /* Create the induction-phi that defines the induction-operand. */
7500 vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_");
7501 induction_phi = create_phi_node (vec_dest, iv_loop->header);
7502 stmt_vec_info induction_phi_info
7503 = loop_vinfo->add_stmt (induction_phi);
7504 induc_def = PHI_RESULT (induction_phi);
7506 /* Create the iv update inside the loop */
7507 vec_def = make_ssa_name (vec_dest);
7508 new_stmt = gimple_build_assign (vec_def, PLUS_EXPR, induc_def, vec_step);
7509 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
7510 loop_vinfo->add_stmt (new_stmt);
7512 /* Set the arguments of the phi node: */
7513 add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION);
7514 add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
7515 UNKNOWN_LOCATION);
7517 SLP_TREE_VEC_STMTS (slp_node).quick_push (induction_phi_info);
7520 /* Re-use IVs when we can. */
7521 if (ivn < nvects)
7523 unsigned vfp
7524 = least_common_multiple (group_size, const_nunits) / group_size;
7525 /* Generate [VF'*S, VF'*S, ... ]. */
7526 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
7528 expr = build_int_cst (integer_type_node, vfp);
7529 expr = fold_convert (TREE_TYPE (step_expr), expr);
7531 else
7532 expr = build_int_cst (TREE_TYPE (step_expr), vfp);
7533 new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
7534 expr, step_expr);
7535 if (! CONSTANT_CLASS_P (new_name))
7536 new_name = vect_init_vector (stmt_info, new_name,
7537 TREE_TYPE (step_expr), NULL);
7538 new_vec = build_vector_from_val (vectype, new_name);
7539 vec_step = vect_init_vector (stmt_info, new_vec, vectype, NULL);
7540 for (; ivn < nvects; ++ivn)
7542 gimple *iv = SLP_TREE_VEC_STMTS (slp_node)[ivn - nivs]->stmt;
7543 tree def;
7544 if (gimple_code (iv) == GIMPLE_PHI)
7545 def = gimple_phi_result (iv);
7546 else
7547 def = gimple_assign_lhs (iv);
7548 new_stmt = gimple_build_assign (make_ssa_name (vectype),
7549 PLUS_EXPR,
7550 def, vec_step);
7551 if (gimple_code (iv) == GIMPLE_PHI)
7552 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
7553 else
7555 gimple_stmt_iterator tgsi = gsi_for_stmt (iv);
7556 gsi_insert_after (&tgsi, new_stmt, GSI_CONTINUE_LINKING);
7558 SLP_TREE_VEC_STMTS (slp_node).quick_push
7559 (loop_vinfo->add_stmt (new_stmt));
7563 return true;
7566 /* Create the vector that holds the initial_value of the induction. */
7567 if (nested_in_vect_loop)
7569 /* iv_loop is nested in the loop to be vectorized. init_expr had already
7570 been created during vectorization of previous stmts. We obtain it
7571 from the STMT_VINFO_VEC_STMT of the defining stmt. */
7572 vec_init = vect_get_vec_def_for_operand (init_expr, stmt_info);
7573 /* If the initial value is not of proper type, convert it. */
7574 if (!useless_type_conversion_p (vectype, TREE_TYPE (vec_init)))
7576 new_stmt
7577 = gimple_build_assign (vect_get_new_ssa_name (vectype,
7578 vect_simple_var,
7579 "vec_iv_"),
7580 VIEW_CONVERT_EXPR,
7581 build1 (VIEW_CONVERT_EXPR, vectype,
7582 vec_init));
7583 vec_init = gimple_assign_lhs (new_stmt);
7584 new_bb = gsi_insert_on_edge_immediate (loop_preheader_edge (iv_loop),
7585 new_stmt);
7586 gcc_assert (!new_bb);
7587 loop_vinfo->add_stmt (new_stmt);
7590 else
7592 /* iv_loop is the loop to be vectorized. Create:
7593 vec_init = [X, X+S, X+2*S, X+3*S] (S = step_expr, X = init_expr) */
7594 stmts = NULL;
7595 new_name = gimple_convert (&stmts, TREE_TYPE (vectype), init_expr);
7597 unsigned HOST_WIDE_INT const_nunits;
7598 if (nunits.is_constant (&const_nunits))
7600 tree_vector_builder elts (vectype, const_nunits, 1);
7601 elts.quick_push (new_name);
7602 for (i = 1; i < const_nunits; i++)
7604 /* Create: new_name_i = new_name + step_expr */
7605 new_name = gimple_build (&stmts, PLUS_EXPR, TREE_TYPE (new_name),
7606 new_name, step_expr);
7607 elts.quick_push (new_name);
7609 /* Create a vector from [new_name_0, new_name_1, ...,
7610 new_name_nunits-1] */
7611 vec_init = gimple_build_vector (&stmts, &elts);
7613 else if (INTEGRAL_TYPE_P (TREE_TYPE (step_expr)))
7614 /* Build the initial value directly from a VEC_SERIES_EXPR. */
7615 vec_init = gimple_build (&stmts, VEC_SERIES_EXPR, vectype,
7616 new_name, step_expr);
7617 else
7619 /* Build:
7620 [base, base, base, ...]
7621 + (vectype) [0, 1, 2, ...] * [step, step, step, ...]. */
7622 gcc_assert (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)));
7623 gcc_assert (flag_associative_math);
7624 tree index = build_index_vector (vectype, 0, 1);
7625 tree base_vec = gimple_build_vector_from_val (&stmts, vectype,
7626 new_name);
7627 tree step_vec = gimple_build_vector_from_val (&stmts, vectype,
7628 step_expr);
7629 vec_init = gimple_build (&stmts, FLOAT_EXPR, vectype, index);
7630 vec_init = gimple_build (&stmts, MULT_EXPR, vectype,
7631 vec_init, step_vec);
7632 vec_init = gimple_build (&stmts, PLUS_EXPR, vectype,
7633 vec_init, base_vec);
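/* For example (values assumed for illustration): base = 1.0, step = 0.5
   and 4 lanes give [1.0, 1.0, 1.0, 1.0]
   + [0.0, 1.0, 2.0, 3.0] * [0.5, 0.5, 0.5, 0.5] = [1.0, 1.5, 2.0, 2.5]. */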
7636 if (stmts)
7638 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
7639 gcc_assert (!new_bb);
7644 /* Create the vector that holds the step of the induction. */
7645 if (nested_in_vect_loop)
7646 /* iv_loop is nested in the loop to be vectorized. Generate:
7647 vec_step = [S, S, S, S] */
7648 new_name = step_expr;
7649 else
7651 /* iv_loop is the loop to be vectorized. Generate:
7652 vec_step = [VF*S, VF*S, VF*S, VF*S] */
7653 gimple_seq seq = NULL;
7654 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
7656 expr = build_int_cst (integer_type_node, vf);
7657 expr = gimple_build (&seq, FLOAT_EXPR, TREE_TYPE (step_expr), expr);
7659 else
7660 expr = build_int_cst (TREE_TYPE (step_expr), vf);
7661 new_name = gimple_build (&seq, MULT_EXPR, TREE_TYPE (step_expr),
7662 expr, step_expr);
7663 if (seq)
7665 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
7666 gcc_assert (!new_bb);
7670 t = unshare_expr (new_name);
7671 gcc_assert (CONSTANT_CLASS_P (new_name)
7672 || TREE_CODE (new_name) == SSA_NAME);
7673 new_vec = build_vector_from_val (vectype, t);
7674 vec_step = vect_init_vector (stmt_info, new_vec, vectype, NULL);
7677 /* Create the following def-use cycle:
7678 loop prolog:
7679 vec_init = ...
7680 vec_step = ...
7681 loop:
7682 vec_iv = PHI <vec_init, vec_loop>
7684 STMT
7686 vec_loop = vec_iv + vec_step; */
7688 /* Create the induction-phi that defines the induction-operand. */
7689 vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_");
7690 induction_phi = create_phi_node (vec_dest, iv_loop->header);
7691 stmt_vec_info induction_phi_info = loop_vinfo->add_stmt (induction_phi);
7692 induc_def = PHI_RESULT (induction_phi);
7694 /* Create the iv update inside the loop */
7695 vec_def = make_ssa_name (vec_dest);
7696 new_stmt = gimple_build_assign (vec_def, PLUS_EXPR, induc_def, vec_step);
7697 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
7698 stmt_vec_info new_stmt_info = loop_vinfo->add_stmt (new_stmt);
7700 /* Set the arguments of the phi node: */
7701 add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION);
7702 add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
7703 UNKNOWN_LOCATION);
7705 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = induction_phi_info;
7707 /* In case the vectorization factor (VF) is bigger than the number
7708 of elements that we can fit in a vectype (nunits), we have to generate
7709 more than one vector stmt - i.e., we need to "unroll" the
7710 vector stmt by a factor VF/nunits. For more details see documentation
7711 in vectorizable_operation. */
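/* A sketch of the unrolled copies (nunits and step assumed for
   illustration): with nunits = 4 and scalar step S, the second copy is
   the first plus [4*S, 4*S, 4*S, 4*S], the third adds another 4*S per
   lane, and so on - hence the multiplication by nunits below. */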
7713 if (ncopies > 1)
7715 gimple_seq seq = NULL;
7716 stmt_vec_info prev_stmt_vinfo;
7717 /* FORNOW. This restriction should be relaxed. */
7718 gcc_assert (!nested_in_vect_loop);
7720 /* Create the vector that holds the step of the induction. */
7721 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
7723 expr = build_int_cst (integer_type_node, nunits);
7724 expr = gimple_build (&seq, FLOAT_EXPR, TREE_TYPE (step_expr), expr);
7726 else
7727 expr = build_int_cst (TREE_TYPE (step_expr), nunits);
7728 new_name = gimple_build (&seq, MULT_EXPR, TREE_TYPE (step_expr),
7729 expr, step_expr);
7730 if (seq)
7732 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
7733 gcc_assert (!new_bb);
7736 t = unshare_expr (new_name);
7737 gcc_assert (CONSTANT_CLASS_P (new_name)
7738 || TREE_CODE (new_name) == SSA_NAME);
7739 new_vec = build_vector_from_val (vectype, t);
7740 vec_step = vect_init_vector (stmt_info, new_vec, vectype, NULL);
7742 vec_def = induc_def;
7743 prev_stmt_vinfo = induction_phi_info;
7744 for (i = 1; i < ncopies; i++)
7746 /* vec_i = vec_prev + vec_step */
7747 new_stmt = gimple_build_assign (vec_dest, PLUS_EXPR,
7748 vec_def, vec_step);
7749 vec_def = make_ssa_name (vec_dest, new_stmt);
7750 gimple_assign_set_lhs (new_stmt, vec_def);
7752 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
7753 new_stmt_info = loop_vinfo->add_stmt (new_stmt);
7754 STMT_VINFO_RELATED_STMT (prev_stmt_vinfo) = new_stmt_info;
7755 prev_stmt_vinfo = new_stmt_info;
7759 if (nested_in_vect_loop)
7761 /* Find the loop-closed exit-phi of the induction, and record
7762 the final vector of induction results: */
7763 exit_phi = NULL;
7764 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
7766 gimple *use_stmt = USE_STMT (use_p);
7767 if (is_gimple_debug (use_stmt))
7768 continue;
7770 if (!flow_bb_inside_loop_p (iv_loop, gimple_bb (use_stmt)))
7772 exit_phi = use_stmt;
7773 break;
7776 if (exit_phi)
7778 stmt_vec_info stmt_vinfo = loop_vinfo->lookup_stmt (exit_phi);
7779 /* FORNOW. We currently do not support the case where an inner-loop induction
7780 is not used in the outer-loop (i.e. is used only outside the outer-loop). */
7781 gcc_assert (STMT_VINFO_RELEVANT_P (stmt_vinfo)
7782 && !STMT_VINFO_LIVE_P (stmt_vinfo));
7784 STMT_VINFO_VEC_STMT (stmt_vinfo) = new_stmt_info;
7785 if (dump_enabled_p ())
7787 dump_printf_loc (MSG_NOTE, vect_location,
7788 "vector of inductions after inner-loop:");
7789 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
7795 if (dump_enabled_p ())
7797 dump_printf_loc (MSG_NOTE, vect_location,
7798 "transform induction: created def-use cycle: ");
7799 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, induction_phi, 0);
7800 dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
7801 SSA_NAME_DEF_STMT (vec_def), 0);
7804 return true;
7807 /* Function vectorizable_live_operation.
7809 STMT_INFO computes a value that is used outside the loop. Check if
7810 it can be supported. */
7812 bool
7813 vectorizable_live_operation (stmt_vec_info stmt_info,
7814 gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
7815 slp_tree slp_node, int slp_index,
7816 stmt_vec_info *vec_stmt,
7817 stmt_vector_for_cost *)
7819 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
7820 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
7821 imm_use_iterator imm_iter;
7822 tree lhs, lhs_type, bitsize, vec_bitsize;
7823 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
7824 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
7825 int ncopies;
7826 gimple *use_stmt;
7827 auto_vec<tree> vec_oprnds;
7828 int vec_entry = 0;
7829 poly_uint64 vec_index = 0;
7831 gcc_assert (STMT_VINFO_LIVE_P (stmt_info));
7833 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def)
7834 return false;
7836 /* FORNOW. CHECKME. */
7837 if (nested_in_vect_loop_p (loop, stmt_info))
7838 return false;
7840 /* If STMT is not relevant and it is a simple assignment and its inputs are
7841 invariant then it can remain in place, unvectorized. The original last
7842 scalar value that it computes will be used. */
7843 if (!STMT_VINFO_RELEVANT_P (stmt_info))
7845 gcc_assert (is_simple_and_all_uses_invariant (stmt_info, loop_vinfo));
7846 if (dump_enabled_p ())
7847 dump_printf_loc (MSG_NOTE, vect_location,
7848 "statement is simple and uses invariant. Leaving in "
7849 "place.\n");
7850 return true;
7853 if (slp_node)
7854 ncopies = 1;
7855 else
7856 ncopies = vect_get_num_copies (loop_vinfo, vectype);
7858 if (slp_node)
7860 gcc_assert (slp_index >= 0);
7862 int num_scalar = SLP_TREE_SCALAR_STMTS (slp_node).length ();
7863 int num_vec = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
7865 /* Get the last occurrence of the scalar index from the concatenation of
7866 all the slp vectors. Calculate which slp vector it is and the index
7867 within. */
7868 poly_uint64 pos = (num_vec * nunits) - num_scalar + slp_index;
7870 /* Calculate which vector contains the result, and which lane of
7871 that vector we need. */
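/* Worked example of the lane computation (counts assumed for
   illustration): with num_vec = 2, nunits = 4, num_scalar = 6 and
   slp_index = 5, pos = 2*4 - 6 + 5 = 7, giving vec_entry = 1 and
   vec_index = 3, i.e. the last lane of the second vector. */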
7872 if (!can_div_trunc_p (pos, nunits, &vec_entry, &vec_index))
7874 if (dump_enabled_p ())
7875 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7876 "Cannot determine which vector holds the"
7877 " final result.\n");
7878 return false;
7882 if (!vec_stmt)
7884 /* No transformation required. */
7885 if (LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo))
7887 if (!direct_internal_fn_supported_p (IFN_EXTRACT_LAST, vectype,
7888 OPTIMIZE_FOR_SPEED))
7890 if (dump_enabled_p ())
7891 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7892 "can't use a fully-masked loop because "
7893 "the target doesn't support extract last "
7894 "reduction.\n");
7895 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
7897 else if (slp_node)
7899 if (dump_enabled_p ())
7900 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7901 "can't use a fully-masked loop because an "
7902 "SLP statement is live after the loop.\n");
7903 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
7905 else if (ncopies > 1)
7907 if (dump_enabled_p ())
7908 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7909 "can't use a fully-masked loop because"
7910 " ncopies is greater than 1.\n");
7911 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
7913 else
7915 gcc_assert (ncopies == 1 && !slp_node);
7916 vect_record_loop_mask (loop_vinfo,
7917 &LOOP_VINFO_MASKS (loop_vinfo),
7918 1, vectype);
7921 return true;
7924 /* Use the lhs of the original scalar statement. */
7925 gimple *stmt = vect_orig_stmt (stmt_info)->stmt;
7927 lhs = (is_a <gphi *> (stmt)) ? gimple_phi_result (stmt)
7928 : gimple_get_lhs (stmt);
7929 lhs_type = TREE_TYPE (lhs);
7931 bitsize = (VECTOR_BOOLEAN_TYPE_P (vectype)
7932 ? bitsize_int (TYPE_PRECISION (TREE_TYPE (vectype)))
7933 : TYPE_SIZE (TREE_TYPE (vectype)));
7934 vec_bitsize = TYPE_SIZE (vectype);
7936 /* Get the vectorized lhs of STMT and the lane to use (counted in bits). */
7937 tree vec_lhs, bitstart;
7938 if (slp_node)
7940 gcc_assert (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo));
7942 /* Get the correct slp vectorized stmt. */
7943 gimple *vec_stmt = SLP_TREE_VEC_STMTS (slp_node)[vec_entry]->stmt;
7944 if (gphi *phi = dyn_cast <gphi *> (vec_stmt))
7945 vec_lhs = gimple_phi_result (phi);
7946 else
7947 vec_lhs = gimple_get_lhs (vec_stmt);
7949 /* Get entry to use. */
7950 bitstart = bitsize_int (vec_index);
7951 bitstart = int_const_binop (MULT_EXPR, bitsize, bitstart);
7953 else
7955 enum vect_def_type dt = STMT_VINFO_DEF_TYPE (stmt_info);
7956 vec_lhs = vect_get_vec_def_for_operand_1 (stmt_info, dt);
7957 gcc_checking_assert (ncopies == 1
7958 || !LOOP_VINFO_FULLY_MASKED_P (loop_vinfo));
7960 /* For multiple copies, get the last copy. */
7961 for (int i = 1; i < ncopies; ++i)
7962 vec_lhs = vect_get_vec_def_for_stmt_copy (loop_vinfo, vec_lhs);
7964 /* Get the last lane in the vector. */
7965 bitstart = int_const_binop (MINUS_EXPR, vec_bitsize, bitsize);
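/* E.g. for a 4 x 32-bit vector (sizes assumed for illustration):
   vec_bitsize = 128 and bitsize = 32, so bitstart = 96 selects the
   last lane. */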
7968 gimple_seq stmts = NULL;
7969 tree new_tree;
7970 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
7972 /* Emit:
7974 SCALAR_RES = EXTRACT_LAST <VEC_LHS, MASK>
7976 where VEC_LHS is the vectorized live-out result and MASK is
7977 the loop mask for the final iteration. */
7978 gcc_assert (ncopies == 1 && !slp_node);
7979 tree scalar_type = TREE_TYPE (STMT_VINFO_VECTYPE (stmt_info));
7980 tree mask = vect_get_loop_mask (gsi, &LOOP_VINFO_MASKS (loop_vinfo),
7981 1, vectype, 0);
7982 tree scalar_res = gimple_build (&stmts, CFN_EXTRACT_LAST,
7983 scalar_type, mask, vec_lhs);
7985 /* Convert the extracted vector element to the required scalar type. */
7986 new_tree = gimple_convert (&stmts, lhs_type, scalar_res);
7988 else
7990 tree bftype = TREE_TYPE (vectype);
7991 if (VECTOR_BOOLEAN_TYPE_P (vectype))
7992 bftype = build_nonstandard_integer_type (tree_to_uhwi (bitsize), 1);
7993 new_tree = build3 (BIT_FIELD_REF, bftype, vec_lhs, bitsize, bitstart);
7994 new_tree = force_gimple_operand (fold_convert (lhs_type, new_tree),
7995 &stmts, true, NULL_TREE);
7998 if (stmts)
7999 gsi_insert_seq_on_edge_immediate (single_exit (loop), stmts);
8001 /* Replace use of lhs with newly computed result. If the use stmt is a
8002 single arg PHI, just replace all uses of the PHI result. This is necessary
8003 because the lcssa PHI defining lhs may appear before the newly inserted stmt. */
8004 use_operand_p use_p;
8005 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, lhs)
8006 if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))
8007 && !is_gimple_debug (use_stmt))
8009 if (gimple_code (use_stmt) == GIMPLE_PHI
8010 && gimple_phi_num_args (use_stmt) == 1)
8012 replace_uses_by (gimple_phi_result (use_stmt), new_tree);
8014 else
8016 FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
8017 SET_USE (use_p, new_tree);
8019 update_stmt (use_stmt);
8022 return true;
8025 /* Kill any debug uses outside LOOP of SSA names defined in STMT_INFO. */
8027 static void
8028 vect_loop_kill_debug_uses (struct loop *loop, stmt_vec_info stmt_info)
8030 ssa_op_iter op_iter;
8031 imm_use_iterator imm_iter;
8032 def_operand_p def_p;
8033 gimple *ustmt;
8035 FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt_info->stmt, op_iter, SSA_OP_DEF)
8037 FOR_EACH_IMM_USE_STMT (ustmt, imm_iter, DEF_FROM_PTR (def_p))
8039 basic_block bb;
8041 if (!is_gimple_debug (ustmt))
8042 continue;
8044 bb = gimple_bb (ustmt);
8046 if (!flow_bb_inside_loop_p (loop, bb))
8048 if (gimple_debug_bind_p (ustmt))
8050 if (dump_enabled_p ())
8051 dump_printf_loc (MSG_NOTE, vect_location,
8052 "killing debug use\n");
8054 gimple_debug_bind_reset_value (ustmt);
8055 update_stmt (ustmt);
8057 else
8058 gcc_unreachable ();
8064 /* Given loop represented by LOOP_VINFO, return true if computation of
8065 LOOP_VINFO_NITERS (= LOOP_VINFO_NITERSM1 + 1) doesn't overflow, false
8066 otherwise. */
8068 static bool
8069 loop_niters_no_overflow (loop_vec_info loop_vinfo)
8071 /* Constant case. */
8072 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
8074 tree cst_niters = LOOP_VINFO_NITERS (loop_vinfo);
8075 tree cst_nitersm1 = LOOP_VINFO_NITERSM1 (loop_vinfo);
8077 gcc_assert (TREE_CODE (cst_niters) == INTEGER_CST);
8078 gcc_assert (TREE_CODE (cst_nitersm1) == INTEGER_CST);
8079 if (wi::to_widest (cst_nitersm1) < wi::to_widest (cst_niters))
8080 return true;
8083 widest_int max;
8084 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
8085 /* Check the upper bound of loop niters. */
8086 if (get_max_loop_iterations (loop, &max))
8088 tree type = TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo));
8089 signop sgn = TYPE_SIGN (type);
8090 widest_int type_max = widest_int::from (wi::max_value (type), sgn);
8091 if (max < type_max)
8092 return true;
8094 return false;
8097 /* Return a mask type with half the number of elements as TYPE. */
8099 tree
8100 vect_halve_mask_nunits (tree type)
8102 poly_uint64 nunits = exact_div (TYPE_VECTOR_SUBPARTS (type), 2);
8103 return build_truth_vector_type (nunits, current_vector_size);
8106 /* Return a mask type with twice as many elements as TYPE. */
8108 tree
8109 vect_double_mask_nunits (tree type)
8111 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (type) * 2;
8112 return build_truth_vector_type (nunits, current_vector_size);
8115 /* Record that a fully-masked version of LOOP_VINFO would need MASKS to
8116 contain a sequence of NVECTORS masks that each control a vector of type
8117 VECTYPE. */
8119 void
8120 vect_record_loop_mask (loop_vec_info loop_vinfo, vec_loop_masks *masks,
8121 unsigned int nvectors, tree vectype)
8123 gcc_assert (nvectors != 0);
8124 if (masks->length () < nvectors)
8125 masks->safe_grow_cleared (nvectors);
8126 rgroup_masks *rgm = &(*masks)[nvectors - 1];
8127 /* The number of scalars per iteration and the number of vectors are
8128 both compile-time constants. */
8129 unsigned int nscalars_per_iter
8130 = exact_div (nvectors * TYPE_VECTOR_SUBPARTS (vectype),
8131 LOOP_VINFO_VECT_FACTOR (loop_vinfo)).to_constant ();
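/* Worked example (counts assumed for illustration): nvectors = 2
   four-element vectors with a vectorization factor of 8 give
   nscalars_per_iter = 2 * 4 / 8 = 1 scalar per iteration for this
   rgroup. */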
8132 if (rgm->max_nscalars_per_iter < nscalars_per_iter)
8134 rgm->max_nscalars_per_iter = nscalars_per_iter;
8135 rgm->mask_type = build_same_sized_truth_vector_type (vectype);
8139 /* Given a complete set of masks MASKS, extract mask number INDEX
8140 for an rgroup that operates on NVECTORS vectors of type VECTYPE,
8141 where 0 <= INDEX < NVECTORS. Insert any set-up statements before GSI.
8143 See the comment above vec_loop_masks for more details about the mask
8144 arrangement. */
8146 tree
8147 vect_get_loop_mask (gimple_stmt_iterator *gsi, vec_loop_masks *masks,
8148 unsigned int nvectors, tree vectype, unsigned int index)
8150 rgroup_masks *rgm = &(*masks)[nvectors - 1];
8151 tree mask_type = rgm->mask_type;
8153 /* Populate the rgroup's mask array, if this is the first time we've
8154 used it. */
8155 if (rgm->masks.is_empty ())
8157 rgm->masks.safe_grow_cleared (nvectors);
8158 for (unsigned int i = 0; i < nvectors; ++i)
8160 tree mask = make_temp_ssa_name (mask_type, NULL, "loop_mask");
8161 /* Provide a dummy definition until the real one is available. */
8162 SSA_NAME_DEF_STMT (mask) = gimple_build_nop ();
8163 rgm->masks[i] = mask;
8167 tree mask = rgm->masks[index];
8168 if (maybe_ne (TYPE_VECTOR_SUBPARTS (mask_type),
8169 TYPE_VECTOR_SUBPARTS (vectype)))
8171 /* A loop mask for data type X can be reused for data type Y
8172 if X has N times more elements than Y and if Y's elements
8173 are N times bigger than X's. In this case each sequence
8174 of N elements in the loop mask will be all-zero or all-one.
8175 We can then view-convert the mask so that each sequence of
8176 N elements is replaced by a single element. */
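/* For instance (element modes assumed for illustration): a 16-lane
   mask built for 16 QImode elements can serve 8 HImode elements; each
   adjacent pair of mask lanes is identical, so the VIEW_CONVERT below
   collapses every pair into one lane of an 8-lane mask. */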
8177 gcc_assert (multiple_p (TYPE_VECTOR_SUBPARTS (mask_type),
8178 TYPE_VECTOR_SUBPARTS (vectype)));
8179 gimple_seq seq = NULL;
8180 mask_type = build_same_sized_truth_vector_type (vectype);
8181 mask = gimple_build (&seq, VIEW_CONVERT_EXPR, mask_type, mask);
8182 if (seq)
8183 gsi_insert_seq_before (gsi, seq, GSI_SAME_STMT);
8185 return mask;
8188 /* Scale profiling counters by estimation for LOOP which is vectorized
8189 by factor VF. */
8191 static void
8192 scale_profile_for_vect_loop (struct loop *loop, unsigned vf)
8194 edge preheader = loop_preheader_edge (loop);
8195 /* Reduce loop iterations by the vectorization factor. */
8196 gcov_type new_est_niter = niter_for_unrolled_loop (loop, vf);
8197 profile_count freq_h = loop->header->count, freq_e = preheader->count ();
8199 if (freq_h.nonzero_p ())
8201 profile_probability p;
8203 /* Avoid dropping loop body profile counter to 0 because of zero count
8204 in loop's preheader. */
8205 if (!(freq_e == profile_count::zero ()))
8206 freq_e = freq_e.force_nonzero ();
8207 p = freq_e.apply_scale (new_est_niter + 1, 1).probability_in (freq_h);
8208 scale_loop_frequencies (loop, p);
8211 edge exit_e = single_exit (loop);
8212 exit_e->probability = profile_probability::always ()
8213 .apply_scale (1, new_est_niter + 1);
8215 edge exit_l = single_pred_edge (loop->latch);
8216 profile_probability prob = exit_l->probability;
8217 exit_l->probability = exit_e->probability.invert ();
8218 if (prob.initialized_p () && exit_l->probability.initialized_p ())
8219 scale_bbs_frequencies (&loop->latch, 1, exit_l->probability / prob);
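/* For illustration (the estimate is assumed): if the vectorized loop
   is expected to run new_est_niter = 24 times, the exit edge gets
   probability 1/25 and the latch edge the complementary 24/25. */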
8222 /* Vectorize STMT_INFO if relevant, inserting any new instructions before GSI.
8223 When vectorizing STMT_INFO as a store, set *SEEN_STORE to its
8224 stmt_vec_info. */
8226 static void
8227 vect_transform_loop_stmt (loop_vec_info loop_vinfo, stmt_vec_info stmt_info,
8228 gimple_stmt_iterator *gsi, stmt_vec_info *seen_store)
8230 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
8231 poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
8233 if (dump_enabled_p ())
8235 dump_printf_loc (MSG_NOTE, vect_location,
8236 "------>vectorizing statement: ");
8237 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0);
8240 if (MAY_HAVE_DEBUG_BIND_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
8241 vect_loop_kill_debug_uses (loop, stmt_info);
8243 if (!STMT_VINFO_RELEVANT_P (stmt_info)
8244 && !STMT_VINFO_LIVE_P (stmt_info))
8245 return;
8247 if (STMT_VINFO_VECTYPE (stmt_info))
8249 poly_uint64 nunits
8250 = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
8251 if (!STMT_SLP_TYPE (stmt_info)
8252 && maybe_ne (nunits, vf)
8253 && dump_enabled_p ())
8254 /* For SLP, VF is set according to the unrolling factor and not
8255 to the vector size, hence for SLP this print is not valid. */
8256 dump_printf_loc (MSG_NOTE, vect_location, "multiple-types.\n");
8259 /* Pure SLP statements have already been vectorized. We still need
8260 to apply loop vectorization to hybrid SLP statements. */
8261 if (PURE_SLP_STMT (stmt_info))
8262 return;
8264 if (dump_enabled_p ())
8265 dump_printf_loc (MSG_NOTE, vect_location, "transform statement.\n");
8267 if (vect_transform_stmt (stmt_info, gsi, NULL, NULL))
8268 *seen_store = stmt_info;
8271 /* Function vect_transform_loop.
8273 The analysis phase has determined that the loop is vectorizable.
8274 Vectorize the loop - create vectorized stmts to replace the scalar
8275 stmts in the loop, and update the loop exit condition.
8276 Returns scalar epilogue loop if any. */
8278 struct loop *
8279 vect_transform_loop (loop_vec_info loop_vinfo)
8281 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
8282 struct loop *epilogue = NULL;
8283 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
8284 int nbbs = loop->num_nodes;
8285 int i;
8286 tree niters_vector = NULL_TREE;
8287 tree step_vector = NULL_TREE;
8288 tree niters_vector_mult_vf = NULL_TREE;
8289 poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
8290 unsigned int lowest_vf = constant_lower_bound (vf);
8291 gimple *stmt;
8292 bool check_profitability = false;
8293 unsigned int th;
8295 DUMP_VECT_SCOPE ("vec_transform_loop");
8297 loop_vinfo->shared->check_datarefs ();
8299 /* Use the more conservative vectorization threshold. If the number
8300 of iterations is constant, assume the cost check has been performed
8301 by our caller. If the threshold makes all loops profitable that
8302 run at least the (estimated) vectorization factor number of times,
8303 checking is pointless, too. */
8304 th = LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo);
8305 if (th >= vect_vf_for_cost (loop_vinfo)
8306 && !LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
8308 if (dump_enabled_p ())
8309 dump_printf_loc (MSG_NOTE, vect_location,
8310 "Profitability threshold is %d loop iterations.\n",
8311 th);
8312 check_profitability = true;
8315 /* Make sure there exists a single-predecessor exit bb. Do this before
8316 versioning. */
8317 edge e = single_exit (loop);
8318 if (! single_pred_p (e->dest))
8320 split_loop_exit_edge (e);
8321 if (dump_enabled_p ())
8322 dump_printf (MSG_NOTE, "split exit edge\n");
8325 /* Version the loop first, if required, so the profitability check
8326 comes first. */
8328 if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
8330 poly_uint64 versioning_threshold
8331 = LOOP_VINFO_VERSIONING_THRESHOLD (loop_vinfo);
8332 if (check_profitability
8333 && ordered_p (poly_uint64 (th), versioning_threshold))
8335 versioning_threshold = ordered_max (poly_uint64 (th),
8336 versioning_threshold);
8337 check_profitability = false;
8339 vect_loop_versioning (loop_vinfo, th, check_profitability,
8340 versioning_threshold);
8341 check_profitability = false;
8344 /* Make sure there exists a single-predecessor exit bb also on the
8345 scalar loop copy. Do this after versioning but before peeling
8346 so the CFG structure is fine for both the scalar and the if-converted
8347 loop, and so slpeel_duplicate_current_defs_from_edges sees matched
8348 loop-closed PHI nodes on the exit. */
8349 if (LOOP_VINFO_SCALAR_LOOP (loop_vinfo))
8351 e = single_exit (LOOP_VINFO_SCALAR_LOOP (loop_vinfo));
8352 if (! single_pred_p (e->dest))
8354 split_loop_exit_edge (e);
8355 if (dump_enabled_p ())
8356 dump_printf (MSG_NOTE, "split exit edge of scalar loop\n");
8360 tree niters = vect_build_loop_niters (loop_vinfo);
8361 LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = niters;
8362 tree nitersm1 = unshare_expr (LOOP_VINFO_NITERSM1 (loop_vinfo));
8363 bool niters_no_overflow = loop_niters_no_overflow (loop_vinfo);
8364 epilogue = vect_do_peeling (loop_vinfo, niters, nitersm1, &niters_vector,
8365 &step_vector, &niters_vector_mult_vf, th,
8366 check_profitability, niters_no_overflow);
8368 if (niters_vector == NULL_TREE)
8370 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
8371 && !LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
8372 && known_eq (lowest_vf, vf))
8374 niters_vector
8375 = build_int_cst (TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo)),
8376 LOOP_VINFO_INT_NITERS (loop_vinfo) / lowest_vf);
8377 step_vector = build_one_cst (TREE_TYPE (niters));
8379 else
8380 vect_gen_vector_loop_niters (loop_vinfo, niters, &niters_vector,
8381 &step_vector, niters_no_overflow);
8384 /* 1) Make sure the loop header has exactly two entries
8385 2) Make sure we have a preheader basic block. */
8387 gcc_assert (EDGE_COUNT (loop->header->preds) == 2);
8389 split_edge (loop_preheader_edge (loop));
8391 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
8392 && vect_use_loop_mask_for_alignment_p (loop_vinfo))
8393 /* This will deal with any possible peeling. */
8394 vect_prepare_for_masked_peels (loop_vinfo);
8396 /* Schedule the SLP instances first, then handle loop vectorization
8397 below. */
8398 if (!loop_vinfo->slp_instances.is_empty ())
8400 DUMP_VECT_SCOPE ("scheduling SLP instances");
8401 vect_schedule_slp (loop_vinfo);
8404 /* FORNOW: the vectorizer supports only loops whose body consists
8405 of one basic block (header + empty latch). When the vectorizer
8406 supports more involved loop forms, the order in which the BBs are
8407 traversed will need to be reconsidered. */
8409 for (i = 0; i < nbbs; i++)
8411 basic_block bb = bbs[i];
8412 stmt_vec_info stmt_info;
8414 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
8415 gsi_next (&si))
8417 gphi *phi = si.phi ();
8418 if (dump_enabled_p ())
8420 dump_printf_loc (MSG_NOTE, vect_location,
8421 "------>vectorizing phi: ");
8422 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
8424 stmt_info = loop_vinfo->lookup_stmt (phi);
8425 if (!stmt_info)
8426 continue;
8428 if (MAY_HAVE_DEBUG_BIND_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
8429 vect_loop_kill_debug_uses (loop, stmt_info);
8431 if (!STMT_VINFO_RELEVANT_P (stmt_info)
8432 && !STMT_VINFO_LIVE_P (stmt_info))
8433 continue;
8435 if (STMT_VINFO_VECTYPE (stmt_info)
8436 && (maybe_ne
8437 (TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info)), vf))
8438 && dump_enabled_p ())
8439 dump_printf_loc (MSG_NOTE, vect_location, "multiple-types.\n");
8441 if ((STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def
8442 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def
8443 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle)
8444 && ! PURE_SLP_STMT (stmt_info))
8446 if (dump_enabled_p ())
8447 dump_printf_loc (MSG_NOTE, vect_location, "transform phi.\n");
8448 vect_transform_stmt (stmt_info, NULL, NULL, NULL);
8452 for (gimple_stmt_iterator si = gsi_start_bb (bb);
8453 !gsi_end_p (si);)
8455 stmt = gsi_stmt (si);
8456 /* During vectorization remove existing clobber stmts. */
8457 if (gimple_clobber_p (stmt))
8459 unlink_stmt_vdef (stmt);
8460 gsi_remove (&si, true);
8461 release_defs (stmt);
8463 else
8465 stmt_info = loop_vinfo->lookup_stmt (stmt);
8467 /* vector stmts created in the outer-loop during vectorization of
8468 stmts in an inner-loop may not have a stmt_info, and do not
8469 need to be vectorized. */
8470 stmt_vec_info seen_store = NULL;
8471 if (stmt_info)
8473 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
8475 gimple *def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
8476 for (gimple_stmt_iterator subsi = gsi_start (def_seq);
8477 !gsi_end_p (subsi); gsi_next (&subsi))
8479 stmt_vec_info pat_stmt_info
8480 = loop_vinfo->lookup_stmt (gsi_stmt (subsi));
8481 vect_transform_loop_stmt (loop_vinfo, pat_stmt_info,
8482 &si, &seen_store);
8484 stmt_vec_info pat_stmt_info
8485 = STMT_VINFO_RELATED_STMT (stmt_info);
8486 vect_transform_loop_stmt (loop_vinfo, pat_stmt_info, &si,
8487 &seen_store);
8489 vect_transform_loop_stmt (loop_vinfo, stmt_info, &si,
8490 &seen_store);
8492 gsi_next (&si);
8493 if (seen_store)
8495 if (STMT_VINFO_GROUPED_ACCESS (seen_store))
8496 /* Interleaving. The vectorization of the
8497 interleaving chain was completed - free all
8498 the stores in the chain. */
8499 vect_remove_stores (DR_GROUP_FIRST_ELEMENT (seen_store));
8500 else
8501 /* Free the attached stmt_vec_info and remove the stmt. */
8502 loop_vinfo->remove_stmt (stmt_info);
8507 /* Stub out scalar statements that must not survive vectorization.
8508 Doing this here helps with grouped statements, or statements that
8509 are involved in patterns. */
8510 for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
8511 !gsi_end_p (gsi); gsi_next (&gsi))
8513 gcall *call = dyn_cast <gcall *> (gsi_stmt (gsi));
8514 if (call && gimple_call_internal_p (call, IFN_MASK_LOAD))
8516 tree lhs = gimple_get_lhs (call);
8517 if (!VECTOR_TYPE_P (TREE_TYPE (lhs)))
8519 tree zero = build_zero_cst (TREE_TYPE (lhs));
8520 gimple *new_stmt = gimple_build_assign (lhs, zero);
8521 gsi_replace (&gsi, new_stmt, true);
8525 } /* BBs in loop */
8527 /* The vectorization factor is always > 1, so if we use an IV increment of 1,
8528 a zero NITERS becomes a nonzero NITERS_VECTOR. */
8529 if (integer_onep (step_vector))
8530 niters_no_overflow = true;
8531 vect_set_loop_condition (loop, loop_vinfo, niters_vector, step_vector,
8532 niters_vector_mult_vf, !niters_no_overflow);
8534 unsigned int assumed_vf = vect_vf_for_cost (loop_vinfo);
8535 scale_profile_for_vect_loop (loop, assumed_vf);
8537 /* True if the final iteration might not handle a full vector's
8538 worth of scalar iterations. */
8539 bool final_iter_may_be_partial = LOOP_VINFO_FULLY_MASKED_P (loop_vinfo);
8540 /* The minimum number of iterations performed by the epilogue. This
8541 is 1 when peeling for gaps because we always need a final scalar
8542 iteration. */
8543 int min_epilogue_iters = LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) ? 1 : 0;
8544 /* +1 to convert latch counts to loop iteration counts,
8545 -min_epilogue_iters to remove iterations that cannot be performed
8546 by the vector code. */
8547 int bias_for_lowest = 1 - min_epilogue_iters;
8548 int bias_for_assumed = bias_for_lowest;
8549 int alignment_npeels = LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
8550 if (alignment_npeels && LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
8552 /* When the amount of peeling is known at compile time, the first
8553 iteration will have exactly alignment_npeels active elements.
8554 In the worst case it will have at least one. */
8555 int min_first_active = (alignment_npeels > 0 ? alignment_npeels : 1);
8556 bias_for_lowest += lowest_vf - min_first_active;
8557 bias_for_assumed += assumed_vf - min_first_active;
8559 /* In these calculations the "- 1" converts loop iteration counts
8560 back to latch counts. */
8561 if (loop->any_upper_bound)
8562 loop->nb_iterations_upper_bound
8563 = (final_iter_may_be_partial
8564 ? wi::udiv_ceil (loop->nb_iterations_upper_bound + bias_for_lowest,
8565 lowest_vf) - 1
8566 : wi::udiv_floor (loop->nb_iterations_upper_bound + bias_for_lowest,
8567 lowest_vf) - 1);
8568 if (loop->any_likely_upper_bound)
8569 loop->nb_iterations_likely_upper_bound
8570 = (final_iter_may_be_partial
8571 ? wi::udiv_ceil (loop->nb_iterations_likely_upper_bound
8572 + bias_for_lowest, lowest_vf) - 1
8573 : wi::udiv_floor (loop->nb_iterations_likely_upper_bound
8574 + bias_for_lowest, lowest_vf) - 1);
8575 if (loop->any_estimate)
8576 loop->nb_iterations_estimate
8577 = (final_iter_may_be_partial
8578 ? wi::udiv_ceil (loop->nb_iterations_estimate + bias_for_assumed,
8579 assumed_vf) - 1
8580 : wi::udiv_floor (loop->nb_iterations_estimate + bias_for_assumed,
8581 assumed_vf) - 1);
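/* Worked example of the scaling above (numbers assumed for
   illustration): a latch-count upper bound of 99 (100 iterations) with
   lowest_vf = 4, no peeling for gaps and no full masking gives
   bias_for_lowest = 1 and a new bound of (99 + 1) / 4 - 1 = 24 latch
   iterations, i.e. 25 vector iterations. */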
8583 if (dump_enabled_p ())
8585 if (!LOOP_VINFO_EPILOGUE_P (loop_vinfo))
8587 dump_printf_loc (MSG_NOTE, vect_location,
8588 "LOOP VECTORIZED\n");
8589 if (loop->inner)
8590 dump_printf_loc (MSG_NOTE, vect_location,
8591 "OUTER LOOP VECTORIZED\n");
8592 dump_printf (MSG_NOTE, "\n");
8594 else
8596 dump_printf_loc (MSG_NOTE, vect_location,
8597 "LOOP EPILOGUE VECTORIZED (VS=");
8598 dump_dec (MSG_NOTE, current_vector_size);
8599 dump_printf (MSG_NOTE, ")\n");
8603 /* Free SLP instances here because otherwise stmt reference counting
8604 won't work. */
8605 slp_instance instance;
8606 FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), i, instance)
8607 vect_free_slp_instance (instance, true);
8608 LOOP_VINFO_SLP_INSTANCES (loop_vinfo).release ();
8609 /* Clear the safelen field since its value is invalid after vectorization:
8610 the vectorized loop can have loop-carried dependencies. */
8611 loop->safelen = 0;
8613 /* Don't vectorize epilogue for epilogue. */
8614 if (LOOP_VINFO_EPILOGUE_P (loop_vinfo))
8615 epilogue = NULL;
8617 if (!PARAM_VALUE (PARAM_VECT_EPILOGUES_NOMASK))
8618 epilogue = NULL;
8620 if (epilogue)
8622 auto_vector_sizes vector_sizes;
8623 targetm.vectorize.autovectorize_vector_sizes (&vector_sizes);
8624 unsigned int next_size = 0;
8626 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
8627 && LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) >= 0
8628 && known_eq (vf, lowest_vf))
8630 unsigned int eiters
8631 = (LOOP_VINFO_INT_NITERS (loop_vinfo)
8632 - LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo));
8633 eiters = eiters % lowest_vf;
8634 epilogue->nb_iterations_upper_bound = eiters - 1;
8636 unsigned int ratio;
8637 while (next_size < vector_sizes.length ()
8638 && !(constant_multiple_p (current_vector_size,
8639 vector_sizes[next_size], &ratio)
8640 && eiters >= lowest_vf / ratio))
8641 next_size += 1;
8643 else
8644 while (next_size < vector_sizes.length ()
8645 && maybe_lt (current_vector_size, vector_sizes[next_size]))
8646 next_size += 1;
8648 if (next_size == vector_sizes.length ())
8649 epilogue = NULL;
8652 if (epilogue)
8654 epilogue->force_vectorize = loop->force_vectorize;
8655 epilogue->safelen = loop->safelen;
8656 epilogue->dont_vectorize = false;
8658 /* We may need to if-convert the epilogue to vectorize it. */
8659 if (LOOP_VINFO_SCALAR_LOOP (loop_vinfo))
8660 tree_if_conversion (epilogue);
8663 return epilogue;
8666 /* The code below tries to perform a simple optimization - revert
8667 if-conversion for masked stores: if the mask of a store is zero, do not
8668 perform the store and, if possible, skip the stored-value producers as well.
8669 For example,
8670 for (i=0; i<n; i++)
8671 if (c[i])
8673 p1[i] += 1;
8674 p2[i] = p3[i] +2;
8676 this transformation will produce the following semi-hammock:
8678 if (!mask__ifc__42.18_165 == { 0, 0, 0, 0, 0, 0, 0, 0 })
8680 vect__11.19_170 = MASK_LOAD (vectp_p1.20_168, 0B, mask__ifc__42.18_165);
8681 vect__12.22_172 = vect__11.19_170 + vect_cst__171;
8682 MASK_STORE (vectp_p1.23_175, 0B, mask__ifc__42.18_165, vect__12.22_172);
8683 vect__18.25_182 = MASK_LOAD (vectp_p3.26_180, 0B, mask__ifc__42.18_165);
8684 vect__19.28_184 = vect__18.25_182 + vect_cst__183;
8685 MASK_STORE (vectp_p2.29_187, 0B, mask__ifc__42.18_165, vect__19.28_184);
8689 void
8690 optimize_mask_stores (struct loop *loop)
8692 basic_block *bbs = get_loop_body (loop);
8693 unsigned nbbs = loop->num_nodes;
8694 unsigned i;
8695 basic_block bb;
8696 struct loop *bb_loop;
8697 gimple_stmt_iterator gsi;
8698 gimple *stmt;
8699 auto_vec<gimple *> worklist;
8701 vect_location = find_loop_location (loop);
8702 /* Pick up all masked stores in loop if any. */
8703 for (i = 0; i < nbbs; i++)
8705 bb = bbs[i];
8706 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
8707 gsi_next (&gsi))
8709 stmt = gsi_stmt (gsi);
8710 if (gimple_call_internal_p (stmt, IFN_MASK_STORE))
8711 worklist.safe_push (stmt);
8715 free (bbs);
8716 if (worklist.is_empty ())
8717 return;
8719 /* Loop has masked stores. */
8720 while (!worklist.is_empty ())
8722 gimple *last, *last_store;
8723 edge e, efalse;
8724 tree mask;
8725 basic_block store_bb, join_bb;
8726 gimple_stmt_iterator gsi_to;
8727 tree vdef, new_vdef;
8728 gphi *phi;
8729 tree vectype;
8730 tree zero;
8732 last = worklist.pop ();
8733 mask = gimple_call_arg (last, 2);
8734 bb = gimple_bb (last);
8735 /* Create then_bb and the if-then structure in the CFG; then_bb belongs to
8736 the same loop as if_bb. It could be different from LOOP when a
8737 two-level loop nest is vectorized and the mask_store belongs to the inner
8738 one. */
8739 e = split_block (bb, last);
8740 bb_loop = bb->loop_father;
8741 gcc_assert (loop == bb_loop || flow_loop_nested_p (loop, bb_loop));
8742 join_bb = e->dest;
8743 store_bb = create_empty_bb (bb);
8744 add_bb_to_loop (store_bb, bb_loop);
8745 e->flags = EDGE_TRUE_VALUE;
8746 efalse = make_edge (bb, store_bb, EDGE_FALSE_VALUE);
8747 /* Put STORE_BB to likely part. */
8748 efalse->probability = profile_probability::unlikely ();
8749 store_bb->count = efalse->count ();
8750 make_single_succ_edge (store_bb, join_bb, EDGE_FALLTHRU);
8751 if (dom_info_available_p (CDI_DOMINATORS))
8752 set_immediate_dominator (CDI_DOMINATORS, store_bb, bb);
8753 if (dump_enabled_p ())
8754 dump_printf_loc (MSG_NOTE, vect_location,
8755 "Create new block %d to sink mask stores.",
8756 store_bb->index);
8757 /* Create vector comparison with boolean result. */
8758 vectype = TREE_TYPE (mask);
8759 zero = build_zero_cst (vectype);
8760 stmt = gimple_build_cond (EQ_EXPR, mask, zero, NULL_TREE, NULL_TREE);
8761 gsi = gsi_last_bb (bb);
8762 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
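/* The EQ_EXPR test compares the whole mask against the all-zero vector,
so the TRUE edge set up above bypasses STORE_BB whenever no mask lane
is active.  */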
8763 /* Create a new PHI node for the vdef of the last masked store:
8764 .MEM_2 = VDEF <.MEM_1>
8765 will be converted to
8766 .MEM_3 = VDEF <.MEM_1>
8767 and a new PHI node will be created in the join bb
8768 .MEM_2 = PHI <.MEM_1, .MEM_3>
8769 */
8770 vdef = gimple_vdef (last);
8771 new_vdef = make_ssa_name (gimple_vop (cfun), last);
8772 gimple_set_vdef (last, new_vdef);
8773 phi = create_phi_node (vdef, join_bb);
8774 add_phi_arg (phi, new_vdef, EDGE_SUCC (store_bb, 0), UNKNOWN_LOCATION);
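/* The PHI argument for the edge E that bypasses STORE_BB is added only
after the sinking loop below has finished, using the VUSE of the last
store that was actually moved.  */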
8776 /* Put all masked stores with the same mask to STORE_BB if possible. */
8777 while (true)
8779 gimple_stmt_iterator gsi_from;
8780 gimple *stmt1 = NULL;
8782 /* Move masked store to STORE_BB. */
8783 last_store = last;
8784 gsi = gsi_for_stmt (last);
8785 gsi_from = gsi;
8786 /* Shift GSI to the previous stmt for further traversal. */
8787 gsi_prev (&gsi);
8788 gsi_to = gsi_start_bb (store_bb);
8789 gsi_move_before (&gsi_from, &gsi_to);
8791 /* Set GSI_TO to the start of the now non-empty STORE_BB.  */
8791 gsi_to = gsi_start_bb (store_bb);
8792 if (dump_enabled_p ())
8794 dump_printf_loc (MSG_NOTE, vect_location,
8795 "Move stmt to created bb\n");
8796 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, last, 0);
8798 /* Move all stored value producers if possible. */
8799 while (!gsi_end_p (gsi))
8801 tree lhs;
8802 imm_use_iterator imm_iter;
8803 use_operand_p use_p;
8804 bool res;
8806 /* Skip debug statements. */
8807 if (is_gimple_debug (gsi_stmt (gsi)))
8809 gsi_prev (&gsi);
8810 continue;
8812 stmt1 = gsi_stmt (gsi);
8813 /* Do not consider statements that write to memory or have a
8814 volatile operand.  */
8815 if (gimple_vdef (stmt1)
8816 || gimple_has_volatile_ops (stmt1))
8817 break;
8818 gsi_from = gsi;
8819 gsi_prev (&gsi);
8820 lhs = gimple_get_lhs (stmt1);
8821 if (!lhs)
8822 break;
8824 /* LHS of vectorized stmt must be SSA_NAME. */
8825 if (TREE_CODE (lhs) != SSA_NAME)
8826 break;
8828 if (!VECTOR_TYPE_P (TREE_TYPE (lhs)))
8829 {
8830 /* Remove dead scalar statement.  */
8831 if (has_zero_uses (lhs))
8832 {
8833 gsi_remove (&gsi_from, true);
8834 continue;
8835 }
8836 break;
8837 }
8838 /* Check that LHS does not have uses outside of STORE_BB. */
8839 res = true;
8840 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
8842 gimple *use_stmt;
8843 use_stmt = USE_STMT (use_p);
8844 if (is_gimple_debug (use_stmt))
8845 continue;
8846 if (gimple_bb (use_stmt) != store_bb)
8848 res = false;
8849 break;
8852 if (!res)
8853 break;
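/* STMT1 must see the same memory state as LAST_STORE; a different VUSE
would indicate an intervening memory write that STMT1 cannot be moved
across.  */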
8855 if (gimple_vuse (stmt1)
8856 && gimple_vuse (stmt1) != gimple_vuse (last_store))
8857 break;
8859 /* Can move STMT1 to STORE_BB. */
8860 if (dump_enabled_p ())
8862 dump_printf_loc (MSG_NOTE, vect_location,
8863 "Move stmt to created bb\n");
8864 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt1, 0);
8866 gsi_move_before (&gsi_from, &gsi_to);
8867 /* Shift GSI_TO for further insertion. */
8868 gsi_prev (&gsi_to);
8870 /* Sink the next masked store into STORE_BB as well, but only if the scan above stopped exactly at that store and it uses the same mask.  */
8871 if (worklist.is_empty ()
8872 || gimple_call_arg (worklist.last (), 2) != mask
8873 || worklist.last () != stmt1)
8874 break;
8875 last = worklist.pop ();
8877 add_phi_arg (phi, gimple_vuse (last_store), e, UNKNOWN_LOCATION);