gcc/tree-vect-loop.c
1 /* Loop Vectorization
2 Copyright (C) 2003-2021 Free Software Foundation, Inc.
3 Contributed by Dorit Naishlos <dorit@il.ibm.com> and
4 Ira Rosen <irar@il.ibm.com>
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "backend.h"
26 #include "target.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "gimple.h"
30 #include "cfghooks.h"
31 #include "tree-pass.h"
32 #include "ssa.h"
33 #include "optabs-tree.h"
34 #include "diagnostic-core.h"
35 #include "fold-const.h"
36 #include "stor-layout.h"
37 #include "cfganal.h"
38 #include "gimplify.h"
39 #include "gimple-iterator.h"
40 #include "gimplify-me.h"
41 #include "tree-ssa-loop-ivopts.h"
42 #include "tree-ssa-loop-manip.h"
43 #include "tree-ssa-loop-niter.h"
44 #include "tree-ssa-loop.h"
45 #include "cfgloop.h"
46 #include "tree-scalar-evolution.h"
47 #include "tree-vectorizer.h"
48 #include "gimple-fold.h"
49 #include "cgraph.h"
50 #include "tree-cfg.h"
51 #include "tree-if-conv.h"
52 #include "internal-fn.h"
53 #include "tree-vector-builder.h"
54 #include "vec-perm-indices.h"
55 #include "tree-eh.h"
57 /* Loop Vectorization Pass.
59 This pass tries to vectorize loops.
61 For example, the vectorizer transforms the following simple loop:
63 short a[N]; short b[N]; short c[N]; int i;
65 for (i=0; i<N; i++){
66 a[i] = b[i] + c[i];
69 as if it was manually vectorized by rewriting the source code into:
71 typedef int __attribute__((mode(V8HI))) v8hi;
72 short a[N]; short b[N]; short c[N]; int i;
73 v8hi *pa = (v8hi*)a, *pb = (v8hi*)b, *pc = (v8hi*)c;
74 v8hi va, vb, vc;
76 for (i=0; i<N/8; i++){
77 vb = pb[i];
78 vc = pc[i];
79 va = vb + vc;
80 pa[i] = va;
83 The main entry to this pass is vectorize_loops(), in which
84 the vectorizer applies a set of analyses on a given set of loops,
85 followed by the actual vectorization transformation for the loops that
86 had successfully passed the analysis phase.
87 Throughout this pass we make a distinction between two types of
88 data: scalars (which are represented by SSA_NAMES), and memory references
89 ("data-refs"). These two types of data require different handling both
90 during analysis and transformation. The types of data-refs that the
91 vectorizer currently supports are ARRAY_REFS whose base is an array DECL
92 (not a pointer), and INDIRECT_REFS through pointers; both array and pointer
93 accesses are required to have a simple (consecutive) access pattern.
95 Analysis phase:
96 ===============
97 The driver for the analysis phase is vect_analyze_loop().
98 It applies a set of analyses, some of which rely on the scalar evolution
99 analyzer (scev) developed by Sebastian Pop.
101 During the analysis phase the vectorizer records some information
102 per stmt in a "stmt_vec_info" struct which is attached to each stmt in the
103 loop, as well as general information about the loop as a whole, which is
104 recorded in a "loop_vec_info" struct attached to each loop.
106 Transformation phase:
107 =====================
108 The loop transformation phase scans all the stmts in the loop, and
109 creates a vector stmt (or a sequence of stmts) for each scalar stmt S in
110 the loop that needs to be vectorized. It inserts the vector code sequence
111 just before the scalar stmt S, and records a pointer to the vector code
112 in STMT_VINFO_VEC_STMT (stmt_info) (stmt_info is the stmt_vec_info struct
113 attached to S). This pointer will be used for the vectorization of following
114 stmts which use the def of stmt S. Stmt S is removed if it writes to memory;
115 otherwise, we rely on dead code elimination for removing it.
117 For example, say stmt S1 was vectorized into stmt VS1:
119 VS1: vb = px[i];
120 S1: b = x[i]; STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
121 S2: a = b;
123 To vectorize stmt S2, the vectorizer first finds the stmt that defines
124 the operand 'b' (S1), and gets the relevant vector def 'vb' from the
125 vector stmt VS1 pointed to by STMT_VINFO_VEC_STMT (stmt_info (S1)). The
126 resulting sequence would be:
128 VS1: vb = px[i];
129 S1: b = x[i]; STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
130 VS2: va = vb;
131 S2: a = b; STMT_VINFO_VEC_STMT (stmt_info (S2)) = VS2
133 Operands that are not SSA_NAMEs are data-refs that appear in
134 load/store operations (like 'x[i]' in S1), and are handled differently.
136 Target modeling:
137 =================
138 Currently the only target specific information that is used is the
139 size of the vector (in bytes) - "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD".
140 Targets that can support different sizes of vectors will, for now, need
141 to specify one value for "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD". More
142 flexibility will be added in the future.
144 Since we only vectorize operations whose vector form can be
145 expressed using existing tree codes, to verify that an operation is
146 supported, the vectorizer checks the relevant optab at the relevant
147 machine_mode (e.g., optab_handler (add_optab, V8HImode)). If
148 the value found is CODE_FOR_nothing, then there's no target support, and
149 we can't vectorize the stmt.
151 For additional information on this project see:
152 http://gcc.gnu.org/projects/tree-ssa/vectorization.html
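/* Illustrative sketch (not part of the pass) of the optab query described
   above.  Whether the target supports, say, a V8HI addition boils down to a
   check of roughly this shape, where a CODE_FOR_nothing result means there
   is no instruction for it and the stmt cannot be vectorized:

     if (optab_handler (add_optab, V8HImode) == CODE_FOR_nothing)
       return false;      (no target support, do not vectorize)
     return true;  */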
155 static void vect_estimate_min_profitable_iters (loop_vec_info, int *, int *);
156 static stmt_vec_info vect_is_simple_reduction (loop_vec_info, stmt_vec_info,
157 bool *, bool *);
159 /* Subroutine of vect_determine_vf_for_stmt that handles only one
160 statement. VECTYPE_MAYBE_SET_P is true if STMT_VINFO_VECTYPE
161 may already be set for general statements (not just data refs). */
163 static opt_result
164 vect_determine_vf_for_stmt_1 (vec_info *vinfo, stmt_vec_info stmt_info,
165 bool vectype_maybe_set_p,
166 poly_uint64 *vf)
168 gimple *stmt = stmt_info->stmt;
170 if ((!STMT_VINFO_RELEVANT_P (stmt_info)
171 && !STMT_VINFO_LIVE_P (stmt_info))
172 || gimple_clobber_p (stmt))
174 if (dump_enabled_p ())
175 dump_printf_loc (MSG_NOTE, vect_location, "skip.\n");
176 return opt_result::success ();
179 tree stmt_vectype, nunits_vectype;
180 opt_result res = vect_get_vector_types_for_stmt (vinfo, stmt_info,
181 &stmt_vectype,
182 &nunits_vectype);
183 if (!res)
184 return res;
186 if (stmt_vectype)
188 if (STMT_VINFO_VECTYPE (stmt_info))
189 /* The only case when a vectype has already been set is for stmts
190 that contain a data ref, or for "pattern-stmts" (stmts generated
191 by the vectorizer to represent/replace a certain idiom). */
192 gcc_assert ((STMT_VINFO_DATA_REF (stmt_info)
193 || vectype_maybe_set_p)
194 && STMT_VINFO_VECTYPE (stmt_info) == stmt_vectype);
195 else
196 STMT_VINFO_VECTYPE (stmt_info) = stmt_vectype;
199 if (nunits_vectype)
200 vect_update_max_nunits (vf, nunits_vectype);
202 return opt_result::success ();
205 /* Subroutine of vect_determine_vectorization_factor. Set the vector
206 types of STMT_INFO and all attached pattern statements and update
207 the vectorization factor VF accordingly. Return true on success
208 or false if something prevented vectorization. */
210 static opt_result
211 vect_determine_vf_for_stmt (vec_info *vinfo,
212 stmt_vec_info stmt_info, poly_uint64 *vf)
214 if (dump_enabled_p ())
215 dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: %G",
216 stmt_info->stmt);
217 opt_result res = vect_determine_vf_for_stmt_1 (vinfo, stmt_info, false, vf);
218 if (!res)
219 return res;
221 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
222 && STMT_VINFO_RELATED_STMT (stmt_info))
224 gimple *pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
225 stmt_info = STMT_VINFO_RELATED_STMT (stmt_info);
227 /* If a pattern statement has def stmts, analyze them too. */
228 for (gimple_stmt_iterator si = gsi_start (pattern_def_seq);
229 !gsi_end_p (si); gsi_next (&si))
231 stmt_vec_info def_stmt_info = vinfo->lookup_stmt (gsi_stmt (si));
232 if (dump_enabled_p ())
233 dump_printf_loc (MSG_NOTE, vect_location,
234 "==> examining pattern def stmt: %G",
235 def_stmt_info->stmt);
236 res = vect_determine_vf_for_stmt_1 (vinfo, def_stmt_info, true, vf);
237 if (!res)
238 return res;
241 if (dump_enabled_p ())
242 dump_printf_loc (MSG_NOTE, vect_location,
243 "==> examining pattern statement: %G",
244 stmt_info->stmt);
245 res = vect_determine_vf_for_stmt_1 (vinfo, stmt_info, true, vf);
246 if (!res)
247 return res;
250 return opt_result::success ();
253 /* Function vect_determine_vectorization_factor
255 Determine the vectorization factor (VF). VF is the number of data elements
256 that are operated upon in parallel in a single iteration of the vectorized
257 loop. For example, when vectorizing a loop that operates on 4-byte elements,
258 on a target with a vector size (VS) of 16 bytes, the VF is set to 4, since 4
259 elements can fit in a single vector register.
261 We currently support vectorization of loops in which all types operated upon
262 are of the same size. Therefore this function currently sets VF according to
263 the size of the types operated upon, and fails if there are multiple sizes
264 in the loop.
266 VF is also the factor by which the loop iterations are strip-mined, e.g.:
267 original loop:
268 for (i=0; i<N; i++){
269 a[i] = b[i] + c[i];
272 vectorized loop:
273 for (i=0; i<N; i+=VF){
274 a[i:VF] = b[i:VF] + c[i:VF];
278 static opt_result
279 vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
281 class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
282 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
283 unsigned nbbs = loop->num_nodes;
284 poly_uint64 vectorization_factor = 1;
285 tree scalar_type = NULL_TREE;
286 gphi *phi;
287 tree vectype;
288 stmt_vec_info stmt_info;
289 unsigned i;
291 DUMP_VECT_SCOPE ("vect_determine_vectorization_factor");
293 for (i = 0; i < nbbs; i++)
295 basic_block bb = bbs[i];
297 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
298 gsi_next (&si))
300 phi = si.phi ();
301 stmt_info = loop_vinfo->lookup_stmt (phi);
302 if (dump_enabled_p ())
303 dump_printf_loc (MSG_NOTE, vect_location, "==> examining phi: %G",
304 phi);
306 gcc_assert (stmt_info);
308 if (STMT_VINFO_RELEVANT_P (stmt_info)
309 || STMT_VINFO_LIVE_P (stmt_info))
311 gcc_assert (!STMT_VINFO_VECTYPE (stmt_info));
312 scalar_type = TREE_TYPE (PHI_RESULT (phi));
314 if (dump_enabled_p ())
315 dump_printf_loc (MSG_NOTE, vect_location,
316 "get vectype for scalar type: %T\n",
317 scalar_type);
319 vectype = get_vectype_for_scalar_type (loop_vinfo, scalar_type);
320 if (!vectype)
321 return opt_result::failure_at (phi,
322 "not vectorized: unsupported "
323 "data-type %T\n",
324 scalar_type);
325 STMT_VINFO_VECTYPE (stmt_info) = vectype;
327 if (dump_enabled_p ())
328 dump_printf_loc (MSG_NOTE, vect_location, "vectype: %T\n",
329 vectype);
331 if (dump_enabled_p ())
333 dump_printf_loc (MSG_NOTE, vect_location, "nunits = ");
334 dump_dec (MSG_NOTE, TYPE_VECTOR_SUBPARTS (vectype));
335 dump_printf (MSG_NOTE, "\n");
338 vect_update_max_nunits (&vectorization_factor, vectype);
342 for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
343 gsi_next (&si))
345 if (is_gimple_debug (gsi_stmt (si)))
346 continue;
347 stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
348 opt_result res
349 = vect_determine_vf_for_stmt (loop_vinfo,
350 stmt_info, &vectorization_factor);
351 if (!res)
352 return res;
356 /* TODO: Analyze cost. Decide if worth while to vectorize. */
357 if (dump_enabled_p ())
359 dump_printf_loc (MSG_NOTE, vect_location, "vectorization factor = ");
360 dump_dec (MSG_NOTE, vectorization_factor);
361 dump_printf (MSG_NOTE, "\n");
364 if (known_le (vectorization_factor, 1U))
365 return opt_result::failure_at (vect_location,
366 "not vectorized: unsupported data-type\n");
367 LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;
368 return opt_result::success ();
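/* Worked example for the function above (an illustration only): with 4-byte
   ints and 16-byte vector registers, each vector holds 16 / 4 = 4 elements,
   so the vectorization factor is 4 and the loop is strip-mined by 4:

     for (i = 0; i < N; i += 4)
       a[i:4] = b[i:4] + c[i:4];  */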
372 /* Function vect_is_simple_iv_evolution.
374 FORNOW: A simple evolution of an induction variable in the loop is
375 considered a polynomial evolution. */
377 static bool
378 vect_is_simple_iv_evolution (unsigned loop_nb, tree access_fn, tree * init,
379 tree * step)
381 tree init_expr;
382 tree step_expr;
383 tree evolution_part = evolution_part_in_loop_num (access_fn, loop_nb);
384 basic_block bb;
386 /* When there is no evolution in this loop, the evolution function
387 is not "simple". */
388 if (evolution_part == NULL_TREE)
389 return false;
391 /* When the evolution is a polynomial of degree >= 2
392 the evolution function is not "simple". */
393 if (tree_is_chrec (evolution_part))
394 return false;
396 step_expr = evolution_part;
397 init_expr = unshare_expr (initial_condition_in_loop_num (access_fn, loop_nb));
399 if (dump_enabled_p ())
400 dump_printf_loc (MSG_NOTE, vect_location, "step: %T, init: %T\n",
401 step_expr, init_expr);
403 *init = init_expr;
404 *step = step_expr;
406 if (TREE_CODE (step_expr) != INTEGER_CST
407 && (TREE_CODE (step_expr) != SSA_NAME
408 || ((bb = gimple_bb (SSA_NAME_DEF_STMT (step_expr)))
409 && flow_bb_inside_loop_p (get_loop (cfun, loop_nb), bb))
410 || (!INTEGRAL_TYPE_P (TREE_TYPE (step_expr))
411 && (!SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr))
412 || !flag_associative_math)))
413 && (TREE_CODE (step_expr) != REAL_CST
414 || !flag_associative_math))
416 if (dump_enabled_p ())
417 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
418 "step unknown.\n");
419 return false;
422 return true;
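/* Illustrative example (using the usual chrec notation, not code from the
   pass): for a counter such as

     for (i = 0; i < n; i++)

   the scalar evolution of i is {0, +, 1}_loop, so the function above reports
   init = 0 and step = 1.  An evolution like {0, +, {1, +, 2}}_loop (degree
   >= 2) is rejected because its evolution part is itself a chrec.  */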
425 /* Return true if PHI, described by STMT_INFO, is the inner PHI in
426 what we are assuming is a double reduction. For example, given
427 a structure like this:
429 outer1:
430 x_1 = PHI <x_4(outer2), ...>;
433 inner:
434 x_2 = PHI <x_1(outer1), ...>;
436 x_3 = ...;
439 outer2:
440 x_4 = PHI <x_3(inner)>;
443 outer loop analysis would treat x_1 as a double reduction phi and
444 this function would then return true for x_2. */
446 static bool
447 vect_inner_phi_in_double_reduction_p (loop_vec_info loop_vinfo, gphi *phi)
449 use_operand_p use_p;
450 ssa_op_iter op_iter;
451 FOR_EACH_PHI_ARG (use_p, phi, op_iter, SSA_OP_USE)
452 if (stmt_vec_info def_info = loop_vinfo->lookup_def (USE_FROM_PTR (use_p)))
453 if (STMT_VINFO_DEF_TYPE (def_info) == vect_double_reduction_def)
454 return true;
455 return false;
458 /* Function vect_analyze_scalar_cycles_1.
460 Examine the cross iteration def-use cycles of scalar variables
461 in LOOP. LOOP_VINFO represents the loop that is now being
462 considered for vectorization (can be LOOP, or an outer-loop
463 enclosing LOOP). */
465 static void
466 vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, class loop *loop)
468 basic_block bb = loop->header;
469 tree init, step;
470 auto_vec<stmt_vec_info, 64> worklist;
471 gphi_iterator gsi;
472 bool double_reduc, reduc_chain;
474 DUMP_VECT_SCOPE ("vect_analyze_scalar_cycles");
476 /* First - identify all inductions. Reduction detection assumes that all the
477 inductions have been identified; therefore, this order must not be
478 changed. */
479 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
481 gphi *phi = gsi.phi ();
482 tree access_fn = NULL;
483 tree def = PHI_RESULT (phi);
484 stmt_vec_info stmt_vinfo = loop_vinfo->lookup_stmt (phi);
486 if (dump_enabled_p ())
487 dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: %G", phi);
489 /* Skip virtual phi's. The data dependences that are associated with
490 virtual defs/uses (i.e., memory accesses) are analyzed elsewhere. */
491 if (virtual_operand_p (def))
492 continue;
494 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_unknown_def_type;
496 /* Analyze the evolution function. */
497 access_fn = analyze_scalar_evolution (loop, def);
498 if (access_fn)
500 STRIP_NOPS (access_fn);
501 if (dump_enabled_p ())
502 dump_printf_loc (MSG_NOTE, vect_location,
503 "Access function of PHI: %T\n", access_fn);
504 STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo)
505 = initial_condition_in_loop_num (access_fn, loop->num);
506 STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo)
507 = evolution_part_in_loop_num (access_fn, loop->num);
510 if (!access_fn
511 || vect_inner_phi_in_double_reduction_p (loop_vinfo, phi)
512 || !vect_is_simple_iv_evolution (loop->num, access_fn, &init, &step)
513 || (LOOP_VINFO_LOOP (loop_vinfo) != loop
514 && TREE_CODE (step) != INTEGER_CST))
516 worklist.safe_push (stmt_vinfo);
517 continue;
520 gcc_assert (STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo)
521 != NULL_TREE);
522 gcc_assert (STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo) != NULL_TREE);
524 if (dump_enabled_p ())
525 dump_printf_loc (MSG_NOTE, vect_location, "Detected induction.\n");
526 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_induction_def;
530 /* Second - identify all reductions and nested cycles. */
531 while (worklist.length () > 0)
533 stmt_vec_info stmt_vinfo = worklist.pop ();
534 gphi *phi = as_a <gphi *> (stmt_vinfo->stmt);
535 tree def = PHI_RESULT (phi);
537 if (dump_enabled_p ())
538 dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: %G", phi);
540 gcc_assert (!virtual_operand_p (def)
541 && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_unknown_def_type);
543 stmt_vec_info reduc_stmt_info
544 = vect_is_simple_reduction (loop_vinfo, stmt_vinfo, &double_reduc,
545 &reduc_chain);
546 if (reduc_stmt_info)
548 STMT_VINFO_REDUC_DEF (stmt_vinfo) = reduc_stmt_info;
549 STMT_VINFO_REDUC_DEF (reduc_stmt_info) = stmt_vinfo;
550 if (double_reduc)
552 if (dump_enabled_p ())
553 dump_printf_loc (MSG_NOTE, vect_location,
554 "Detected double reduction.\n");
556 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_double_reduction_def;
557 STMT_VINFO_DEF_TYPE (reduc_stmt_info) = vect_double_reduction_def;
559 else
561 if (loop != LOOP_VINFO_LOOP (loop_vinfo))
563 if (dump_enabled_p ())
564 dump_printf_loc (MSG_NOTE, vect_location,
565 "Detected vectorizable nested cycle.\n");
567 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_nested_cycle;
569 else
571 if (dump_enabled_p ())
572 dump_printf_loc (MSG_NOTE, vect_location,
573 "Detected reduction.\n");
575 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_reduction_def;
576 STMT_VINFO_DEF_TYPE (reduc_stmt_info) = vect_reduction_def;
577 /* Store the reduction cycles for possible vectorization in
578 loop-aware SLP if it was not detected as a reduction
579 chain. */
580 if (! reduc_chain)
581 LOOP_VINFO_REDUCTIONS (loop_vinfo).safe_push
582 (reduc_stmt_info);
586 else
587 if (dump_enabled_p ())
588 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
589 "Unknown def-use cycle pattern.\n");
594 /* Function vect_analyze_scalar_cycles.
596 Examine the cross iteration def-use cycles of scalar variables, by
597 analyzing the loop-header PHIs of scalar variables. Classify each
598 cycle as one of the following: invariant, induction, reduction, unknown.
599 We do that for the loop represented by LOOP_VINFO, and also for its
600 inner loop, if it exists.
601 Examples for scalar cycles:
603 Example1: reduction:
605 loop1:
606 for (i=0; i<N; i++)
607 sum += a[i];
609 Example2: induction:
611 loop2:
612 for (i=0; i<N; i++)
613 a[i] = i; */
615 static void
616 vect_analyze_scalar_cycles (loop_vec_info loop_vinfo)
618 class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
620 vect_analyze_scalar_cycles_1 (loop_vinfo, loop);
622 /* When vectorizing an outer-loop, the inner-loop is executed sequentially.
623 Reductions in such inner-loop therefore have different properties than
624 the reductions in the nest that gets vectorized:
625 1. When vectorized, they are executed in the same order as in the original
626 scalar loop, so we can't change the order of computation when
627 vectorizing them.
628 2. FIXME: Inner-loop reductions can be used in the inner-loop, so the
629 current checks are too strict. */
631 if (loop->inner)
632 vect_analyze_scalar_cycles_1 (loop_vinfo, loop->inner);
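/* Source-level sketch of the classifications above (an illustration only):

     for (i = 0; i < N; i++)
       sum += a[i];                 sum's header phi: reduction

     for (i = 0; i < N; i++)
       a[i] = i;                    i's header phi: induction

     for (i = 0; i < N; i++)        (outer loop being analyzed)
       for (j = 0; j < M; j++)
         sum += a[i][j];            sum's outer-loop phi: double reduction  */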
635 /* Transfer group and reduction information from STMT_INFO to its
636 pattern stmt. */
638 static void
639 vect_fixup_reduc_chain (stmt_vec_info stmt_info)
641 stmt_vec_info firstp = STMT_VINFO_RELATED_STMT (stmt_info);
642 stmt_vec_info stmtp;
643 gcc_assert (!REDUC_GROUP_FIRST_ELEMENT (firstp)
644 && REDUC_GROUP_FIRST_ELEMENT (stmt_info));
645 REDUC_GROUP_SIZE (firstp) = REDUC_GROUP_SIZE (stmt_info);
648 stmtp = STMT_VINFO_RELATED_STMT (stmt_info);
649 gcc_checking_assert (STMT_VINFO_DEF_TYPE (stmtp)
650 == STMT_VINFO_DEF_TYPE (stmt_info));
651 REDUC_GROUP_FIRST_ELEMENT (stmtp) = firstp;
652 stmt_info = REDUC_GROUP_NEXT_ELEMENT (stmt_info);
653 if (stmt_info)
654 REDUC_GROUP_NEXT_ELEMENT (stmtp)
655 = STMT_VINFO_RELATED_STMT (stmt_info);
657 while (stmt_info);
660 /* Fixup scalar cycles that now have their stmts detected as patterns. */
662 static void
663 vect_fixup_scalar_cycles_with_patterns (loop_vec_info loop_vinfo)
665 stmt_vec_info first;
666 unsigned i;
668 FOR_EACH_VEC_ELT (LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo), i, first)
670 stmt_vec_info next = REDUC_GROUP_NEXT_ELEMENT (first);
671 while (next)
673 if ((STMT_VINFO_IN_PATTERN_P (next)
674 != STMT_VINFO_IN_PATTERN_P (first))
675 || STMT_VINFO_REDUC_IDX (vect_stmt_to_vectorize (next)) == -1)
676 break;
677 next = REDUC_GROUP_NEXT_ELEMENT (next);
679 /* If all reduction chain members are well-formed patterns, adjust
680 the group to group the pattern stmts instead. */
681 if (! next
682 && STMT_VINFO_REDUC_IDX (vect_stmt_to_vectorize (first)) != -1)
684 if (STMT_VINFO_IN_PATTERN_P (first))
686 vect_fixup_reduc_chain (first);
687 LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo)[i]
688 = STMT_VINFO_RELATED_STMT (first);
691 /* If not all stmts in the chain are patterns, or if we failed
692 to update STMT_VINFO_REDUC_IDX, dissolve the chain and handle
693 it as a regular reduction instead. */
694 else
696 stmt_vec_info vinfo = first;
697 stmt_vec_info last = NULL;
698 while (vinfo)
700 next = REDUC_GROUP_NEXT_ELEMENT (vinfo);
701 REDUC_GROUP_FIRST_ELEMENT (vinfo) = NULL;
702 REDUC_GROUP_NEXT_ELEMENT (vinfo) = NULL;
703 last = vinfo;
704 vinfo = next;
706 STMT_VINFO_DEF_TYPE (vect_stmt_to_vectorize (first))
707 = vect_internal_def;
708 loop_vinfo->reductions.safe_push (vect_stmt_to_vectorize (last));
709 LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo).unordered_remove (i);
710 --i;
715 /* Function vect_get_loop_niters.
717 Determine how many iterations the loop executes and place it
718 in NUMBER_OF_ITERATIONS. Place the number of latch iterations
719 in NUMBER_OF_ITERATIONSM1. Place the condition under which the
720 niter information holds in ASSUMPTIONS.
722 Return the loop exit condition. */
725 static gcond *
726 vect_get_loop_niters (class loop *loop, tree *assumptions,
727 tree *number_of_iterations, tree *number_of_iterationsm1)
729 edge exit = single_exit (loop);
730 class tree_niter_desc niter_desc;
731 tree niter_assumptions, niter, may_be_zero;
732 gcond *cond = get_loop_exit_condition (loop);
734 *assumptions = boolean_true_node;
735 *number_of_iterationsm1 = chrec_dont_know;
736 *number_of_iterations = chrec_dont_know;
737 DUMP_VECT_SCOPE ("get_loop_niters");
739 if (!exit)
740 return cond;
742 may_be_zero = NULL_TREE;
743 if (!number_of_iterations_exit_assumptions (loop, exit, &niter_desc, NULL)
744 || chrec_contains_undetermined (niter_desc.niter))
745 return cond;
747 niter_assumptions = niter_desc.assumptions;
748 may_be_zero = niter_desc.may_be_zero;
749 niter = niter_desc.niter;
751 if (may_be_zero && integer_zerop (may_be_zero))
752 may_be_zero = NULL_TREE;
754 if (may_be_zero)
756 if (COMPARISON_CLASS_P (may_be_zero))
758 /* Try to combine may_be_zero with assumptions; this can simplify
759 the computation of the niter expression. */
760 if (niter_assumptions && !integer_nonzerop (niter_assumptions))
761 niter_assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
762 niter_assumptions,
763 fold_build1 (TRUTH_NOT_EXPR,
764 boolean_type_node,
765 may_be_zero));
766 else
767 niter = fold_build3 (COND_EXPR, TREE_TYPE (niter), may_be_zero,
768 build_int_cst (TREE_TYPE (niter), 0),
769 rewrite_to_non_trapping_overflow (niter));
771 may_be_zero = NULL_TREE;
773 else if (integer_nonzerop (may_be_zero))
775 *number_of_iterationsm1 = build_int_cst (TREE_TYPE (niter), 0);
776 *number_of_iterations = build_int_cst (TREE_TYPE (niter), 1);
777 return cond;
779 else
780 return cond;
783 *assumptions = niter_assumptions;
784 *number_of_iterationsm1 = niter;
786 /* We want the number of loop header executions which is the number
787 of latch executions plus one.
788 ??? For UINT_MAX latch executions this number overflows to zero
789 for loops like do { n++; } while (n != 0); */
790 if (niter && !chrec_contains_undetermined (niter))
791 niter = fold_build2 (PLUS_EXPR, TREE_TYPE (niter), unshare_expr (niter),
792 build_int_cst (TREE_TYPE (niter), 1));
793 *number_of_iterations = niter;
795 return cond;
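/* Worked example for the ??? note above (assuming N is an unsigned 32-bit
   counter that starts at 0): in

     do { n++; } while (n != 0);

   the latch executes UINT_MAX times, so *NUMBER_OF_ITERATIONSM1 is UINT_MAX,
   while the header executes UINT_MAX + 1 times and the computed
   *NUMBER_OF_ITERATIONS wraps to zero in the 32-bit niter type.  */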
798 /* Function bb_in_loop_p
800 Used as predicate for dfs order traversal of the loop bbs. */
802 static bool
803 bb_in_loop_p (const_basic_block bb, const void *data)
805 const class loop *const loop = (const class loop *)data;
806 if (flow_bb_inside_loop_p (loop, bb))
807 return true;
808 return false;
812 /* Create and initialize a new loop_vec_info struct for LOOP_IN, as well as
813 stmt_vec_info structs for all the stmts in LOOP_IN. */
815 _loop_vec_info::_loop_vec_info (class loop *loop_in, vec_info_shared *shared)
816 : vec_info (vec_info::loop, init_cost (loop_in, false), shared),
817 loop (loop_in),
818 bbs (XCNEWVEC (basic_block, loop->num_nodes)),
819 num_itersm1 (NULL_TREE),
820 num_iters (NULL_TREE),
821 num_iters_unchanged (NULL_TREE),
822 num_iters_assumptions (NULL_TREE),
823 th (0),
824 versioning_threshold (0),
825 vectorization_factor (0),
826 max_vectorization_factor (0),
827 mask_skip_niters (NULL_TREE),
828 rgroup_compare_type (NULL_TREE),
829 simd_if_cond (NULL_TREE),
830 unaligned_dr (NULL),
831 peeling_for_alignment (0),
832 ptr_mask (0),
833 ivexpr_map (NULL),
834 scan_map (NULL),
835 slp_unrolling_factor (1),
836 single_scalar_iteration_cost (0),
837 vec_outside_cost (0),
838 vec_inside_cost (0),
839 inner_loop_cost_factor (param_vect_inner_loop_cost_factor),
840 vectorizable (false),
841 can_use_partial_vectors_p (param_vect_partial_vector_usage != 0),
842 using_partial_vectors_p (false),
843 epil_using_partial_vectors_p (false),
844 peeling_for_gaps (false),
845 peeling_for_niter (false),
846 no_data_dependencies (false),
847 has_mask_store (false),
848 scalar_loop_scaling (profile_probability::uninitialized ()),
849 scalar_loop (NULL),
850 orig_loop_info (NULL)
852 /* CHECKME: We want to visit all BBs before their successors (except for
853 latch blocks, for which this assertion wouldn't hold). In the simple
854 case of the loop forms we allow, a dfs order of the BBs would be the same
855 as a reversed postorder traversal, so we are safe. */
857 unsigned int nbbs = dfs_enumerate_from (loop->header, 0, bb_in_loop_p,
858 bbs, loop->num_nodes, loop);
859 gcc_assert (nbbs == loop->num_nodes);
861 for (unsigned int i = 0; i < nbbs; i++)
863 basic_block bb = bbs[i];
864 gimple_stmt_iterator si;
866 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
868 gimple *phi = gsi_stmt (si);
869 gimple_set_uid (phi, 0);
870 add_stmt (phi);
873 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
875 gimple *stmt = gsi_stmt (si);
876 gimple_set_uid (stmt, 0);
877 if (is_gimple_debug (stmt))
878 continue;
879 add_stmt (stmt);
880 /* If the .GOMP_SIMD_LANE call for the current loop has 3 arguments, the
881 third argument is the #pragma omp simd if (x) condition: when it is 0,
882 the loop shouldn't be vectorized; when it is a non-zero constant, it
883 should be vectorized normally; otherwise the loop is versioned, with the
884 vectorized copy executed if the condition is non-zero at runtime. */
885 if (loop_in->simduid
886 && is_gimple_call (stmt)
887 && gimple_call_internal_p (stmt)
888 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE
889 && gimple_call_num_args (stmt) >= 3
890 && TREE_CODE (gimple_call_arg (stmt, 0)) == SSA_NAME
891 && (loop_in->simduid
892 == SSA_NAME_VAR (gimple_call_arg (stmt, 0))))
894 tree arg = gimple_call_arg (stmt, 2);
895 if (integer_zerop (arg) || TREE_CODE (arg) == SSA_NAME)
896 simd_if_cond = arg;
897 else
898 gcc_assert (integer_nonzerop (arg));
903 epilogue_vinfos.create (6);
906 /* Free all levels of rgroup CONTROLS. */
908 void
909 release_vec_loop_controls (vec<rgroup_controls> *controls)
911 rgroup_controls *rgc;
912 unsigned int i;
913 FOR_EACH_VEC_ELT (*controls, i, rgc)
914 rgc->controls.release ();
915 controls->release ();
918 /* Free all memory used by the _loop_vec_info, as well as all the
919 stmt_vec_info structs of all the stmts in the loop. */
921 _loop_vec_info::~_loop_vec_info ()
923 free (bbs);
925 release_vec_loop_controls (&masks);
926 release_vec_loop_controls (&lens);
927 delete ivexpr_map;
928 delete scan_map;
929 epilogue_vinfos.release ();
931 /* When we release an epilogue vinfo that we do not intend to use
932 avoid clearing AUX of the main loop which should continue to
933 point to the main loop vinfo since otherwise we'll leak that. */
934 if (loop->aux == this)
935 loop->aux = NULL;
938 /* Return an invariant or register for EXPR and emit necessary
939 computations in the LOOP_VINFO loop preheader. */
941 tree
942 cse_and_gimplify_to_preheader (loop_vec_info loop_vinfo, tree expr)
944 if (is_gimple_reg (expr)
945 || is_gimple_min_invariant (expr))
946 return expr;
948 if (! loop_vinfo->ivexpr_map)
949 loop_vinfo->ivexpr_map = new hash_map<tree_operand_hash, tree>;
950 tree &cached = loop_vinfo->ivexpr_map->get_or_insert (expr);
951 if (! cached)
953 gimple_seq stmts = NULL;
954 cached = force_gimple_operand (unshare_expr (expr),
955 &stmts, true, NULL_TREE);
956 if (stmts)
958 edge e = loop_preheader_edge (LOOP_VINFO_LOOP (loop_vinfo));
959 gsi_insert_seq_on_edge_immediate (e, stmts);
962 return cached;
965 /* Return true if we can use CMP_TYPE as the comparison type to produce
966 all masks required to mask LOOP_VINFO. */
968 static bool
969 can_produce_all_loop_masks_p (loop_vec_info loop_vinfo, tree cmp_type)
971 rgroup_controls *rgm;
972 unsigned int i;
973 FOR_EACH_VEC_ELT (LOOP_VINFO_MASKS (loop_vinfo), i, rgm)
974 if (rgm->type != NULL_TREE
975 && !direct_internal_fn_supported_p (IFN_WHILE_ULT,
976 cmp_type, rgm->type,
977 OPTIMIZE_FOR_SPEED))
978 return false;
979 return true;
982 /* Calculate the maximum number of scalars per iteration for every
983 rgroup in LOOP_VINFO. */
985 static unsigned int
986 vect_get_max_nscalars_per_iter (loop_vec_info loop_vinfo)
988 unsigned int res = 1;
989 unsigned int i;
990 rgroup_controls *rgm;
991 FOR_EACH_VEC_ELT (LOOP_VINFO_MASKS (loop_vinfo), i, rgm)
992 res = MAX (res, rgm->max_nscalars_per_iter);
993 return res;
996 /* Calculate the minimum precision necessary to represent:
998 MAX_NITERS * FACTOR
1000 as an unsigned integer, where MAX_NITERS is the maximum number of
1001 loop header iterations for the original scalar form of LOOP_VINFO. */
1003 static unsigned
1004 vect_min_prec_for_max_niters (loop_vec_info loop_vinfo, unsigned int factor)
1006 class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1008 /* Get the maximum number of iterations that is representable
1009 in the counter type. */
1010 tree ni_type = TREE_TYPE (LOOP_VINFO_NITERSM1 (loop_vinfo));
1011 widest_int max_ni = wi::to_widest (TYPE_MAX_VALUE (ni_type)) + 1;
1013 /* Get a more refined estimate for the number of iterations. */
1014 widest_int max_back_edges;
1015 if (max_loop_iterations (loop, &max_back_edges))
1016 max_ni = wi::smin (max_ni, max_back_edges + 1);
1018 /* Work out how many bits we need to represent the limit. */
1019 return wi::min_precision (max_ni * factor, UNSIGNED);
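/* Worked example (an illustration only): if the scalar loop is known to run
   at most 1000 header iterations and FACTOR is 2, the function must be able
   to represent 1000 * 2 = 2000 as an unsigned value, so it returns
   wi::min_precision (2000, UNSIGNED) == 11 bits.  */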
1022 /* True if the loop needs peeling or partial vectors when vectorized. */
1024 static bool
1025 vect_need_peeling_or_partial_vectors_p (loop_vec_info loop_vinfo)
1027 unsigned HOST_WIDE_INT const_vf;
1028 HOST_WIDE_INT max_niter
1029 = likely_max_stmt_executions_int (LOOP_VINFO_LOOP (loop_vinfo));
1031 unsigned th = LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo);
1032 if (!th && LOOP_VINFO_ORIG_LOOP_INFO (loop_vinfo))
1033 th = LOOP_VINFO_COST_MODEL_THRESHOLD (LOOP_VINFO_ORIG_LOOP_INFO
1034 (loop_vinfo));
1036 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
1037 && LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) >= 0)
1039 /* Work out the (constant) number of iterations that need to be
1040 peeled for reasons other than niters. */
1041 unsigned int peel_niter = LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
1042 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo))
1043 peel_niter += 1;
1044 if (!multiple_p (LOOP_VINFO_INT_NITERS (loop_vinfo) - peel_niter,
1045 LOOP_VINFO_VECT_FACTOR (loop_vinfo)))
1046 return true;
1048 else if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo)
1049 /* ??? When peeling for gaps but not alignment, we could
1050 try to check whether the (variable) niters is known to be
1051 VF * N + 1. That's something of a niche case though. */
1052 || LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
1053 || !LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&const_vf)
1054 || ((tree_ctz (LOOP_VINFO_NITERS (loop_vinfo))
1055 < (unsigned) exact_log2 (const_vf))
1056 /* In case of versioning, check if the maximum number of
1057 iterations is greater than th. If they are identical,
1058 the epilogue is unnecessary. */
1059 && (!LOOP_REQUIRES_VERSIONING (loop_vinfo)
1060 || ((unsigned HOST_WIDE_INT) max_niter
1061 > (th / const_vf) * const_vf))))
1062 return true;
1064 return false;
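/* Worked example for the constant-niters path above (an illustration only):
   with NITERS known to be 100, VF = 8, one iteration peeled for gaps and
   none for alignment, peel_niter is 1 and 100 - 1 = 99 is not a multiple of
   8, so the loop needs peeling or partial vectors.  With NITERS = 97
   instead, 97 - 1 = 96 is a multiple of 8 and the function returns false.  */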
1067 /* Each statement in LOOP_VINFO can be masked where necessary. Check
1068 whether we can actually generate the masks required. Return true if so,
1069 storing the type of the scalar IV in LOOP_VINFO_RGROUP_COMPARE_TYPE. */
1071 static bool
1072 vect_verify_full_masking (loop_vec_info loop_vinfo)
1074 unsigned int min_ni_width;
1075 unsigned int max_nscalars_per_iter
1076 = vect_get_max_nscalars_per_iter (loop_vinfo);
1078 /* Use a normal loop if there are no statements that need masking.
1079 This only happens in rare degenerate cases: it means that the loop
1080 has no loads, no stores, and no live-out values. */
1081 if (LOOP_VINFO_MASKS (loop_vinfo).is_empty ())
1082 return false;
1084 /* Work out how many bits we need to represent the limit. */
1085 min_ni_width
1086 = vect_min_prec_for_max_niters (loop_vinfo, max_nscalars_per_iter);
1088 /* Find a scalar mode for which WHILE_ULT is supported. */
1089 opt_scalar_int_mode cmp_mode_iter;
1090 tree cmp_type = NULL_TREE;
1091 tree iv_type = NULL_TREE;
1092 widest_int iv_limit = vect_iv_limit_for_partial_vectors (loop_vinfo);
1093 unsigned int iv_precision = UINT_MAX;
1095 if (iv_limit != -1)
1096 iv_precision = wi::min_precision (iv_limit * max_nscalars_per_iter,
1097 UNSIGNED);
1099 FOR_EACH_MODE_IN_CLASS (cmp_mode_iter, MODE_INT)
1101 unsigned int cmp_bits = GET_MODE_BITSIZE (cmp_mode_iter.require ());
1102 if (cmp_bits >= min_ni_width
1103 && targetm.scalar_mode_supported_p (cmp_mode_iter.require ()))
1105 tree this_type = build_nonstandard_integer_type (cmp_bits, true);
1106 if (this_type
1107 && can_produce_all_loop_masks_p (loop_vinfo, this_type))
1109 /* Although we could stop as soon as we find a valid mode,
1110 there are at least two reasons why that's not always the
1111 best choice:
1113 - An IV that's Pmode or wider is more likely to be reusable
1114 in address calculations than an IV that's narrower than
1115 Pmode.
1117 - Doing the comparison in IV_PRECISION or wider allows
1118 a natural 0-based IV, whereas using a narrower comparison
1119 type requires mitigations against wrap-around.
1121 Conversely, if the IV limit is variable, doing the comparison
1122 in a wider type than the original type can introduce
1123 unnecessary extensions, so picking the widest valid mode
1124 is not always a good choice either.
1126 Here we prefer the first IV type that's Pmode or wider,
1127 and the first comparison type that's IV_PRECISION or wider.
1128 (The comparison type must be no wider than the IV type,
1129 to avoid extensions in the vector loop.)
1131 ??? We might want to try continuing beyond Pmode for ILP32
1132 targets if CMP_BITS < IV_PRECISION. */
1133 iv_type = this_type;
1134 if (!cmp_type || iv_precision > TYPE_PRECISION (cmp_type))
1135 cmp_type = this_type;
1136 if (cmp_bits >= GET_MODE_BITSIZE (Pmode))
1137 break;
1142 if (!cmp_type)
1143 return false;
1145 LOOP_VINFO_RGROUP_COMPARE_TYPE (loop_vinfo) = cmp_type;
1146 LOOP_VINFO_RGROUP_IV_TYPE (loop_vinfo) = iv_type;
1147 return true;
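/* Conceptual sketch of the IFN_WHILE_ULT masks checked above (an
   illustration, not the code the pass emits): for a comparison done in type
   CMP_TYPE, lane I of WHILE_ULT (BASE, LIMIT) is

     mask[I] = ((CMP_TYPE) BASE + I < (CMP_TYPE) LIMIT)

   so CMP_TYPE must be wide enough to hold the maximum iteration count
   scaled by the largest number of scalars per iteration without wrapping,
   which is the minimum width computed by vect_min_prec_for_max_niters.  */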
1150 /* Check whether we can use vector access with length based on a precision
1151 comparison. So far, to keep it simple, we only allow the case in which the
1152 precision of the target-supported length is larger than the precision
1153 required by the loop niters.
1155 static bool
1156 vect_verify_loop_lens (loop_vec_info loop_vinfo)
1158 if (LOOP_VINFO_LENS (loop_vinfo).is_empty ())
1159 return false;
1161 unsigned int max_nitems_per_iter = 1;
1162 unsigned int i;
1163 rgroup_controls *rgl;
1164 /* Find the maximum number of items per iteration for every rgroup. */
1165 FOR_EACH_VEC_ELT (LOOP_VINFO_LENS (loop_vinfo), i, rgl)
1167 unsigned nitems_per_iter = rgl->max_nscalars_per_iter * rgl->factor;
1168 max_nitems_per_iter = MAX (max_nitems_per_iter, nitems_per_iter);
1171 /* Work out how many bits we need to represent the length limit. */
1172 unsigned int min_ni_prec
1173 = vect_min_prec_for_max_niters (loop_vinfo, max_nitems_per_iter);
1175 /* Now use the maximum of the precisions below for one suitable IV type:
1176 - the IV's natural precision
1177 - the precision needed to hold: the maximum number of scalar
1178 iterations multiplied by the scale factor (min_ni_prec above)
1179 - the Pmode precision
1181 If min_ni_prec is less than the precision of the current niters,
1182 we prefer to still use the niters type. Prefer to use a Pmode or
1183 wider IV to avoid narrow conversions. */
1185 unsigned int ni_prec
1186 = TYPE_PRECISION (TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo)));
1187 min_ni_prec = MAX (min_ni_prec, ni_prec);
1188 min_ni_prec = MAX (min_ni_prec, GET_MODE_BITSIZE (Pmode));
1190 tree iv_type = NULL_TREE;
1191 opt_scalar_int_mode tmode_iter;
1192 FOR_EACH_MODE_IN_CLASS (tmode_iter, MODE_INT)
1194 scalar_mode tmode = tmode_iter.require ();
1195 unsigned int tbits = GET_MODE_BITSIZE (tmode);
1197 /* ??? Do we really want to construct one IV whose precision exceeds
1198 BITS_PER_WORD? */
1199 if (tbits > BITS_PER_WORD)
1200 break;
1202 /* Find the first available standard integral type. */
1203 if (tbits >= min_ni_prec && targetm.scalar_mode_supported_p (tmode))
1205 iv_type = build_nonstandard_integer_type (tbits, true);
1206 break;
1210 if (!iv_type)
1212 if (dump_enabled_p ())
1213 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1214 "can't vectorize with length-based partial vectors"
1215 " because there is no suitable iv type.\n");
1216 return false;
1219 LOOP_VINFO_RGROUP_COMPARE_TYPE (loop_vinfo) = iv_type;
1220 LOOP_VINFO_RGROUP_IV_TYPE (loop_vinfo) = iv_type;
1222 return true;
1225 /* Calculate the cost of one scalar iteration of the loop. */
1226 static void
1227 vect_compute_single_scalar_iteration_cost (loop_vec_info loop_vinfo)
1229 class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1230 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
1231 int nbbs = loop->num_nodes, factor;
1232 int innerloop_iters, i;
1234 DUMP_VECT_SCOPE ("vect_compute_single_scalar_iteration_cost");
1236 /* Gather costs for statements in the scalar loop. */
1238 /* FORNOW. */
1239 innerloop_iters = 1;
1240 if (loop->inner)
1241 innerloop_iters = LOOP_VINFO_INNER_LOOP_COST_FACTOR (loop_vinfo);
1243 for (i = 0; i < nbbs; i++)
1245 gimple_stmt_iterator si;
1246 basic_block bb = bbs[i];
1248 if (bb->loop_father == loop->inner)
1249 factor = innerloop_iters;
1250 else
1251 factor = 1;
1253 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
1255 gimple *stmt = gsi_stmt (si);
1256 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (stmt);
1258 if (!is_gimple_assign (stmt) && !is_gimple_call (stmt))
1259 continue;
1261 /* Skip stmts that are not vectorized inside the loop. */
1262 stmt_vec_info vstmt_info = vect_stmt_to_vectorize (stmt_info);
1263 if (!STMT_VINFO_RELEVANT_P (vstmt_info)
1264 && (!STMT_VINFO_LIVE_P (vstmt_info)
1265 || !VECTORIZABLE_CYCLE_DEF
1266 (STMT_VINFO_DEF_TYPE (vstmt_info))))
1267 continue;
1269 vect_cost_for_stmt kind;
1270 if (STMT_VINFO_DATA_REF (stmt_info))
1272 if (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)))
1273 kind = scalar_load;
1274 else
1275 kind = scalar_store;
1277 else if (vect_nop_conversion_p (stmt_info))
1278 continue;
1279 else
1280 kind = scalar_stmt;
1282 record_stmt_cost (&LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
1283 factor, kind, stmt_info, 0, vect_prologue);
1287 /* Now accumulate cost. */
1288 void *target_cost_data = init_cost (loop, true);
1289 stmt_info_for_cost *si;
1290 int j;
1291 FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
1292 j, si)
1293 (void) add_stmt_cost (loop_vinfo, target_cost_data, si->count,
1294 si->kind, si->stmt_info, si->vectype,
1295 si->misalign, vect_body);
1296 unsigned dummy, body_cost = 0;
1297 finish_cost (target_cost_data, &dummy, &body_cost, &dummy);
1298 destroy_cost_data (target_cost_data);
1299 LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST (loop_vinfo) = body_cost;
1303 /* Function vect_analyze_loop_form_1.
1305 Verify that certain CFG restrictions hold, including:
1306 - the loop has a pre-header
1307 - the loop has a single entry and exit
1308 - the loop exit condition is simple enough
1309 - the number of iterations can be analyzed, i.e., a countable loop. The
1310 niter could be analyzed under some assumptions. */
1312 opt_result
1313 vect_analyze_loop_form_1 (class loop *loop, gcond **loop_cond,
1314 tree *assumptions, tree *number_of_iterationsm1,
1315 tree *number_of_iterations, gcond **inner_loop_cond)
1317 DUMP_VECT_SCOPE ("vect_analyze_loop_form");
1319 /* Different restrictions apply when we are considering an inner-most loop,
1320 vs. an outer (nested) loop.
1321 (FORNOW. May want to relax some of these restrictions in the future). */
1323 if (!loop->inner)
1325 /* Inner-most loop. We currently require that the number of BBs is
1326 exactly 2 (the header and latch). Vectorizable inner-most loops
1327 look like this:
1329 (pre-header)
1331 header <--------+
1332 | | |
1333 | +--> latch --+
1335 (exit-bb) */
1337 if (loop->num_nodes != 2)
1338 return opt_result::failure_at (vect_location,
1339 "not vectorized:"
1340 " control flow in loop.\n");
1342 if (empty_block_p (loop->header))
1343 return opt_result::failure_at (vect_location,
1344 "not vectorized: empty loop.\n");
1346 else
1348 class loop *innerloop = loop->inner;
1349 edge entryedge;
1351 /* Nested loop. We currently require that the loop is doubly-nested,
1352 contains a single inner loop, and the number of BBs is exactly 5.
1353 Vectorizable outer-loops look like this:
1355 (pre-header)
1357 header <---+
1359 inner-loop |
1361 tail ------+
1363 (exit-bb)
1365 The inner-loop has the properties expected of inner-most loops
1366 as described above. */
1368 if ((loop->inner)->inner || (loop->inner)->next)
1369 return opt_result::failure_at (vect_location,
1370 "not vectorized:"
1371 " multiple nested loops.\n");
1373 if (loop->num_nodes != 5)
1374 return opt_result::failure_at (vect_location,
1375 "not vectorized:"
1376 " control flow in loop.\n");
1378 entryedge = loop_preheader_edge (innerloop);
1379 if (entryedge->src != loop->header
1380 || !single_exit (innerloop)
1381 || single_exit (innerloop)->dest != EDGE_PRED (loop->latch, 0)->src)
1382 return opt_result::failure_at (vect_location,
1383 "not vectorized:"
1384 " unsupported outerloop form.\n");
1386 /* Analyze the inner-loop. */
1387 tree inner_niterm1, inner_niter, inner_assumptions;
1388 opt_result res
1389 = vect_analyze_loop_form_1 (loop->inner, inner_loop_cond,
1390 &inner_assumptions, &inner_niterm1,
1391 &inner_niter, NULL);
1392 if (!res)
1394 if (dump_enabled_p ())
1395 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1396 "not vectorized: Bad inner loop.\n");
1397 return res;
1400 /* Don't support analyzing niter under assumptions for inner
1401 loop. */
1402 if (!integer_onep (inner_assumptions))
1403 return opt_result::failure_at (vect_location,
1404 "not vectorized: Bad inner loop.\n");
1406 if (!expr_invariant_in_loop_p (loop, inner_niter))
1407 return opt_result::failure_at (vect_location,
1408 "not vectorized: inner-loop count not"
1409 " invariant.\n");
1411 if (dump_enabled_p ())
1412 dump_printf_loc (MSG_NOTE, vect_location,
1413 "Considering outer-loop vectorization.\n");
1416 if (!single_exit (loop))
1417 return opt_result::failure_at (vect_location,
1418 "not vectorized: multiple exits.\n");
1419 if (EDGE_COUNT (loop->header->preds) != 2)
1420 return opt_result::failure_at (vect_location,
1421 "not vectorized:"
1422 " too many incoming edges.\n");
1424 /* We assume that the loop exit condition is at the end of the loop, i.e.,
1425 that the loop is represented as a do-while (with a proper if-guard
1426 before the loop if needed), where the loop header contains all the
1427 executable statements, and the latch is empty. */
1428 if (!empty_block_p (loop->latch)
1429 || !gimple_seq_empty_p (phi_nodes (loop->latch)))
1430 return opt_result::failure_at (vect_location,
1431 "not vectorized: latch block not empty.\n");
1433 /* Make sure the exit is not abnormal. */
1434 edge e = single_exit (loop);
1435 if (e->flags & EDGE_ABNORMAL)
1436 return opt_result::failure_at (vect_location,
1437 "not vectorized:"
1438 " abnormal loop exit edge.\n");
1440 *loop_cond = vect_get_loop_niters (loop, assumptions, number_of_iterations,
1441 number_of_iterationsm1);
1442 if (!*loop_cond)
1443 return opt_result::failure_at
1444 (vect_location,
1445 "not vectorized: complicated exit condition.\n");
1447 if (integer_zerop (*assumptions)
1448 || !*number_of_iterations
1449 || chrec_contains_undetermined (*number_of_iterations))
1450 return opt_result::failure_at
1451 (*loop_cond,
1452 "not vectorized: number of iterations cannot be computed.\n");
1454 if (integer_zerop (*number_of_iterations))
1455 return opt_result::failure_at
1456 (*loop_cond,
1457 "not vectorized: number of iterations = 0.\n");
1459 return opt_result::success ();
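/* Source-level sketch of the innermost loop form accepted above (an
   illustration only): a counted do-while, typically produced by loop header
   copying, whose header holds all the executable statements and whose latch
   is empty:

     if (n > 0)
       {
         i = 0;
         do
           {
             a[i] = b[i] + c[i];    (header: all executable stmts)
             i++;
           }
         while (i < n);             (single, non-abnormal exit; empty latch)
       }
 */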
1462 /* Analyze LOOP form and return a loop_vec_info if it is of suitable form. */
1464 opt_loop_vec_info
1465 vect_analyze_loop_form (class loop *loop, vec_info_shared *shared)
1467 tree assumptions, number_of_iterations, number_of_iterationsm1;
1468 gcond *loop_cond, *inner_loop_cond = NULL;
1470 opt_result res
1471 = vect_analyze_loop_form_1 (loop, &loop_cond,
1472 &assumptions, &number_of_iterationsm1,
1473 &number_of_iterations, &inner_loop_cond);
1474 if (!res)
1475 return opt_loop_vec_info::propagate_failure (res);
1477 loop_vec_info loop_vinfo = new _loop_vec_info (loop, shared);
1478 LOOP_VINFO_NITERSM1 (loop_vinfo) = number_of_iterationsm1;
1479 LOOP_VINFO_NITERS (loop_vinfo) = number_of_iterations;
1480 LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = number_of_iterations;
1481 if (!integer_onep (assumptions))
1483 /* We consider vectorizing this loop by versioning it under
1484 some assumptions. In order to do this, we need to clear the
1485 existing information computed by the scev and niter analyzers. */
1486 scev_reset_htab ();
1487 free_numbers_of_iterations_estimates (loop);
1488 /* Also set a flag for this loop so that the following scev and niter
1489 analyses are done under the assumptions. */
1490 loop_constraint_set (loop, LOOP_C_FINITE);
1491 /* Also record the assumptions for versioning. */
1492 LOOP_VINFO_NITERS_ASSUMPTIONS (loop_vinfo) = assumptions;
1495 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
1497 if (dump_enabled_p ())
1499 dump_printf_loc (MSG_NOTE, vect_location,
1500 "Symbolic number of iterations is ");
1501 dump_generic_expr (MSG_NOTE, TDF_DETAILS, number_of_iterations);
1502 dump_printf (MSG_NOTE, "\n");
1506 stmt_vec_info loop_cond_info = loop_vinfo->lookup_stmt (loop_cond);
1507 STMT_VINFO_TYPE (loop_cond_info) = loop_exit_ctrl_vec_info_type;
1508 if (inner_loop_cond)
1510 stmt_vec_info inner_loop_cond_info
1511 = loop_vinfo->lookup_stmt (inner_loop_cond);
1512 STMT_VINFO_TYPE (inner_loop_cond_info) = loop_exit_ctrl_vec_info_type;
1515 gcc_assert (!loop->aux);
1516 loop->aux = loop_vinfo;
1517 return opt_loop_vec_info::success (loop_vinfo);
1522 /* Scan the loop stmts and, depending on whether there are any (non-)SLP
1523 statements, update the vectorization factor. */
1525 static void
1526 vect_update_vf_for_slp (loop_vec_info loop_vinfo)
1528 class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1529 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
1530 int nbbs = loop->num_nodes;
1531 poly_uint64 vectorization_factor;
1532 int i;
1534 DUMP_VECT_SCOPE ("vect_update_vf_for_slp");
1536 vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1537 gcc_assert (known_ne (vectorization_factor, 0U));
1539 /* If all the stmts in the loop can be SLPed, we perform only SLP, and
1540 vectorization factor of the loop is the unrolling factor required by
1541 the SLP instances. If that unrolling factor is 1, we say that we
1542 perform pure SLP on the loop; cross-iteration parallelism is not
1543 exploited.
1544 bool only_slp_in_loop = true;
1545 for (i = 0; i < nbbs; i++)
1547 basic_block bb = bbs[i];
1548 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
1549 gsi_next (&si))
1551 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (si.phi ());
1552 if (!stmt_info)
1553 continue;
1554 if ((STMT_VINFO_RELEVANT_P (stmt_info)
1555 || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
1556 && !PURE_SLP_STMT (stmt_info))
1557 /* STMT needs both SLP and loop-based vectorization. */
1558 only_slp_in_loop = false;
1560 for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
1561 gsi_next (&si))
1563 if (is_gimple_debug (gsi_stmt (si)))
1564 continue;
1565 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
1566 stmt_info = vect_stmt_to_vectorize (stmt_info);
1567 if ((STMT_VINFO_RELEVANT_P (stmt_info)
1568 || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
1569 && !PURE_SLP_STMT (stmt_info))
1570 /* STMT needs both SLP and loop-based vectorization. */
1571 only_slp_in_loop = false;
1575 if (only_slp_in_loop)
1577 if (dump_enabled_p ())
1578 dump_printf_loc (MSG_NOTE, vect_location,
1579 "Loop contains only SLP stmts\n");
1580 vectorization_factor = LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo);
1582 else
1584 if (dump_enabled_p ())
1585 dump_printf_loc (MSG_NOTE, vect_location,
1586 "Loop contains SLP and non-SLP stmts\n");
1587 /* Both the vectorization factor and unroll factor have the form
1588 GET_MODE_SIZE (loop_vinfo->vector_mode) * X for some rational X,
1589 so they must have a common multiple. */
1590 vectorization_factor
1591 = force_common_multiple (vectorization_factor,
1592 LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo));
1595 LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;
1596 if (dump_enabled_p ())
1598 dump_printf_loc (MSG_NOTE, vect_location,
1599 "Updating vectorization factor to ");
1600 dump_dec (MSG_NOTE, vectorization_factor);
1601 dump_printf (MSG_NOTE, ".\n");
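/* Worked example (an illustration only): if the non-SLP statements require
   a vectorization factor of 4 while the SLP instances require an unrolling
   factor of 6, force_common_multiple yields lcm (4, 6) = 12, so the loop is
   effectively unrolled by 12 to satisfy both.  */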
1605 /* Return true if STMT_INFO describes a double reduction phi and if
1606 the other phi in the reduction is also relevant for vectorization.
1607 This rejects cases such as:
1609 outer1:
1610 x_1 = PHI <x_3(outer2), ...>;
1613 inner:
1614 x_2 = ...;
1617 outer2:
1618 x_3 = PHI <x_2(inner)>;
1620 if nothing in x_2 or elsewhere makes x_1 relevant. */
1622 static bool
1623 vect_active_double_reduction_p (stmt_vec_info stmt_info)
1625 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_double_reduction_def)
1626 return false;
1628 return STMT_VINFO_RELEVANT_P (STMT_VINFO_REDUC_DEF (stmt_info));
1631 /* Function vect_analyze_loop_operations.
1633 Scan the loop stmts and make sure they are all vectorizable. */
1635 static opt_result
1636 vect_analyze_loop_operations (loop_vec_info loop_vinfo)
1638 class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1639 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
1640 int nbbs = loop->num_nodes;
1641 int i;
1642 stmt_vec_info stmt_info;
1643 bool need_to_vectorize = false;
1644 bool ok;
1646 DUMP_VECT_SCOPE ("vect_analyze_loop_operations");
1648 auto_vec<stmt_info_for_cost> cost_vec;
1650 for (i = 0; i < nbbs; i++)
1652 basic_block bb = bbs[i];
1654 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
1655 gsi_next (&si))
1657 gphi *phi = si.phi ();
1658 ok = true;
1660 stmt_info = loop_vinfo->lookup_stmt (phi);
1661 if (dump_enabled_p ())
1662 dump_printf_loc (MSG_NOTE, vect_location, "examining phi: %G", phi);
1663 if (virtual_operand_p (gimple_phi_result (phi)))
1664 continue;
1666 /* Inner-loop loop-closed exit phi in outer-loop vectorization
1667 (i.e., a phi in the tail of the outer-loop). */
1668 if (! is_loop_header_bb_p (bb))
1670 /* FORNOW: we currently don't support the case that these phis
1671 are not used in the outer loop (unless it is a double reduction,
1672 i.e., this phi is vect_reduction_def), because this case
1673 requires us to actually do something here. */
1674 if (STMT_VINFO_LIVE_P (stmt_info)
1675 && !vect_active_double_reduction_p (stmt_info))
1676 return opt_result::failure_at (phi,
1677 "Unsupported loop-closed phi"
1678 " in outer-loop.\n");
1680 /* If PHI is used in the outer loop, we check that its operand
1681 is defined in the inner loop. */
1682 if (STMT_VINFO_RELEVANT_P (stmt_info))
1684 tree phi_op;
1686 if (gimple_phi_num_args (phi) != 1)
1687 return opt_result::failure_at (phi, "unsupported phi");
1689 phi_op = PHI_ARG_DEF (phi, 0);
1690 stmt_vec_info op_def_info = loop_vinfo->lookup_def (phi_op);
1691 if (!op_def_info)
1692 return opt_result::failure_at (phi, "unsupported phi\n");
1694 if (STMT_VINFO_RELEVANT (op_def_info) != vect_used_in_outer
1695 && (STMT_VINFO_RELEVANT (op_def_info)
1696 != vect_used_in_outer_by_reduction))
1697 return opt_result::failure_at (phi, "unsupported phi\n");
1699 if ((STMT_VINFO_DEF_TYPE (stmt_info) == vect_internal_def
1700 || (STMT_VINFO_DEF_TYPE (stmt_info)
1701 == vect_double_reduction_def))
1702 && !vectorizable_lc_phi (loop_vinfo,
1703 stmt_info, NULL, NULL))
1704 return opt_result::failure_at (phi, "unsupported phi\n");
1707 continue;
1710 gcc_assert (stmt_info);
1712 if ((STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope
1713 || STMT_VINFO_LIVE_P (stmt_info))
1714 && STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def)
1715 /* A scalar-dependence cycle that we don't support. */
1716 return opt_result::failure_at (phi,
1717 "not vectorized:"
1718 " scalar dependence cycle.\n");
1720 if (STMT_VINFO_RELEVANT_P (stmt_info))
1722 need_to_vectorize = true;
1723 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def
1724 && ! PURE_SLP_STMT (stmt_info))
1725 ok = vectorizable_induction (loop_vinfo,
1726 stmt_info, NULL, NULL,
1727 &cost_vec);
1728 else if ((STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def
1729 || (STMT_VINFO_DEF_TYPE (stmt_info)
1730 == vect_double_reduction_def)
1731 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle)
1732 && ! PURE_SLP_STMT (stmt_info))
1733 ok = vectorizable_reduction (loop_vinfo,
1734 stmt_info, NULL, NULL, &cost_vec);
1737 /* SLP PHIs are tested by vect_slp_analyze_node_operations. */
1738 if (ok
1739 && STMT_VINFO_LIVE_P (stmt_info)
1740 && !PURE_SLP_STMT (stmt_info))
1741 ok = vectorizable_live_operation (loop_vinfo,
1742 stmt_info, NULL, NULL, NULL,
1743 -1, false, &cost_vec);
1745 if (!ok)
1746 return opt_result::failure_at (phi,
1747 "not vectorized: relevant phi not "
1748 "supported: %G",
1749 static_cast <gimple *> (phi));
1752 for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
1753 gsi_next (&si))
1755 gimple *stmt = gsi_stmt (si);
1756 if (!gimple_clobber_p (stmt)
1757 && !is_gimple_debug (stmt))
1759 opt_result res
1760 = vect_analyze_stmt (loop_vinfo,
1761 loop_vinfo->lookup_stmt (stmt),
1762 &need_to_vectorize,
1763 NULL, NULL, &cost_vec);
1764 if (!res)
1765 return res;
1768 } /* bbs */
1770 add_stmt_costs (loop_vinfo, loop_vinfo->target_cost_data, &cost_vec);
1772 /* All operations in the loop are either irrelevant (they deal with loop
1773 control, or are dead), or only used outside the loop and can be moved
1774 out of the loop (e.g. invariants, inductions). The loop can be
1775 optimized away by scalar optimizations. We're better off not
1776 touching this loop. */
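/* As a purely illustrative sketch (not taken from the sources): in

     for (i = 0; i < n; i++)
       x = a + b;

   the only non-control statement is loop-invariant and only its final
   value is used after the loop, so nothing in the loop body is relevant
   and need_to_vectorize stays false.  */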
1777 if (!need_to_vectorize)
1779 if (dump_enabled_p ())
1780 dump_printf_loc (MSG_NOTE, vect_location,
1781 "All the computation can be taken out of the loop.\n");
1782 return opt_result::failure_at
1783 (vect_location,
1784 "not vectorized: redundant loop. no profit to vectorize.\n");
1787 return opt_result::success ();
1790 /* Return true if we know that the iteration count is smaller than the
1791 vectorization factor. Return false if it isn't, or if we can't be sure
1792 either way. */
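/* A minimal illustration with assumed numbers: for an assumed VF of 8,
   a loop whose iteration count is known to be 5 makes this return true,
   whereas a loop whose count is unknown but bounded above by 1000 (or
   has no usable bound at all, i.e. max_niter == -1) makes it return
   false.  */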
1794 static bool
1795 vect_known_niters_smaller_than_vf (loop_vec_info loop_vinfo)
1797 unsigned int assumed_vf = vect_vf_for_cost (loop_vinfo);
1799 HOST_WIDE_INT max_niter;
1800 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
1801 max_niter = LOOP_VINFO_INT_NITERS (loop_vinfo);
1802 else
1803 max_niter = max_stmt_executions_int (LOOP_VINFO_LOOP (loop_vinfo));
1805 if (max_niter != -1 && (unsigned HOST_WIDE_INT) max_niter < assumed_vf)
1806 return true;
1808 return false;
1811 /* Analyze the cost of the loop described by LOOP_VINFO. Decide if it
1812 is worthwhile to vectorize. Return 1 if definitely yes, 0 if
1813 definitely no, or -1 if it's worth retrying. */
1815 static int
1816 vect_analyze_loop_costing (loop_vec_info loop_vinfo)
1818 class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1819 unsigned int assumed_vf = vect_vf_for_cost (loop_vinfo);
1821 /* Only loops that can handle partially-populated vectors can have iteration
1822 counts less than the vectorization factor. */
1823 if (!LOOP_VINFO_USING_PARTIAL_VECTORS_P (loop_vinfo))
1825 if (vect_known_niters_smaller_than_vf (loop_vinfo))
1827 if (dump_enabled_p ())
1828 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1829 "not vectorized: iteration count smaller than "
1830 "vectorization factor.\n");
1831 return 0;
1835 /* If using the "very cheap" model, reject cases in which we'd keep
1836 a copy of the scalar code (even if we might be able to vectorize it). */
1837 if (flag_vect_cost_model == VECT_COST_MODEL_VERY_CHEAP
1838 && (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo)
1839 || LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
1840 || LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo)))
1842 if (dump_enabled_p ())
1843 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1844 "some scalar iterations would need to be peeled\n");
1845 return 0;
1848 int min_profitable_iters, min_profitable_estimate;
1849 vect_estimate_min_profitable_iters (loop_vinfo, &min_profitable_iters,
1850 &min_profitable_estimate);
1852 if (min_profitable_iters < 0)
1854 if (dump_enabled_p ())
1855 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1856 "not vectorized: vectorization not profitable.\n");
1857 if (dump_enabled_p ())
1858 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1859 "not vectorized: vector version will never be "
1860 "profitable.\n");
1861 return -1;
1864 int min_scalar_loop_bound = (param_min_vect_loop_bound
1865 * assumed_vf);
1867 /* Use the cost model only if it is more conservative than user specified
1868 threshold. */
1869 unsigned int th = (unsigned) MAX (min_scalar_loop_bound,
1870 min_profitable_iters);
1872 LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo) = th;
1874 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
1875 && LOOP_VINFO_INT_NITERS (loop_vinfo) < th)
1877 if (dump_enabled_p ())
1878 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1879 "not vectorized: vectorization not profitable.\n");
1880 if (dump_enabled_p ())
1881 dump_printf_loc (MSG_NOTE, vect_location,
1882 "not vectorized: iteration count smaller than user "
1883 "specified loop bound parameter or minimum profitable "
1884 "iterations (whichever is more conservative).\n");
1885 return 0;
1888 /* The static profitability threshold min_profitable_estimate includes
1889 the cost of having to check at runtime whether the scalar loop
1890 should be used instead. If it turns out that we don't need or want
1891 such a check, the threshold we should use for the static estimate
1892 is simply the point at which the vector loop becomes more profitable
1893 than the scalar loop. */
1894 if (min_profitable_estimate > min_profitable_iters
1895 && !LOOP_REQUIRES_VERSIONING (loop_vinfo)
1896 && !LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo)
1897 && !LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo)
1898 && !vect_apply_runtime_profitability_check_p (loop_vinfo))
1900 if (dump_enabled_p ())
1901 dump_printf_loc (MSG_NOTE, vect_location, "no need for a runtime"
1902 " choice between the scalar and vector loops\n");
1903 min_profitable_estimate = min_profitable_iters;
1906 /* If the vector loop needs multiple iterations to be beneficial then
1907 things are probably too close to call, and the conservative thing
1908 would be to stick with the scalar code. */
1909 if (flag_vect_cost_model == VECT_COST_MODEL_VERY_CHEAP
1910 && min_profitable_estimate > (int) vect_vf_for_cost (loop_vinfo))
1912 if (dump_enabled_p ())
1913 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1914 "one iteration of the vector loop would be"
1915 " more expensive than the equivalent number of"
1916 " iterations of the scalar loop\n");
1917 return 0;
1920 HOST_WIDE_INT estimated_niter;
1922 /* If we are vectorizing an epilogue then we know the maximum number of
1923 scalar iterations it will cover is at least one lower than the
1924 vectorization factor of the main loop. */
1925 if (LOOP_VINFO_EPILOGUE_P (loop_vinfo))
1926 estimated_niter
1927 = vect_vf_for_cost (LOOP_VINFO_ORIG_LOOP_INFO (loop_vinfo)) - 1;
1928 else
1930 estimated_niter = estimated_stmt_executions_int (loop);
1931 if (estimated_niter == -1)
1932 estimated_niter = likely_max_stmt_executions_int (loop);
1934 if (estimated_niter != -1
1935 && ((unsigned HOST_WIDE_INT) estimated_niter
1936 < MAX (th, (unsigned) min_profitable_estimate)))
1938 if (dump_enabled_p ())
1939 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1940 "not vectorized: estimated iteration count too "
1941 "small.\n");
1942 if (dump_enabled_p ())
1943 dump_printf_loc (MSG_NOTE, vect_location,
1944 "not vectorized: estimated iteration count smaller "
1945 "than specified loop bound parameter or minimum "
1946 "profitable iterations (whichever is more "
1947 "conservative).\n");
1948 return -1;
1951 return 1;
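/* Worked example with assumed numbers: with assumed_vf = 4,
   param_min_vect_loop_bound = 0 and min_profitable_iters = 10 the
   threshold becomes th = MAX (0 * 4, 10) = 10.  A loop with a known
   iteration count of 8 is then rejected outright (0), while a loop
   whose count is merely estimated at 8 gets the softer -1
   (worth retrying) result.  */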
1954 static opt_result
1955 vect_get_datarefs_in_loop (loop_p loop, basic_block *bbs,
1956 vec<data_reference_p> *datarefs,
1957 unsigned int *n_stmts)
1959 *n_stmts = 0;
1960 for (unsigned i = 0; i < loop->num_nodes; i++)
1961 for (gimple_stmt_iterator gsi = gsi_start_bb (bbs[i]);
1962 !gsi_end_p (gsi); gsi_next (&gsi))
1964 gimple *stmt = gsi_stmt (gsi);
1965 if (is_gimple_debug (stmt))
1966 continue;
1967 ++(*n_stmts);
1968 opt_result res = vect_find_stmt_data_reference (loop, stmt, datarefs,
1969 NULL, 0);
1970 if (!res)
1972 if (is_gimple_call (stmt) && loop->safelen)
1974 tree fndecl = gimple_call_fndecl (stmt), op;
1975 if (fndecl != NULL_TREE)
1977 cgraph_node *node = cgraph_node::get (fndecl);
1978 if (node != NULL && node->simd_clones != NULL)
1980 unsigned int j, n = gimple_call_num_args (stmt);
1981 for (j = 0; j < n; j++)
1983 op = gimple_call_arg (stmt, j);
1984 if (DECL_P (op)
1985 || (REFERENCE_CLASS_P (op)
1986 && get_base_address (op)))
1987 break;
1989 op = gimple_call_lhs (stmt);
1990 /* Ignore #pragma omp declare simd functions
1991 if they don't have data references in the
1992 call stmt itself. */
1993 if (j == n
1994 && !(op
1995 && (DECL_P (op)
1996 || (REFERENCE_CLASS_P (op)
1997 && get_base_address (op)))))
1998 continue;
2002 return res;
2004 /* If dependence analysis will give up due to the limit on the
2005 number of datarefs stop here and fail fatally. */
2006 if (datarefs->length ()
2007 > (unsigned)param_loop_max_datarefs_for_datadeps)
2008 return opt_result::failure_at (stmt, "exceeded param "
2009 "loop-max-datarefs-for-datadeps\n");
2011 return opt_result::success ();
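/* Sketch of the exception above, with an assumed example: in a loop
   whose safelen is nonzero, a call such as

     _2 = f (_1);   where f has "#pragma omp declare simd" clones

   has no DECL or memory-reference operands in the call stmt itself, so
   a failure of vect_find_stmt_data_reference on it is ignored and the
   scan continues; any loads and stores feeding the call are separate
   stmts and are recorded as their own data references.  */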
2014 /* Look for SLP-only access groups and turn each individual access into its own
2015 group. */
2016 static void
2017 vect_dissolve_slp_only_groups (loop_vec_info loop_vinfo)
2019 unsigned int i;
2020 struct data_reference *dr;
2022 DUMP_VECT_SCOPE ("vect_dissolve_slp_only_groups");
2024 vec<data_reference_p> datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
2025 FOR_EACH_VEC_ELT (datarefs, i, dr)
2027 gcc_assert (DR_REF (dr));
2028 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (DR_STMT (dr));
2030 /* Check if the load is a part of an interleaving chain. */
2031 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
2033 stmt_vec_info first_element = DR_GROUP_FIRST_ELEMENT (stmt_info);
2034 unsigned int group_size = DR_GROUP_SIZE (first_element);
2036 /* Check whether this is an SLP-only group. */
2037 if (!STMT_SLP_TYPE (stmt_info)
2038 && STMT_VINFO_SLP_VECT_ONLY (first_element))
2040 /* Dissolve the group. */
2041 STMT_VINFO_SLP_VECT_ONLY (first_element) = false;
2043 stmt_vec_info vinfo = first_element;
2044 while (vinfo)
2046 stmt_vec_info next = DR_GROUP_NEXT_ELEMENT (vinfo);
2047 DR_GROUP_FIRST_ELEMENT (vinfo) = vinfo;
2048 DR_GROUP_NEXT_ELEMENT (vinfo) = NULL;
2049 DR_GROUP_SIZE (vinfo) = 1;
2050 if (STMT_VINFO_STRIDED_P (first_element))
2051 DR_GROUP_GAP (vinfo) = 0;
2052 else
2053 DR_GROUP_GAP (vinfo) = group_size - 1;
2054 vinfo = next;
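/* For example (illustrative only): dissolving a group of four
   consecutive loads yields four single-element groups, each its own
   DR_GROUP_FIRST_ELEMENT with DR_GROUP_SIZE 1 and, for non-strided
   accesses, DR_GROUP_GAP 3, i.e. group_size - 1 elements are skipped
   between successive accesses of each new group.  */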
2061 /* Determine if operating on full vectors for LOOP_VINFO might leave
2062 some scalar iterations still to do. If so, decide how we should
2063 handle those scalar iterations. The possibilities are:
2065 (1) Make LOOP_VINFO operate on partial vectors instead of full vectors.
2066 In this case:
2068 LOOP_VINFO_USING_PARTIAL_VECTORS_P == true
2069 LOOP_VINFO_EPIL_USING_PARTIAL_VECTORS_P == false
2070 LOOP_VINFO_PEELING_FOR_NITER == false
2072 (2) Make LOOP_VINFO operate on full vectors and use an epilogue loop
2073 to handle the remaining scalar iterations. In this case:
2075 LOOP_VINFO_USING_PARTIAL_VECTORS_P == false
2076 LOOP_VINFO_PEELING_FOR_NITER == true
2078 There are two choices:
2080 (2a) Consider vectorizing the epilogue loop at the same VF as the
2081 main loop, but using partial vectors instead of full vectors.
2082 In this case:
2084 LOOP_VINFO_EPIL_USING_PARTIAL_VECTORS_P == true
2086 (2b) Consider vectorizing the epilogue loop at lower VFs only.
2087 In this case:
2089 LOOP_VINFO_EPIL_USING_PARTIAL_VECTORS_P == false
2091 When FOR_EPILOGUE_P is true, make this determination based on the
2092 assumption that LOOP_VINFO is an epilogue loop, otherwise make it
2093 based on the assumption that LOOP_VINFO is the main loop. The caller
2094 has made sure that the number of iterations is set appropriately for
2095 this value of FOR_EPILOGUE_P. */
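/* Concrete illustration with assumed numbers: for 1003 scalar
   iterations and a VF of 8, choice (1) runs 126 partial-vector
   (masked or length-controlled) iterations and needs no epilogue,
   while choice (2) runs 125 full-vector iterations and leaves 3
   scalar iterations to the epilogue loop, which may then itself be
   vectorized as in (2a) or (2b).  */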
2097 opt_result
2098 vect_determine_partial_vectors_and_peeling (loop_vec_info loop_vinfo,
2099 bool for_epilogue_p)
2101 /* Determine whether there would be any scalar iterations left over. */
2102 bool need_peeling_or_partial_vectors_p
2103 = vect_need_peeling_or_partial_vectors_p (loop_vinfo);
2105 /* Decide whether to vectorize the loop with partial vectors. */
2106 LOOP_VINFO_USING_PARTIAL_VECTORS_P (loop_vinfo) = false;
2107 LOOP_VINFO_EPIL_USING_PARTIAL_VECTORS_P (loop_vinfo) = false;
2108 if (LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo)
2109 && need_peeling_or_partial_vectors_p)
2111 /* For partial-vector-usage=1, try to push the handling of partial
2112 vectors to the epilogue, with the main loop continuing to operate
2113 on full vectors.
2115 ??? We could then end up failing to use partial vectors if we
2116 decide to peel iterations into a prologue, and if the main loop
2117 then ends up processing fewer than VF iterations. */
2118 if (param_vect_partial_vector_usage == 1
2119 && !LOOP_VINFO_EPILOGUE_P (loop_vinfo)
2120 && !vect_known_niters_smaller_than_vf (loop_vinfo))
2121 LOOP_VINFO_EPIL_USING_PARTIAL_VECTORS_P (loop_vinfo) = true;
2122 else
2123 LOOP_VINFO_USING_PARTIAL_VECTORS_P (loop_vinfo) = true;
2126 if (dump_enabled_p ())
2128 if (LOOP_VINFO_USING_PARTIAL_VECTORS_P (loop_vinfo))
2129 dump_printf_loc (MSG_NOTE, vect_location,
2130 "operating on partial vectors%s.\n",
2131 for_epilogue_p ? " for epilogue loop" : "");
2132 else
2133 dump_printf_loc (MSG_NOTE, vect_location,
2134 "operating only on full vectors%s.\n",
2135 for_epilogue_p ? " for epilogue loop" : "");
2138 if (for_epilogue_p)
2140 loop_vec_info orig_loop_vinfo = LOOP_VINFO_ORIG_LOOP_INFO (loop_vinfo);
2141 gcc_assert (orig_loop_vinfo);
2142 if (!LOOP_VINFO_USING_PARTIAL_VECTORS_P (loop_vinfo))
2143 gcc_assert (known_lt (LOOP_VINFO_VECT_FACTOR (loop_vinfo),
2144 LOOP_VINFO_VECT_FACTOR (orig_loop_vinfo)));
2147 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
2148 && !LOOP_VINFO_USING_PARTIAL_VECTORS_P (loop_vinfo))
2150 /* Check that the loop processes at least one full vector. */
2151 poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2152 tree scalar_niters = LOOP_VINFO_NITERS (loop_vinfo);
2153 if (known_lt (wi::to_widest (scalar_niters), vf))
2154 return opt_result::failure_at (vect_location,
2155 "loop does not have enough iterations"
2156 " to support vectorization.\n");
2158 /* If we need to peel an extra epilogue iteration to handle data
2159 accesses with gaps, check that there are enough scalar iterations
2160 available.
2162 The check above is redundant with this one when peeling for gaps,
2163 but the distinction is useful for diagnostics. */
2164 tree scalar_nitersm1 = LOOP_VINFO_NITERSM1 (loop_vinfo);
2165 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
2166 && known_lt (wi::to_widest (scalar_nitersm1), vf))
2167 return opt_result::failure_at (vect_location,
2168 "loop does not have enough iterations"
2169 " to support peeling for gaps.\n");
2172 LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo)
2173 = (!LOOP_VINFO_USING_PARTIAL_VECTORS_P (loop_vinfo)
2174 && need_peeling_or_partial_vectors_p);
2176 return opt_result::success ();
2179 /* Function vect_analyze_loop_2.
2181 Apply a set of analyses on LOOP, and create a loop_vec_info struct
2182 for it. The different analyses will record information in the
2183 loop_vec_info struct. */
2184 static opt_result
2185 vect_analyze_loop_2 (loop_vec_info loop_vinfo, bool &fatal, unsigned *n_stmts)
2187 opt_result ok = opt_result::success ();
2188 int res;
2189 unsigned int max_vf = MAX_VECTORIZATION_FACTOR;
2190 poly_uint64 min_vf = 2;
2191 loop_vec_info orig_loop_vinfo = NULL;
2193 /* If we are dealing with an epilogue then orig_loop_vinfo points to the
2194 loop_vec_info of the first vectorized loop. */
2195 if (LOOP_VINFO_EPILOGUE_P (loop_vinfo))
2196 orig_loop_vinfo = LOOP_VINFO_ORIG_LOOP_INFO (loop_vinfo);
2197 else
2198 orig_loop_vinfo = loop_vinfo;
2199 gcc_assert (orig_loop_vinfo);
2201 /* The first group of checks is independent of the vector size. */
2202 fatal = true;
2204 if (LOOP_VINFO_SIMD_IF_COND (loop_vinfo)
2205 && integer_zerop (LOOP_VINFO_SIMD_IF_COND (loop_vinfo)))
2206 return opt_result::failure_at (vect_location,
2207 "not vectorized: simd if(0)\n");
2209 /* Find all data references in the loop (which correspond to vdefs/vuses)
2210 and analyze their evolution in the loop. */
2212 loop_p loop = LOOP_VINFO_LOOP (loop_vinfo);
2214 /* Gather the data references and count stmts in the loop. */
2215 if (!LOOP_VINFO_DATAREFS (loop_vinfo).exists ())
2217 opt_result res
2218 = vect_get_datarefs_in_loop (loop, LOOP_VINFO_BBS (loop_vinfo),
2219 &LOOP_VINFO_DATAREFS (loop_vinfo),
2220 n_stmts);
2221 if (!res)
2223 if (dump_enabled_p ())
2224 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2225 "not vectorized: loop contains function "
2226 "calls or data references that cannot "
2227 "be analyzed\n");
2228 return res;
2230 loop_vinfo->shared->save_datarefs ();
2232 else
2233 loop_vinfo->shared->check_datarefs ();
2235 /* Analyze the data references and also adjust the minimal
2236 vectorization factor according to the loads and stores. */
2238 ok = vect_analyze_data_refs (loop_vinfo, &min_vf, &fatal);
2239 if (!ok)
2241 if (dump_enabled_p ())
2242 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2243 "bad data references.\n");
2244 return ok;
2247 /* Classify all cross-iteration scalar data-flow cycles.
2248 Cross-iteration cycles caused by virtual phis are analyzed separately. */
2249 vect_analyze_scalar_cycles (loop_vinfo);
2251 vect_pattern_recog (loop_vinfo);
2253 vect_fixup_scalar_cycles_with_patterns (loop_vinfo);
2255 /* Analyze the access patterns of the data-refs in the loop (consecutive,
2256 complex, etc.). FORNOW: Only handle consecutive access pattern. */
2258 ok = vect_analyze_data_ref_accesses (loop_vinfo, NULL);
2259 if (!ok)
2261 if (dump_enabled_p ())
2262 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2263 "bad data access.\n");
2264 return ok;
2267 /* Data-flow analysis to detect stmts that do not need to be vectorized. */
2269 ok = vect_mark_stmts_to_be_vectorized (loop_vinfo, &fatal);
2270 if (!ok)
2272 if (dump_enabled_p ())
2273 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2274 "unexpected pattern.\n");
2275 return ok;
2278 /* The rest of the analysis below depends on the chosen vector size
2278 in some way, so failures from here on are not fatal. */
2279 fatal = false;
2281 /* Analyze data dependences between the data-refs in the loop
2282 and adjust the maximum vectorization factor according to
2283 the dependences.
2284 FORNOW: fail at the first data dependence that we encounter. */
2286 ok = vect_analyze_data_ref_dependences (loop_vinfo, &max_vf);
2287 if (!ok)
2289 if (dump_enabled_p ())
2290 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2291 "bad data dependence.\n");
2292 return ok;
2294 if (max_vf != MAX_VECTORIZATION_FACTOR
2295 && maybe_lt (max_vf, min_vf))
2296 return opt_result::failure_at (vect_location, "bad data dependence.\n");
2297 LOOP_VINFO_MAX_VECT_FACTOR (loop_vinfo) = max_vf;
2299 ok = vect_determine_vectorization_factor (loop_vinfo);
2300 if (!ok)
2302 if (dump_enabled_p ())
2303 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2304 "can't determine vectorization factor.\n");
2305 return ok;
2307 if (max_vf != MAX_VECTORIZATION_FACTOR
2308 && maybe_lt (max_vf, LOOP_VINFO_VECT_FACTOR (loop_vinfo)))
2309 return opt_result::failure_at (vect_location, "bad data dependence.\n");
2311 /* Compute the scalar iteration cost. */
2312 vect_compute_single_scalar_iteration_cost (loop_vinfo);
2314 poly_uint64 saved_vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2316 /* Check the SLP opportunities in the loop, analyze and build SLP trees. */
2317 ok = vect_analyze_slp (loop_vinfo, *n_stmts);
2318 if (!ok)
2319 return ok;
2321 /* If there are any SLP instances mark them as pure_slp. */
2322 bool slp = vect_make_slp_decision (loop_vinfo);
2323 if (slp)
2325 /* Find stmts that need to be both vectorized and SLPed. */
2326 vect_detect_hybrid_slp (loop_vinfo);
2328 /* Update the vectorization factor based on the SLP decision. */
2329 vect_update_vf_for_slp (loop_vinfo);
2331 /* Optimize the SLP graph with the vectorization factor fixed. */
2332 vect_optimize_slp (loop_vinfo);
2334 /* Gather the loads reachable from the SLP graph entries. */
2335 vect_gather_slp_loads (loop_vinfo);
2338 bool saved_can_use_partial_vectors_p
2339 = LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo);
2341 /* We don't expect to have to roll back to anything other than an empty
2342 set of rgroups. */
2343 gcc_assert (LOOP_VINFO_MASKS (loop_vinfo).is_empty ());
2345 /* This is the point where we can re-start analysis with SLP forced off. */
2346 start_over:
2348 /* Now the vectorization factor is final. */
2349 poly_uint64 vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2350 gcc_assert (known_ne (vectorization_factor, 0U));
2352 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) && dump_enabled_p ())
2354 dump_printf_loc (MSG_NOTE, vect_location,
2355 "vectorization_factor = ");
2356 dump_dec (MSG_NOTE, vectorization_factor);
2357 dump_printf (MSG_NOTE, ", niters = %wd\n",
2358 LOOP_VINFO_INT_NITERS (loop_vinfo));
2361 /* Analyze the alignment of the data-refs in the loop.
2362 Fail if a data reference is found that cannot be vectorized. */
2364 ok = vect_analyze_data_refs_alignment (loop_vinfo);
2365 if (!ok)
2367 if (dump_enabled_p ())
2368 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2369 "bad data alignment.\n");
2370 return ok;
2373 /* Prune the list of ddrs to be tested at run-time by versioning for alias.
2374 It is important to call pruning after vect_analyze_data_ref_accesses,
2375 since we use grouping information gathered by interleaving analysis. */
2376 ok = vect_prune_runtime_alias_test_list (loop_vinfo);
2377 if (!ok)
2378 return ok;
2380 /* Do not invoke vect_enhance_data_refs_alignment for epilogue
2381 vectorization, since we do not want to add extra peeling or
2382 add versioning for alignment. */
2383 if (!LOOP_VINFO_EPILOGUE_P (loop_vinfo))
2384 /* This pass will decide on using loop versioning and/or loop peeling in
2385 order to enhance the alignment of data references in the loop. */
2386 ok = vect_enhance_data_refs_alignment (loop_vinfo);
2387 if (!ok)
2388 return ok;
2390 if (slp)
2392 /* Analyze operations in the SLP instances. Note this may
2393 remove unsupported SLP instances which makes the above
2394 SLP kind detection invalid. */
2395 unsigned old_size = LOOP_VINFO_SLP_INSTANCES (loop_vinfo).length ();
2396 vect_slp_analyze_operations (loop_vinfo);
2397 if (LOOP_VINFO_SLP_INSTANCES (loop_vinfo).length () != old_size)
2399 ok = opt_result::failure_at (vect_location,
2400 "unsupported SLP instances\n");
2401 goto again;
2404 /* Check whether any load in ALL SLP instances is possibly permuted. */
2405 slp_tree load_node, slp_root;
2406 unsigned i, x;
2407 slp_instance instance;
2408 bool can_use_lanes = true;
2409 FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), x, instance)
2411 slp_root = SLP_INSTANCE_TREE (instance);
2412 int group_size = SLP_TREE_LANES (slp_root);
2413 tree vectype = SLP_TREE_VECTYPE (slp_root);
2414 bool loads_permuted = false;
2415 FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (instance), i, load_node)
2417 if (!SLP_TREE_LOAD_PERMUTATION (load_node).exists ())
2418 continue;
2419 unsigned j;
2420 stmt_vec_info load_info;
2421 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (load_node), j, load_info)
2422 if (SLP_TREE_LOAD_PERMUTATION (load_node)[j] != j)
2424 loads_permuted = true;
2425 break;
2429 /* If the loads and stores can be handled with load/store-lane
2430 instructions, record it and move on to the next instance. */
2431 if (loads_permuted
2432 && SLP_INSTANCE_KIND (instance) == slp_inst_kind_store
2433 && vect_store_lanes_supported (vectype, group_size, false))
2435 FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (instance), i, load_node)
2437 stmt_vec_info stmt_vinfo = DR_GROUP_FIRST_ELEMENT
2438 (SLP_TREE_SCALAR_STMTS (load_node)[0]);
2439 /* Use SLP for strided accesses (or if we can't
2440 load-lanes). */
2441 if (STMT_VINFO_STRIDED_P (stmt_vinfo)
2442 || ! vect_load_lanes_supported
2443 (STMT_VINFO_VECTYPE (stmt_vinfo),
2444 DR_GROUP_SIZE (stmt_vinfo), false))
2445 break;
2448 can_use_lanes
2449 = can_use_lanes && i == SLP_INSTANCE_LOADS (instance).length ();
2451 if (can_use_lanes && dump_enabled_p ())
2452 dump_printf_loc (MSG_NOTE, vect_location,
2453 "SLP instance %p can use load/store-lanes\n",
2454 instance);
2456 else
2458 can_use_lanes = false;
2459 break;
2463 /* If all SLP instances can use load/store-lanes, abort SLP and try again
2464 with SLP disabled. */
2465 if (can_use_lanes)
2467 ok = opt_result::failure_at (vect_location,
2468 "Built SLP cancelled: can use "
2469 "load/store-lanes\n");
2470 if (dump_enabled_p ())
2471 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2472 "Built SLP cancelled: all SLP instances support "
2473 "load/store-lanes\n");
2474 goto again;
2478 /* Dissolve SLP-only groups. */
2479 vect_dissolve_slp_only_groups (loop_vinfo);
2481 /* Scan all the remaining operations in the loop that are not subject
2482 to SLP and make sure they are vectorizable. */
2483 ok = vect_analyze_loop_operations (loop_vinfo);
2484 if (!ok)
2486 if (dump_enabled_p ())
2487 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2488 "bad operation or unsupported loop bound.\n");
2489 return ok;
2492 /* For now, we don't expect to mix both masking and length approaches for one
2493 loop, so disable the use of partial vectors if both are recorded. */
2494 if (LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo)
2495 && !LOOP_VINFO_MASKS (loop_vinfo).is_empty ()
2496 && !LOOP_VINFO_LENS (loop_vinfo).is_empty ())
2498 if (dump_enabled_p ())
2499 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2500 "can't vectorize a loop with partial vectors"
2501 " because we don't expect to mix different"
2502 " approaches with partial vectors for the"
2503 " same loop.\n");
2504 LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo) = false;
2507 /* If we still have the option of using partial vectors,
2508 check whether we can generate the necessary loop controls. */
2509 if (LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo)
2510 && !vect_verify_full_masking (loop_vinfo)
2511 && !vect_verify_loop_lens (loop_vinfo))
2512 LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo) = false;
2514 /* If we're vectorizing an epilogue loop, the vectorized loop either needs
2515 to be able to handle fewer than VF scalars, or needs to have a lower VF
2516 than the main loop. */
2517 if (LOOP_VINFO_EPILOGUE_P (loop_vinfo)
2518 && !LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo)
2519 && maybe_ge (LOOP_VINFO_VECT_FACTOR (loop_vinfo),
2520 LOOP_VINFO_VECT_FACTOR (orig_loop_vinfo)))
2521 return opt_result::failure_at (vect_location,
2522 "Vectorization factor too high for"
2523 " epilogue loop.\n");
2525 /* Decide whether this loop_vinfo should use partial vectors or peeling,
2526 assuming that the loop will be used as a main loop. We will redo
2527 this analysis later if we instead decide to use the loop as an
2528 epilogue loop. */
2529 ok = vect_determine_partial_vectors_and_peeling (loop_vinfo, false);
2530 if (!ok)
2531 return ok;
2533 /* Check the costings of the loop make vectorizing worthwhile. */
2534 res = vect_analyze_loop_costing (loop_vinfo);
2535 if (res < 0)
2537 ok = opt_result::failure_at (vect_location,
2538 "Loop costings may not be worthwhile.\n");
2539 goto again;
2541 if (!res)
2542 return opt_result::failure_at (vect_location,
2543 "Loop costings not worthwhile.\n");
2545 /* If an epilogue loop is required make sure we can create one. */
2546 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
2547 || LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo))
2549 if (dump_enabled_p ())
2550 dump_printf_loc (MSG_NOTE, vect_location, "epilog loop required\n");
2551 if (!vect_can_advance_ivs_p (loop_vinfo)
2552 || !slpeel_can_duplicate_loop_p (LOOP_VINFO_LOOP (loop_vinfo),
2553 single_exit (LOOP_VINFO_LOOP
2554 (loop_vinfo))))
2556 ok = opt_result::failure_at (vect_location,
2557 "not vectorized: can't create required "
2558 "epilog loop\n");
2559 goto again;
2563 /* During peeling, we need to check if number of loop iterations is
2564 enough for both peeled prolog loop and vector loop. This check
2565 can be merged along with threshold check of loop versioning, so
2566 increase threshold for this case if necessary.
2568 If we are analyzing an epilogue we still want to check what its
2569 versioning threshold would be. If we decide to vectorize the epilogues we
2570 will want to use the lowest versioning threshold of all epilogues and main
2571 loop. This will enable us to enter a vectorized epilogue even when
2572 versioning the loop. We can't simply check whether the epilogue requires
2573 versioning though since we may have skipped some versioning checks when
2574 analyzing the epilogue. For instance, checks for alias versioning will be
2575 skipped when dealing with epilogues as we assume we already checked them
2576 for the main loop. So instead we always check the 'orig_loop_vinfo'. */
2577 if (LOOP_REQUIRES_VERSIONING (orig_loop_vinfo))
2579 poly_uint64 niters_th = 0;
2580 unsigned int th = LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo);
2582 if (!vect_use_loop_mask_for_alignment_p (loop_vinfo))
2584 /* Niters for peeled prolog loop. */
2585 if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0)
2587 dr_vec_info *dr_info = LOOP_VINFO_UNALIGNED_DR (loop_vinfo);
2588 tree vectype = STMT_VINFO_VECTYPE (dr_info->stmt);
2589 niters_th += TYPE_VECTOR_SUBPARTS (vectype) - 1;
2591 else
2592 niters_th += LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
2595 /* Niters for at least one iteration of vectorized loop. */
2596 if (!LOOP_VINFO_USING_PARTIAL_VECTORS_P (loop_vinfo))
2597 niters_th += LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2599 /* One additional iteration because of peeling for gaps. */
2599 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo))
2600 niters_th += 1;
2602 /* Use the same condition as vect_transform_loop to decide when to use
2603 the cost to determine a versioning threshold. */
2604 if (vect_apply_runtime_profitability_check_p (loop_vinfo)
2605 && ordered_p (th, niters_th))
2606 niters_th = ordered_max (poly_uint64 (th), niters_th);
2608 LOOP_VINFO_VERSIONING_THRESHOLD (loop_vinfo) = niters_th;
2611 gcc_assert (known_eq (vectorization_factor,
2612 LOOP_VINFO_VECT_FACTOR (loop_vinfo)));
2614 /* Ok to vectorize! */
2615 return opt_result::success ();
2617 again:
2618 /* Ensure that "ok" is false (with an opt_problem if dumping is enabled). */
2619 gcc_assert (!ok);
2621 /* Try again with SLP forced off but if we didn't do any SLP there is
2622 no point in re-trying. */
2623 if (!slp)
2624 return ok;
2626 /* If there are reduction chains re-trying will fail anyway. */
2627 if (! LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo).is_empty ())
2628 return ok;
2630 /* Likewise if the grouped loads or stores in the SLP cannot be handled
2631 via interleaving or lane instructions. */
2632 slp_instance instance;
2633 slp_tree node;
2634 unsigned i, j;
2635 FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), i, instance)
2637 stmt_vec_info vinfo;
2638 vinfo = SLP_TREE_SCALAR_STMTS (SLP_INSTANCE_TREE (instance))[0];
2639 if (! STMT_VINFO_GROUPED_ACCESS (vinfo))
2640 continue;
2641 vinfo = DR_GROUP_FIRST_ELEMENT (vinfo);
2642 unsigned int size = DR_GROUP_SIZE (vinfo);
2643 tree vectype = STMT_VINFO_VECTYPE (vinfo);
2644 if (! vect_store_lanes_supported (vectype, size, false)
2645 && ! known_eq (TYPE_VECTOR_SUBPARTS (vectype), 1U)
2646 && ! vect_grouped_store_supported (vectype, size))
2647 return opt_result::failure_at (vinfo->stmt,
2648 "unsupported grouped store\n");
2649 FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (instance), j, node)
2651 vinfo = SLP_TREE_SCALAR_STMTS (node)[0];
2652 vinfo = DR_GROUP_FIRST_ELEMENT (vinfo);
2653 bool single_element_p = !DR_GROUP_NEXT_ELEMENT (vinfo);
2654 size = DR_GROUP_SIZE (vinfo);
2655 vectype = STMT_VINFO_VECTYPE (vinfo);
2656 if (! vect_load_lanes_supported (vectype, size, false)
2657 && ! vect_grouped_load_supported (vectype, single_element_p,
2658 size))
2659 return opt_result::failure_at (vinfo->stmt,
2660 "unsupported grouped load\n");
2664 if (dump_enabled_p ())
2665 dump_printf_loc (MSG_NOTE, vect_location,
2666 "re-trying with SLP disabled\n");
2668 /* Roll back state appropriately. No SLP this time. */
2669 slp = false;
2670 /* Restore vectorization factor as it were without SLP. */
2671 LOOP_VINFO_VECT_FACTOR (loop_vinfo) = saved_vectorization_factor;
2672 /* Free the SLP instances. */
2673 FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), j, instance)
2674 vect_free_slp_instance (instance);
2675 LOOP_VINFO_SLP_INSTANCES (loop_vinfo).release ();
2676 /* Reset SLP type to loop_vect on all stmts. */
2677 for (i = 0; i < LOOP_VINFO_LOOP (loop_vinfo)->num_nodes; ++i)
2679 basic_block bb = LOOP_VINFO_BBS (loop_vinfo)[i];
2680 for (gimple_stmt_iterator si = gsi_start_phis (bb);
2681 !gsi_end_p (si); gsi_next (&si))
2683 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
2684 STMT_SLP_TYPE (stmt_info) = loop_vect;
2685 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def
2686 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_double_reduction_def)
2688 /* vectorizable_reduction adjusts reduction stmt def-types,
2689 restore them to that of the PHI. */
2690 STMT_VINFO_DEF_TYPE (STMT_VINFO_REDUC_DEF (stmt_info))
2691 = STMT_VINFO_DEF_TYPE (stmt_info);
2692 STMT_VINFO_DEF_TYPE (vect_stmt_to_vectorize
2693 (STMT_VINFO_REDUC_DEF (stmt_info)))
2694 = STMT_VINFO_DEF_TYPE (stmt_info);
2697 for (gimple_stmt_iterator si = gsi_start_bb (bb);
2698 !gsi_end_p (si); gsi_next (&si))
2700 if (is_gimple_debug (gsi_stmt (si)))
2701 continue;
2702 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
2703 STMT_SLP_TYPE (stmt_info) = loop_vect;
2704 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
2706 stmt_vec_info pattern_stmt_info
2707 = STMT_VINFO_RELATED_STMT (stmt_info);
2708 if (STMT_VINFO_SLP_VECT_ONLY_PATTERN (pattern_stmt_info))
2709 STMT_VINFO_IN_PATTERN_P (stmt_info) = false;
2711 gimple *pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
2712 STMT_SLP_TYPE (pattern_stmt_info) = loop_vect;
2713 for (gimple_stmt_iterator pi = gsi_start (pattern_def_seq);
2714 !gsi_end_p (pi); gsi_next (&pi))
2715 STMT_SLP_TYPE (loop_vinfo->lookup_stmt (gsi_stmt (pi)))
2716 = loop_vect;
2720 /* Free optimized alias test DDRS. */
2721 LOOP_VINFO_LOWER_BOUNDS (loop_vinfo).truncate (0);
2722 LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo).release ();
2723 LOOP_VINFO_CHECK_UNEQUAL_ADDRS (loop_vinfo).release ();
2724 /* Reset target cost data. */
2725 destroy_cost_data (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo));
2726 LOOP_VINFO_TARGET_COST_DATA (loop_vinfo)
2727 = init_cost (LOOP_VINFO_LOOP (loop_vinfo), false);
2728 /* Reset accumulated rgroup information. */
2729 release_vec_loop_controls (&LOOP_VINFO_MASKS (loop_vinfo));
2730 release_vec_loop_controls (&LOOP_VINFO_LENS (loop_vinfo));
2731 /* Reset assorted flags. */
2732 LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = false;
2733 LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = false;
2734 LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo) = 0;
2735 LOOP_VINFO_VERSIONING_THRESHOLD (loop_vinfo) = 0;
2736 LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo)
2737 = saved_can_use_partial_vectors_p;
2739 goto start_over;
2742 /* Return true if vectorizing a loop using NEW_LOOP_VINFO appears
2743 to be better than vectorizing it using OLD_LOOP_VINFO. Assume that
2744 OLD_LOOP_VINFO is better unless something specifically indicates
2745 otherwise.
2747 Note that this deliberately isn't a partial order. */
2749 static bool
2750 vect_better_loop_vinfo_p (loop_vec_info new_loop_vinfo,
2751 loop_vec_info old_loop_vinfo)
2753 struct loop *loop = LOOP_VINFO_LOOP (new_loop_vinfo);
2754 gcc_assert (LOOP_VINFO_LOOP (old_loop_vinfo) == loop);
2756 poly_int64 new_vf = LOOP_VINFO_VECT_FACTOR (new_loop_vinfo);
2757 poly_int64 old_vf = LOOP_VINFO_VECT_FACTOR (old_loop_vinfo);
2759 /* Always prefer a VF of loop->simdlen over any other VF. */
2760 if (loop->simdlen)
2762 bool new_simdlen_p = known_eq (new_vf, loop->simdlen);
2763 bool old_simdlen_p = known_eq (old_vf, loop->simdlen);
2764 if (new_simdlen_p != old_simdlen_p)
2765 return new_simdlen_p;
2768 /* Limit the VFs to what is likely to be the maximum number of iterations,
2769 to handle cases in which at least one loop_vinfo is fully-masked. */
2770 HOST_WIDE_INT estimated_max_niter = likely_max_stmt_executions_int (loop);
2771 if (estimated_max_niter != -1)
2773 if (known_le (estimated_max_niter, new_vf))
2774 new_vf = estimated_max_niter;
2775 if (known_le (estimated_max_niter, old_vf))
2776 old_vf = estimated_max_niter;
2779 /* Check whether the (fractional) cost per scalar iteration is lower
2780 or higher: new_inside_cost / new_vf vs. old_inside_cost / old_vf. */
2781 poly_int64 rel_new = new_loop_vinfo->vec_inside_cost * old_vf;
2782 poly_int64 rel_old = old_loop_vinfo->vec_inside_cost * new_vf;
2784 HOST_WIDE_INT est_rel_new_min
2785 = estimated_poly_value (rel_new, POLY_VALUE_MIN);
2786 HOST_WIDE_INT est_rel_new_max
2787 = estimated_poly_value (rel_new, POLY_VALUE_MAX);
2789 HOST_WIDE_INT est_rel_old_min
2790 = estimated_poly_value (rel_old, POLY_VALUE_MIN);
2791 HOST_WIDE_INT est_rel_old_max
2792 = estimated_poly_value (rel_old, POLY_VALUE_MAX);
2794 /* Check first if we can make out an unambiguous total order from the minimum
2795 and maximum estimates. */
2796 if (est_rel_new_min < est_rel_old_min
2797 && est_rel_new_max < est_rel_old_max)
2798 return true;
2799 else if (est_rel_old_min < est_rel_new_min
2800 && est_rel_old_max < est_rel_new_max)
2801 return false;
2802 /* When old_loop_vinfo uses a variable vectorization factor,
2803 we know that it has a lower cost for at least one runtime VF.
2804 However, we don't know how likely that VF is.
2806 One option would be to compare the costs for the estimated VFs.
2807 The problem is that that can put too much pressure on the cost
2808 model. E.g. if the estimated VF is also the lowest possible VF,
2809 and if old_loop_vinfo is 1 unit worse than new_loop_vinfo
2810 for the estimated VF, we'd then choose new_loop_vinfo even
2811 though (a) new_loop_vinfo might not actually be better than
2812 old_loop_vinfo for that VF and (b) it would be significantly
2813 worse at larger VFs.
2815 Here we go for a hacky compromise: pick new_loop_vinfo if it is
2816 no more expensive than old_loop_vinfo even after doubling the
2817 estimated old_loop_vinfo VF. For all but trivial loops, this
2818 ensures that we only pick new_loop_vinfo if it is significantly
2819 better than old_loop_vinfo at the estimated VF. */
2821 if (est_rel_old_min != est_rel_new_min
2822 || est_rel_old_max != est_rel_new_max)
2824 HOST_WIDE_INT est_rel_new_likely
2825 = estimated_poly_value (rel_new, POLY_VALUE_LIKELY);
2826 HOST_WIDE_INT est_rel_old_likely
2827 = estimated_poly_value (rel_old, POLY_VALUE_LIKELY);
2829 return est_rel_new_likely * 2 <= est_rel_old_likely;
2832 /* If there's nothing to choose between the loop bodies, see whether
2833 there's a difference in the prologue and epilogue costs. */
2834 if (new_loop_vinfo->vec_outside_cost != old_loop_vinfo->vec_outside_cost)
2835 return new_loop_vinfo->vec_outside_cost < old_loop_vinfo->vec_outside_cost;
2837 return false;
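/* Illustration with assumed costs: if NEW_LOOP_VINFO has an inside
   cost of 20 at VF 8 and OLD_LOOP_VINFO an inside cost of 12 at VF 4,
   then rel_new = 20 * 4 = 80 and rel_old = 12 * 8 = 96, so the new
   loop_vinfo is cheaper per scalar iteration and wins (assuming
   neither VF is overridden by loop->simdlen or capped by the
   estimated maximum iteration count above).  */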
2840 /* Decide whether to replace OLD_LOOP_VINFO with NEW_LOOP_VINFO. Return
2841 true if we should. */
2843 static bool
2844 vect_joust_loop_vinfos (loop_vec_info new_loop_vinfo,
2845 loop_vec_info old_loop_vinfo)
2847 if (!vect_better_loop_vinfo_p (new_loop_vinfo, old_loop_vinfo))
2848 return false;
2850 if (dump_enabled_p ())
2851 dump_printf_loc (MSG_NOTE, vect_location,
2852 "***** Preferring vector mode %s to vector mode %s\n",
2853 GET_MODE_NAME (new_loop_vinfo->vector_mode),
2854 GET_MODE_NAME (old_loop_vinfo->vector_mode));
2855 return true;
2858 /* If LOOP_VINFO is already a main loop, return it unmodified. Otherwise
2859 try to reanalyze it as a main loop. Return the loop_vinfo on success
2860 and null on failure. */
2862 static loop_vec_info
2863 vect_reanalyze_as_main_loop (loop_vec_info loop_vinfo, unsigned int *n_stmts)
2865 if (!LOOP_VINFO_EPILOGUE_P (loop_vinfo))
2866 return loop_vinfo;
2868 if (dump_enabled_p ())
2869 dump_printf_loc (MSG_NOTE, vect_location,
2870 "***** Reanalyzing as a main loop with vector mode %s\n",
2871 GET_MODE_NAME (loop_vinfo->vector_mode));
2873 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2874 vec_info_shared *shared = loop_vinfo->shared;
2875 opt_loop_vec_info main_loop_vinfo = vect_analyze_loop_form (loop, shared);
2876 gcc_assert (main_loop_vinfo);
2878 main_loop_vinfo->vector_mode = loop_vinfo->vector_mode;
2880 bool fatal = false;
2881 bool res = vect_analyze_loop_2 (main_loop_vinfo, fatal, n_stmts);
2882 loop->aux = NULL;
2883 if (!res)
2885 if (dump_enabled_p ())
2886 dump_printf_loc (MSG_NOTE, vect_location,
2887 "***** Failed to analyze main loop with vector"
2888 " mode %s\n",
2889 GET_MODE_NAME (loop_vinfo->vector_mode));
2890 delete main_loop_vinfo;
2891 return NULL;
2893 LOOP_VINFO_VECTORIZABLE_P (main_loop_vinfo) = 1;
2894 return main_loop_vinfo;
2897 /* Function vect_analyze_loop.
2899 Apply a set of analyses on LOOP, and create a loop_vec_info struct
2900 for it. The different analyses will record information in the
2901 loop_vec_info struct. */
2902 opt_loop_vec_info
2903 vect_analyze_loop (class loop *loop, vec_info_shared *shared)
2905 auto_vector_modes vector_modes;
2907 /* Autodetect first vector size we try. */
2908 unsigned int autovec_flags
2909 = targetm.vectorize.autovectorize_vector_modes (&vector_modes,
2910 loop->simdlen != 0);
2911 unsigned int mode_i = 0;
2913 DUMP_VECT_SCOPE ("analyze_loop_nest");
2915 if (loop_outer (loop)
2916 && loop_vec_info_for_loop (loop_outer (loop))
2917 && LOOP_VINFO_VECTORIZABLE_P (loop_vec_info_for_loop (loop_outer (loop))))
2918 return opt_loop_vec_info::failure_at (vect_location,
2919 "outer-loop already vectorized.\n");
2921 if (!find_loop_nest (loop, &shared->loop_nest))
2922 return opt_loop_vec_info::failure_at
2923 (vect_location,
2924 "not vectorized: loop nest containing two or more consecutive inner"
2925 " loops cannot be vectorized\n");
2927 unsigned n_stmts = 0;
2928 machine_mode autodetected_vector_mode = VOIDmode;
2929 opt_loop_vec_info first_loop_vinfo = opt_loop_vec_info::success (NULL);
2930 machine_mode next_vector_mode = VOIDmode;
2931 poly_uint64 lowest_th = 0;
2932 unsigned vectorized_loops = 0;
2933 bool pick_lowest_cost_p = ((autovec_flags & VECT_COMPARE_COSTS)
2934 && !unlimited_cost_model (loop));
2936 bool vect_epilogues = false;
2937 opt_result res = opt_result::success ();
2938 unsigned HOST_WIDE_INT simdlen = loop->simdlen;
2939 while (1)
2941 /* Check the CFG characteristics of the loop (nesting, entry/exit). */
2942 opt_loop_vec_info loop_vinfo = vect_analyze_loop_form (loop, shared);
2943 if (!loop_vinfo)
2945 if (dump_enabled_p ())
2946 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2947 "bad loop form.\n");
2948 gcc_checking_assert (first_loop_vinfo == NULL);
2949 return loop_vinfo;
2951 loop_vinfo->vector_mode = next_vector_mode;
2953 bool fatal = false;
2955 /* When pick_lowest_cost_p is true, we should in principle iterate
2956 over all the loop_vec_infos that LOOP_VINFO could replace and
2957 try to vectorize LOOP_VINFO under the same conditions.
2958 E.g. when trying to replace an epilogue loop, we should vectorize
2959 LOOP_VINFO as an epilogue loop with the same VF limit. When trying
2960 to replace the main loop, we should vectorize LOOP_VINFO as a main
2961 loop too.
2963 However, autovectorize_vector_modes is usually sorted as follows:
2965 - Modes that naturally produce lower VFs usually follow modes that
2966 naturally produce higher VFs.
2968 - When modes naturally produce the same VF, maskable modes
2969 usually follow unmaskable ones, so that the maskable mode
2970 can be used to vectorize the epilogue of the unmaskable mode.
2972 This order is preferred because it leads to the maximum
2973 epilogue vectorization opportunities. Targets should only use
2974 a different order if they want to make wide modes available while
2975 disparaging them relative to earlier, smaller modes. The assumption
2976 in that case is that the wider modes are more expensive in some
2977 way that isn't reflected directly in the costs.
2979 There should therefore be few interesting cases in which
2980 LOOP_VINFO fails when treated as an epilogue loop, succeeds when
2981 treated as a standalone loop, and ends up being genuinely cheaper
2982 than FIRST_LOOP_VINFO. */
2983 if (vect_epilogues)
2984 LOOP_VINFO_ORIG_LOOP_INFO (loop_vinfo) = first_loop_vinfo;
2986 res = vect_analyze_loop_2 (loop_vinfo, fatal, &n_stmts);
2987 if (mode_i == 0)
2988 autodetected_vector_mode = loop_vinfo->vector_mode;
2989 if (dump_enabled_p ())
2991 if (res)
2992 dump_printf_loc (MSG_NOTE, vect_location,
2993 "***** Analysis succeeded with vector mode %s\n",
2994 GET_MODE_NAME (loop_vinfo->vector_mode));
2995 else
2996 dump_printf_loc (MSG_NOTE, vect_location,
2997 "***** Analysis failed with vector mode %s\n",
2998 GET_MODE_NAME (loop_vinfo->vector_mode));
3001 loop->aux = NULL;
3003 if (!fatal)
3004 while (mode_i < vector_modes.length ()
3005 && vect_chooses_same_modes_p (loop_vinfo, vector_modes[mode_i]))
3007 if (dump_enabled_p ())
3008 dump_printf_loc (MSG_NOTE, vect_location,
3009 "***** The result for vector mode %s would"
3010 " be the same\n",
3011 GET_MODE_NAME (vector_modes[mode_i]));
3012 mode_i += 1;
3015 if (res)
3017 LOOP_VINFO_VECTORIZABLE_P (loop_vinfo) = 1;
3018 vectorized_loops++;
3020 /* Once we hit the desired simdlen for the first time,
3021 discard any previous attempts. */
3022 if (simdlen
3023 && known_eq (LOOP_VINFO_VECT_FACTOR (loop_vinfo), simdlen))
3025 delete first_loop_vinfo;
3026 first_loop_vinfo = opt_loop_vec_info::success (NULL);
3027 LOOP_VINFO_ORIG_LOOP_INFO (loop_vinfo) = NULL;
3028 simdlen = 0;
3030 else if (pick_lowest_cost_p && first_loop_vinfo)
3032 /* Keep trying to roll back vectorization attempts while the
3033 loop_vec_infos they produced were worse than this one. */
3034 vec<loop_vec_info> &vinfos = first_loop_vinfo->epilogue_vinfos;
3035 while (!vinfos.is_empty ()
3036 && vect_joust_loop_vinfos (loop_vinfo, vinfos.last ()))
3038 gcc_assert (vect_epilogues);
3039 delete vinfos.pop ();
3041 if (vinfos.is_empty ()
3042 && vect_joust_loop_vinfos (loop_vinfo, first_loop_vinfo))
3044 loop_vec_info main_loop_vinfo
3045 = vect_reanalyze_as_main_loop (loop_vinfo, &n_stmts);
3046 if (main_loop_vinfo == loop_vinfo)
3048 delete first_loop_vinfo;
3049 first_loop_vinfo = opt_loop_vec_info::success (NULL);
3051 else if (main_loop_vinfo
3052 && vect_joust_loop_vinfos (main_loop_vinfo,
3053 first_loop_vinfo))
3055 delete first_loop_vinfo;
3056 first_loop_vinfo = opt_loop_vec_info::success (NULL);
3057 delete loop_vinfo;
3058 loop_vinfo
3059 = opt_loop_vec_info::success (main_loop_vinfo);
3061 else
3062 delete main_loop_vinfo;
3066 if (first_loop_vinfo == NULL)
3068 first_loop_vinfo = loop_vinfo;
3069 lowest_th = LOOP_VINFO_VERSIONING_THRESHOLD (first_loop_vinfo);
3071 else if (vect_epilogues
3072 /* For now only allow one epilogue loop. */
3073 && first_loop_vinfo->epilogue_vinfos.is_empty ())
3075 first_loop_vinfo->epilogue_vinfos.safe_push (loop_vinfo);
3076 poly_uint64 th = LOOP_VINFO_VERSIONING_THRESHOLD (loop_vinfo);
3077 gcc_assert (!LOOP_REQUIRES_VERSIONING (loop_vinfo)
3078 || maybe_ne (lowest_th, 0U));
3079 /* Keep track of the known smallest versioning
3080 threshold. */
3081 if (ordered_p (lowest_th, th))
3082 lowest_th = ordered_min (lowest_th, th);
3084 else
3086 delete loop_vinfo;
3087 loop_vinfo = opt_loop_vec_info::success (NULL);
3090 /* Only vectorize epilogues if PARAM_VECT_EPILOGUES_NOMASK is
3091 enabled, SIMDUID is not set, it is the innermost loop and we have
3092 either already found the loop's SIMDLEN or there was no SIMDLEN to
3093 begin with.
3094 TODO: Enable epilogue vectorization for loops with SIMDUID set. */
3095 vect_epilogues = (!simdlen
3096 && loop->inner == NULL
3097 && param_vect_epilogues_nomask
3098 && LOOP_VINFO_PEELING_FOR_NITER (first_loop_vinfo)
3099 && !loop->simduid
3100 /* For now only allow one epilogue loop, but allow
3101 pick_lowest_cost_p to replace it. */
3102 && (first_loop_vinfo->epilogue_vinfos.is_empty ()
3103 || pick_lowest_cost_p));
3105 /* Commit to first_loop_vinfo if we have no reason to try
3106 alternatives. */
3107 if (!simdlen && !vect_epilogues && !pick_lowest_cost_p)
3108 break;
3110 else
3112 delete loop_vinfo;
3113 loop_vinfo = opt_loop_vec_info::success (NULL);
3114 if (fatal)
3116 gcc_checking_assert (first_loop_vinfo == NULL);
3117 break;
3121 /* Handle the case in which the original loop can use partial
3122 vectorization, but we only want to adopt it for the epilogue.
3123 The retry should use the same vector mode as the original. */
3124 if (vect_epilogues
3125 && loop_vinfo
3126 && LOOP_VINFO_EPIL_USING_PARTIAL_VECTORS_P (loop_vinfo))
3128 gcc_assert (LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo)
3129 && !LOOP_VINFO_USING_PARTIAL_VECTORS_P (loop_vinfo));
3130 if (dump_enabled_p ())
3131 dump_printf_loc (MSG_NOTE, vect_location,
3132 "***** Re-trying analysis with same vector mode"
3133 " %s for epilogue with partial vectors.\n",
3134 GET_MODE_NAME (loop_vinfo->vector_mode));
3135 continue;
3138 if (mode_i < vector_modes.length ()
3139 && VECTOR_MODE_P (autodetected_vector_mode)
3140 && (related_vector_mode (vector_modes[mode_i],
3141 GET_MODE_INNER (autodetected_vector_mode))
3142 == autodetected_vector_mode)
3143 && (related_vector_mode (autodetected_vector_mode,
3144 GET_MODE_INNER (vector_modes[mode_i]))
3145 == vector_modes[mode_i]))
3147 if (dump_enabled_p ())
3148 dump_printf_loc (MSG_NOTE, vect_location,
3149 "***** Skipping vector mode %s, which would"
3150 " repeat the analysis for %s\n",
3151 GET_MODE_NAME (vector_modes[mode_i]),
3152 GET_MODE_NAME (autodetected_vector_mode));
3153 mode_i += 1;
3156 if (mode_i == vector_modes.length ()
3157 || autodetected_vector_mode == VOIDmode)
3158 break;
3160 /* Try the next biggest vector size. */
3161 next_vector_mode = vector_modes[mode_i++];
3162 if (dump_enabled_p ())
3163 dump_printf_loc (MSG_NOTE, vect_location,
3164 "***** Re-trying analysis with vector mode %s\n",
3165 GET_MODE_NAME (next_vector_mode));
3168 if (first_loop_vinfo)
3170 loop->aux = (loop_vec_info) first_loop_vinfo;
3171 if (dump_enabled_p ())
3172 dump_printf_loc (MSG_NOTE, vect_location,
3173 "***** Choosing vector mode %s\n",
3174 GET_MODE_NAME (first_loop_vinfo->vector_mode));
3175 LOOP_VINFO_VERSIONING_THRESHOLD (first_loop_vinfo) = lowest_th;
3176 return first_loop_vinfo;
3179 return opt_loop_vec_info::propagate_failure (res);
3182 /* Return true if there is an in-order reduction function for CODE, storing
3183 it in *REDUC_FN if so. */
3185 static bool
3186 fold_left_reduction_fn (tree_code code, internal_fn *reduc_fn)
3188 switch (code)
3190 case PLUS_EXPR:
3191 *reduc_fn = IFN_FOLD_LEFT_PLUS;
3192 return true;
3194 default:
3195 return false;
3199 /* Function reduction_fn_for_scalar_code
3201 Input:
3202 CODE - tree_code of the reduction operation.
3204 Output:
3205 REDUC_FN - the corresponding internal function to be used to reduce the
3206 vector of partial results into a single scalar result, or IFN_LAST
3207 if the operation is a supported reduction operation, but does not have
3208 such an internal function.
3210 Return FALSE if CODE currently cannot be vectorized as a reduction. */
3212 static bool
3213 reduction_fn_for_scalar_code (enum tree_code code, internal_fn *reduc_fn)
3215 switch (code)
3217 case MAX_EXPR:
3218 *reduc_fn = IFN_REDUC_MAX;
3219 return true;
3221 case MIN_EXPR:
3222 *reduc_fn = IFN_REDUC_MIN;
3223 return true;
3225 case PLUS_EXPR:
3226 *reduc_fn = IFN_REDUC_PLUS;
3227 return true;
3229 case BIT_AND_EXPR:
3230 *reduc_fn = IFN_REDUC_AND;
3231 return true;
3233 case BIT_IOR_EXPR:
3234 *reduc_fn = IFN_REDUC_IOR;
3235 return true;
3237 case BIT_XOR_EXPR:
3238 *reduc_fn = IFN_REDUC_XOR;
3239 return true;
3241 case MULT_EXPR:
3242 case MINUS_EXPR:
3243 *reduc_fn = IFN_LAST;
3244 return true;
3246 default:
3247 return false;
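/* A minimal usage sketch (assumed caller, not from the sources):

     internal_fn reduc_fn;
     if (reduction_fn_for_scalar_code (PLUS_EXPR, &reduc_fn))
       gcc_assert (reduc_fn == IFN_REDUC_PLUS);

   For MULT_EXPR the call also returns true but sets IFN_LAST,
   meaning the reduction is supported without a single-call
   reduction internal function.  */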
3251 /* If there is a neutral value X such that SLP reduction NODE would not
3252 be affected by the introduction of additional X elements, return that X,
3253 otherwise return null. CODE is the code of the reduction and VECTOR_TYPE
3254 is the vector type that would hold element X. REDUC_CHAIN is true if
3255 the SLP statements perform a single reduction, false if each statement
3256 performs an independent reduction. */
3258 static tree
3259 neutral_op_for_slp_reduction (slp_tree slp_node, tree vector_type,
3260 tree_code code, bool reduc_chain)
3262 vec<stmt_vec_info> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
3263 stmt_vec_info stmt_vinfo = stmts[0];
3264 tree scalar_type = TREE_TYPE (vector_type);
3265 class loop *loop = gimple_bb (stmt_vinfo->stmt)->loop_father;
3266 gcc_assert (loop);
3268 switch (code)
3270 case WIDEN_SUM_EXPR:
3271 case DOT_PROD_EXPR:
3272 case SAD_EXPR:
3273 case PLUS_EXPR:
3274 case MINUS_EXPR:
3275 case BIT_IOR_EXPR:
3276 case BIT_XOR_EXPR:
3277 return build_zero_cst (scalar_type);
3279 case MULT_EXPR:
3280 return build_one_cst (scalar_type);
3282 case BIT_AND_EXPR:
3283 return build_all_ones_cst (scalar_type);
3285 case MAX_EXPR:
3286 case MIN_EXPR:
3287 /* For MIN/MAX the initial values are neutral. A reduction chain
3288 has only a single initial value, so that value is neutral for
3289 all statements. */
3290 if (reduc_chain)
3291 return PHI_ARG_DEF_FROM_EDGE (stmt_vinfo->stmt,
3292 loop_preheader_edge (loop));
3293 return NULL_TREE;
3295 default:
3296 return NULL_TREE;
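/* For example (illustrative): padding an integer PLUS_EXPR SLP
   reduction with extra 0 elements, a MULT_EXPR reduction with 1s or a
   BIT_AND_EXPR reduction with all-ones values leaves the result
   unchanged, which is what makes those constants usable as neutral
   elements.  MIN_EXPR/MAX_EXPR only have such a value in the
   reduction-chain case, where the single initial value from the
   preheader is reused.  */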
3300 /* Error reporting helper for vect_is_simple_reduction below. GIMPLE statement
3301 STMT is printed with a message MSG. */
3303 static void
3304 report_vect_op (dump_flags_t msg_type, gimple *stmt, const char *msg)
3306 dump_printf_loc (msg_type, vect_location, "%s%G", msg, stmt);
3309 /* Return true if we need an in-order (fold-left) reduction for
3310 operation CODE on type TYPE, i.e. if the reduction cannot safely
3311 be reassociated. */
3313 bool
3314 needs_fold_left_reduction_p (tree type, tree_code code)
3316 /* CHECKME: check for !flag_finite_math_only too? */
3317 if (SCALAR_FLOAT_TYPE_P (type))
3318 switch (code)
3320 case MIN_EXPR:
3321 case MAX_EXPR:
3322 return false;
3324 default:
3325 return !flag_associative_math;
3328 if (INTEGRAL_TYPE_P (type))
3330 if (!operation_no_trapping_overflow (type, code))
3331 return true;
3332 return false;
3335 if (SAT_FIXED_POINT_TYPE_P (type))
3336 return true;
3338 return false;
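/* Some assumed examples: a float PLUS_EXPR summation without
   -fassociative-math must be evaluated in order, so this returns
   true; MIN_EXPR/MAX_EXPR on floats can be reassociated freely and
   return false; and an integer addition whose overflow traps
   (e.g. a signed sum under -ftrapv) also returns true.  */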
3341 /* Return true if the reduction PHI in LOOP with latch arg LOOP_ARG and
3342 has a handled computation expression. Store the main reduction
3343 operation in *CODE. */
3345 static bool
3346 check_reduction_path (dump_user_location_t loc, loop_p loop, gphi *phi,
3347 tree loop_arg, enum tree_code *code,
3348 vec<std::pair<ssa_op_iter, use_operand_p> > &path)
3350 auto_bitmap visited;
3351 tree lookfor = PHI_RESULT (phi);
3352 ssa_op_iter curri;
3353 use_operand_p curr = op_iter_init_phiuse (&curri, phi, SSA_OP_USE);
3354 while (USE_FROM_PTR (curr) != loop_arg)
3355 curr = op_iter_next_use (&curri);
3356 curri.i = curri.numops;
3359 path.safe_push (std::make_pair (curri, curr));
3360 tree use = USE_FROM_PTR (curr);
3361 if (use == lookfor)
3362 break;
3363 gimple *def = SSA_NAME_DEF_STMT (use);
3364 if (gimple_nop_p (def)
3365 || ! flow_bb_inside_loop_p (loop, gimple_bb (def)))
3367 pop:
3370 std::pair<ssa_op_iter, use_operand_p> x = path.pop ();
3371 curri = x.first;
3372 curr = x.second;
3374 curr = op_iter_next_use (&curri);
3375 /* Skip already visited or non-SSA operands (from iterating
3376 over PHI args). */
3377 while (curr != NULL_USE_OPERAND_P
3378 && (TREE_CODE (USE_FROM_PTR (curr)) != SSA_NAME
3379 || ! bitmap_set_bit (visited,
3380 SSA_NAME_VERSION
3381 (USE_FROM_PTR (curr)))));
3383 while (curr == NULL_USE_OPERAND_P && ! path.is_empty ());
3384 if (curr == NULL_USE_OPERAND_P)
3385 break;
3387 else
3389 if (gimple_code (def) == GIMPLE_PHI)
3390 curr = op_iter_init_phiuse (&curri, as_a <gphi *>(def), SSA_OP_USE);
3391 else
3392 curr = op_iter_init_use (&curri, def, SSA_OP_USE);
3393 while (curr != NULL_USE_OPERAND_P
3394 && (TREE_CODE (USE_FROM_PTR (curr)) != SSA_NAME
3395 || ! bitmap_set_bit (visited,
3396 SSA_NAME_VERSION
3397 (USE_FROM_PTR (curr)))))
3398 curr = op_iter_next_use (&curri);
3399 if (curr == NULL_USE_OPERAND_P)
3400 goto pop;
3403 while (1);
3404 if (dump_file && (dump_flags & TDF_DETAILS))
3406 dump_printf_loc (MSG_NOTE, loc, "reduction path: ");
3407 unsigned i;
3408 std::pair<ssa_op_iter, use_operand_p> *x;
3409 FOR_EACH_VEC_ELT (path, i, x)
3410 dump_printf (MSG_NOTE, "%T ", USE_FROM_PTR (x->second));
3411 dump_printf (MSG_NOTE, "\n");
3414 /* Check whether the reduction path detected is valid. */
3415 bool fail = path.length () == 0;
3416 bool neg = false;
3417 int sign = -1;
3418 *code = ERROR_MARK;
3419 for (unsigned i = 1; i < path.length (); ++i)
3421 gimple *use_stmt = USE_STMT (path[i].second);
3422 tree op = USE_FROM_PTR (path[i].second);
3423 if (! is_gimple_assign (use_stmt)
3424 /* The following make sure we can compute the operand index
3425 easily plus it mostly disallows chaining via COND_EXPR condition
3426 operands. */
3427 || (gimple_assign_rhs1_ptr (use_stmt) != path[i].second->use
3428 && (gimple_num_ops (use_stmt) <= 2
3429 || gimple_assign_rhs2_ptr (use_stmt) != path[i].second->use)
3430 && (gimple_num_ops (use_stmt) <= 3
3431 || gimple_assign_rhs3_ptr (use_stmt) != path[i].second->use)))
3433 fail = true;
3434 break;
3436 tree_code use_code = gimple_assign_rhs_code (use_stmt);
3437 if (use_code == MINUS_EXPR)
3439 use_code = PLUS_EXPR;
3440 /* Track whether we negate the reduction value each iteration. */
3441 if (gimple_assign_rhs2 (use_stmt) == op)
3442 neg = ! neg;
3444 if (CONVERT_EXPR_CODE_P (use_code)
3445 && tree_nop_conversion_p (TREE_TYPE (gimple_assign_lhs (use_stmt)),
3446 TREE_TYPE (gimple_assign_rhs1 (use_stmt))))
3448 else if (*code == ERROR_MARK)
3450 *code = use_code;
3451 sign = TYPE_SIGN (TREE_TYPE (gimple_assign_lhs (use_stmt)));
3453 else if (use_code != *code)
3455 fail = true;
3456 break;
3458 else if ((use_code == MIN_EXPR
3459 || use_code == MAX_EXPR)
3460 && sign != TYPE_SIGN (TREE_TYPE (gimple_assign_lhs (use_stmt))))
3462 fail = true;
3463 break;
3465 /* Check that the op is used in only a single stmt. For the
3466 non-value-changing tail and the last stmt allow out-of-loop uses.
3467 ??? We could relax this and handle arbitrary live stmts by
3468 forcing a scalar epilogue for example. */
3469 imm_use_iterator imm_iter;
3470 gimple *op_use_stmt;
3471 unsigned cnt = 0;
3472 FOR_EACH_IMM_USE_STMT (op_use_stmt, imm_iter, op)
3473 if (!is_gimple_debug (op_use_stmt)
3474 && (*code != ERROR_MARK
3475 || flow_bb_inside_loop_p (loop, gimple_bb (op_use_stmt))))
3477 /* We want to allow x + x but not x < 1 ? x : 2. */
3478 if (is_gimple_assign (op_use_stmt)
3479 && gimple_assign_rhs_code (op_use_stmt) == COND_EXPR)
3481 use_operand_p use_p;
3482 FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
3483 cnt++;
3485 else
3486 cnt++;
3488 if (cnt != 1)
3490 fail = true;
3491 break;
3494 return ! fail && ! neg && *code != ERROR_MARK;
3497 bool
3498 check_reduction_path (dump_user_location_t loc, loop_p loop, gphi *phi,
3499 tree loop_arg, enum tree_code code)
3501 auto_vec<std::pair<ssa_op_iter, use_operand_p> > path;
3502 enum tree_code code_;
3503 return (check_reduction_path (loc, loop, phi, loop_arg, &code_, path)
3504 && code_ == code);
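/* Illustrative example (editorial): for the scalar loop

     int s = 0;
     for (i = 0; i < n; i++)
       s = s + a[i] - b[i];

   the path recorded above connects the PHI result through
   "tmp = s_1 + a[i]" and "s_2 = tmp - b[i]" back to the PHI.
   The MINUS_EXPR is treated as a PLUS_EXPR, with NEG tracking
   whether the running value itself is negated each iteration
   (it is not here, since the reduction value feeds the first
   operand of the subtraction).  */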
3509 /* Function vect_is_simple_reduction
3511 (1) Detect a cross-iteration def-use cycle that represents a simple
3512 reduction computation. We look for the following pattern:
3514 loop_header:
3515 a1 = phi < a0, a2 >
3516 a3 = ...
3517 a2 = operation (a3, a1)
3521 a3 = ...
3522 loop_header:
3523 a1 = phi < a0, a2 >
3524 a2 = operation (a3, a1)
3526 such that:
3527 1. operation is commutative and associative and it is safe to
3528 change the order of the computation
3529 2. no uses for a2 in the loop (a2 is used out of the loop)
3530 3. no uses of a1 in the loop besides the reduction operation
3531 4. no uses of a1 outside the loop.
3533 Conditions 1,4 are tested here.
3534 Conditions 2,3 are tested in vect_mark_stmts_to_be_vectorized.
3536 (2) Detect a cross-iteration def-use cycle in nested loops, i.e.,
3537 nested cycles.
3539 (3) Detect cycles of phi nodes in outer-loop vectorization, i.e., double
3540 reductions:
3542 a1 = phi < a0, a2 >
3543 inner loop (def of a3)
3544 a2 = phi < a3 >
3546 (4) Detect condition expressions, i.e.:
3547 for (int i = 0; i < N; i++)
3548 if (a[i] < val)
3549 ret_val = a[i];
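/* Editorial example of case (3) above, a double reduction written in a
   typical (assumed) C shape:

     int s = 0;
     for (i = 0; i < N; i++)      <-- outer (vectorized) loop
       for (j = 0; j < M; j++)    <-- inner loop
         s += a[i][j];

   Here the outer-loop PHI for s and the inner-loop PHI form the cycle
   of PHI nodes described in (3).  */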
3553 static stmt_vec_info
3554 vect_is_simple_reduction (loop_vec_info loop_info, stmt_vec_info phi_info,
3555 bool *double_reduc, bool *reduc_chain_p)
3557 gphi *phi = as_a <gphi *> (phi_info->stmt);
3558 gimple *phi_use_stmt = NULL;
3559 imm_use_iterator imm_iter;
3560 use_operand_p use_p;
3562 *double_reduc = false;
3563 *reduc_chain_p = false;
3564 STMT_VINFO_REDUC_TYPE (phi_info) = TREE_CODE_REDUCTION;
3566 tree phi_name = PHI_RESULT (phi);
3567 /* ??? If there are no uses of the PHI result the inner loop reduction
3568 won't be detected as possibly double-reduction by vectorizable_reduction
3569 because that tries to walk the PHI arg from the preheader edge which
3570 can be constant. See PR60382. */
3571 if (has_zero_uses (phi_name))
3572 return NULL;
3573 class loop *loop = (gimple_bb (phi))->loop_father;
3574 unsigned nphi_def_loop_uses = 0;
3575 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, phi_name)
3577 gimple *use_stmt = USE_STMT (use_p);
3578 if (is_gimple_debug (use_stmt))
3579 continue;
3581 if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
3583 if (dump_enabled_p ())
3584 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3585 "intermediate value used outside loop.\n");
3587 return NULL;
3590 nphi_def_loop_uses++;
3591 phi_use_stmt = use_stmt;
3594 tree latch_def = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));
3595 if (TREE_CODE (latch_def) != SSA_NAME)
3597 if (dump_enabled_p ())
3598 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3599 "reduction: not ssa_name: %T\n", latch_def);
3600 return NULL;
3603 stmt_vec_info def_stmt_info = loop_info->lookup_def (latch_def);
3604 if (!def_stmt_info
3605 || !flow_bb_inside_loop_p (loop, gimple_bb (def_stmt_info->stmt)))
3606 return NULL;
3608 bool nested_in_vect_loop
3609 = flow_loop_nested_p (LOOP_VINFO_LOOP (loop_info), loop);
3610 unsigned nlatch_def_loop_uses = 0;
3611 auto_vec<gphi *, 3> lcphis;
3612 bool inner_loop_of_double_reduc = false;
3613 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, latch_def)
3615 gimple *use_stmt = USE_STMT (use_p);
3616 if (is_gimple_debug (use_stmt))
3617 continue;
3618 if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
3619 nlatch_def_loop_uses++;
3620 else
3622 /* We can have more than one loop-closed PHI. */
3623 lcphis.safe_push (as_a <gphi *> (use_stmt));
3624 if (nested_in_vect_loop
3625 && (STMT_VINFO_DEF_TYPE (loop_info->lookup_stmt (use_stmt))
3626 == vect_double_reduction_def))
3627 inner_loop_of_double_reduc = true;
3631 /* If we are vectorizing an inner reduction we are executing that
3632 in the original order only in case we are not dealing with a
3633 double reduction. */
3634 if (nested_in_vect_loop && !inner_loop_of_double_reduc)
3636 if (dump_enabled_p ())
3637 report_vect_op (MSG_NOTE, def_stmt_info->stmt,
3638 "detected nested cycle: ");
3639 return def_stmt_info;
3642 /* If this isn't a nested cycle, or if the nested cycle reduction value
3643 is used outside of the inner loop, we cannot handle uses of the reduction
3644 value. */
3645 if (nlatch_def_loop_uses > 1 || nphi_def_loop_uses > 1)
3647 if (dump_enabled_p ())
3648 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3649 "reduction used in loop.\n");
3650 return NULL;
3653 /* If DEF_STMT is a phi node itself, we expect it to have a single argument
3654 defined in the inner loop. */
3655 if (gphi *def_stmt = dyn_cast <gphi *> (def_stmt_info->stmt))
3657 tree op1 = PHI_ARG_DEF (def_stmt, 0);
3658 if (gimple_phi_num_args (def_stmt) != 1
3659 || TREE_CODE (op1) != SSA_NAME)
3661 if (dump_enabled_p ())
3662 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3663 "unsupported phi node definition.\n");
3665 return NULL;
3668 gimple *def1 = SSA_NAME_DEF_STMT (op1);
3669 if (gimple_bb (def1)
3670 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
3671 && loop->inner
3672 && flow_bb_inside_loop_p (loop->inner, gimple_bb (def1))
3673 && is_gimple_assign (def1)
3674 && is_a <gphi *> (phi_use_stmt)
3675 && flow_bb_inside_loop_p (loop->inner, gimple_bb (phi_use_stmt)))
3677 if (dump_enabled_p ())
3678 report_vect_op (MSG_NOTE, def_stmt,
3679 "detected double reduction: ");
3681 *double_reduc = true;
3682 return def_stmt_info;
3685 return NULL;
3688 /* Look for the expression computing latch_def from the loop PHI result. */
3689 auto_vec<std::pair<ssa_op_iter, use_operand_p> > path;
3690 enum tree_code code;
3691 if (check_reduction_path (vect_location, loop, phi, latch_def, &code,
3692 path))
3694 STMT_VINFO_REDUC_CODE (phi_info) = code;
3695 if (code == COND_EXPR && !nested_in_vect_loop)
3696 STMT_VINFO_REDUC_TYPE (phi_info) = COND_REDUCTION;
3698 /* Fill in STMT_VINFO_REDUC_IDX and gather stmts for an SLP
3699 reduction chain for which the additional restriction is that
3700 all operations in the chain are the same. */
3701 auto_vec<stmt_vec_info, 8> reduc_chain;
3702 unsigned i;
3703 bool is_slp_reduc = !nested_in_vect_loop && code != COND_EXPR;
3704 for (i = path.length () - 1; i >= 1; --i)
3706 gimple *stmt = USE_STMT (path[i].second);
3707 stmt_vec_info stmt_info = loop_info->lookup_stmt (stmt);
3708 STMT_VINFO_REDUC_IDX (stmt_info)
3709 = path[i].second->use - gimple_assign_rhs1_ptr (stmt);
3710 enum tree_code stmt_code = gimple_assign_rhs_code (stmt);
3711 bool leading_conversion = (CONVERT_EXPR_CODE_P (stmt_code)
3712 && (i == 1 || i == path.length () - 1));
3713 if ((stmt_code != code && !leading_conversion)
3714 /* We can only handle the final value in epilogue
3715 generation for reduction chains. */
3716 || (i != 1 && !has_single_use (gimple_assign_lhs (stmt))))
3717 is_slp_reduc = false;
3718 /* For reduction chains we support trailing/leading
3719 conversions. We do not store those in the actual chain. */
3720 if (leading_conversion)
3721 continue;
3722 reduc_chain.safe_push (stmt_info);
3724 if (is_slp_reduc && reduc_chain.length () > 1)
3726 for (unsigned i = 0; i < reduc_chain.length () - 1; ++i)
3728 REDUC_GROUP_FIRST_ELEMENT (reduc_chain[i]) = reduc_chain[0];
3729 REDUC_GROUP_NEXT_ELEMENT (reduc_chain[i]) = reduc_chain[i+1];
3731 REDUC_GROUP_FIRST_ELEMENT (reduc_chain.last ()) = reduc_chain[0];
3732 REDUC_GROUP_NEXT_ELEMENT (reduc_chain.last ()) = NULL;
3734 /* Save the chain for further analysis in SLP detection. */
3735 LOOP_VINFO_REDUCTION_CHAINS (loop_info).safe_push (reduc_chain[0]);
3736 REDUC_GROUP_SIZE (reduc_chain[0]) = reduc_chain.length ();
3738 *reduc_chain_p = true;
3739 if (dump_enabled_p ())
3740 dump_printf_loc (MSG_NOTE, vect_location,
3741 "reduction: detected reduction chain\n");
3743 else if (dump_enabled_p ())
3744 dump_printf_loc (MSG_NOTE, vect_location,
3745 "reduction: detected reduction\n");
3747 return def_stmt_info;
3750 if (dump_enabled_p ())
3751 dump_printf_loc (MSG_NOTE, vect_location,
3752 "reduction: unknown pattern\n");
3754 return NULL;
3757 /* Estimate the number of peeled epilogue iterations for LOOP_VINFO.
3758 PEEL_ITERS_PROLOGUE is the number of peeled prologue iterations,
3759 or -1 if not known. */
3761 static int
3762 vect_get_peel_iters_epilogue (loop_vec_info loop_vinfo, int peel_iters_prologue)
3764 int assumed_vf = vect_vf_for_cost (loop_vinfo);
3765 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) || peel_iters_prologue == -1)
3767 if (dump_enabled_p ())
3768 dump_printf_loc (MSG_NOTE, vect_location,
3769 "cost model: epilogue peel iters set to vf/2 "
3770 "because loop iterations are unknown .\n");
3771 return assumed_vf / 2;
3773 else
3775 int niters = LOOP_VINFO_INT_NITERS (loop_vinfo);
3776 peel_iters_prologue = MIN (niters, peel_iters_prologue);
3777 int peel_iters_epilogue = (niters - peel_iters_prologue) % assumed_vf;
3778 /* If we need to peel for gaps, but no peeling is required, we have to
3779 peel VF iterations. */
3780 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) && !peel_iters_epilogue)
3781 peel_iters_epilogue = assumed_vf;
3782 return peel_iters_epilogue;
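/* Editorial worked example for the known-niters case above: with
   niters = 100, an assumed VF of 8 and peel_iters_prologue = 3, the
   epilogue gets (100 - 3) % 8 = 1 peeled iteration; if peeling for
   gaps were required and that remainder had been 0, a full VF (8)
   iterations would be counted instead.  */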
3786 /* Calculate cost of peeling the loop PEEL_ITERS_PROLOGUE times. */
3788 vect_get_known_peeling_cost (loop_vec_info loop_vinfo, int peel_iters_prologue,
3789 int *peel_iters_epilogue,
3790 stmt_vector_for_cost *scalar_cost_vec,
3791 stmt_vector_for_cost *prologue_cost_vec,
3792 stmt_vector_for_cost *epilogue_cost_vec)
3794 int retval = 0;
3796 *peel_iters_epilogue
3797 = vect_get_peel_iters_epilogue (loop_vinfo, peel_iters_prologue);
3799 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
3801 /* If peeled iterations are known but number of scalar loop
3802 iterations are unknown, count a taken branch per peeled loop. */
3803 if (peel_iters_prologue > 0)
3804 retval = record_stmt_cost (prologue_cost_vec, 1, cond_branch_taken,
3805 NULL, NULL_TREE, 0, vect_prologue);
3806 if (*peel_iters_epilogue > 0)
3807 retval += record_stmt_cost (epilogue_cost_vec, 1, cond_branch_taken,
3808 NULL, NULL_TREE, 0, vect_epilogue);
3811 stmt_info_for_cost *si;
3812 int j;
3813 if (peel_iters_prologue)
3814 FOR_EACH_VEC_ELT (*scalar_cost_vec, j, si)
3815 retval += record_stmt_cost (prologue_cost_vec,
3816 si->count * peel_iters_prologue,
3817 si->kind, si->stmt_info, si->misalign,
3818 vect_prologue);
3819 if (*peel_iters_epilogue)
3820 FOR_EACH_VEC_ELT (*scalar_cost_vec, j, si)
3821 retval += record_stmt_cost (epilogue_cost_vec,
3822 si->count * *peel_iters_epilogue,
3823 si->kind, si->stmt_info, si->misalign,
3824 vect_epilogue);
3826 return retval;
3829 /* Function vect_estimate_min_profitable_iters
3831 Return the number of iterations required for the vector version of the
3832 loop to be profitable relative to the cost of the scalar version of the
3833 loop.
3835 *RET_MIN_PROFITABLE_NITERS is a cost model profitability threshold
3836 of iterations for vectorization. -1 value means loop vectorization
3837 is not profitable. This returned value may be used for dynamic
3838 profitability check.
3840 *RET_MIN_PROFITABLE_ESTIMATE is a profitability threshold to be used
3841 for static check against estimated number of iterations. */
3843 static void
3844 vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo,
3845 int *ret_min_profitable_niters,
3846 int *ret_min_profitable_estimate)
3848 int min_profitable_iters;
3849 int min_profitable_estimate;
3850 int peel_iters_prologue;
3851 int peel_iters_epilogue;
3852 unsigned vec_inside_cost = 0;
3853 int vec_outside_cost = 0;
3854 unsigned vec_prologue_cost = 0;
3855 unsigned vec_epilogue_cost = 0;
3856 int scalar_single_iter_cost = 0;
3857 int scalar_outside_cost = 0;
3858 int assumed_vf = vect_vf_for_cost (loop_vinfo);
3859 int npeel = LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
3860 void *target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
3862 /* Cost model disabled. */
3863 if (unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
3865 if (dump_enabled_p ())
3866 dump_printf_loc (MSG_NOTE, vect_location, "cost model disabled.\n");
3867 *ret_min_profitable_niters = 0;
3868 *ret_min_profitable_estimate = 0;
3869 return;
3872 /* Requires loop versioning tests to handle misalignment. */
3873 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
3875 /* FIXME: Make cost depend on complexity of individual check. */
3876 unsigned len = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).length ();
3877 (void) add_stmt_cost (loop_vinfo, target_cost_data, len, vector_stmt,
3878 NULL, NULL_TREE, 0, vect_prologue);
3879 if (dump_enabled_p ())
3880 dump_printf (MSG_NOTE,
3881 "cost model: Adding cost of checks for loop "
3882 "versioning to treat misalignment.\n");
3885 /* Requires loop versioning with alias checks. */
3886 if (LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
3888 /* FIXME: Make cost depend on complexity of individual check. */
3889 unsigned len = LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo).length ();
3890 (void) add_stmt_cost (loop_vinfo, target_cost_data, len, vector_stmt,
3891 NULL, NULL_TREE, 0, vect_prologue);
3892 len = LOOP_VINFO_CHECK_UNEQUAL_ADDRS (loop_vinfo).length ();
3893 if (len)
3894 /* Count LEN - 1 ANDs and LEN comparisons. */
3895 (void) add_stmt_cost (loop_vinfo, target_cost_data, len * 2 - 1,
3896 scalar_stmt, NULL, NULL_TREE, 0, vect_prologue);
3897 len = LOOP_VINFO_LOWER_BOUNDS (loop_vinfo).length ();
3898 if (len)
3900 /* Count LEN - 1 ANDs and LEN comparisons. */
3901 unsigned int nstmts = len * 2 - 1;
3902 /* +1 for each bias that needs adding. */
3903 for (unsigned int i = 0; i < len; ++i)
3904 if (!LOOP_VINFO_LOWER_BOUNDS (loop_vinfo)[i].unsigned_p)
3905 nstmts += 1;
3906 (void) add_stmt_cost (loop_vinfo, target_cost_data, nstmts,
3907 scalar_stmt, NULL, NULL_TREE, 0, vect_prologue);
3909 if (dump_enabled_p ())
3910 dump_printf (MSG_NOTE,
3911 "cost model: Adding cost of checks for loop "
3912 "versioning aliasing.\n");
3915 /* Requires loop versioning with niter checks. */
3916 if (LOOP_REQUIRES_VERSIONING_FOR_NITERS (loop_vinfo))
3918 /* FIXME: Make cost depend on complexity of individual check. */
3919 (void) add_stmt_cost (loop_vinfo, target_cost_data, 1, vector_stmt,
3920 NULL, NULL_TREE, 0, vect_prologue);
3921 if (dump_enabled_p ())
3922 dump_printf (MSG_NOTE,
3923 "cost model: Adding cost of checks for loop "
3924 "versioning niters.\n");
3927 if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
3928 (void) add_stmt_cost (loop_vinfo, target_cost_data, 1, cond_branch_taken,
3929 NULL, NULL_TREE, 0, vect_prologue);
3931 /* Count statements in scalar loop. Using this as scalar cost for a single
3932 iteration for now.
3934 TODO: Add outer loop support.
3936 TODO: Consider assigning different costs to different scalar
3937 statements. */
3939 scalar_single_iter_cost
3940 = LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST (loop_vinfo);
3942 /* Add additional cost for the peeled instructions in prologue and epilogue
3943 loop. (For fully-masked loops there will be no peeling.)
3945 FORNOW: If we don't know the value of peel_iters for prologue or epilogue
3946 at compile-time - we assume it's vf/2 (the worst would be vf-1).
3948 TODO: Build an expression that represents peel_iters for prologue and
3949 epilogue to be used in a run-time test. */
3951 bool prologue_need_br_taken_cost = false;
3952 bool prologue_need_br_not_taken_cost = false;
3954 /* Calculate peel_iters_prologue. */
3955 if (vect_use_loop_mask_for_alignment_p (loop_vinfo))
3956 peel_iters_prologue = 0;
3957 else if (npeel < 0)
3959 peel_iters_prologue = assumed_vf / 2;
3960 if (dump_enabled_p ())
3961 dump_printf (MSG_NOTE, "cost model: "
3962 "prologue peel iters set to vf/2.\n");
3964 /* If peeled iterations are unknown, count a taken branch and a not taken
3965 branch per peeled loop. Even if scalar loop iterations are known,
3966 vector iterations are not known since peeled prologue iterations are
3967 not known. Hence guards remain the same. */
3968 prologue_need_br_taken_cost = true;
3969 prologue_need_br_not_taken_cost = true;
3971 else
3973 peel_iters_prologue = npeel;
3974 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) && peel_iters_prologue > 0)
3975 /* If peeled iterations are known but number of scalar loop
3976 iterations are unknown, count a taken branch per peeled loop. */
3977 prologue_need_br_taken_cost = true;
3980 bool epilogue_need_br_taken_cost = false;
3981 bool epilogue_need_br_not_taken_cost = false;
3983 /* Calculate peel_iters_epilogue. */
3984 if (LOOP_VINFO_USING_PARTIAL_VECTORS_P (loop_vinfo))
3985 /* We need to peel exactly one iteration for gaps. */
3986 peel_iters_epilogue = LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) ? 1 : 0;
3987 else if (npeel < 0)
3989 /* If peeling for alignment is unknown, loop bound of main loop
3990 becomes unknown. */
3991 peel_iters_epilogue = assumed_vf / 2;
3992 if (dump_enabled_p ())
3993 dump_printf (MSG_NOTE, "cost model: "
3994 "epilogue peel iters set to vf/2 because "
3995 "peeling for alignment is unknown.\n");
3997 /* See the same reason above in peel_iters_prologue calculation. */
3998 epilogue_need_br_taken_cost = true;
3999 epilogue_need_br_not_taken_cost = true;
4001 else
4003 peel_iters_epilogue = vect_get_peel_iters_epilogue (loop_vinfo, npeel);
4004 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) && peel_iters_epilogue > 0)
4005 /* If peeled iterations are known but number of scalar loop
4006 iterations are unknown, count a taken branch per peeled loop. */
4007 epilogue_need_br_taken_cost = true;
4010 stmt_info_for_cost *si;
4011 int j;
4012 /* Add costs associated with peel_iters_prologue. */
4013 if (peel_iters_prologue)
4014 FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo), j, si)
4016 (void) add_stmt_cost (loop_vinfo, target_cost_data,
4017 si->count * peel_iters_prologue, si->kind,
4018 si->stmt_info, si->vectype, si->misalign,
4019 vect_prologue);
4022 /* Add costs associated with peel_iters_epilogue. */
4023 if (peel_iters_epilogue)
4024 FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo), j, si)
4026 (void) add_stmt_cost (loop_vinfo, target_cost_data,
4027 si->count * peel_iters_epilogue, si->kind,
4028 si->stmt_info, si->vectype, si->misalign,
4029 vect_epilogue);
4032 /* Add possible cond_branch_taken/cond_branch_not_taken cost. */
4034 if (prologue_need_br_taken_cost)
4035 (void) add_stmt_cost (loop_vinfo, target_cost_data, 1, cond_branch_taken,
4036 NULL, NULL_TREE, 0, vect_prologue);
4038 if (prologue_need_br_not_taken_cost)
4039 (void) add_stmt_cost (loop_vinfo, target_cost_data, 1,
4040 cond_branch_not_taken, NULL, NULL_TREE, 0,
4041 vect_prologue);
4043 if (epilogue_need_br_taken_cost)
4044 (void) add_stmt_cost (loop_vinfo, target_cost_data, 1, cond_branch_taken,
4045 NULL, NULL_TREE, 0, vect_epilogue);
4047 if (epilogue_need_br_not_taken_cost)
4048 (void) add_stmt_cost (loop_vinfo, target_cost_data, 1,
4049 cond_branch_not_taken, NULL, NULL_TREE, 0,
4050 vect_epilogue);
4052 /* Take care of special costs for rgroup controls of partial vectors. */
4053 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
4055 /* Calculate how many masks we need to generate. */
4056 unsigned int num_masks = 0;
4057 rgroup_controls *rgm;
4058 unsigned int num_vectors_m1;
4059 FOR_EACH_VEC_ELT (LOOP_VINFO_MASKS (loop_vinfo), num_vectors_m1, rgm)
4060 if (rgm->type)
4061 num_masks += num_vectors_m1 + 1;
4062 gcc_assert (num_masks > 0);
4064 /* In the worst case, we need to generate each mask in the prologue
4065 and in the loop body. One of the loop body mask instructions
4066 replaces the comparison in the scalar loop, and since we don't
4067 count the scalar comparison against the scalar body, we shouldn't
4068 count that vector instruction against the vector body either.
4070 Sometimes we can use unpacks instead of generating prologue
4071 masks and sometimes the prologue mask will fold to a constant,
4072 so the actual prologue cost might be smaller. However, it's
4073 simpler and safer to use the worst-case cost; if this ends up
4074 being the tie-breaker between vectorizing or not, then it's
4075 probably better not to vectorize. */
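/* Editorial example of the accounting below: if the only active mask
   rgroup needs two vector masks per iteration, num_masks is 2, so two
   mask-generating vector stmts are charged to the prologue and one
   (2 - 1) to the loop body.  */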
4076 (void) add_stmt_cost (loop_vinfo, target_cost_data, num_masks,
4077 vector_stmt, NULL, NULL_TREE, 0, vect_prologue);
4078 (void) add_stmt_cost (loop_vinfo, target_cost_data, num_masks - 1,
4079 vector_stmt, NULL, NULL_TREE, 0, vect_body);
4081 else if (LOOP_VINFO_FULLY_WITH_LENGTH_P (loop_vinfo))
4083 /* Referring to the functions vect_set_loop_condition_partial_vectors
4084 and vect_set_loop_controls_directly, we need to generate each
4085 length in the prologue and in the loop body if required. Although
4086 there are some possible optimizations, we consider the worst case
4087 here. */
4089 bool niters_known_p = LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo);
4090 bool need_iterate_p
4091 = (!LOOP_VINFO_EPILOGUE_P (loop_vinfo)
4092 && !vect_known_niters_smaller_than_vf (loop_vinfo));
4094 /* Calculate how many statements to be added. */
4095 unsigned int prologue_stmts = 0;
4096 unsigned int body_stmts = 0;
4098 rgroup_controls *rgc;
4099 unsigned int num_vectors_m1;
4100 FOR_EACH_VEC_ELT (LOOP_VINFO_LENS (loop_vinfo), num_vectors_m1, rgc)
4101 if (rgc->type)
4103 /* May need one SHIFT for nitems_total computation. */
4104 unsigned nitems = rgc->max_nscalars_per_iter * rgc->factor;
4105 if (nitems != 1 && !niters_known_p)
4106 prologue_stmts += 1;
4108 /* May need one MAX and one MINUS for wrap around. */
4109 if (vect_rgroup_iv_might_wrap_p (loop_vinfo, rgc))
4110 prologue_stmts += 2;
4112 /* Need one MAX and one MINUS for each batch limit except for
4113 the first one. */
4114 prologue_stmts += num_vectors_m1 * 2;
4116 unsigned int num_vectors = num_vectors_m1 + 1;
4118 /* Need to set up lengths in prologue, only one MIN required
4119 for each since start index is zero. */
4120 prologue_stmts += num_vectors;
4122 /* Each may need two MINs and one MINUS to update lengths in body
4123 for next iteration. */
4124 if (need_iterate_p)
4125 body_stmts += 3 * num_vectors;
4128 (void) add_stmt_cost (loop_vinfo, target_cost_data, prologue_stmts,
4129 scalar_stmt, NULL, NULL_TREE, 0, vect_prologue);
4130 (void) add_stmt_cost (loop_vinfo, target_cost_data, body_stmts,
4131 scalar_stmt, NULL, NULL_TREE, 0, vect_body);
4134 /* FORNOW: The scalar outside cost is incremented in one of the
4135 following ways:
4137 1. The vectorizer checks for alignment and aliasing and generates
4138 a condition that allows dynamic vectorization. A cost model
4139 check is ANDED with the versioning condition. Hence scalar code
4140 path now has the added cost of the versioning check.
4142 if (cost > th & versioning_check)
4143 jmp to vector code
4145 Hence run-time scalar is incremented by not-taken branch cost.
4147 2. The vectorizer then checks if a prologue is required. If the
4148 cost model check was not done before during versioning, it has to
4149 be done before the prologue check.
4151 if (cost <= th)
4152 prologue = scalar_iters
4153 if (prologue == 0)
4154 jmp to vector code
4155 else
4156 execute prologue
4157 if (prologue == num_iters)
4158 go to exit
4160 Hence the run-time scalar cost is incremented by a taken branch,
4161 plus a not-taken branch, plus a taken branch cost.
4163 3. The vectorizer then checks if an epilogue is required. If the
4164 cost model check was not done before during prologue check, it
4165 has to be done with the epilogue check.
4167 if (prologue == 0)
4168 jmp to vector code
4169 else
4170 execute prologue
4171 if (prologue == num_iters)
4172 go to exit
4173 vector code:
4174 if ((cost <= th) | (scalar_iters-prologue-epilogue == 0))
4175 jmp to epilogue
4177 Hence the run-time scalar cost should be incremented by 2 taken
4178 branches.
4180 TODO: The back end may reorder the BBs differently and reverse
4181 conditions/branch directions. Change the estimates below to
4182 something more reasonable. */
4184 /* If the number of iterations is known and we do not do versioning, we can
4185 decide whether to vectorize at compile time. Hence the scalar version
4186 does not carry cost model guard costs. */
4187 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
4188 || LOOP_REQUIRES_VERSIONING (loop_vinfo))
4190 /* Cost model check occurs at versioning. */
4191 if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
4192 scalar_outside_cost += vect_get_stmt_cost (cond_branch_not_taken);
4193 else
4195 /* Cost model check occurs at prologue generation. */
4196 if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0)
4197 scalar_outside_cost += 2 * vect_get_stmt_cost (cond_branch_taken)
4198 + vect_get_stmt_cost (cond_branch_not_taken);
4199 /* Cost model check occurs at epilogue generation. */
4200 else
4201 scalar_outside_cost += 2 * vect_get_stmt_cost (cond_branch_taken);
4205 /* Complete the target-specific cost calculations. */
4206 finish_cost (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo), &vec_prologue_cost,
4207 &vec_inside_cost, &vec_epilogue_cost);
4209 vec_outside_cost = (int)(vec_prologue_cost + vec_epilogue_cost);
4211 /* Stash the costs so that we can compare two loop_vec_infos. */
4212 loop_vinfo->vec_inside_cost = vec_inside_cost;
4213 loop_vinfo->vec_outside_cost = vec_outside_cost;
4215 if (dump_enabled_p ())
4217 dump_printf_loc (MSG_NOTE, vect_location, "Cost model analysis: \n");
4218 dump_printf (MSG_NOTE, " Vector inside of loop cost: %d\n",
4219 vec_inside_cost);
4220 dump_printf (MSG_NOTE, " Vector prologue cost: %d\n",
4221 vec_prologue_cost);
4222 dump_printf (MSG_NOTE, " Vector epilogue cost: %d\n",
4223 vec_epilogue_cost);
4224 dump_printf (MSG_NOTE, " Scalar iteration cost: %d\n",
4225 scalar_single_iter_cost);
4226 dump_printf (MSG_NOTE, " Scalar outside cost: %d\n",
4227 scalar_outside_cost);
4228 dump_printf (MSG_NOTE, " Vector outside cost: %d\n",
4229 vec_outside_cost);
4230 dump_printf (MSG_NOTE, " prologue iterations: %d\n",
4231 peel_iters_prologue);
4232 dump_printf (MSG_NOTE, " epilogue iterations: %d\n",
4233 peel_iters_epilogue);
4236 /* Calculate number of iterations required to make the vector version
4237 profitable, relative to the loop bodies only. The following condition
4238 must hold true:
4239 SIC * niters + SOC > VIC * ((niters - NPEEL) / VF) + VOC
4240 where
4241 SIC = scalar iteration cost, VIC = vector iteration cost,
4242 VOC = vector outside cost, VF = vectorization factor,
4243 NPEEL = prologue iterations + epilogue iterations,
4244 SOC = scalar outside cost for run time cost model check. */
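/* Editorial worked example with hypothetical costs: SIC = 4, VIC = 8,
   VF = 4, VOC = 32, SOC = 0 and NPEEL = 0.  The condition becomes
   4 * niters > 8 * (niters / 4) + 32, i.e. 2 * niters > 32, so the
   vector loop only wins for niters > 16.  */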
4246 int saving_per_viter = (scalar_single_iter_cost * assumed_vf
4247 - vec_inside_cost);
4248 if (saving_per_viter <= 0)
4250 if (LOOP_VINFO_LOOP (loop_vinfo)->force_vectorize)
4251 warning_at (vect_location.get_location_t (), OPT_Wopenmp_simd,
4252 "vectorization did not happen for a simd loop");
4254 if (dump_enabled_p ())
4255 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4256 "cost model: the vector iteration cost = %d "
4257 "divided by the scalar iteration cost = %d "
4258 "is greater or equal to the vectorization factor = %d"
4259 ".\n",
4260 vec_inside_cost, scalar_single_iter_cost, assumed_vf);
4261 *ret_min_profitable_niters = -1;
4262 *ret_min_profitable_estimate = -1;
4263 return;
4266 /* ??? The "if" arm is written to handle all cases; see below for what
4267 we would do for !LOOP_VINFO_USING_PARTIAL_VECTORS_P. */
4268 if (LOOP_VINFO_USING_PARTIAL_VECTORS_P (loop_vinfo))
4270 /* Rewriting the condition above in terms of the number of
4271 vector iterations (vniters) rather than the number of
4272 scalar iterations (niters) gives:
4274 SIC * (vniters * VF + NPEEL) + SOC > VIC * vniters + VOC
4276 <==> vniters * (SIC * VF - VIC) > VOC - SIC * NPEEL - SOC
4278 For integer N, X and Y when X > 0:
4280 N * X > Y <==> N >= (Y /[floor] X) + 1. */
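/* Editorial continuation of the hypothetical example above: with
   SIC = 4, VF = 4 and VIC = 8, saving_per_viter is 4 * 4 - 8 = 8;
   with VOC = 32 and NPEEL = SOC = 0 the outside overhead is 32, so
   min_vec_niters = 32 / 8 + 1 = 5 vector iterations are needed
   before the outside costs are recovered.  */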
4281 int outside_overhead = (vec_outside_cost
4282 - scalar_single_iter_cost * peel_iters_prologue
4283 - scalar_single_iter_cost * peel_iters_epilogue
4284 - scalar_outside_cost);
4285 /* We're only interested in cases that require at least one
4286 vector iteration. */
4287 int min_vec_niters = 1;
4288 if (outside_overhead > 0)
4289 min_vec_niters = outside_overhead / saving_per_viter + 1;
4291 if (dump_enabled_p ())
4292 dump_printf (MSG_NOTE, " Minimum number of vector iterations: %d\n",
4293 min_vec_niters);
4295 if (LOOP_VINFO_USING_PARTIAL_VECTORS_P (loop_vinfo))
4297 /* Now that we know the minimum number of vector iterations,
4298 find the minimum niters for which the scalar cost is larger:
4300 SIC * niters > VIC * vniters + VOC - SOC
4302 We know that the minimum niters is no more than
4303 vniters * VF + NPEEL, but it might be (and often is) less
4304 than that if a partial vector iteration is cheaper than the
4305 equivalent scalar code. */
4306 int threshold = (vec_inside_cost * min_vec_niters
4307 + vec_outside_cost
4308 - scalar_outside_cost);
4309 if (threshold <= 0)
4310 min_profitable_iters = 1;
4311 else
4312 min_profitable_iters = threshold / scalar_single_iter_cost + 1;
4314 else
4315 /* Convert the number of vector iterations into a number of
4316 scalar iterations. */
4317 min_profitable_iters = (min_vec_niters * assumed_vf
4318 + peel_iters_prologue
4319 + peel_iters_epilogue);
4321 else
4323 min_profitable_iters = ((vec_outside_cost - scalar_outside_cost)
4324 * assumed_vf
4325 - vec_inside_cost * peel_iters_prologue
4326 - vec_inside_cost * peel_iters_epilogue);
4327 if (min_profitable_iters <= 0)
4328 min_profitable_iters = 0;
4329 else
4331 min_profitable_iters /= saving_per_viter;
4333 if ((scalar_single_iter_cost * assumed_vf * min_profitable_iters)
4334 <= (((int) vec_inside_cost * min_profitable_iters)
4335 + (((int) vec_outside_cost - scalar_outside_cost)
4336 * assumed_vf)))
4337 min_profitable_iters++;
4341 if (dump_enabled_p ())
4342 dump_printf (MSG_NOTE,
4343 " Calculated minimum iters for profitability: %d\n",
4344 min_profitable_iters);
4346 if (!LOOP_VINFO_USING_PARTIAL_VECTORS_P (loop_vinfo)
4347 && min_profitable_iters < (assumed_vf + peel_iters_prologue))
4348 /* We want the vectorized loop to execute at least once. */
4349 min_profitable_iters = assumed_vf + peel_iters_prologue;
4350 else if (min_profitable_iters < peel_iters_prologue)
4351 /* For LOOP_VINFO_USING_PARTIAL_VECTORS_P, we need to ensure the
4352 vectorized loop executes at least once. */
4353 min_profitable_iters = peel_iters_prologue;
4355 if (dump_enabled_p ())
4356 dump_printf_loc (MSG_NOTE, vect_location,
4357 " Runtime profitability threshold = %d\n",
4358 min_profitable_iters);
4360 *ret_min_profitable_niters = min_profitable_iters;
4362 /* Calculate the number of iterations required to make the vector version
4363 profitable, relative to the loop bodies only.
4365 The non-vectorized variant costs SIC * niters; the vector variant must
4366 beat it at the expected loop trip count, i.e. the following condition must hold true:
4367 SIC * niters > VIC * ((niters - NPEEL) / VF) + VOC + SOC */
4369 if (vec_outside_cost <= 0)
4370 min_profitable_estimate = 0;
4371 /* ??? This "else if" arm is written to handle all cases; see below for
4372 what we would do for !LOOP_VINFO_USING_PARTIAL_VECTORS_P. */
4373 else if (LOOP_VINFO_USING_PARTIAL_VECTORS_P (loop_vinfo))
4375 /* This is a repeat of the code above, but with + SOC rather
4376 than - SOC. */
4377 int outside_overhead = (vec_outside_cost
4378 - scalar_single_iter_cost * peel_iters_prologue
4379 - scalar_single_iter_cost * peel_iters_epilogue
4380 + scalar_outside_cost);
4381 int min_vec_niters = 1;
4382 if (outside_overhead > 0)
4383 min_vec_niters = outside_overhead / saving_per_viter + 1;
4385 if (LOOP_VINFO_USING_PARTIAL_VECTORS_P (loop_vinfo))
4387 int threshold = (vec_inside_cost * min_vec_niters
4388 + vec_outside_cost
4389 + scalar_outside_cost);
4390 min_profitable_estimate = threshold / scalar_single_iter_cost + 1;
4392 else
4393 min_profitable_estimate = (min_vec_niters * assumed_vf
4394 + peel_iters_prologue
4395 + peel_iters_epilogue);
4397 else
4399 min_profitable_estimate = ((vec_outside_cost + scalar_outside_cost)
4400 * assumed_vf
4401 - vec_inside_cost * peel_iters_prologue
4402 - vec_inside_cost * peel_iters_epilogue)
4403 / ((scalar_single_iter_cost * assumed_vf)
4404 - vec_inside_cost);
4406 min_profitable_estimate = MAX (min_profitable_estimate, min_profitable_iters);
4407 if (dump_enabled_p ())
4408 dump_printf_loc (MSG_NOTE, vect_location,
4409 " Static estimate profitability threshold = %d\n",
4410 min_profitable_estimate);
4412 *ret_min_profitable_estimate = min_profitable_estimate;
4415 /* Writes into SEL a mask for a vec_perm, equivalent to a vec_shr by OFFSET
4416 vector elements (not bits) for a vector with NELT elements. */
4417 static void
4418 calc_vec_perm_mask_for_shift (unsigned int offset, unsigned int nelt,
4419 vec_perm_builder *sel)
4421 /* The encoding is a single stepped pattern. Any wrap-around is handled
4422 by vec_perm_indices. */
4423 sel->new_vector (nelt, 1, 3);
4424 for (unsigned int i = 0; i < 3; i++)
4425 sel->quick_push (i + offset);
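/* Editorial example: for NELT = 8 and OFFSET = 2 the encoded series
   expands to the selector {2,3,4,5,6,7,8,9}; indices 8 and 9 pick
   lanes from the permutation's second input, which supplies the
   lanes shifted in.  */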
4428 /* Checks whether the target supports whole-vector shifts for vectors of mode
4429 MODE. This is the case if _either_ the platform handles vec_shr_optab, _or_
4430 it supports vec_perm_const with masks for all necessary shift amounts. */
4431 static bool
4432 have_whole_vector_shift (machine_mode mode)
4434 if (optab_handler (vec_shr_optab, mode) != CODE_FOR_nothing)
4435 return true;
4437 /* Variable-length vectors should be handled via the optab. */
4438 unsigned int nelt;
4439 if (!GET_MODE_NUNITS (mode).is_constant (&nelt))
4440 return false;
4442 vec_perm_builder sel;
4443 vec_perm_indices indices;
4444 for (unsigned int i = nelt / 2; i >= 1; i /= 2)
4446 calc_vec_perm_mask_for_shift (i, nelt, &sel);
4447 indices.new_vector (sel, 2, nelt);
4448 if (!can_vec_perm_const_p (mode, indices, false))
4449 return false;
4451 return true;
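/* Editorial note: for a fixed-width mode with 8 elements the loop
   above checks shift amounts of 4, 2 and 1 elements, which are the
   amounts a halving shift-based reduction scheme needs.  */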
4454 /* TODO: Close dependency between vect_model_*_cost and vectorizable_*
4455 functions. Design better to avoid maintenance issues. */
4457 /* Function vect_model_reduction_cost.
4459 Models cost for a reduction operation, including the vector ops
4460 generated within the strip-mine loop in some cases, the initial
4461 definition before the loop, and the epilogue code that must be generated. */
4463 static void
4464 vect_model_reduction_cost (loop_vec_info loop_vinfo,
4465 stmt_vec_info stmt_info, internal_fn reduc_fn,
4466 vect_reduction_type reduction_type,
4467 int ncopies, stmt_vector_for_cost *cost_vec)
4469 int prologue_cost = 0, epilogue_cost = 0, inside_cost = 0;
4470 enum tree_code code;
4471 optab optab;
4472 tree vectype;
4473 machine_mode mode;
4474 class loop *loop = NULL;
4476 if (loop_vinfo)
4477 loop = LOOP_VINFO_LOOP (loop_vinfo);
4479 /* Condition reductions generate two reductions in the loop. */
4480 if (reduction_type == COND_REDUCTION)
4481 ncopies *= 2;
4483 vectype = STMT_VINFO_VECTYPE (stmt_info);
4484 mode = TYPE_MODE (vectype);
4485 stmt_vec_info orig_stmt_info = vect_orig_stmt (stmt_info);
4487 code = gimple_assign_rhs_code (orig_stmt_info->stmt);
4489 if (reduction_type == EXTRACT_LAST_REDUCTION)
4490 /* No extra instructions are needed in the prologue. The loop body
4491 operations are costed in vectorizable_condition. */
4492 inside_cost = 0;
4493 else if (reduction_type == FOLD_LEFT_REDUCTION)
4495 /* No extra instructions needed in the prologue. */
4496 prologue_cost = 0;
4498 if (reduc_fn != IFN_LAST)
4499 /* Count one reduction-like operation per vector. */
4500 inside_cost = record_stmt_cost (cost_vec, ncopies, vec_to_scalar,
4501 stmt_info, 0, vect_body);
4502 else
4504 /* Use NELEMENTS extracts and NELEMENTS scalar ops. */
4505 unsigned int nelements = ncopies * vect_nunits_for_cost (vectype);
4506 inside_cost = record_stmt_cost (cost_vec, nelements,
4507 vec_to_scalar, stmt_info, 0,
4508 vect_body);
4509 inside_cost += record_stmt_cost (cost_vec, nelements,
4510 scalar_stmt, stmt_info, 0,
4511 vect_body);
4514 else
4516 /* Add in cost for initial definition.
4517 For cond reduction we have four vectors: initial index, step,
4518 initial result of the data reduction, initial value of the index
4519 reduction. */
4520 int prologue_stmts = reduction_type == COND_REDUCTION ? 4 : 1;
4521 prologue_cost += record_stmt_cost (cost_vec, prologue_stmts,
4522 scalar_to_vec, stmt_info, 0,
4523 vect_prologue);
4526 /* Determine cost of epilogue code.
4528 We have a reduction operator that will reduce the vector in one statement.
4529 Also requires scalar extract. */
4531 if (!loop || !nested_in_vect_loop_p (loop, orig_stmt_info))
4533 if (reduc_fn != IFN_LAST)
4535 if (reduction_type == COND_REDUCTION)
4537 /* An EQ stmt and a COND_EXPR stmt. */
4538 epilogue_cost += record_stmt_cost (cost_vec, 2,
4539 vector_stmt, stmt_info, 0,
4540 vect_epilogue);
4541 /* Reduction of the max index and a reduction of the found
4542 values. */
4543 epilogue_cost += record_stmt_cost (cost_vec, 2,
4544 vec_to_scalar, stmt_info, 0,
4545 vect_epilogue);
4546 /* A broadcast of the max value. */
4547 epilogue_cost += record_stmt_cost (cost_vec, 1,
4548 scalar_to_vec, stmt_info, 0,
4549 vect_epilogue);
4551 else
4553 epilogue_cost += record_stmt_cost (cost_vec, 1, vector_stmt,
4554 stmt_info, 0, vect_epilogue);
4555 epilogue_cost += record_stmt_cost (cost_vec, 1,
4556 vec_to_scalar, stmt_info, 0,
4557 vect_epilogue);
4560 else if (reduction_type == COND_REDUCTION)
4562 unsigned estimated_nunits = vect_nunits_for_cost (vectype);
4563 /* Extraction of scalar elements. */
4564 epilogue_cost += record_stmt_cost (cost_vec,
4565 2 * estimated_nunits,
4566 vec_to_scalar, stmt_info, 0,
4567 vect_epilogue);
4568 /* Scalar max reductions via COND_EXPR / MAX_EXPR. */
4569 epilogue_cost += record_stmt_cost (cost_vec,
4570 2 * estimated_nunits - 3,
4571 scalar_stmt, stmt_info, 0,
4572 vect_epilogue);
4574 else if (reduction_type == EXTRACT_LAST_REDUCTION
4575 || reduction_type == FOLD_LEFT_REDUCTION)
4576 /* No extra instructions needed in the epilogue. */
4578 else
4580 int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype));
4581 tree bitsize =
4582 TYPE_SIZE (TREE_TYPE (gimple_assign_lhs (orig_stmt_info->stmt)));
4583 int element_bitsize = tree_to_uhwi (bitsize);
4584 int nelements = vec_size_in_bits / element_bitsize;
4586 if (code == COND_EXPR)
4587 code = MAX_EXPR;
4589 optab = optab_for_tree_code (code, vectype, optab_default);
4591 /* We have a whole vector shift available. */
4592 if (optab != unknown_optab
4593 && VECTOR_MODE_P (mode)
4594 && optab_handler (optab, mode) != CODE_FOR_nothing
4595 && have_whole_vector_shift (mode))
4597 /* Final reduction via vector shifts and the reduction operator.
4598 Also requires scalar extract. */
4599 epilogue_cost += record_stmt_cost (cost_vec,
4600 exact_log2 (nelements) * 2,
4601 vector_stmt, stmt_info, 0,
4602 vect_epilogue);
4603 epilogue_cost += record_stmt_cost (cost_vec, 1,
4604 vec_to_scalar, stmt_info, 0,
4605 vect_epilogue);
4607 else
4608 /* Use extracts and reduction op for final reduction. For N
4609 elements, we have N extracts and N-1 reduction ops. */
4610 epilogue_cost += record_stmt_cost (cost_vec,
4611 nelements + nelements - 1,
4612 vector_stmt, stmt_info, 0,
4613 vect_epilogue);
4617 if (dump_enabled_p ())
4618 dump_printf (MSG_NOTE,
4619 "vect_model_reduction_cost: inside_cost = %d, "
4620 "prologue_cost = %d, epilogue_cost = %d .\n", inside_cost,
4621 prologue_cost, epilogue_cost);
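/* Editorial worked example for the shift-based epilogue costing above:
   for a V8HI reduction without a reduc_fn, nelements is 128 / 16 = 8,
   so exact_log2 (8) * 2 = 6 vector stmts plus one vec_to_scalar
   extract are charged to the epilogue.  */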
4626 /* Function get_initial_def_for_reduction
4628 Input:
4629 STMT_VINFO - a stmt that performs a reduction operation in the loop.
4630 INIT_VAL - the initial value of the reduction variable
4632 Output:
4633 ADJUSTMENT_DEF - a tree that holds a value to be added to the final result
4634 of the reduction (used for adjusting the epilog - see below).
4635 Return a vector variable, initialized according to the operation that
4636 STMT_VINFO performs. This vector will be used as the initial value
4637 of the vector of partial results.
4639 Option1 (adjust in epilog): Initialize the vector as follows:
4640 add/bit or/xor: [0,0,...,0,0]
4641 mult/bit and: [1,1,...,1,1]
4642 min/max/cond_expr: [init_val,init_val,..,init_val,init_val]
4643 and when necessary (e.g. add/mult case) let the caller know
4644 that it needs to adjust the result by init_val.
4646 Option2: Initialize the vector as follows:
4647 add/bit or/xor: [init_val,0,0,...,0]
4648 mult/bit and: [init_val,1,1,...,1]
4649 min/max/cond_expr: [init_val,init_val,...,init_val]
4650 and no adjustments are needed.
4652 For example, for the following code:
4654 s = init_val;
4655 for (i=0;i<n;i++)
4656 s = s + a[i];
4658 STMT_VINFO is 's = s + a[i]', and the reduction variable is 's'.
4659 For a vector of 4 units, we want to return either [0,0,0,init_val],
4660 or [0,0,0,0] and let the caller know that it needs to adjust
4661 the result at the end by 'init_val'.
4663 FORNOW, we use Option1 ('adjust in epilog') when ADJUSTMENT_DEF is not
4664 NULL, because this way the initialization vector is simpler (same
4665 element in all entries), and Option2 otherwise.
4667 A cost model should help decide between these two schemes. */
4669 static tree
4670 get_initial_def_for_reduction (loop_vec_info loop_vinfo,
4671 stmt_vec_info stmt_vinfo,
4672 enum tree_code code, tree init_val,
4673 tree *adjustment_def)
4675 class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
4676 tree scalar_type = TREE_TYPE (init_val);
4677 tree vectype = get_vectype_for_scalar_type (loop_vinfo, scalar_type);
4678 tree def_for_init;
4679 tree init_def;
4680 REAL_VALUE_TYPE real_init_val = dconst0;
4681 int int_init_val = 0;
4682 gimple_seq stmts = NULL;
4684 gcc_assert (vectype);
4686 gcc_assert (POINTER_TYPE_P (scalar_type) || INTEGRAL_TYPE_P (scalar_type)
4687 || SCALAR_FLOAT_TYPE_P (scalar_type));
4689 gcc_assert (nested_in_vect_loop_p (loop, stmt_vinfo)
4690 || loop == (gimple_bb (stmt_vinfo->stmt))->loop_father);
4692 /* ADJUSTMENT_DEF is NULL when called from
4693 vect_create_epilog_for_reduction to vectorize double reduction. */
4694 if (adjustment_def)
4695 *adjustment_def = NULL;
4697 switch (code)
4699 case WIDEN_SUM_EXPR:
4700 case DOT_PROD_EXPR:
4701 case SAD_EXPR:
4702 case PLUS_EXPR:
4703 case MINUS_EXPR:
4704 case BIT_IOR_EXPR:
4705 case BIT_XOR_EXPR:
4706 case MULT_EXPR:
4707 case BIT_AND_EXPR:
4709 if (code == MULT_EXPR)
4711 real_init_val = dconst1;
4712 int_init_val = 1;
4715 if (code == BIT_AND_EXPR)
4716 int_init_val = -1;
4718 if (SCALAR_FLOAT_TYPE_P (scalar_type))
4719 def_for_init = build_real (scalar_type, real_init_val);
4720 else
4721 def_for_init = build_int_cst (scalar_type, int_init_val);
4723 if (adjustment_def || operand_equal_p (def_for_init, init_val, 0))
4725 /* Option1: the first element is '0' or '1' as well. */
4726 if (!operand_equal_p (def_for_init, init_val, 0))
4727 *adjustment_def = init_val;
4728 init_def = gimple_build_vector_from_val (&stmts, vectype,
4729 def_for_init);
4731 else if (!TYPE_VECTOR_SUBPARTS (vectype).is_constant ())
4733 /* Option2 (variable length): the first element is INIT_VAL. */
4734 init_def = gimple_build_vector_from_val (&stmts, vectype,
4735 def_for_init);
4736 init_def = gimple_build (&stmts, CFN_VEC_SHL_INSERT,
4737 vectype, init_def, init_val);
4739 else
4741 /* Option2: the first element is INIT_VAL. */
4742 tree_vector_builder elts (vectype, 1, 2);
4743 elts.quick_push (init_val);
4744 elts.quick_push (def_for_init);
4745 init_def = gimple_build_vector (&stmts, &elts);
4748 break;
4750 case MIN_EXPR:
4751 case MAX_EXPR:
4752 case COND_EXPR:
4754 init_val = gimple_convert (&stmts, TREE_TYPE (vectype), init_val);
4755 init_def = gimple_build_vector_from_val (&stmts, vectype, init_val);
4757 break;
4759 default:
4760 gcc_unreachable ();
4763 if (stmts)
4764 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
4765 return init_def;
4768 /* Get at the initial defs for the reduction PHIs in SLP_NODE.
4769 NUMBER_OF_VECTORS is the number of vector defs to create.
4770 If NEUTRAL_OP is nonnull, introducing extra elements of that
4771 value will not change the result. */
4773 static void
4774 get_initial_defs_for_reduction (vec_info *vinfo,
4775 slp_tree slp_node,
4776 vec<tree> *vec_oprnds,
4777 unsigned int number_of_vectors,
4778 bool reduc_chain, tree neutral_op)
4780 vec<stmt_vec_info> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
4781 stmt_vec_info stmt_vinfo = stmts[0];
4782 unsigned HOST_WIDE_INT nunits;
4783 unsigned j, number_of_places_left_in_vector;
4784 tree vector_type;
4785 unsigned int group_size = stmts.length ();
4786 unsigned int i;
4787 class loop *loop;
4789 vector_type = STMT_VINFO_VECTYPE (stmt_vinfo);
4791 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def);
4793 loop = (gimple_bb (stmt_vinfo->stmt))->loop_father;
4794 gcc_assert (loop);
4795 edge pe = loop_preheader_edge (loop);
4797 gcc_assert (!reduc_chain || neutral_op);
4799 /* NUMBER_OF_COPIES is the number of times we need to use the same values in
4800 created vectors. It is greater than 1 if unrolling is performed.
4802 For example, we have two scalar operands, s1 and s2 (e.g., group of
4803 strided accesses of size two), while NUNITS is four (i.e., four scalars
4804 of this type can be packed in a vector). The output vector will contain
4805 two copies of each scalar operand: {s1, s2, s1, s2}. (NUMBER_OF_COPIES
4806 will be 2).
4808 If REDUC_GROUP_SIZE > NUNITS, the scalars will be split into several
4809 vectors containing the operands.
4811 For example, NUNITS is four as before, and the group size is 8
4812 (s1, s2, ..., s8). We will create two vectors {s1, s2, s3, s4} and
4813 {s5, s6, s7, s8}. */
4815 if (!TYPE_VECTOR_SUBPARTS (vector_type).is_constant (&nunits))
4816 nunits = group_size;
4818 number_of_places_left_in_vector = nunits;
4819 bool constant_p = true;
4820 tree_vector_builder elts (vector_type, nunits, 1);
4821 elts.quick_grow (nunits);
4822 gimple_seq ctor_seq = NULL;
4823 for (j = 0; j < nunits * number_of_vectors; ++j)
4825 tree op;
4826 i = j % group_size;
4827 stmt_vinfo = stmts[i];
4829 /* Get the def before the loop. In reduction chain we have only
4830 one initial value. Else we have as many as PHIs in the group. */
4831 if (reduc_chain)
4832 op = j != 0 ? neutral_op : PHI_ARG_DEF_FROM_EDGE (stmt_vinfo->stmt, pe);
4833 else if (((vec_oprnds->length () + 1) * nunits
4834 - number_of_places_left_in_vector >= group_size)
4835 && neutral_op)
4836 op = neutral_op;
4837 else
4838 op = PHI_ARG_DEF_FROM_EDGE (stmt_vinfo->stmt, pe);
4840 /* Create 'vect_ = {op0,op1,...,opn}'. */
4841 number_of_places_left_in_vector--;
4842 elts[nunits - number_of_places_left_in_vector - 1] = op;
4843 if (!CONSTANT_CLASS_P (op))
4844 constant_p = false;
4846 if (number_of_places_left_in_vector == 0)
4848 tree init;
4849 if (constant_p && !neutral_op
4850 ? multiple_p (TYPE_VECTOR_SUBPARTS (vector_type), nunits)
4851 : known_eq (TYPE_VECTOR_SUBPARTS (vector_type), nunits))
4852 /* Build the vector directly from ELTS. */
4853 init = gimple_build_vector (&ctor_seq, &elts);
4854 else if (neutral_op)
4856 /* Build a vector of the neutral value and shift the
4857 other elements into place. */
4858 init = gimple_build_vector_from_val (&ctor_seq, vector_type,
4859 neutral_op);
4860 int k = nunits;
4861 while (k > 0 && elts[k - 1] == neutral_op)
4862 k -= 1;
4863 while (k > 0)
4865 k -= 1;
4866 init = gimple_build (&ctor_seq, CFN_VEC_SHL_INSERT,
4867 vector_type, init, elts[k]);
4870 else
4872 /* First time round, duplicate ELTS to fill the
4873 required number of vectors. */
4874 duplicate_and_interleave (vinfo, &ctor_seq, vector_type, elts,
4875 number_of_vectors, *vec_oprnds);
4876 break;
4878 vec_oprnds->quick_push (init);
4880 number_of_places_left_in_vector = nunits;
4881 elts.new_vector (vector_type, nunits, 1);
4882 elts.quick_grow (nunits);
4883 constant_p = true;
4886 if (ctor_seq != NULL)
4887 gsi_insert_seq_on_edge_immediate (pe, ctor_seq);
4890 /* For a statement STMT_INFO taking part in a reduction operation return
4891 the stmt_vec_info the meta information is stored on. */
4893 stmt_vec_info
4894 info_for_reduction (vec_info *vinfo, stmt_vec_info stmt_info)
4896 stmt_info = vect_orig_stmt (stmt_info);
4897 gcc_assert (STMT_VINFO_REDUC_DEF (stmt_info));
4898 if (!is_a <gphi *> (stmt_info->stmt)
4899 || !VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
4900 stmt_info = STMT_VINFO_REDUC_DEF (stmt_info);
4901 gphi *phi = as_a <gphi *> (stmt_info->stmt);
4902 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_double_reduction_def)
4904 if (gimple_phi_num_args (phi) == 1)
4905 stmt_info = STMT_VINFO_REDUC_DEF (stmt_info);
4907 else if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle)
4909 edge pe = loop_preheader_edge (gimple_bb (phi)->loop_father);
4910 stmt_vec_info info
4911 = vinfo->lookup_def (PHI_ARG_DEF_FROM_EDGE (phi, pe));
4912 if (info && STMT_VINFO_DEF_TYPE (info) == vect_double_reduction_def)
4913 stmt_info = info;
4915 return stmt_info;
4918 /* Function vect_create_epilog_for_reduction
4920 Create code at the loop-epilog to finalize the result of a reduction
4921 computation.
4923 STMT_INFO is the scalar reduction stmt that is being vectorized.
4924 SLP_NODE is an SLP node containing a group of reduction statements. The
4925 first one in this group is STMT_INFO.
4926 SLP_NODE_INSTANCE is the SLP node instance containing SLP_NODE
4927 REDUC_INDEX says which rhs operand of the STMT_INFO is the reduction phi
4928 (counting from 0)
4930 This function:
4931 1. Completes the reduction def-use cycles.
4932 2. "Reduces" each vector of partial results VECT_DEFS into a single result,
4933 by calling the function specified by REDUC_FN if available, or by
4934 other means (whole-vector shifts or a scalar loop).
4935 The function also creates a new phi node at the loop exit to preserve
4936 loop-closed form, as illustrated below.
4938 The flow at the entry to this function:
4940 loop:
4941 vec_def = phi <vec_init, null> # REDUCTION_PHI
4942 VECT_DEF = vector_stmt # vectorized form of STMT_INFO
4943 s_loop = scalar_stmt # (scalar) STMT_INFO
4944 loop_exit:
4945 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
4946 use <s_out0>
4947 use <s_out0>
4949 The above is transformed by this function into:
4951 loop:
4952 vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI
4953 VECT_DEF = vector_stmt # vectorized form of STMT_INFO
4954 s_loop = scalar_stmt # (scalar) STMT_INFO
4955 loop_exit:
4956 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
4957 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
4958 v_out2 = reduce <v_out1>
4959 s_out3 = extract_field <v_out2, 0>
4960 s_out4 = adjust_result <s_out3>
4961 use <s_out4>
4962 use <s_out4>
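/* Editorial example: for the scalar reduction "s += a[i]" vectorized
   with V4SI, VECT_DEF holds four partial sums {p0,p1,p2,p3} at the
   loop exit; the epilogue created below reduces them to
   s_out3 = p0 + p1 + p2 + p3 and, when Option1 of
   get_initial_def_for_reduction was used, adds back the initial
   value in the adjust_result step.  */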
4965 static void
4966 vect_create_epilog_for_reduction (loop_vec_info loop_vinfo,
4967 stmt_vec_info stmt_info,
4968 slp_tree slp_node,
4969 slp_instance slp_node_instance)
4971 stmt_vec_info reduc_info = info_for_reduction (loop_vinfo, stmt_info);
4972 gcc_assert (reduc_info->is_reduc_info);
4973 /* For double reductions we need to get at the inner loop reduction
4974 stmt which has the meta info attached. Our stmt_info is that of the
4975 loop-closed PHI of the inner loop which we remember as
4976 def for the reduction PHI generation. */
4977 bool double_reduc = false;
4978 stmt_vec_info rdef_info = stmt_info;
4979 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_double_reduction_def)
4981 gcc_assert (!slp_node);
4982 double_reduc = true;
4983 stmt_info = loop_vinfo->lookup_def (gimple_phi_arg_def
4984 (stmt_info->stmt, 0));
4985 stmt_info = vect_stmt_to_vectorize (stmt_info);
4987 gphi *reduc_def_stmt
4988 = as_a <gphi *> (STMT_VINFO_REDUC_DEF (vect_orig_stmt (stmt_info))->stmt);
4989 enum tree_code code = STMT_VINFO_REDUC_CODE (reduc_info);
4990 internal_fn reduc_fn = STMT_VINFO_REDUC_FN (reduc_info);
4991 tree vectype;
4992 machine_mode mode;
4993 class loop *loop = LOOP_VINFO_LOOP (loop_vinfo), *outer_loop = NULL;
4994 basic_block exit_bb;
4995 tree scalar_dest;
4996 tree scalar_type;
4997 gimple *new_phi = NULL, *phi;
4998 gimple_stmt_iterator exit_gsi;
4999 tree new_temp = NULL_TREE, new_name, new_scalar_dest;
5000 gimple *epilog_stmt = NULL;
5001 gimple *exit_phi;
5002 tree bitsize;
5003 tree def;
5004 tree orig_name, scalar_result;
5005 imm_use_iterator imm_iter, phi_imm_iter;
5006 use_operand_p use_p, phi_use_p;
5007 gimple *use_stmt;
5008 bool nested_in_vect_loop = false;
5009 auto_vec<gimple *> new_phis;
5010 int j, i;
5011 auto_vec<tree> scalar_results;
5012 unsigned int group_size = 1, k;
5013 auto_vec<gimple *> phis;
5014 bool slp_reduc = false;
5015 bool direct_slp_reduc;
5016 tree new_phi_result;
5017 tree induction_index = NULL_TREE;
5019 if (slp_node)
5020 group_size = SLP_TREE_LANES (slp_node);
5022 if (nested_in_vect_loop_p (loop, stmt_info))
5024 outer_loop = loop;
5025 loop = loop->inner;
5026 nested_in_vect_loop = true;
5027 gcc_assert (!slp_node);
5029 gcc_assert (!nested_in_vect_loop || double_reduc);
5031 vectype = STMT_VINFO_REDUC_VECTYPE (reduc_info);
5032 gcc_assert (vectype);
5033 mode = TYPE_MODE (vectype);
5035 tree initial_def = NULL;
5036 tree induc_val = NULL_TREE;
5037 tree adjustment_def = NULL;
5038 if (slp_node)
5040 else
5042 /* Get at the scalar def before the loop, that defines the initial value
5043 of the reduction variable. */
5044 initial_def = PHI_ARG_DEF_FROM_EDGE (reduc_def_stmt,
5045 loop_preheader_edge (loop));
5046 /* Optimize: for induction condition reduction, if we can't use zero
5047 for induc_val, use initial_def. */
5048 if (STMT_VINFO_REDUC_TYPE (reduc_info) == INTEGER_INDUC_COND_REDUCTION)
5049 induc_val = STMT_VINFO_VEC_INDUC_COND_INITIAL_VAL (reduc_info);
5050 else if (double_reduc)
5052 else if (nested_in_vect_loop)
5054 else
5055 adjustment_def = STMT_VINFO_REDUC_EPILOGUE_ADJUSTMENT (reduc_info);
5058 unsigned vec_num;
5059 int ncopies;
5060 if (slp_node)
5062 vec_num = SLP_TREE_VEC_STMTS (slp_node_instance->reduc_phis).length ();
5063 ncopies = 1;
5065 else
5067 stmt_vec_info reduc_info = loop_vinfo->lookup_stmt (reduc_def_stmt);
5068 vec_num = 1;
5069 ncopies = STMT_VINFO_VEC_STMTS (reduc_info).length ();
5072 /* For cond reductions we want to create a new vector (INDEX_COND_EXPR)
5073 which is updated with the current index of the loop for every match of
5074 the original loop's cond_expr (VEC_STMT). This results in a vector
5075 containing the last time the condition passed for that vector lane.
5076 The first match will be a 1 to allow 0 to be used for non-matching
5077 indexes. If there are no matches at all then the vector will be all
5078 zeroes.
5080 PR92772: This algorithm is broken for architectures that support
5081 masked vectors, but do not provide fold_extract_last. */
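/* Editorial example of the scheme described above: for

     if (a[i] < val) ret_val = a[i];

   vectorized with VF = 4, the induction index vector starts at
   {1,2,3,4}; after each iteration a lane takes the current index if
   its condition matched and keeps its previous value otherwise, so at
   the end the lane with the largest index holds the last match
   (all zeroes means no match at all).  */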
5082 if (STMT_VINFO_REDUC_TYPE (reduc_info) == COND_REDUCTION)
5084 auto_vec<std::pair<tree, bool>, 2> ccompares;
5085 stmt_vec_info cond_info = STMT_VINFO_REDUC_DEF (reduc_info);
5086 cond_info = vect_stmt_to_vectorize (cond_info);
5087 while (cond_info != reduc_info)
5089 if (gimple_assign_rhs_code (cond_info->stmt) == COND_EXPR)
5091 gimple *vec_stmt = STMT_VINFO_VEC_STMTS (cond_info)[0];
5092 gcc_assert (gimple_assign_rhs_code (vec_stmt) == VEC_COND_EXPR);
5093 ccompares.safe_push
5094 (std::make_pair (unshare_expr (gimple_assign_rhs1 (vec_stmt)),
5095 STMT_VINFO_REDUC_IDX (cond_info) == 2));
5097 cond_info
5098 = loop_vinfo->lookup_def (gimple_op (cond_info->stmt,
5099 1 + STMT_VINFO_REDUC_IDX
5100 (cond_info)));
5101 cond_info = vect_stmt_to_vectorize (cond_info);
5103 gcc_assert (ccompares.length () != 0);
5105 tree indx_before_incr, indx_after_incr;
5106 poly_uint64 nunits_out = TYPE_VECTOR_SUBPARTS (vectype);
5107 int scalar_precision
5108 = GET_MODE_PRECISION (SCALAR_TYPE_MODE (TREE_TYPE (vectype)));
5109 tree cr_index_scalar_type = make_unsigned_type (scalar_precision);
5110 tree cr_index_vector_type = get_related_vectype_for_scalar_type
5111 (TYPE_MODE (vectype), cr_index_scalar_type,
5112 TYPE_VECTOR_SUBPARTS (vectype));
5114 /* First we create a simple vector induction variable which starts
5115 with the values {1,2,3,...} (SERIES_VECT) and increments by the
5116 vector size (STEP). */
5118 /* Create a {1,2,3,...} vector. */
5119 tree series_vect = build_index_vector (cr_index_vector_type, 1, 1);
5121 /* Create a vector of the step value. */
5122 tree step = build_int_cst (cr_index_scalar_type, nunits_out);
5123 tree vec_step = build_vector_from_val (cr_index_vector_type, step);
5125 /* Create an induction variable. */
5126 gimple_stmt_iterator incr_gsi;
5127 bool insert_after;
5128 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
5129 create_iv (series_vect, vec_step, NULL_TREE, loop, &incr_gsi,
5130 insert_after, &indx_before_incr, &indx_after_incr);
5132 /* Next create a new phi node vector (NEW_PHI_TREE) which starts
5133 filled with zeros (VEC_ZERO). */
5135 /* Create a vector of 0s. */
5136 tree zero = build_zero_cst (cr_index_scalar_type);
5137 tree vec_zero = build_vector_from_val (cr_index_vector_type, zero);
5139 /* Create a vector phi node. */
5140 tree new_phi_tree = make_ssa_name (cr_index_vector_type);
5141 new_phi = create_phi_node (new_phi_tree, loop->header);
5142 add_phi_arg (as_a <gphi *> (new_phi), vec_zero,
5143 loop_preheader_edge (loop), UNKNOWN_LOCATION);
5145 /* Now take the condition from the loop's original cond_exprs
5146 and produce a new cond_expr (INDEX_COND_EXPR) which for
5147 every match uses values from the induction variable
5148 (INDEX_BEFORE_INCR) and otherwise uses values from the phi node
5149 (NEW_PHI_TREE).
5150 Finally, we update the phi (NEW_PHI_TREE) to take the value of
5151 the new cond_expr (INDEX_COND_EXPR). */
5152 gimple_seq stmts = NULL;
5153 for (int i = ccompares.length () - 1; i != -1; --i)
5155 tree ccompare = ccompares[i].first;
5156 if (ccompares[i].second)
5157 new_phi_tree = gimple_build (&stmts, VEC_COND_EXPR,
5158 cr_index_vector_type,
5159 ccompare,
5160 indx_before_incr, new_phi_tree);
5161 else
5162 new_phi_tree = gimple_build (&stmts, VEC_COND_EXPR,
5163 cr_index_vector_type,
5164 ccompare,
5165 new_phi_tree, indx_before_incr);
5167 gsi_insert_seq_before (&incr_gsi, stmts, GSI_SAME_STMT);
5169 /* Update the phi with the vec cond. */
5170 induction_index = new_phi_tree;
5171 add_phi_arg (as_a <gphi *> (new_phi), induction_index,
5172 loop_latch_edge (loop), UNKNOWN_LOCATION);
5175 /* 2. Create epilog code.
5176 The reduction epilog code operates across the elements of the vector
5177 of partial results computed by the vectorized loop.
5178 The reduction epilog code consists of:
5180 step 1: compute the scalar result in a vector (v_out2)
5181 step 2: extract the scalar result (s_out3) from the vector (v_out2)
5182 step 3: adjust the scalar result (s_out3) if needed.
5184 Step 1 can be accomplished using one of the following three schemes:
5185 (scheme 1) using reduc_fn, if available.
5186 (scheme 2) using whole-vector shifts, if available.
5187 (scheme 3) using a scalar loop. In this case steps 1+2 above are
5188 combined.
5190 The overall epilog code looks like this:
5192 s_out0 = phi <s_loop> # original EXIT_PHI
5193 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
5194 v_out2 = reduce <v_out1> # step 1
5195 s_out3 = extract_field <v_out2, 0> # step 2
5196 s_out4 = adjust_result <s_out3> # step 3
5198 (step 3 is optional, and steps 1 and 2 may be combined).
5199 Lastly, the uses of s_out0 are replaced by s_out4. */
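   /* As a hedged, concrete illustration of scheme 1 (not from a specific
      testcase): for a plain integer sum vectorized with V4SI the epilog
      reduces the vector of partial sums with the target reduction ifn and
      the adjustment, if any, is applied to the scalar:

        v_out1 = phi <VECT_DEF>
        s_out3 = .REDUC_PLUS <v_out1>     # steps 1 and 2 combined
        s_out4 = s_out3 + adjustment_def  # step 3, only if needed  */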
5202 /* 2.1 Create new loop-exit-phis to preserve loop-closed form:
5203 v_out1 = phi <VECT_DEF>
5204 Store them in NEW_PHIS. */
5205 if (double_reduc)
5206 loop = outer_loop;
5207 exit_bb = single_exit (loop)->dest;
5208 new_phis.create (slp_node ? vec_num : ncopies);
5209 for (unsigned i = 0; i < vec_num; i++)
5211 if (slp_node)
5212 def = vect_get_slp_vect_def (slp_node, i);
5213 else
5214 def = gimple_get_lhs (STMT_VINFO_VEC_STMTS (rdef_info)[0]);
5215 for (j = 0; j < ncopies; j++)
5217 tree new_def = copy_ssa_name (def);
5218 phi = create_phi_node (new_def, exit_bb);
5219 if (j == 0)
5220 new_phis.quick_push (phi);
5221 else
5223 def = gimple_get_lhs (STMT_VINFO_VEC_STMTS (rdef_info)[j]);
5224 new_phis.quick_push (phi);
5227 SET_PHI_ARG_DEF (phi, single_exit (loop)->dest_idx, def);
5231 exit_gsi = gsi_after_labels (exit_bb);
5233 /* 2.2 Get the relevant tree-code to use in the epilog for schemes 2,3
5234 (i.e. when reduc_fn is not available) and in the final adjustment
5235 code (if needed). Also get the original scalar reduction variable as
5236 defined in the loop. In case STMT is a "pattern-stmt" (i.e. - it
5237 represents a reduction pattern), the tree-code and scalar-def are
5238 taken from the original stmt that the pattern-stmt (STMT) replaces.
5239 Otherwise (it is a regular reduction) - the tree-code and scalar-def
5240 are taken from STMT. */
5242 stmt_vec_info orig_stmt_info = vect_orig_stmt (stmt_info);
5243 if (orig_stmt_info != stmt_info)
5245 /* Reduction pattern */
5246 gcc_assert (STMT_VINFO_IN_PATTERN_P (orig_stmt_info));
5247 gcc_assert (STMT_VINFO_RELATED_STMT (orig_stmt_info) == stmt_info);
5250 scalar_dest = gimple_get_lhs (orig_stmt_info->stmt);
5251 scalar_type = TREE_TYPE (scalar_dest);
5252 scalar_results.create (group_size);
5253 new_scalar_dest = vect_create_destination_var (scalar_dest, NULL);
5254 bitsize = TYPE_SIZE (scalar_type);
5256 /* SLP reduction without reduction chain, e.g.,
5257 # a1 = phi <a2, a0>
5258 # b1 = phi <b2, b0>
5259 a2 = operation (a1)
5260 b2 = operation (b1) */
5261 slp_reduc = (slp_node && !REDUC_GROUP_FIRST_ELEMENT (stmt_info));
5263 /* True if we should implement SLP_REDUC using native reduction operations
5264 instead of scalar operations. */
5265 direct_slp_reduc = (reduc_fn != IFN_LAST
5266 && slp_reduc
5267 && !TYPE_VECTOR_SUBPARTS (vectype).is_constant ());
5269 /* In case of reduction chain, e.g.,
5270 # a1 = phi <a3, a0>
5271 a2 = operation (a1)
5272 a3 = operation (a2),
5274 we may end up with more than one vector result. Here we reduce them to
5275 one vector. */
5276 if (REDUC_GROUP_FIRST_ELEMENT (stmt_info) || direct_slp_reduc)
5278 gimple_seq stmts = NULL;
5279 tree first_vect = PHI_RESULT (new_phis[0]);
5280 first_vect = gimple_convert (&stmts, vectype, first_vect);
5281 for (k = 1; k < new_phis.length (); k++)
5283 gimple *next_phi = new_phis[k];
5284 tree second_vect = PHI_RESULT (next_phi);
5285 second_vect = gimple_convert (&stmts, vectype, second_vect);
5286 first_vect = gimple_build (&stmts, code, vectype,
5287 first_vect, second_vect);
5289 gsi_insert_seq_before (&exit_gsi, stmts, GSI_SAME_STMT);
5291 new_phi_result = first_vect;
5292 new_phis.truncate (0);
5293 new_phis.safe_push (SSA_NAME_DEF_STMT (first_vect));
5295 /* Likewise if we couldn't use a single defuse cycle. */
5296 else if (ncopies > 1)
5298 gimple_seq stmts = NULL;
5299 tree first_vect = PHI_RESULT (new_phis[0]);
5300 first_vect = gimple_convert (&stmts, vectype, first_vect);
5301 for (int k = 1; k < ncopies; ++k)
5303 tree second_vect = PHI_RESULT (new_phis[k]);
5304 second_vect = gimple_convert (&stmts, vectype, second_vect);
5305 first_vect = gimple_build (&stmts, code, vectype,
5306 first_vect, second_vect);
5308 gsi_insert_seq_before (&exit_gsi, stmts, GSI_SAME_STMT);
5309 new_phi_result = first_vect;
5310 new_phis.truncate (0);
5311 new_phis.safe_push (SSA_NAME_DEF_STMT (first_vect));
5313 else
5314 new_phi_result = PHI_RESULT (new_phis[0]);
5316 if (STMT_VINFO_REDUC_TYPE (reduc_info) == COND_REDUCTION
5317 && reduc_fn != IFN_LAST)
5319 /* For condition reductions, we have a vector (NEW_PHI_RESULT) containing
5320 various data values where the condition matched and another vector
5321 (INDUCTION_INDEX) containing all the indexes of those matches. We
5322 need to extract the last matching index (which will be the index with
5323 highest value) and use this to index into the data vector.
5324 For the case where there were no matches, the data vector will contain
5325 all default values and the index vector will be all zeros. */
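  /* Minimal sketch of the values involved, assuming 4 lanes:
       INDUCTION_INDEX = {0, 6, 0, 3}    (last matching index per lane)
       NEW_PHI_RESULT  = {d0, d1, d2, d3}
     A REDUC_MAX over INDUCTION_INDEX yields 6; comparing each lane
     against 6 selects only lane 1, the VEC_COND keeps d1 and zeros the
     rest, and a final REDUC_MAX over the (unsigned-punned) result
     extracts d1 as the scalar answer.  */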
5327 /* Get various versions of the type of the vector of indexes. */
5328 tree index_vec_type = TREE_TYPE (induction_index);
5329 gcc_checking_assert (TYPE_UNSIGNED (index_vec_type));
5330 tree index_scalar_type = TREE_TYPE (index_vec_type);
5331 tree index_vec_cmp_type = truth_type_for (index_vec_type);
5333 /* Get an unsigned integer version of the type of the data vector. */
5334 int scalar_precision
5335 = GET_MODE_PRECISION (SCALAR_TYPE_MODE (scalar_type));
5336 tree scalar_type_unsigned = make_unsigned_type (scalar_precision);
5337 tree vectype_unsigned = get_same_sized_vectype (scalar_type_unsigned,
5338 vectype);
5340 /* First we need to create a vector (ZERO_VEC) of zeros and another
5341 vector (MAX_INDEX_VEC) filled with the last matching index, which we
5342 can create using a MAX reduction and then expanding.
5343 In the case where the loop never made any matches, the max index will
5344 be zero. */
5346 /* Vector of {0, 0, 0,...}. */
5347 tree zero_vec = build_zero_cst (vectype);
5349 gimple_seq stmts = NULL;
5350 new_phi_result = gimple_convert (&stmts, vectype, new_phi_result);
5351 gsi_insert_seq_before (&exit_gsi, stmts, GSI_SAME_STMT);
5353 /* Find maximum value from the vector of found indexes. */
5354 tree max_index = make_ssa_name (index_scalar_type);
5355 gcall *max_index_stmt = gimple_build_call_internal (IFN_REDUC_MAX,
5356 1, induction_index);
5357 gimple_call_set_lhs (max_index_stmt, max_index);
5358 gsi_insert_before (&exit_gsi, max_index_stmt, GSI_SAME_STMT);
5360 /* Vector of {max_index, max_index, max_index,...}. */
5361 tree max_index_vec = make_ssa_name (index_vec_type);
5362 tree max_index_vec_rhs = build_vector_from_val (index_vec_type,
5363 max_index);
5364 gimple *max_index_vec_stmt = gimple_build_assign (max_index_vec,
5365 max_index_vec_rhs);
5366 gsi_insert_before (&exit_gsi, max_index_vec_stmt, GSI_SAME_STMT);
5368 /* Next we compare the new vector (MAX_INDEX_VEC) full of max indexes
5369 with the vector (INDUCTION_INDEX) of found indexes, choosing values
5370 from the data vector (NEW_PHI_RESULT) for matches, 0 (ZERO_VEC)
5371 otherwise. Only one value should match, resulting in a vector
5372 (VEC_COND) with one data value and the rest zeros.
5373 In the case where the loop never made any matches, every index will
5374 match, resulting in a vector with all data values (which will all be
5375 the default value). */
5377 /* Compare the max index vector to the vector of found indexes to find
5378 the position of the max value. */
5379 tree vec_compare = make_ssa_name (index_vec_cmp_type);
5380 gimple *vec_compare_stmt = gimple_build_assign (vec_compare, EQ_EXPR,
5381 induction_index,
5382 max_index_vec);
5383 gsi_insert_before (&exit_gsi, vec_compare_stmt, GSI_SAME_STMT);
5385 /* Use the compare to choose either values from the data vector or
5386 zero. */
5387 tree vec_cond = make_ssa_name (vectype);
5388 gimple *vec_cond_stmt = gimple_build_assign (vec_cond, VEC_COND_EXPR,
5389 vec_compare, new_phi_result,
5390 zero_vec);
5391 gsi_insert_before (&exit_gsi, vec_cond_stmt, GSI_SAME_STMT);
5393 /* Finally we need to extract the data value from the vector (VEC_COND)
5394 into a scalar (MATCHED_DATA_REDUC). Logically we want to do an OR
5395 reduction, but because this doesn't exist, we can use a MAX reduction
5396 instead. The data value might be signed or a float so we need to cast
5397 it first.
5398 In the case where the loop never made any matches, the data values are
5399 all identical, and so will reduce down correctly. */
5401 /* Make the matched data values unsigned. */
5402 tree vec_cond_cast = make_ssa_name (vectype_unsigned);
5403 tree vec_cond_cast_rhs = build1 (VIEW_CONVERT_EXPR, vectype_unsigned,
5404 vec_cond);
5405 gimple *vec_cond_cast_stmt = gimple_build_assign (vec_cond_cast,
5406 VIEW_CONVERT_EXPR,
5407 vec_cond_cast_rhs);
5408 gsi_insert_before (&exit_gsi, vec_cond_cast_stmt, GSI_SAME_STMT);
5410 /* Reduce down to a scalar value. */
5411 tree data_reduc = make_ssa_name (scalar_type_unsigned);
5412 gcall *data_reduc_stmt = gimple_build_call_internal (IFN_REDUC_MAX,
5413 1, vec_cond_cast);
5414 gimple_call_set_lhs (data_reduc_stmt, data_reduc);
5415 gsi_insert_before (&exit_gsi, data_reduc_stmt, GSI_SAME_STMT);
5417 /* Convert the reduced value back to the result type and set as the
5418 result. */
5419 stmts = NULL;
5420 new_temp = gimple_build (&stmts, VIEW_CONVERT_EXPR, scalar_type,
5421 data_reduc);
5422 gsi_insert_seq_before (&exit_gsi, stmts, GSI_SAME_STMT);
5423 scalar_results.safe_push (new_temp);
5425 else if (STMT_VINFO_REDUC_TYPE (reduc_info) == COND_REDUCTION
5426 && reduc_fn == IFN_LAST)
5428 /* Condition reduction without supported IFN_REDUC_MAX. Generate
5429 idx = 0;
5430 idx_val = induction_index[0];
5431 val = data_reduc[0];
5432 for (idx = 0, val = init, i = 0; i < nelts; ++i)
5433 if (induction_index[i] > idx_val)
5434 val = data_reduc[i], idx_val = induction_index[i];
5435 return val; */
5437 tree data_eltype = TREE_TYPE (TREE_TYPE (new_phi_result));
5438 tree idx_eltype = TREE_TYPE (TREE_TYPE (induction_index));
5439 unsigned HOST_WIDE_INT el_size = tree_to_uhwi (TYPE_SIZE (idx_eltype));
5440 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (TREE_TYPE (induction_index));
5441 /* Enforced by vectorizable_reduction, which ensures we have target
5442 support before allowing a conditional reduction on variable-length
5443 vectors. */
5444 unsigned HOST_WIDE_INT v_size = el_size * nunits.to_constant ();
5445 tree idx_val = NULL_TREE, val = NULL_TREE;
5446 for (unsigned HOST_WIDE_INT off = 0; off < v_size; off += el_size)
5448 tree old_idx_val = idx_val;
5449 tree old_val = val;
5450 idx_val = make_ssa_name (idx_eltype);
5451 epilog_stmt = gimple_build_assign (idx_val, BIT_FIELD_REF,
5452 build3 (BIT_FIELD_REF, idx_eltype,
5453 induction_index,
5454 bitsize_int (el_size),
5455 bitsize_int (off)));
5456 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5457 val = make_ssa_name (data_eltype);
5458 epilog_stmt = gimple_build_assign (val, BIT_FIELD_REF,
5459 build3 (BIT_FIELD_REF,
5460 data_eltype,
5461 new_phi_result,
5462 bitsize_int (el_size),
5463 bitsize_int (off)));
5464 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5465 if (off != 0)
5467 tree new_idx_val = idx_val;
5468 if (off != v_size - el_size)
5470 new_idx_val = make_ssa_name (idx_eltype);
5471 epilog_stmt = gimple_build_assign (new_idx_val,
5472 MAX_EXPR, idx_val,
5473 old_idx_val);
5474 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5476 tree new_val = make_ssa_name (data_eltype);
5477 epilog_stmt = gimple_build_assign (new_val,
5478 COND_EXPR,
5479 build2 (GT_EXPR,
5480 boolean_type_node,
5481 idx_val,
5482 old_idx_val),
5483 val, old_val);
5484 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5485 idx_val = new_idx_val;
5486 val = new_val;
5489 /* Convert the reduced value back to the result type and set as the
5490 result. */
5491 gimple_seq stmts = NULL;
5492 val = gimple_convert (&stmts, scalar_type, val);
5493 gsi_insert_seq_before (&exit_gsi, stmts, GSI_SAME_STMT);
5494 scalar_results.safe_push (val);
5497 /* 2.3 Create the reduction code, using one of the three schemes described
5498 above. In SLP we simply need to extract all the elements from the
5499 vector (without reducing them), so we use scalar shifts. */
5500 else if (reduc_fn != IFN_LAST && !slp_reduc)
5502 tree tmp;
5503 tree vec_elem_type;
5505 /* Case 1: Create:
5506 v_out2 = reduc_expr <v_out1> */
5508 if (dump_enabled_p ())
5509 dump_printf_loc (MSG_NOTE, vect_location,
5510 "Reduce using direct vector reduction.\n");
5512 gimple_seq stmts = NULL;
5513 new_phi_result = gimple_convert (&stmts, vectype, new_phi_result);
5514 vec_elem_type = TREE_TYPE (TREE_TYPE (new_phi_result));
5515 new_temp = gimple_build (&stmts, as_combined_fn (reduc_fn),
5516 vec_elem_type, new_phi_result);
5517 new_temp = gimple_convert (&stmts, scalar_type, new_temp);
5518 gsi_insert_seq_before (&exit_gsi, stmts, GSI_SAME_STMT);
5520 if ((STMT_VINFO_REDUC_TYPE (reduc_info) == INTEGER_INDUC_COND_REDUCTION)
5521 && induc_val)
5523 /* Earlier we set the initial value to be a vector of induc_val
5524 values. Check the result and if it is induc_val then replace it
5525 with the original initial value, unless induc_val is
5526 the same as initial_def already. */
5527 tree zcompare = build2 (EQ_EXPR, boolean_type_node, new_temp,
5528 induc_val);
5530 tmp = make_ssa_name (new_scalar_dest);
5531 epilog_stmt = gimple_build_assign (tmp, COND_EXPR, zcompare,
5532 initial_def, new_temp);
5533 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5534 new_temp = tmp;
5537 scalar_results.safe_push (new_temp);
5539 else if (direct_slp_reduc)
5541 /* Here we create one vector for each of the REDUC_GROUP_SIZE results,
5542 with the elements for other SLP statements replaced with the
5543 neutral value. We can then do a normal reduction on each vector. */
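  /* Illustrative sketch only, assuming REDUC_GROUP_SIZE == 2: the vector
     of partial results interleaves the two SLP reductions as
     {a0, b0, a1, b1, ...}.  For reduction 0 we keep the "a" lanes and
     replace the "b" lanes with the neutral value (or, failing that, with
     that reduction's initial scalar value) and run a normal REDUC_* over
     the whole vector; likewise for reduction 1 with the roles swapped.  */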
5545 /* Enforced by vectorizable_reduction. */
5546 gcc_assert (new_phis.length () == 1);
5547 gcc_assert (pow2p_hwi (group_size));
5549 slp_tree orig_phis_slp_node = slp_node_instance->reduc_phis;
5550 vec<stmt_vec_info> orig_phis
5551 = SLP_TREE_SCALAR_STMTS (orig_phis_slp_node);
5552 gimple_seq seq = NULL;
5554 /* Build a vector {0, 1, 2, ...}, with the same number of elements
5555 and the same element size as VECTYPE. */
5556 tree index = build_index_vector (vectype, 0, 1);
5557 tree index_type = TREE_TYPE (index);
5558 tree index_elt_type = TREE_TYPE (index_type);
5559 tree mask_type = truth_type_for (index_type);
5561 /* Create a vector that, for each element, identifies which of
5562 the REDUC_GROUP_SIZE results should use it. */
5563 tree index_mask = build_int_cst (index_elt_type, group_size - 1);
5564 index = gimple_build (&seq, BIT_AND_EXPR, index_type, index,
5565 build_vector_from_val (index_type, index_mask));
5567 /* Get a neutral vector value. This is simply a splat of the neutral
5568 scalar value if we have one, otherwise the initial scalar value
5569 is itself a neutral value. */
5570 tree vector_identity = NULL_TREE;
5571 tree neutral_op = NULL_TREE;
5572 if (slp_node)
5574 stmt_vec_info first = REDUC_GROUP_FIRST_ELEMENT (stmt_info);
5575 neutral_op
5576 = neutral_op_for_slp_reduction (slp_node_instance->reduc_phis,
5577 vectype, code, first != NULL);
5579 if (neutral_op)
5580 vector_identity = gimple_build_vector_from_val (&seq, vectype,
5581 neutral_op);
5582 for (unsigned int i = 0; i < group_size; ++i)
5584 /* If there's no universal neutral value, we can use the
5585 initial scalar value from the original PHI. This is used
5586 for MIN and MAX reductions, for example. */
5587 if (!neutral_op)
5589 tree scalar_value
5590 = PHI_ARG_DEF_FROM_EDGE (orig_phis[i]->stmt,
5591 loop_preheader_edge (loop));
5592 scalar_value = gimple_convert (&seq, TREE_TYPE (vectype),
5593 scalar_value);
5594 vector_identity = gimple_build_vector_from_val (&seq, vectype,
5595 scalar_value);
5598 /* Calculate the equivalent of:
5600 sel[j] = (index[j] == i);
5602 which selects the elements of NEW_PHI_RESULT that should
5603 be included in the result. */
5604 tree compare_val = build_int_cst (index_elt_type, i);
5605 compare_val = build_vector_from_val (index_type, compare_val);
5606 tree sel = gimple_build (&seq, EQ_EXPR, mask_type,
5607 index, compare_val);
5609 /* Calculate the equivalent of:
5611 vec = sel ? new_phi_result : vector_identity;
5613 VEC is now suitable for a full vector reduction. */
5614 tree vec = gimple_build (&seq, VEC_COND_EXPR, vectype,
5615 sel, new_phi_result, vector_identity);
5617 /* Do the reduction and convert it to the appropriate type. */
5618 tree scalar = gimple_build (&seq, as_combined_fn (reduc_fn),
5619 TREE_TYPE (vectype), vec);
5620 scalar = gimple_convert (&seq, scalar_type, scalar);
5621 scalar_results.safe_push (scalar);
5623 gsi_insert_seq_before (&exit_gsi, seq, GSI_SAME_STMT);
5625 else
5627 bool reduce_with_shift;
5628 tree vec_temp;
5630 gcc_assert (slp_reduc || new_phis.length () == 1);
5632 /* See if the target wants to do the final (shift) reduction
5633 in a vector mode of smaller size and first reduce upper/lower
5634 halves against each other. */
5635 enum machine_mode mode1 = mode;
5636 tree stype = TREE_TYPE (vectype);
5637 unsigned nunits = TYPE_VECTOR_SUBPARTS (vectype).to_constant ();
5638 unsigned nunits1 = nunits;
5639 if ((mode1 = targetm.vectorize.split_reduction (mode)) != mode
5640 && new_phis.length () == 1)
5642 nunits1 = GET_MODE_NUNITS (mode1).to_constant ();
5643 /* For SLP reductions we have to make sure lanes match up, but
5644 since we're doing an individual-element final reduction, reducing
5645 the vector width here is even more important.
5646 ??? We can also separate lanes with permutes, for the common
5647 case of power-of-two group-size odd/even extracts would work. */
5648 if (slp_reduc && nunits != nunits1)
5650 nunits1 = least_common_multiple (nunits1, group_size);
5651 gcc_assert (exact_log2 (nunits1) != -1 && nunits1 <= nunits);
5654 if (!slp_reduc
5655 && (mode1 = targetm.vectorize.split_reduction (mode)) != mode)
5656 nunits1 = GET_MODE_NUNITS (mode1).to_constant ();
5658 tree vectype1 = get_related_vectype_for_scalar_type (TYPE_MODE (vectype),
5659 stype, nunits1);
5660 reduce_with_shift = have_whole_vector_shift (mode1);
5661 if (!VECTOR_MODE_P (mode1))
5662 reduce_with_shift = false;
5663 else
5665 optab optab = optab_for_tree_code (code, vectype1, optab_default);
5666 if (optab_handler (optab, mode1) == CODE_FOR_nothing)
5667 reduce_with_shift = false;
5670 /* First reduce the vector to the desired vector size we should
5671 do shift reduction on by combining upper and lower halves. */
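  /* Hedged sketch of the halving below, assuming e.g. a split from 8 to
     4 lanes: the lower and upper halves of NEW_TEMP are extracted
     (directly via BIT_FIELD_REF when vec_extract is available, otherwise
     by punning through a two-element integer-mode vector) and combined
     with CODE, so 8 partial results become 4 before the shift or scalar
     reduction that follows.  */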
5672 new_temp = new_phi_result;
5673 while (nunits > nunits1)
5675 nunits /= 2;
5676 vectype1 = get_related_vectype_for_scalar_type (TYPE_MODE (vectype),
5677 stype, nunits);
5678 unsigned int bitsize = tree_to_uhwi (TYPE_SIZE (vectype1));
5680 /* The target has to make sure we support lowpart/highpart
5681 extraction, either via direct vector extract or through
5682 integer mode punning. */
5683 tree dst1, dst2;
5684 if (convert_optab_handler (vec_extract_optab,
5685 TYPE_MODE (TREE_TYPE (new_temp)),
5686 TYPE_MODE (vectype1))
5687 != CODE_FOR_nothing)
5689 /* Extract sub-vectors directly once vec_extract becomes
5690 a conversion optab. */
5691 dst1 = make_ssa_name (vectype1);
5692 epilog_stmt
5693 = gimple_build_assign (dst1, BIT_FIELD_REF,
5694 build3 (BIT_FIELD_REF, vectype1,
5695 new_temp, TYPE_SIZE (vectype1),
5696 bitsize_int (0)));
5697 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5698 dst2 = make_ssa_name (vectype1);
5699 epilog_stmt
5700 = gimple_build_assign (dst2, BIT_FIELD_REF,
5701 build3 (BIT_FIELD_REF, vectype1,
5702 new_temp, TYPE_SIZE (vectype1),
5703 bitsize_int (bitsize)));
5704 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5706 else
5708 /* Extract via punning to appropriately sized integer mode
5709 vector. */
5710 tree eltype = build_nonstandard_integer_type (bitsize, 1);
5711 tree etype = build_vector_type (eltype, 2);
5712 gcc_assert (convert_optab_handler (vec_extract_optab,
5713 TYPE_MODE (etype),
5714 TYPE_MODE (eltype))
5715 != CODE_FOR_nothing);
5716 tree tem = make_ssa_name (etype);
5717 epilog_stmt = gimple_build_assign (tem, VIEW_CONVERT_EXPR,
5718 build1 (VIEW_CONVERT_EXPR,
5719 etype, new_temp));
5720 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5721 new_temp = tem;
5722 tem = make_ssa_name (eltype);
5723 epilog_stmt
5724 = gimple_build_assign (tem, BIT_FIELD_REF,
5725 build3 (BIT_FIELD_REF, eltype,
5726 new_temp, TYPE_SIZE (eltype),
5727 bitsize_int (0)));
5728 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5729 dst1 = make_ssa_name (vectype1);
5730 epilog_stmt = gimple_build_assign (dst1, VIEW_CONVERT_EXPR,
5731 build1 (VIEW_CONVERT_EXPR,
5732 vectype1, tem));
5733 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5734 tem = make_ssa_name (eltype);
5735 epilog_stmt
5736 = gimple_build_assign (tem, BIT_FIELD_REF,
5737 build3 (BIT_FIELD_REF, eltype,
5738 new_temp, TYPE_SIZE (eltype),
5739 bitsize_int (bitsize)));
5740 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5741 dst2 = make_ssa_name (vectype1);
5742 epilog_stmt = gimple_build_assign (dst2, VIEW_CONVERT_EXPR,
5743 build1 (VIEW_CONVERT_EXPR,
5744 vectype1, tem));
5745 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5748 new_temp = make_ssa_name (vectype1);
5749 epilog_stmt = gimple_build_assign (new_temp, code, dst1, dst2);
5750 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5751 new_phis[0] = epilog_stmt;
5754 if (reduce_with_shift && !slp_reduc)
5756 int element_bitsize = tree_to_uhwi (bitsize);
5757 /* Enforced by vectorizable_reduction, which disallows SLP reductions
5758 for variable-length vectors and also requires direct target support
5759 for loop reductions. */
5760 int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype1));
5761 int nelements = vec_size_in_bits / element_bitsize;
5762 vec_perm_builder sel;
5763 vec_perm_indices indices;
5765 int elt_offset;
5767 tree zero_vec = build_zero_cst (vectype1);
5768 /* Case 2: Create:
5769 for (offset = nelements/2; offset >= 1; offset/=2)
5771 Create: va' = vec_shift <va, offset>
5772 Create: va = vop <va, va'>
5773 } */
5775 tree rhs;
5777 if (dump_enabled_p ())
5778 dump_printf_loc (MSG_NOTE, vect_location,
5779 "Reduce using vector shifts\n");
5781 gimple_seq stmts = NULL;
5782 new_temp = gimple_convert (&stmts, vectype1, new_temp);
5783 for (elt_offset = nelements / 2;
5784 elt_offset >= 1;
5785 elt_offset /= 2)
5787 calc_vec_perm_mask_for_shift (elt_offset, nelements, &sel);
5788 indices.new_vector (sel, 2, nelements);
5789 tree mask = vect_gen_perm_mask_any (vectype1, indices);
5790 new_name = gimple_build (&stmts, VEC_PERM_EXPR, vectype1,
5791 new_temp, zero_vec, mask);
5792 new_temp = gimple_build (&stmts, code,
5793 vectype1, new_name, new_temp);
5795 gsi_insert_seq_before (&exit_gsi, stmts, GSI_SAME_STMT);
5797 /* 2.4 Extract the final scalar result. Create:
5798 s_out3 = extract_field <v_out2, bitpos> */
5800 if (dump_enabled_p ())
5801 dump_printf_loc (MSG_NOTE, vect_location,
5802 "extract scalar result\n");
5804 rhs = build3 (BIT_FIELD_REF, scalar_type, new_temp,
5805 bitsize, bitsize_zero_node);
5806 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
5807 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
5808 gimple_assign_set_lhs (epilog_stmt, new_temp);
5809 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5810 scalar_results.safe_push (new_temp);
5812 else
5814 /* Case 3: Create:
5815 s = extract_field <v_out2, 0>
5816 for (offset = element_size;
5817 offset < vector_size;
5818 offset += element_size;)
5820 Create: s' = extract_field <v_out2, offset>
5821 Create: s = op <s, s'> // For non SLP cases
5822 } */
5824 if (dump_enabled_p ())
5825 dump_printf_loc (MSG_NOTE, vect_location,
5826 "Reduce using scalar code.\n");
5828 int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype1));
5829 int element_bitsize = tree_to_uhwi (bitsize);
5830 tree compute_type = TREE_TYPE (vectype);
5831 gimple_seq stmts = NULL;
5832 FOR_EACH_VEC_ELT (new_phis, i, new_phi)
5834 int bit_offset;
5835 if (gimple_code (new_phi) == GIMPLE_PHI)
5836 vec_temp = PHI_RESULT (new_phi);
5837 else
5838 vec_temp = gimple_assign_lhs (new_phi);
5839 new_temp = gimple_build (&stmts, BIT_FIELD_REF, compute_type,
5840 vec_temp, bitsize, bitsize_zero_node);
5842 /* In SLP we don't need to apply the reduction operation, so we just
5843 collect s' values in SCALAR_RESULTS. */
5844 if (slp_reduc)
5845 scalar_results.safe_push (new_temp);
5847 for (bit_offset = element_bitsize;
5848 bit_offset < vec_size_in_bits;
5849 bit_offset += element_bitsize)
5851 tree bitpos = bitsize_int (bit_offset);
5852 new_name = gimple_build (&stmts, BIT_FIELD_REF,
5853 compute_type, vec_temp,
5854 bitsize, bitpos);
5855 if (slp_reduc)
5857 /* In SLP we don't need to apply the reduction operation, so
5858 we just collect s' values in SCALAR_RESULTS. */
5859 new_temp = new_name;
5860 scalar_results.safe_push (new_name);
5862 else
5863 new_temp = gimple_build (&stmts, code, compute_type,
5864 new_name, new_temp);
5868 /* The only case where we need to reduce scalar results in SLP is
5869 unrolling. If the size of SCALAR_RESULTS is greater than
5870 REDUC_GROUP_SIZE, we reduce them combining elements modulo
5871 REDUC_GROUP_SIZE. */
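          /* Small sketch, assuming group_size == 2 and an unroll factor
             that produced four extracted scalars r0..r3: the loop below
             folds them modulo the group size, leaving
               scalar_results[0] = r0 op r2
               scalar_results[1] = r1 op r3  */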
5872 if (slp_reduc)
5874 tree res, first_res, new_res;
5876 /* Reduce multiple scalar results in case of SLP unrolling. */
5877 for (j = group_size; scalar_results.iterate (j, &res);
5878 j++)
5880 first_res = scalar_results[j % group_size];
5881 new_res = gimple_build (&stmts, code, compute_type,
5882 first_res, res);
5883 scalar_results[j % group_size] = new_res;
5885 for (k = 0; k < group_size; k++)
5886 scalar_results[k] = gimple_convert (&stmts, scalar_type,
5887 scalar_results[k]);
5889 else
5891 /* Not SLP - we have one scalar to keep in SCALAR_RESULTS. */
5892 new_temp = gimple_convert (&stmts, scalar_type, new_temp);
5893 scalar_results.safe_push (new_temp);
5896 gsi_insert_seq_before (&exit_gsi, stmts, GSI_SAME_STMT);
5899 if ((STMT_VINFO_REDUC_TYPE (reduc_info) == INTEGER_INDUC_COND_REDUCTION)
5900 && induc_val)
5902 /* Earlier we set the initial value to be a vector of induc_val
5903 values. Check the result and if it is induc_val then replace it
5904 with the original initial value, unless induc_val is
5905 the same as initial_def already. */
5906 tree zcompare = build2 (EQ_EXPR, boolean_type_node, new_temp,
5907 induc_val);
5909 tree tmp = make_ssa_name (new_scalar_dest);
5910 epilog_stmt = gimple_build_assign (tmp, COND_EXPR, zcompare,
5911 initial_def, new_temp);
5912 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5913 scalar_results[0] = tmp;
5917 /* 2.5 Adjust the final result by the initial value of the reduction
5918 variable. (When such adjustment is not needed, then
5919 'adjustment_def' is zero). For example, if code is PLUS we create:
5920 new_temp = loop_exit_def + adjustment_def */
5922 if (adjustment_def)
5924 gcc_assert (!slp_reduc);
5925 gimple_seq stmts = NULL;
5926 if (nested_in_vect_loop)
5928 new_phi = new_phis[0];
5929 gcc_assert (VECTOR_TYPE_P (TREE_TYPE (adjustment_def)));
5930 adjustment_def = gimple_convert (&stmts, vectype, adjustment_def);
5931 new_temp = gimple_build (&stmts, code, vectype,
5932 PHI_RESULT (new_phi), adjustment_def);
5934 else
5936 new_temp = scalar_results[0];
5937 gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) != VECTOR_TYPE);
5938 adjustment_def = gimple_convert (&stmts, scalar_type, adjustment_def);
5939 new_temp = gimple_build (&stmts, code, scalar_type,
5940 new_temp, adjustment_def);
5943 epilog_stmt = gimple_seq_last_stmt (stmts);
5944 gsi_insert_seq_before (&exit_gsi, stmts, GSI_SAME_STMT);
5945 if (nested_in_vect_loop)
5947 if (!double_reduc)
5948 scalar_results.quick_push (new_temp);
5949 else
5950 scalar_results[0] = new_temp;
5952 else
5953 scalar_results[0] = new_temp;
5955 new_phis[0] = epilog_stmt;
5958 if (double_reduc)
5959 loop = loop->inner;
5961 /* 2.6 Handle the loop-exit phis. Replace the uses of scalar loop-exit
5962 phis with new adjusted scalar results, i.e., replace use <s_out0>
5963 with use <s_out4>.
5965 Transform:
5966 loop_exit:
5967 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
5968 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
5969 v_out2 = reduce <v_out1>
5970 s_out3 = extract_field <v_out2, 0>
5971 s_out4 = adjust_result <s_out3>
5972 use <s_out0>
5973 use <s_out0>
5975 into:
5977 loop_exit:
5978 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
5979 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
5980 v_out2 = reduce <v_out1>
5981 s_out3 = extract_field <v_out2, 0>
5982 s_out4 = adjust_result <s_out3>
5983 use <s_out4>
5984 use <s_out4> */
5987 /* In an SLP reduction chain we reduce vector results into one vector if
5988 necessary, hence here we set REDUC_GROUP_SIZE to 1. SCALAR_DEST is the
5989 LHS of the last stmt in the reduction chain, since we are looking for
5990 the loop exit phi node. */
5991 if (REDUC_GROUP_FIRST_ELEMENT (stmt_info))
5993 stmt_vec_info dest_stmt_info
5994 = vect_orig_stmt (SLP_TREE_SCALAR_STMTS (slp_node)[group_size - 1]);
5995 scalar_dest = gimple_assign_lhs (dest_stmt_info->stmt);
5996 group_size = 1;
5999 /* In SLP we may have several statements in NEW_PHIS and REDUCTION_PHIS (in
6000 case that REDUC_GROUP_SIZE is greater than vectorization factor).
6001 Therefore, we need to match SCALAR_RESULTS with corresponding statements.
6002 The first (REDUC_GROUP_SIZE / number of new vector stmts) scalar results
6003 correspond to the first vector stmt, etc.
6004 (RATIO is equal to (REDUC_GROUP_SIZE / number of new vector stmts)). */
6005 if (group_size > new_phis.length ())
6006 gcc_assert (!(group_size % new_phis.length ()));
6008 for (k = 0; k < group_size; k++)
6010 if (slp_reduc)
6012 stmt_vec_info scalar_stmt_info = SLP_TREE_SCALAR_STMTS (slp_node)[k];
6014 orig_stmt_info = STMT_VINFO_RELATED_STMT (scalar_stmt_info);
6015 /* SLP statements can't participate in patterns. */
6016 gcc_assert (!orig_stmt_info);
6017 scalar_dest = gimple_assign_lhs (scalar_stmt_info->stmt);
6020 if (nested_in_vect_loop)
6022 if (double_reduc)
6023 loop = outer_loop;
6024 else
6025 gcc_unreachable ();
6028 phis.create (3);
6029 /* Find the loop-closed-use at the loop exit of the original scalar
6030 result. (The reduction result is expected to have two immediate uses,
6031 one at the latch block, and one at the loop exit). For double
6032 reductions we are looking for exit phis of the outer loop. */
6033 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
6035 if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p))))
6037 if (!is_gimple_debug (USE_STMT (use_p)))
6038 phis.safe_push (USE_STMT (use_p));
6040 else
6042 if (double_reduc && gimple_code (USE_STMT (use_p)) == GIMPLE_PHI)
6044 tree phi_res = PHI_RESULT (USE_STMT (use_p));
6046 FOR_EACH_IMM_USE_FAST (phi_use_p, phi_imm_iter, phi_res)
6048 if (!flow_bb_inside_loop_p (loop,
6049 gimple_bb (USE_STMT (phi_use_p)))
6050 && !is_gimple_debug (USE_STMT (phi_use_p)))
6051 phis.safe_push (USE_STMT (phi_use_p));
6057 FOR_EACH_VEC_ELT (phis, i, exit_phi)
6059 /* Replace the uses: */
6060 orig_name = PHI_RESULT (exit_phi);
6061 scalar_result = scalar_results[k];
6062 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
6064 FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
6065 SET_USE (use_p, scalar_result);
6066 update_stmt (use_stmt);
6070 phis.release ();
6074 /* Return a vector of type VECTYPE that is equal to the vector select
6075 operation "MASK ? VEC : IDENTITY". Insert the select statements
6076 before GSI. */
6078 static tree
6079 merge_with_identity (gimple_stmt_iterator *gsi, tree mask, tree vectype,
6080 tree vec, tree identity)
6082 tree cond = make_temp_ssa_name (vectype, NULL, "cond");
6083 gimple *new_stmt = gimple_build_assign (cond, VEC_COND_EXPR,
6084 mask, vec, identity);
6085 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
6086 return cond;
6089 /* Successively apply CODE to each element of VECTOR_RHS, in left-to-right
6090 order, starting with LHS. Insert the extraction statements before GSI and
6091 associate the new scalar SSA names with variable SCALAR_DEST.
6092 Return the SSA name for the result. */
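/* Rough sketch of the expansion, purely for illustration, for a
   4-element VECTOR_RHS v and starting value LHS:

     lhs = (((lhs CODE v[0]) CODE v[1]) CODE v[2]) CODE v[3]

   The strict left-to-right association is what makes this suitable for
   in-order (FOLD_LEFT) reductions.  */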
6094 static tree
6095 vect_expand_fold_left (gimple_stmt_iterator *gsi, tree scalar_dest,
6096 tree_code code, tree lhs, tree vector_rhs)
6098 tree vectype = TREE_TYPE (vector_rhs);
6099 tree scalar_type = TREE_TYPE (vectype);
6100 tree bitsize = TYPE_SIZE (scalar_type);
6101 unsigned HOST_WIDE_INT vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype));
6102 unsigned HOST_WIDE_INT element_bitsize = tree_to_uhwi (bitsize);
6104 for (unsigned HOST_WIDE_INT bit_offset = 0;
6105 bit_offset < vec_size_in_bits;
6106 bit_offset += element_bitsize)
6108 tree bitpos = bitsize_int (bit_offset);
6109 tree rhs = build3 (BIT_FIELD_REF, scalar_type, vector_rhs,
6110 bitsize, bitpos);
6112 gassign *stmt = gimple_build_assign (scalar_dest, rhs);
6113 rhs = make_ssa_name (scalar_dest, stmt);
6114 gimple_assign_set_lhs (stmt, rhs);
6115 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
6117 stmt = gimple_build_assign (scalar_dest, code, lhs, rhs);
6118 tree new_name = make_ssa_name (scalar_dest, stmt);
6119 gimple_assign_set_lhs (stmt, new_name);
6120 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
6121 lhs = new_name;
6123 return lhs;
6126 /* Get a masked internal function equivalent to REDUC_FN. VECTYPE_IN is the
6127 type of the vector input. */
6129 static internal_fn
6130 get_masked_reduction_fn (internal_fn reduc_fn, tree vectype_in)
6132 internal_fn mask_reduc_fn;
6134 switch (reduc_fn)
6136 case IFN_FOLD_LEFT_PLUS:
6137 mask_reduc_fn = IFN_MASK_FOLD_LEFT_PLUS;
6138 break;
6140 default:
6141 return IFN_LAST;
6144 if (direct_internal_fn_supported_p (mask_reduc_fn, vectype_in,
6145 OPTIMIZE_FOR_SPEED))
6146 return mask_reduc_fn;
6147 return IFN_LAST;
6150 /* Perform an in-order reduction (FOLD_LEFT_REDUCTION). STMT_INFO is the
6151 statement that sets the live-out value. REDUC_DEF_STMT is the phi
6152 statement. CODE is the operation performed by STMT_INFO and OPS are
6153 its scalar operands. REDUC_INDEX is the index of the operand in
6154 OPS that is set by REDUC_DEF_STMT. REDUC_FN is the function that
6155 implements in-order reduction, or IFN_LAST if we should open-code it.
6156 VECTYPE_IN is the type of the vector input. MASKS specifies the masks
6157 that should be used to control the operation in a fully-masked loop. */
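/* Illustrative sketch only: when IFN_FOLD_LEFT_PLUS is available each
   vectorized copy becomes roughly
     reduc_var_1 = .FOLD_LEFT_PLUS (reduc_var_0, vec_def);
   and when it is not, vect_expand_fold_left above open-codes the same
   fold element by element.  In a fully-masked loop either the masked
   variant or a VEC_COND against a zero identity keeps inactive lanes
   from contributing.  */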
6159 static bool
6160 vectorize_fold_left_reduction (loop_vec_info loop_vinfo,
6161 stmt_vec_info stmt_info,
6162 gimple_stmt_iterator *gsi,
6163 gimple **vec_stmt, slp_tree slp_node,
6164 gimple *reduc_def_stmt,
6165 tree_code code, internal_fn reduc_fn,
6166 tree ops[3], tree vectype_in,
6167 int reduc_index, vec_loop_masks *masks)
6169 class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
6170 tree vectype_out = STMT_VINFO_VECTYPE (stmt_info);
6171 internal_fn mask_reduc_fn = get_masked_reduction_fn (reduc_fn, vectype_in);
6173 int ncopies;
6174 if (slp_node)
6175 ncopies = 1;
6176 else
6177 ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
6179 gcc_assert (!nested_in_vect_loop_p (loop, stmt_info));
6180 gcc_assert (ncopies == 1);
6181 gcc_assert (TREE_CODE_LENGTH (code) == binary_op);
6183 if (slp_node)
6184 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (vectype_out),
6185 TYPE_VECTOR_SUBPARTS (vectype_in)));
6187 tree op0 = ops[1 - reduc_index];
6189 int group_size = 1;
6190 stmt_vec_info scalar_dest_def_info;
6191 auto_vec<tree> vec_oprnds0;
6192 if (slp_node)
6194 auto_vec<vec<tree> > vec_defs (2);
6195 vect_get_slp_defs (loop_vinfo, slp_node, &vec_defs);
6196 vec_oprnds0.safe_splice (vec_defs[1 - reduc_index]);
6197 vec_defs[0].release ();
6198 vec_defs[1].release ();
6199 group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();
6200 scalar_dest_def_info = SLP_TREE_SCALAR_STMTS (slp_node)[group_size - 1];
6202 else
6204 vect_get_vec_defs_for_operand (loop_vinfo, stmt_info, 1,
6205 op0, &vec_oprnds0);
6206 scalar_dest_def_info = stmt_info;
6209 tree scalar_dest = gimple_assign_lhs (scalar_dest_def_info->stmt);
6210 tree scalar_type = TREE_TYPE (scalar_dest);
6211 tree reduc_var = gimple_phi_result (reduc_def_stmt);
6213 int vec_num = vec_oprnds0.length ();
6214 gcc_assert (vec_num == 1 || slp_node);
6215 tree vec_elem_type = TREE_TYPE (vectype_out);
6216 gcc_checking_assert (useless_type_conversion_p (scalar_type, vec_elem_type));
6218 tree vector_identity = NULL_TREE;
6219 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
6220 vector_identity = build_zero_cst (vectype_out);
6222 tree scalar_dest_var = vect_create_destination_var (scalar_dest, NULL);
6223 int i;
6224 tree def0;
6225 FOR_EACH_VEC_ELT (vec_oprnds0, i, def0)
6227 gimple *new_stmt;
6228 tree mask = NULL_TREE;
6229 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
6230 mask = vect_get_loop_mask (gsi, masks, vec_num, vectype_in, i);
6232 /* Handle MINUS by adding the negative. */
6233 if (reduc_fn != IFN_LAST && code == MINUS_EXPR)
6235 tree negated = make_ssa_name (vectype_out);
6236 new_stmt = gimple_build_assign (negated, NEGATE_EXPR, def0);
6237 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
6238 def0 = negated;
6241 if (mask && mask_reduc_fn == IFN_LAST)
6242 def0 = merge_with_identity (gsi, mask, vectype_out, def0,
6243 vector_identity);
6245 /* On the first iteration the input is simply the scalar phi
6246 result, and for subsequent iterations it is the output of
6247 the preceding operation. */
6248 if (reduc_fn != IFN_LAST || (mask && mask_reduc_fn != IFN_LAST))
6250 if (mask && mask_reduc_fn != IFN_LAST)
6251 new_stmt = gimple_build_call_internal (mask_reduc_fn, 3, reduc_var,
6252 def0, mask);
6253 else
6254 new_stmt = gimple_build_call_internal (reduc_fn, 2, reduc_var,
6255 def0);
6256 /* For chained SLP reductions the output of the previous reduction
6257 operation serves as the input of the next. For the final statement
6258 the output cannot be a temporary - we reuse the original
6259 scalar destination of the last statement. */
6260 if (i != vec_num - 1)
6262 gimple_set_lhs (new_stmt, scalar_dest_var);
6263 reduc_var = make_ssa_name (scalar_dest_var, new_stmt);
6264 gimple_set_lhs (new_stmt, reduc_var);
6267 else
6269 reduc_var = vect_expand_fold_left (gsi, scalar_dest_var, code,
6270 reduc_var, def0);
6271 new_stmt = SSA_NAME_DEF_STMT (reduc_var);
6272 /* Remove the statement, so that we can use the same code paths
6273 as for statements that we've just created. */
6274 gimple_stmt_iterator tmp_gsi = gsi_for_stmt (new_stmt);
6275 gsi_remove (&tmp_gsi, true);
6278 if (i == vec_num - 1)
6280 gimple_set_lhs (new_stmt, scalar_dest);
6281 vect_finish_replace_stmt (loop_vinfo,
6282 scalar_dest_def_info,
6283 new_stmt);
6285 else
6286 vect_finish_stmt_generation (loop_vinfo,
6287 scalar_dest_def_info,
6288 new_stmt, gsi);
6290 if (slp_node)
6291 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
6292 else
6294 STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt);
6295 *vec_stmt = new_stmt;
6299 return true;
6302 /* Function is_nonwrapping_integer_induction.
6304 Check if STMT_VINFO (which is part of loop LOOP) both increments and
6305 does not cause overflow. */
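/* Loosely, the check below verifies that
     base + step * max_stmt_executions (loop)
   still fits in the precision of the induction's type (or that overflow
   is undefined for that type anyway), using widest_int arithmetic so the
   computation itself cannot wrap.  */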
6307 static bool
6308 is_nonwrapping_integer_induction (stmt_vec_info stmt_vinfo, class loop *loop)
6310 gphi *phi = as_a <gphi *> (stmt_vinfo->stmt);
6311 tree base = STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo);
6312 tree step = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo);
6313 tree lhs_type = TREE_TYPE (gimple_phi_result (phi));
6314 widest_int ni, max_loop_value, lhs_max;
6315 wi::overflow_type overflow = wi::OVF_NONE;
6317 /* Make sure the loop is integer based. */
6318 if (TREE_CODE (base) != INTEGER_CST
6319 || TREE_CODE (step) != INTEGER_CST)
6320 return false;
6322 /* Check that the max size of the loop will not wrap. */
6324 if (TYPE_OVERFLOW_UNDEFINED (lhs_type))
6325 return true;
6327 if (! max_stmt_executions (loop, &ni))
6328 return false;
6330 max_loop_value = wi::mul (wi::to_widest (step), ni, TYPE_SIGN (lhs_type),
6331 &overflow);
6332 if (overflow)
6333 return false;
6335 max_loop_value = wi::add (wi::to_widest (base), max_loop_value,
6336 TYPE_SIGN (lhs_type), &overflow);
6337 if (overflow)
6338 return false;
6340 return (wi::min_precision (max_loop_value, TYPE_SIGN (lhs_type))
6341 <= TYPE_PRECISION (lhs_type));
6344 /* Check if masking can be supported by inserting a conditional expression.
6345 CODE is the code for the operation. COND_FN is the conditional internal
6346 function, if it exists. VECTYPE_IN is the type of the vector input. */
6347 static bool
6348 use_mask_by_cond_expr_p (enum tree_code code, internal_fn cond_fn,
6349 tree vectype_in)
6351 if (cond_fn != IFN_LAST
6352 && direct_internal_fn_supported_p (cond_fn, vectype_in,
6353 OPTIMIZE_FOR_SPEED))
6354 return false;
6356 switch (code)
6358 case DOT_PROD_EXPR:
6359 case SAD_EXPR:
6360 return true;
6362 default:
6363 return false;
6367 /* Insert a conditional expression to enable masked vectorization. CODE is the
6368 code for the operation. VOP is the array of operands. MASK is the loop
6369 mask. GSI is a statement iterator used to place the new conditional
6370 expression. */
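/* A hedged note on the choices below: for DOT_PROD_EXPR, selecting zero
   in masked-off lanes makes the per-lane product zero, and for SAD_EXPR,
   selecting vop[0] makes the per-lane absolute difference zero, so in
   either case inactive lanes add nothing to the accumulator.  */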
6371 static void
6372 build_vect_cond_expr (enum tree_code code, tree vop[3], tree mask,
6373 gimple_stmt_iterator *gsi)
6375 switch (code)
6377 case DOT_PROD_EXPR:
6379 tree vectype = TREE_TYPE (vop[1]);
6380 tree zero = build_zero_cst (vectype);
6381 tree masked_op1 = make_temp_ssa_name (vectype, NULL, "masked_op1");
6382 gassign *select = gimple_build_assign (masked_op1, VEC_COND_EXPR,
6383 mask, vop[1], zero);
6384 gsi_insert_before (gsi, select, GSI_SAME_STMT);
6385 vop[1] = masked_op1;
6386 break;
6389 case SAD_EXPR:
6391 tree vectype = TREE_TYPE (vop[1]);
6392 tree masked_op1 = make_temp_ssa_name (vectype, NULL, "masked_op1");
6393 gassign *select = gimple_build_assign (masked_op1, VEC_COND_EXPR,
6394 mask, vop[1], vop[0]);
6395 gsi_insert_before (gsi, select, GSI_SAME_STMT);
6396 vop[1] = masked_op1;
6397 break;
6400 default:
6401 gcc_unreachable ();
6405 /* Function vectorizable_reduction.
6407 Check if STMT_INFO performs a reduction operation that can be vectorized.
6408 If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
6409 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
6410 Return true if STMT_INFO is vectorizable in this way.
6412 This function also handles reduction idioms (patterns) that have been
6413 recognized in advance during vect_pattern_recog. In this case, STMT_INFO
6414 may be of this form:
6415 X = pattern_expr (arg0, arg1, ..., X)
6416 and its STMT_VINFO_RELATED_STMT points to the last stmt in the original
6417 sequence that had been detected and replaced by the pattern-stmt
6418 (STMT_INFO).
6420 This function also handles reduction of condition expressions, for example:
6421 for (int i = 0; i < N; i++)
6422 if (a[i] < value)
6423 last = a[i];
6424 This is handled by vectorising the loop and creating an additional vector
6425 containing the loop indexes for which "a[i] < value" was true. In the
6426 function epilogue this is reduced to a single max value and then used to
6427 index into the vector of results.
6429 In some cases of reduction patterns, the type of the reduction variable X is
6430 different than the type of the other arguments of STMT_INFO.
6431 In such cases, the vectype that is used when transforming STMT_INFO into
6432 a vector stmt is different than the vectype that is used to determine the
6433 vectorization factor, because it consists of a different number of elements
6434 than the actual number of elements that are being operated upon in parallel.
6436 For example, consider an accumulation of shorts into an int accumulator.
6437 On some targets it's possible to vectorize this pattern operating on 8
6438 shorts at a time (hence, the vectype for purposes of determining the
6439 vectorization factor should be V8HI); on the other hand, the vectype that
6440 is used to create the vector form is actually V4SI (the type of the result).
6442 Upon entry to this function, STMT_VINFO_VECTYPE records the vectype that
6443 indicates what is the actual level of parallelism (V8HI in the example), so
6444 that the right vectorization factor would be derived. This vectype
6445 corresponds to the type of arguments to the reduction stmt, and should *NOT*
6446 be used to create the vectorized stmt. The right vectype for the vectorized
6447 stmt is obtained from the type of the result X:
6448 get_vectype_for_scalar_type (vinfo, TREE_TYPE (X))
6450 This means that, contrary to "regular" reductions (or "regular" stmts in
6451 general), the following equation:
6452 STMT_VINFO_VECTYPE == get_vectype_for_scalar_type (vinfo, TREE_TYPE (X))
6453 does *NOT* necessarily hold for reduction patterns. */
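/* For instance (purely illustrative, mirroring the example above): a
   short-to-int accumulation pattern-matched to DOT_PROD_EXPR may record
   V8HI as STMT_VINFO_VECTYPE, since that determines the vectorization
   factor, while the vectorized statement itself is created with the
   V4SI type derived from the int result X.  */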
6455 bool
6456 vectorizable_reduction (loop_vec_info loop_vinfo,
6457 stmt_vec_info stmt_info, slp_tree slp_node,
6458 slp_instance slp_node_instance,
6459 stmt_vector_for_cost *cost_vec)
6461 tree scalar_dest;
6462 tree vectype_in = NULL_TREE;
6463 class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
6464 enum vect_def_type cond_reduc_dt = vect_unknown_def_type;
6465 stmt_vec_info cond_stmt_vinfo = NULL;
6466 tree scalar_type;
6467 int i;
6468 int ncopies;
6469 bool single_defuse_cycle = false;
6470 bool nested_cycle = false;
6471 bool double_reduc = false;
6472 int vec_num;
6473 tree tem;
6474 tree cr_index_scalar_type = NULL_TREE, cr_index_vector_type = NULL_TREE;
6475 tree cond_reduc_val = NULL_TREE;
6477 /* Make sure it was already recognized as a reduction computation. */
6478 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_reduction_def
6479 && STMT_VINFO_DEF_TYPE (stmt_info) != vect_double_reduction_def
6480 && STMT_VINFO_DEF_TYPE (stmt_info) != vect_nested_cycle)
6481 return false;
6483 /* The stmt we store reduction analysis meta on. */
6484 stmt_vec_info reduc_info = info_for_reduction (loop_vinfo, stmt_info);
6485 reduc_info->is_reduc_info = true;
6487 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle)
6489 if (is_a <gphi *> (stmt_info->stmt))
6491 if (slp_node)
6493 /* We eventually need to set a vector type on invariant
6494 arguments. */
6495 unsigned j;
6496 slp_tree child;
6497 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (slp_node), j, child)
6498 if (!vect_maybe_update_slp_op_vectype
6499 (child, SLP_TREE_VECTYPE (slp_node)))
6501 if (dump_enabled_p ())
6502 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6503 "incompatible vector types for "
6504 "invariants\n");
6505 return false;
6508 /* Analysis for double-reduction is done on the outer
6509 loop PHI, nested cycles have no further restrictions. */
6510 STMT_VINFO_TYPE (stmt_info) = cycle_phi_info_type;
6512 else
6513 STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
6514 return true;
6517 stmt_vec_info orig_stmt_of_analysis = stmt_info;
6518 stmt_vec_info phi_info = stmt_info;
6519 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def
6520 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_double_reduction_def)
6522 if (!is_a <gphi *> (stmt_info->stmt))
6524 STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
6525 return true;
6527 if (slp_node)
6529 slp_node_instance->reduc_phis = slp_node;
6530 /* ??? We're leaving slp_node to point to the PHIs, we only
6531 need it to get at the number of vector stmts which wasn't
6532 yet initialized for the instance root. */
6534 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def)
6535 stmt_info = vect_stmt_to_vectorize (STMT_VINFO_REDUC_DEF (stmt_info));
6536 else /* STMT_VINFO_DEF_TYPE (stmt_info) == vect_double_reduction_def */
6538 use_operand_p use_p;
6539 gimple *use_stmt;
6540 bool res = single_imm_use (gimple_phi_result (stmt_info->stmt),
6541 &use_p, &use_stmt);
6542 gcc_assert (res);
6543 phi_info = loop_vinfo->lookup_stmt (use_stmt);
6544 stmt_info = vect_stmt_to_vectorize (STMT_VINFO_REDUC_DEF (phi_info));
6548 /* PHIs should not participate in patterns. */
6549 gcc_assert (!STMT_VINFO_RELATED_STMT (phi_info));
6550 gphi *reduc_def_phi = as_a <gphi *> (phi_info->stmt);
6552 /* Verify following REDUC_IDX from the latch def leads us back to the PHI
6553 and compute the reduction chain length. Discover the real
6554 reduction operation stmt on the way (stmt_info and slp_for_stmt_info). */
6555 tree reduc_def
6556 = PHI_ARG_DEF_FROM_EDGE (reduc_def_phi,
6557 loop_latch_edge
6558 (gimple_bb (reduc_def_phi)->loop_father));
6559 unsigned reduc_chain_length = 0;
6560 bool only_slp_reduc_chain = true;
6561 stmt_info = NULL;
6562 slp_tree slp_for_stmt_info = slp_node ? slp_node_instance->root : NULL;
6563 while (reduc_def != PHI_RESULT (reduc_def_phi))
6565 stmt_vec_info def = loop_vinfo->lookup_def (reduc_def);
6566 stmt_vec_info vdef = vect_stmt_to_vectorize (def);
6567 if (STMT_VINFO_REDUC_IDX (vdef) == -1)
6569 if (dump_enabled_p ())
6570 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6571 "reduction chain broken by patterns.\n");
6572 return false;
6574 if (!REDUC_GROUP_FIRST_ELEMENT (vdef))
6575 only_slp_reduc_chain = false;
6576 /* ??? For epilogue generation live members of the chain need
6577 to point back to the PHI via their original stmt for
6578 info_for_reduction to work. */
6579 if (STMT_VINFO_LIVE_P (vdef))
6580 STMT_VINFO_REDUC_DEF (def) = phi_info;
6581 gassign *assign = dyn_cast <gassign *> (vdef->stmt);
6582 if (!assign)
6584 if (dump_enabled_p ())
6585 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6586 "reduction chain includes calls.\n");
6587 return false;
6589 if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (assign)))
6591 if (!tree_nop_conversion_p (TREE_TYPE (gimple_assign_lhs (assign)),
6592 TREE_TYPE (gimple_assign_rhs1 (assign))))
6594 if (dump_enabled_p ())
6595 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6596 "conversion in the reduction chain.\n");
6597 return false;
6600 else if (!stmt_info)
6601 /* First non-conversion stmt. */
6602 stmt_info = vdef;
6603 reduc_def = gimple_op (vdef->stmt, 1 + STMT_VINFO_REDUC_IDX (vdef));
6604 reduc_chain_length++;
6605 if (!stmt_info && slp_node)
6606 slp_for_stmt_info = SLP_TREE_CHILDREN (slp_for_stmt_info)[0];
6608 /* PHIs should not participate in patterns. */
6609 gcc_assert (!STMT_VINFO_RELATED_STMT (phi_info));
6611 if (nested_in_vect_loop_p (loop, stmt_info))
6613 loop = loop->inner;
6614 nested_cycle = true;
6617 /* STMT_VINFO_REDUC_DEF doesn't point to the first but the last
6618 element. */
6619 if (slp_node && REDUC_GROUP_FIRST_ELEMENT (stmt_info))
6621 gcc_assert (!REDUC_GROUP_NEXT_ELEMENT (stmt_info));
6622 stmt_info = REDUC_GROUP_FIRST_ELEMENT (stmt_info);
6624 if (REDUC_GROUP_FIRST_ELEMENT (stmt_info))
6625 gcc_assert (slp_node
6626 && REDUC_GROUP_FIRST_ELEMENT (stmt_info) == stmt_info);
6628 /* 1. Is vectorizable reduction? */
6629 /* Not supportable if the reduction variable is used in the loop, unless
6630 it's a reduction chain. */
6631 if (STMT_VINFO_RELEVANT (stmt_info) > vect_used_in_outer
6632 && !REDUC_GROUP_FIRST_ELEMENT (stmt_info))
6633 return false;
6635 /* Reductions that are not used even in an enclosing outer-loop,
6636 are expected to be "live" (used out of the loop). */
6637 if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_scope
6638 && !STMT_VINFO_LIVE_P (stmt_info))
6639 return false;
6641 /* 2. Has this been recognized as a reduction pattern?
6643 Check if STMT represents a pattern that has been recognized
6644 in earlier analysis stages. For stmts that represent a pattern,
6645 the STMT_VINFO_RELATED_STMT field records the last stmt in
6646 the original sequence that constitutes the pattern. */
6648 stmt_vec_info orig_stmt_info = STMT_VINFO_RELATED_STMT (stmt_info);
6649 if (orig_stmt_info)
6651 gcc_assert (STMT_VINFO_IN_PATTERN_P (orig_stmt_info));
6652 gcc_assert (!STMT_VINFO_IN_PATTERN_P (stmt_info));
6655 /* 3. Check the operands of the operation. The first operands are defined
6656 inside the loop body. The last operand is the reduction variable,
6657 which is defined by the loop-header-phi. */
6659 tree vectype_out = STMT_VINFO_VECTYPE (stmt_info);
6660 STMT_VINFO_REDUC_VECTYPE (reduc_info) = vectype_out;
6661 gassign *stmt = as_a <gassign *> (stmt_info->stmt);
6662 enum tree_code code = gimple_assign_rhs_code (stmt);
6663 bool lane_reduc_code_p
6664 = (code == DOT_PROD_EXPR || code == WIDEN_SUM_EXPR || code == SAD_EXPR);
6665 int op_type = TREE_CODE_LENGTH (code);
6667 scalar_dest = gimple_assign_lhs (stmt);
6668 scalar_type = TREE_TYPE (scalar_dest);
6669 if (!POINTER_TYPE_P (scalar_type) && !INTEGRAL_TYPE_P (scalar_type)
6670 && !SCALAR_FLOAT_TYPE_P (scalar_type))
6671 return false;
6673 /* Do not try to vectorize bit-precision reductions. */
6674 if (!type_has_mode_precision_p (scalar_type))
6675 return false;
6677 /* For lane-reducing ops we're reducing the number of reduction PHIs,
6678 which means the only use of the reduction PHI may be in the lane-reducing operation. */
6679 if (lane_reduc_code_p
6680 && reduc_chain_length != 1
6681 && !only_slp_reduc_chain)
6683 if (dump_enabled_p ())
6684 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6685 "lane-reducing reduction with extra stmts.\n");
6686 return false;
6689 /* All uses but the last are expected to be defined in the loop.
6690 The last use is the reduction variable. In case of nested cycle this
6691 assumption is not true: we use reduc_index to record the index of the
6692 reduction variable. */
6693 slp_tree *slp_op = XALLOCAVEC (slp_tree, op_type);
6694 /* We need to skip an extra operand for COND_EXPRs with embedded
6695 comparison. */
6696 unsigned opno_adjust = 0;
6697 if (code == COND_EXPR
6698 && COMPARISON_CLASS_P (gimple_assign_rhs1 (stmt)))
6699 opno_adjust = 1;
6700 for (i = 0; i < op_type; i++)
6702 /* The condition of COND_EXPR is checked in vectorizable_condition(). */
6703 if (i == 0 && code == COND_EXPR)
6704 continue;
6706 stmt_vec_info def_stmt_info;
6707 enum vect_def_type dt;
6708 tree op;
6709 if (!vect_is_simple_use (loop_vinfo, stmt_info, slp_for_stmt_info,
6710 i + opno_adjust, &op, &slp_op[i], &dt, &tem,
6711 &def_stmt_info))
6713 if (dump_enabled_p ())
6714 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6715 "use not simple.\n");
6716 return false;
6718 if (i == STMT_VINFO_REDUC_IDX (stmt_info))
6719 continue;
6721 /* There should be only one cycle def in the stmt, the one
6722 leading to reduc_def. */
6723 if (VECTORIZABLE_CYCLE_DEF (dt))
6724 return false;
6726 /* To properly compute ncopies we are interested in the widest
6727 non-reduction input type in case we're looking at a widening
6728 accumulation that we later handle in vect_transform_reduction. */
6729 if (lane_reduc_code_p
6730 && tem
6731 && (!vectype_in
6732 || (GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (vectype_in)))
6733 < GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (tem))))))
6734 vectype_in = tem;
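/* As an illustrative sketch (not taken from the original sources),
   consider a dot-product reduction such as

     int
     dotprod (signed char *a, signed char *b, int n)
     {
       int acc = 0;
       for (int i = 0; i < n; i++)
         acc += a[i] * b[i];
       return acc;
     }

   recognized as DOT_PROD_EXPR.  On a hypothetical target with 128-bit
   vectors the narrow inputs use a 16-lane vector type while the int
   accumulator uses a 4-lane one; recording the widest non-reduction
   input type here lets the ncopies computation below follow the
   16-lane input rather than the 4-lane accumulator.  */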
6736 if (code == COND_EXPR)
6738 /* Record how the non-reduction-def value of COND_EXPR is defined. */
6739 if (dt == vect_constant_def)
6741 cond_reduc_dt = dt;
6742 cond_reduc_val = op;
6744 if (dt == vect_induction_def
6745 && def_stmt_info
6746 && is_nonwrapping_integer_induction (def_stmt_info, loop))
6748 cond_reduc_dt = dt;
6749 cond_stmt_vinfo = def_stmt_info;
6753 if (!vectype_in)
6754 vectype_in = STMT_VINFO_VECTYPE (phi_info);
6755 STMT_VINFO_REDUC_VECTYPE_IN (reduc_info) = vectype_in;
6757 enum vect_reduction_type v_reduc_type = STMT_VINFO_REDUC_TYPE (phi_info);
6758 STMT_VINFO_REDUC_TYPE (reduc_info) = v_reduc_type;
6759 /* If we have a condition reduction, see if we can simplify it further. */
6760 if (v_reduc_type == COND_REDUCTION)
6762 if (slp_node)
6763 return false;
6765 /* When the condition uses the reduction value in the condition, fail. */
6766 if (STMT_VINFO_REDUC_IDX (stmt_info) == 0)
6768 if (dump_enabled_p ())
6769 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6770 "condition depends on previous iteration\n");
6771 return false;
6774 if (reduc_chain_length == 1
6775 && direct_internal_fn_supported_p (IFN_FOLD_EXTRACT_LAST,
6776 vectype_in, OPTIMIZE_FOR_SPEED))
6778 if (dump_enabled_p ())
6779 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6780 "optimizing condition reduction with"
6781 " FOLD_EXTRACT_LAST.\n");
6782 STMT_VINFO_REDUC_TYPE (reduc_info) = EXTRACT_LAST_REDUCTION;
6784 else if (cond_reduc_dt == vect_induction_def)
6786 tree base
6787 = STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (cond_stmt_vinfo);
6788 tree step = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (cond_stmt_vinfo);
6790 gcc_assert (TREE_CODE (base) == INTEGER_CST
6791 && TREE_CODE (step) == INTEGER_CST);
6792 cond_reduc_val = NULL_TREE;
6793 enum tree_code cond_reduc_op_code = ERROR_MARK;
6794 tree res = PHI_RESULT (STMT_VINFO_STMT (cond_stmt_vinfo));
6795 if (!types_compatible_p (TREE_TYPE (res), TREE_TYPE (base)))
6797 /* Find a suitable value, for MAX_EXPR below base, for MIN_EXPR
6798 above base; punt if base is the minimum value of the type for
6799 MAX_EXPR or maximum value of the type for MIN_EXPR for now. */
6800 else if (tree_int_cst_sgn (step) == -1)
6802 cond_reduc_op_code = MIN_EXPR;
6803 if (tree_int_cst_sgn (base) == -1)
6804 cond_reduc_val = build_int_cst (TREE_TYPE (base), 0);
6805 else if (tree_int_cst_lt (base,
6806 TYPE_MAX_VALUE (TREE_TYPE (base))))
6807 cond_reduc_val
6808 = int_const_binop (PLUS_EXPR, base, integer_one_node);
6810 else
6812 cond_reduc_op_code = MAX_EXPR;
6813 if (tree_int_cst_sgn (base) == 1)
6814 cond_reduc_val = build_int_cst (TREE_TYPE (base), 0);
6815 else if (tree_int_cst_lt (TYPE_MIN_VALUE (TREE_TYPE (base)),
6816 base))
6817 cond_reduc_val
6818 = int_const_binop (MINUS_EXPR, base, integer_one_node);
6820 if (cond_reduc_val)
6822 if (dump_enabled_p ())
6823 dump_printf_loc (MSG_NOTE, vect_location,
6824 "condition expression based on "
6825 "integer induction.\n");
6826 STMT_VINFO_REDUC_CODE (reduc_info) = cond_reduc_op_code;
6827 STMT_VINFO_VEC_INDUC_COND_INITIAL_VAL (reduc_info)
6828 = cond_reduc_val;
6829 STMT_VINFO_REDUC_TYPE (reduc_info) = INTEGER_INDUC_COND_REDUCTION;
6832 else if (cond_reduc_dt == vect_constant_def)
6834 enum vect_def_type cond_initial_dt;
6835 tree cond_initial_val
6836 = PHI_ARG_DEF_FROM_EDGE (reduc_def_phi, loop_preheader_edge (loop));
6838 gcc_assert (cond_reduc_val != NULL_TREE);
6839 vect_is_simple_use (cond_initial_val, loop_vinfo, &cond_initial_dt);
6840 if (cond_initial_dt == vect_constant_def
6841 && types_compatible_p (TREE_TYPE (cond_initial_val),
6842 TREE_TYPE (cond_reduc_val)))
6844 tree e = fold_binary (LE_EXPR, boolean_type_node,
6845 cond_initial_val, cond_reduc_val);
6846 if (e && (integer_onep (e) || integer_zerop (e)))
6848 if (dump_enabled_p ())
6849 dump_printf_loc (MSG_NOTE, vect_location,
6850 "condition expression based on "
6851 "compile time constant.\n");
6852 /* Record reduction code at analysis stage. */
6853 STMT_VINFO_REDUC_CODE (reduc_info)
6854 = integer_onep (e) ? MAX_EXPR : MIN_EXPR;
6855 STMT_VINFO_REDUC_TYPE (reduc_info) = CONST_COND_REDUCTION;
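/* A worked example of the sentinel chosen in the integer-induction
   branch above (an illustration, not from the original sources): for a
   condition reduction whose tracked value is an increasing nonwrapping
   induction with base 5 and step 1, cond_reduc_op_code becomes
   MAX_EXPR and, the base being positive, cond_reduc_val is 0.  Lanes
   whose condition never matched keep that sentinel, so a final MAX
   reduction recovers the value from the last matching iteration;
   vect_transform_cycle_phi below falls back to the scalar initial
   value whenever it would compare the wrong way against the
   sentinel.  */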
6861 if (STMT_VINFO_LIVE_P (phi_info))
6862 return false;
6864 if (slp_node)
6865 ncopies = 1;
6866 else
6867 ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
6869 gcc_assert (ncopies >= 1);
6871 poly_uint64 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
6873 if (nested_cycle)
6875 gcc_assert (STMT_VINFO_DEF_TYPE (reduc_info)
6876 == vect_double_reduction_def);
6877 double_reduc = true;
6880 /* 4.2. Check support for the epilog operation.
6882 If STMT represents a reduction pattern, then the type of the
6883 reduction variable may be different than the type of the rest
6884 of the arguments. For example, consider the case of accumulation
6885 of shorts into an int accumulator.  The original code:
6886 S1: int_a = (int) short_a;
6887 orig_stmt-> S2: int_acc = plus <int_a ,int_acc>;
6889 was replaced with:
6890 STMT: int_acc = widen_sum <short_a, int_acc>
6892 This means that:
6893 1. The tree-code that is used to create the vector operation in the
6894 epilog code (that reduces the partial results) is not the
6895 tree-code of STMT, but is rather the tree-code of the original
6896 stmt from the pattern that STMT is replacing. I.e, in the example
6897 above we want to use 'widen_sum' in the loop, but 'plus' in the
6898 epilog.
6899 2. The type (mode) we use to check available target support
6900 for the vector operation to be created in the *epilog*, is
6901 determined by the type of the reduction variable (in the example
6902 above we'd check this: optab_handler (plus_optab, vect_int_mode)).
6903 However the type (mode) we use to check available target support
6904 for the vector operation to be created *inside the loop*, is
6905 determined by the type of the other arguments to STMT (in the
6906 example we'd check this: optab_handler (widen_sum_optab,
6907 vect_short_mode)).
6909 This is contrary to "regular" reductions, in which the types of all
6910 the arguments are the same as the type of the reduction variable.
6911 For "regular" reductions we can therefore use the same vector type
6912 (and also the same tree-code) when generating the epilog code and
6913 when generating the code inside the loop. */
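/* An illustrative sketch of the situation described above (not taken
   from the original sources): for

     int
     sum (short *a, int n)
     {
       int acc = 0;
       for (int i = 0; i < n; i++)
         acc += a[i];
       return acc;
     }

   the pattern recognizer can rewrite S1/S2 into a widen_sum, so the
   in-loop statement is checked against the target's widening-sum
   support on the short vector mode, while the epilog that folds the
   partial results into a scalar is checked as a plain plus on the int
   vector mode.  */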
6915 enum tree_code orig_code = STMT_VINFO_REDUC_CODE (phi_info);
6916 STMT_VINFO_REDUC_CODE (reduc_info) = orig_code;
6918 vect_reduction_type reduction_type = STMT_VINFO_REDUC_TYPE (reduc_info);
6919 if (reduction_type == TREE_CODE_REDUCTION)
6921 /* Check whether it's ok to change the order of the computation.
6922 Generally, when vectorizing a reduction we change the order of the
6923 computation. This may change the behavior of the program in some
6924 cases, so we need to check that this is ok. One exception is when
6925 vectorizing an outer-loop: the inner-loop is executed sequentially,
6926 and therefore vectorizing reductions in the inner-loop during
6927 outer-loop vectorization is safe. Likewise when we are vectorizing
6928 a series of reductions using SLP and the VF is one the reductions
6929 are performed in scalar order. */
6930 if (slp_node
6931 && !REDUC_GROUP_FIRST_ELEMENT (stmt_info)
6932 && known_eq (LOOP_VINFO_VECT_FACTOR (loop_vinfo), 1u))
6934 else if (needs_fold_left_reduction_p (scalar_type, orig_code))
6936 /* When vectorizing a reduction chain w/o SLP the reduction PHI
6937 is not directly used in stmt. */
6938 if (!only_slp_reduc_chain
6939 && reduc_chain_length != 1)
6941 if (dump_enabled_p ())
6942 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6943 "in-order reduction chain without SLP.\n");
6944 return false;
6946 STMT_VINFO_REDUC_TYPE (reduc_info)
6947 = reduction_type = FOLD_LEFT_REDUCTION;
6949 else if (!commutative_tree_code (orig_code)
6950 || !associative_tree_code (orig_code))
6952 if (dump_enabled_p ())
6953 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6954 "reduction: not commutative/associative");
6955 return false;
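/* For illustration (a sketch, not from the original sources): a
   floating-point accumulation such as

     double
     fsum (double *a, int n)
     {
       double s = 0.0;
       for (int i = 0; i < n; i++)
         s += a[i];
       return s;
     }

   compiled without -ffast-math/-fassociative-math must not be
   reassociated, so needs_fold_left_reduction_p is expected to return
   true above and the reduction is handled as an in-order
   FOLD_LEFT_REDUCTION; with reassociation allowed it stays a
   TREE_CODE_REDUCTION and the partial sums are combined in the
   epilog.  */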
6959 if ((double_reduc || reduction_type != TREE_CODE_REDUCTION)
6960 && ncopies > 1)
6962 if (dump_enabled_p ())
6963 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6964 "multiple types in double reduction or condition "
6965 "reduction or fold-left reduction.\n");
6966 return false;
6969 internal_fn reduc_fn = IFN_LAST;
6970 if (reduction_type == TREE_CODE_REDUCTION
6971 || reduction_type == FOLD_LEFT_REDUCTION
6972 || reduction_type == INTEGER_INDUC_COND_REDUCTION
6973 || reduction_type == CONST_COND_REDUCTION)
6975 if (reduction_type == FOLD_LEFT_REDUCTION
6976 ? fold_left_reduction_fn (orig_code, &reduc_fn)
6977 : reduction_fn_for_scalar_code (orig_code, &reduc_fn))
6979 if (reduc_fn != IFN_LAST
6980 && !direct_internal_fn_supported_p (reduc_fn, vectype_out,
6981 OPTIMIZE_FOR_SPEED))
6983 if (dump_enabled_p ())
6984 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6985 "reduc op not supported by target.\n");
6987 reduc_fn = IFN_LAST;
6990 else
6992 if (!nested_cycle || double_reduc)
6994 if (dump_enabled_p ())
6995 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6996 "no reduc code for scalar code.\n");
6998 return false;
7002 else if (reduction_type == COND_REDUCTION)
7004 int scalar_precision
7005 = GET_MODE_PRECISION (SCALAR_TYPE_MODE (scalar_type));
7006 cr_index_scalar_type = make_unsigned_type (scalar_precision);
7007 cr_index_vector_type = get_same_sized_vectype (cr_index_scalar_type,
7008 vectype_out);
7010 if (direct_internal_fn_supported_p (IFN_REDUC_MAX, cr_index_vector_type,
7011 OPTIMIZE_FOR_SPEED))
7012 reduc_fn = IFN_REDUC_MAX;
7014 STMT_VINFO_REDUC_FN (reduc_info) = reduc_fn;
7016 if (reduction_type != EXTRACT_LAST_REDUCTION
7017 && (!nested_cycle || double_reduc)
7018 && reduc_fn == IFN_LAST
7019 && !nunits_out.is_constant ())
7021 if (dump_enabled_p ())
7022 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7023 "missing target support for reduction on"
7024 " variable-length vectors.\n");
7025 return false;
7028 /* For SLP reductions, see if there is a neutral value we can use. */
7029 tree neutral_op = NULL_TREE;
7030 if (slp_node)
7031 neutral_op = neutral_op_for_slp_reduction
7032 (slp_node_instance->reduc_phis, vectype_out, orig_code,
7033 REDUC_GROUP_FIRST_ELEMENT (stmt_info) != NULL);
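/* The neutral value is the element that leaves the reduction result
   unchanged; as a rough illustration (not an exhaustive list from the
   sources): zero for the additive codes, one for MULT_EXPR, all-ones
   for BIT_AND_EXPR, and for MIN/MAX-style reduction chains the initial
   scalar value itself.  It is used below when padding initial vectors,
   in particular for variable-length vector types.  */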
7035 if (double_reduc && reduction_type == FOLD_LEFT_REDUCTION)
7037 /* We can't support in-order reductions of code such as this:
7039 for (int i = 0; i < n1; ++i)
7040 for (int j = 0; j < n2; ++j)
7041 l += a[j];
7043 since GCC effectively transforms the loop when vectorizing:
7045 for (int i = 0; i < n1 / VF; ++i)
7046 for (int j = 0; j < n2; ++j)
7047 for (int k = 0; k < VF; ++k)
7048 l += a[j];
7050 which is a reassociation of the original operation. */
7051 if (dump_enabled_p ())
7052 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7053 "in-order double reduction not supported.\n");
7055 return false;
7058 if (reduction_type == FOLD_LEFT_REDUCTION
7059 && slp_node
7060 && !REDUC_GROUP_FIRST_ELEMENT (stmt_info))
7062 /* We cannot use in-order reductions in this case because there is
7063 an implicit reassociation of the operations involved. */
7064 if (dump_enabled_p ())
7065 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7066 "in-order unchained SLP reductions not supported.\n");
7067 return false;
7070 /* For double reductions, and for SLP reductions with a neutral value,
7071 we construct a variable-length initial vector by loading a vector
7072 full of the neutral value and then shift-and-inserting the start
7073 values into the low-numbered elements. */
7074 if ((double_reduc || neutral_op)
7075 && !nunits_out.is_constant ()
7076 && !direct_internal_fn_supported_p (IFN_VEC_SHL_INSERT,
7077 vectype_out, OPTIMIZE_FOR_SPEED))
7079 if (dump_enabled_p ())
7080 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7081 "reduction on variable-length vectors requires"
7082 " target support for a vector-shift-and-insert"
7083 " operation.\n");
7084 return false;
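/* An illustration of the shift-and-insert scheme mentioned above (a
   sketch, not from the original sources): for a variable-length sum
   reduction with start value s, the initial vector is conceptually
   built by splatting the neutral value and then shift-and-inserting s
   into the low lane, giving { s, 0, ..., 0 }; the remaining lanes hold
   the neutral value and therefore do not perturb the final result.  */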
7087 /* Check extra constraints for variable-length unchained SLP reductions. */
7088 if (STMT_SLP_TYPE (stmt_info)
7089 && !REDUC_GROUP_FIRST_ELEMENT (stmt_info)
7090 && !nunits_out.is_constant ())
7092 /* We checked above that we could build the initial vector when
7093 there's a neutral element value. Check here for the case in
7094 which each SLP statement has its own initial value and in which
7095 that value needs to be repeated for every instance of the
7096 statement within the initial vector. */
7097 unsigned int group_size = SLP_TREE_LANES (slp_node);
7098 if (!neutral_op
7099 && !can_duplicate_and_interleave_p (loop_vinfo, group_size,
7100 TREE_TYPE (vectype_out)))
7102 if (dump_enabled_p ())
7103 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7104 "unsupported form of SLP reduction for"
7105 " variable-length vectors: cannot build"
7106 " initial vector.\n");
7107 return false;
7109 /* The epilogue code relies on the number of elements being a multiple
7110 of the group size. The duplicate-and-interleave approach to setting
7111 up the initial vector does too. */
7112 if (!multiple_p (nunits_out, group_size))
7114 if (dump_enabled_p ())
7115 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7116 "unsupported form of SLP reduction for"
7117 " variable-length vectors: the vector size"
7118 " is not a multiple of the number of results.\n");
7119 return false;
7123 if (reduction_type == COND_REDUCTION)
7125 widest_int ni;
7127 if (! max_loop_iterations (loop, &ni))
7129 if (dump_enabled_p ())
7130 dump_printf_loc (MSG_NOTE, vect_location,
7131 "loop count not known, cannot create cond "
7132 "reduction.\n");
7133 return false;
7135 /* Convert backedges to iterations. */
7136 ni += 1;
7138 /* The additional index will be the same type as the condition. Check
7139 that the loop iteration count fits into this type less one (because
7140 we'll use up the zero slot for when there are no matches). */
7141 tree max_index = TYPE_MAX_VALUE (cr_index_scalar_type);
7142 if (wi::geu_p (ni, wi::to_widest (max_index)))
7144 if (dump_enabled_p ())
7145 dump_printf_loc (MSG_NOTE, vect_location,
7146 "loop size is greater than data size.\n");
7147 return false;
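/* A worked example of the check above (an illustration, not from the
   original sources): if the reduction's scalar type is 8 bits wide,
   the implicit index vector uses unsigned 8-bit elements, so max_index
   is 255; because index zero is reserved for "no lane matched", a loop
   whose iteration count can reach 255 is rejected here.  */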
7151 /* In case the vectorization factor (VF) is bigger than the number
7152 of elements that we can fit in a vectype (nunits), we have to generate
7153 more than one vector stmt - i.e - we need to "unroll" the
7154 vector stmt by a factor VF/nunits. For more details see documentation
7155 in vectorizable_operation. */
7157 /* If the reduction is used in an outer loop we need to generate
7158 VF intermediate results, like so (e.g. for ncopies=2):
7159 r0 = phi (init, r0)
7160 r1 = phi (init, r1)
7161 r0 = x0 + r0;
7162 r1 = x1 + r1;
7163 (i.e. we generate VF results in 2 registers).
7164 In this case we have a separate def-use cycle for each copy, and therefore
7165 for each copy we get the vector def for the reduction variable from the
7166 respective phi node created for this copy.
7168 Otherwise (the reduction is unused in the loop nest), we can combine
7169 together intermediate results, like so (e.g. for ncopies=2):
7170 r = phi (init, r)
7171 r = x0 + r;
7172 r = x1 + r;
7173 (i.e. we generate VF/2 results in a single register).
7174 In this case for each copy we get the vector def for the reduction variable
7175 from the vectorized reduction operation generated in the previous iteration.
7177 This only works when we see both the reduction PHI and its only consumer
7178 in vectorizable_reduction and there are no intermediate stmts
7179 participating. */
7180 if (ncopies > 1
7181 && (STMT_VINFO_RELEVANT (stmt_info) <= vect_used_only_live)
7182 && reduc_chain_length == 1)
7183 single_defuse_cycle = true;
7185 if (single_defuse_cycle || lane_reduc_code_p)
7187 gcc_assert (code != COND_EXPR);
7189 /* 4. Supportable by target? */
7190 bool ok = true;
7192 /* 4.1. check support for the operation in the loop */
7193 optab optab = optab_for_tree_code (code, vectype_in, optab_vector);
7194 if (!optab)
7196 if (dump_enabled_p ())
7197 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7198 "no optab.\n");
7199 ok = false;
7202 machine_mode vec_mode = TYPE_MODE (vectype_in);
7203 if (ok && optab_handler (optab, vec_mode) == CODE_FOR_nothing)
7205 if (dump_enabled_p ())
7206 dump_printf (MSG_NOTE, "op not supported by target.\n");
7207 if (maybe_ne (GET_MODE_SIZE (vec_mode), UNITS_PER_WORD)
7208 || !vect_worthwhile_without_simd_p (loop_vinfo, code))
7209 ok = false;
7210 else
7211 if (dump_enabled_p ())
7212 dump_printf (MSG_NOTE, "proceeding using word mode.\n");
7215 /* Worthwhile without SIMD support? */
7216 if (ok
7217 && !VECTOR_MODE_P (TYPE_MODE (vectype_in))
7218 && !vect_worthwhile_without_simd_p (loop_vinfo, code))
7220 if (dump_enabled_p ())
7221 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7222 "not worthwhile without SIMD support.\n");
7223 ok = false;
7226 /* lane-reducing operations have to go through vect_transform_reduction.
7227 For the other cases try without the single cycle optimization. */
7228 if (!ok)
7230 if (lane_reduc_code_p)
7231 return false;
7232 else
7233 single_defuse_cycle = false;
7236 STMT_VINFO_FORCE_SINGLE_CYCLE (reduc_info) = single_defuse_cycle;
7238 /* If the reduction stmt is one of the patterns that have lane
7239 reduction embedded we cannot handle the case of ! single_defuse_cycle. */
7240 if ((ncopies > 1 && ! single_defuse_cycle)
7241 && lane_reduc_code_p)
7243 if (dump_enabled_p ())
7244 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7245 "multi def-use cycle not possible for lane-reducing "
7246 "reduction operation\n");
7247 return false;
7250 if (slp_node
7251 && !(!single_defuse_cycle
7252 && code != DOT_PROD_EXPR
7253 && code != WIDEN_SUM_EXPR
7254 && code != SAD_EXPR
7255 && reduction_type != FOLD_LEFT_REDUCTION))
7256 for (i = 0; i < op_type; i++)
7257 if (!vect_maybe_update_slp_op_vectype (slp_op[i], vectype_in))
7259 if (dump_enabled_p ())
7260 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7261 "incompatible vector types for invariants\n");
7262 return false;
7265 if (slp_node)
7266 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
7267 else
7268 vec_num = 1;
7270 vect_model_reduction_cost (loop_vinfo, stmt_info, reduc_fn,
7271 reduction_type, ncopies, cost_vec);
7272 /* Cost the reduction op inside the loop if transformed via
7273 vect_transform_reduction. Otherwise this is costed by the
7274 separate vectorizable_* routines. */
7275 if (single_defuse_cycle
7276 || code == DOT_PROD_EXPR
7277 || code == WIDEN_SUM_EXPR
7278 || code == SAD_EXPR)
7279 record_stmt_cost (cost_vec, ncopies, vector_stmt, stmt_info, 0, vect_body);
7281 if (dump_enabled_p ()
7282 && reduction_type == FOLD_LEFT_REDUCTION)
7283 dump_printf_loc (MSG_NOTE, vect_location,
7284 "using an in-order (fold-left) reduction.\n");
7285 STMT_VINFO_TYPE (orig_stmt_of_analysis) = cycle_phi_info_type;
7286 /* All but single defuse-cycle optimized, lane-reducing and fold-left
7287 reductions go through their own vectorizable_* routines. */
7288 if (!single_defuse_cycle
7289 && code != DOT_PROD_EXPR
7290 && code != WIDEN_SUM_EXPR
7291 && code != SAD_EXPR
7292 && reduction_type != FOLD_LEFT_REDUCTION)
7294 stmt_vec_info tem
7295 = vect_stmt_to_vectorize (STMT_VINFO_REDUC_DEF (phi_info));
7296 if (slp_node && REDUC_GROUP_FIRST_ELEMENT (tem))
7298 gcc_assert (!REDUC_GROUP_NEXT_ELEMENT (tem));
7299 tem = REDUC_GROUP_FIRST_ELEMENT (tem);
7301 STMT_VINFO_DEF_TYPE (vect_orig_stmt (tem)) = vect_internal_def;
7302 STMT_VINFO_DEF_TYPE (tem) = vect_internal_def;
7304 else if (loop_vinfo && LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo))
7306 vec_loop_masks *masks = &LOOP_VINFO_MASKS (loop_vinfo);
7307 internal_fn cond_fn = get_conditional_internal_fn (code);
7309 if (reduction_type != FOLD_LEFT_REDUCTION
7310 && !use_mask_by_cond_expr_p (code, cond_fn, vectype_in)
7311 && (cond_fn == IFN_LAST
7312 || !direct_internal_fn_supported_p (cond_fn, vectype_in,
7313 OPTIMIZE_FOR_SPEED)))
7315 if (dump_enabled_p ())
7316 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7317 "can't operate on partial vectors because"
7318 " no conditional operation is available.\n");
7319 LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo) = false;
7321 else if (reduction_type == FOLD_LEFT_REDUCTION
7322 && reduc_fn == IFN_LAST
7323 && !expand_vec_cond_expr_p (vectype_in,
7324 truth_type_for (vectype_in),
7325 SSA_NAME))
7327 if (dump_enabled_p ())
7328 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7329 "can't operate on partial vectors because"
7330 " no conditional operation is available.\n");
7331 LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo) = false;
7333 else
7334 vect_record_loop_mask (loop_vinfo, masks, ncopies * vec_num,
7335 vectype_in, NULL);
7337 return true;
7340 /* Transform the definition stmt STMT_INFO of a reduction PHI backedge
7341 value. */
7343 bool
7344 vect_transform_reduction (loop_vec_info loop_vinfo,
7345 stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
7346 gimple **vec_stmt, slp_tree slp_node)
7348 tree vectype_out = STMT_VINFO_VECTYPE (stmt_info);
7349 class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
7350 int i;
7351 int ncopies;
7352 int vec_num;
7354 stmt_vec_info reduc_info = info_for_reduction (loop_vinfo, stmt_info);
7355 gcc_assert (reduc_info->is_reduc_info);
7357 if (nested_in_vect_loop_p (loop, stmt_info))
7359 loop = loop->inner;
7360 gcc_assert (STMT_VINFO_DEF_TYPE (reduc_info) == vect_double_reduction_def);
7363 gassign *stmt = as_a <gassign *> (stmt_info->stmt);
7364 enum tree_code code = gimple_assign_rhs_code (stmt);
7365 int op_type = TREE_CODE_LENGTH (code);
7367 /* Flatten RHS. */
7368 tree ops[3];
7369 switch (get_gimple_rhs_class (code))
7371 case GIMPLE_TERNARY_RHS:
7372 ops[2] = gimple_assign_rhs3 (stmt);
7373 /* Fall thru. */
7374 case GIMPLE_BINARY_RHS:
7375 ops[0] = gimple_assign_rhs1 (stmt);
7376 ops[1] = gimple_assign_rhs2 (stmt);
7377 break;
7378 default:
7379 gcc_unreachable ();
7382 /* All uses but the last are expected to be defined in the loop.
7383 The last use is the reduction variable. In case of nested cycle this
7384 assumption is not true: we use reduc_index to record the index of the
7385 reduction variable. */
7386 stmt_vec_info phi_info = STMT_VINFO_REDUC_DEF (vect_orig_stmt (stmt_info));
7387 gphi *reduc_def_phi = as_a <gphi *> (phi_info->stmt);
7388 int reduc_index = STMT_VINFO_REDUC_IDX (stmt_info);
7389 tree vectype_in = STMT_VINFO_REDUC_VECTYPE_IN (reduc_info);
7391 if (slp_node)
7393 ncopies = 1;
7394 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
7396 else
7398 ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
7399 vec_num = 1;
7402 internal_fn cond_fn = get_conditional_internal_fn (code);
7403 vec_loop_masks *masks = &LOOP_VINFO_MASKS (loop_vinfo);
7404 bool mask_by_cond_expr = use_mask_by_cond_expr_p (code, cond_fn, vectype_in);
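/* As an illustration of the two masking strategies used below (a
   sketch, not from the original sources): for a fully-masked PLUS_EXPR
   reduction the conditional internal function path emits something
   like

     acc_1 = .COND_ADD (loop_mask, acc_0, x_0, acc_0);

   so inactive lanes keep the previous accumulator value, whereas the
   mask_by_cond_expr path first blends an input operand with a
   VEC_COND_EXPR so that inactive lanes contribute a neutral value to
   the unconditional operation.  */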
7406 /* Transform. */
7407 tree new_temp = NULL_TREE;
7408 auto_vec<tree> vec_oprnds0;
7409 auto_vec<tree> vec_oprnds1;
7410 auto_vec<tree> vec_oprnds2;
7411 tree def0;
7413 if (dump_enabled_p ())
7414 dump_printf_loc (MSG_NOTE, vect_location, "transform reduction.\n");
7416 /* FORNOW: Multiple types are not supported for condition. */
7417 if (code == COND_EXPR)
7418 gcc_assert (ncopies == 1);
7420 bool masked_loop_p = LOOP_VINFO_FULLY_MASKED_P (loop_vinfo);
7422 vect_reduction_type reduction_type = STMT_VINFO_REDUC_TYPE (reduc_info);
7423 if (reduction_type == FOLD_LEFT_REDUCTION)
7425 internal_fn reduc_fn = STMT_VINFO_REDUC_FN (reduc_info);
7426 return vectorize_fold_left_reduction
7427 (loop_vinfo, stmt_info, gsi, vec_stmt, slp_node, reduc_def_phi, code,
7428 reduc_fn, ops, vectype_in, reduc_index, masks);
7431 bool single_defuse_cycle = STMT_VINFO_FORCE_SINGLE_CYCLE (reduc_info);
7432 gcc_assert (single_defuse_cycle
7433 || code == DOT_PROD_EXPR
7434 || code == WIDEN_SUM_EXPR
7435 || code == SAD_EXPR);
7437 /* Create the destination vector */
7438 tree scalar_dest = gimple_assign_lhs (stmt);
7439 tree vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
7441 vect_get_vec_defs (loop_vinfo, stmt_info, slp_node, ncopies,
7442 single_defuse_cycle && reduc_index == 0
7443 ? NULL_TREE : ops[0], &vec_oprnds0,
7444 single_defuse_cycle && reduc_index == 1
7445 ? NULL_TREE : ops[1], &vec_oprnds1,
7446 op_type == ternary_op
7447 && !(single_defuse_cycle && reduc_index == 2)
7448 ? ops[2] : NULL_TREE, &vec_oprnds2);
7449 if (single_defuse_cycle)
7451 gcc_assert (!slp_node);
7452 vect_get_vec_defs_for_operand (loop_vinfo, stmt_info, 1,
7453 ops[reduc_index],
7454 reduc_index == 0 ? &vec_oprnds0
7455 : (reduc_index == 1 ? &vec_oprnds1
7456 : &vec_oprnds2));
7459 FOR_EACH_VEC_ELT (vec_oprnds0, i, def0)
7461 gimple *new_stmt;
7462 tree vop[3] = { def0, vec_oprnds1[i], NULL_TREE };
7463 if (masked_loop_p && !mask_by_cond_expr)
7465 /* Make sure that the reduction accumulator is vop[0]. */
7466 if (reduc_index == 1)
7468 gcc_assert (commutative_tree_code (code));
7469 std::swap (vop[0], vop[1]);
7471 tree mask = vect_get_loop_mask (gsi, masks, vec_num * ncopies,
7472 vectype_in, i);
7473 gcall *call = gimple_build_call_internal (cond_fn, 4, mask,
7474 vop[0], vop[1], vop[0]);
7475 new_temp = make_ssa_name (vec_dest, call);
7476 gimple_call_set_lhs (call, new_temp);
7477 gimple_call_set_nothrow (call, true);
7478 vect_finish_stmt_generation (loop_vinfo, stmt_info, call, gsi);
7479 new_stmt = call;
7481 else
7483 if (op_type == ternary_op)
7484 vop[2] = vec_oprnds2[i];
7486 if (masked_loop_p && mask_by_cond_expr)
7488 tree mask = vect_get_loop_mask (gsi, masks, vec_num * ncopies,
7489 vectype_in, i);
7490 build_vect_cond_expr (code, vop, mask, gsi);
7493 new_stmt = gimple_build_assign (vec_dest, code,
7494 vop[0], vop[1], vop[2]);
7495 new_temp = make_ssa_name (vec_dest, new_stmt);
7496 gimple_assign_set_lhs (new_stmt, new_temp);
7497 vect_finish_stmt_generation (loop_vinfo, stmt_info, new_stmt, gsi);
7500 if (slp_node)
7501 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
7502 else if (single_defuse_cycle
7503 && i < ncopies - 1)
7505 if (reduc_index == 0)
7506 vec_oprnds0.safe_push (gimple_get_lhs (new_stmt));
7507 else if (reduc_index == 1)
7508 vec_oprnds1.safe_push (gimple_get_lhs (new_stmt));
7509 else if (reduc_index == 2)
7510 vec_oprnds2.safe_push (gimple_get_lhs (new_stmt));
7512 else
7513 STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt);
7516 if (!slp_node)
7517 *vec_stmt = STMT_VINFO_VEC_STMTS (stmt_info)[0];
7519 return true;
7522 /* Transform phase of a cycle PHI. */
7524 bool
7525 vect_transform_cycle_phi (loop_vec_info loop_vinfo,
7526 stmt_vec_info stmt_info, gimple **vec_stmt,
7527 slp_tree slp_node, slp_instance slp_node_instance)
7529 tree vectype_out = STMT_VINFO_VECTYPE (stmt_info);
7530 class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
7531 int i;
7532 int ncopies;
7533 int j;
7534 bool nested_cycle = false;
7535 int vec_num;
7537 if (nested_in_vect_loop_p (loop, stmt_info))
7539 loop = loop->inner;
7540 nested_cycle = true;
7543 stmt_vec_info reduc_stmt_info = STMT_VINFO_REDUC_DEF (stmt_info);
7544 reduc_stmt_info = vect_stmt_to_vectorize (reduc_stmt_info);
7545 stmt_vec_info reduc_info = info_for_reduction (loop_vinfo, stmt_info);
7546 gcc_assert (reduc_info->is_reduc_info);
7548 if (STMT_VINFO_REDUC_TYPE (reduc_info) == EXTRACT_LAST_REDUCTION
7549 || STMT_VINFO_REDUC_TYPE (reduc_info) == FOLD_LEFT_REDUCTION)
7550 /* Leave the scalar phi in place. */
7551 return true;
7553 tree vectype_in = STMT_VINFO_REDUC_VECTYPE_IN (reduc_info);
7554 /* For a nested cycle we do not fill the above. */
7555 if (!vectype_in)
7556 vectype_in = STMT_VINFO_VECTYPE (stmt_info);
7557 gcc_assert (vectype_in);
7559 if (slp_node)
7561 /* The size vect_schedule_slp_instance computes is off for us. */
7562 vec_num = vect_get_num_vectors (LOOP_VINFO_VECT_FACTOR (loop_vinfo)
7563 * SLP_TREE_LANES (slp_node), vectype_in);
7564 ncopies = 1;
7566 else
7568 vec_num = 1;
7569 ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
7572 /* Check whether we should use a single PHI node and accumulate
7573 vectors to one before the backedge. */
7574 if (STMT_VINFO_FORCE_SINGLE_CYCLE (reduc_info))
7575 ncopies = 1;
7577 /* Create the destination vector */
7578 gphi *phi = as_a <gphi *> (stmt_info->stmt);
7579 tree vec_dest = vect_create_destination_var (gimple_phi_result (phi),
7580 vectype_out);
7582 /* Get the loop-entry arguments. */
7583 tree vec_initial_def;
7584 auto_vec<tree> vec_initial_defs;
7585 if (slp_node)
7587 vec_initial_defs.reserve (vec_num);
7588 if (nested_cycle)
7590 unsigned phi_idx = loop_preheader_edge (loop)->dest_idx;
7591 vect_get_slp_defs (SLP_TREE_CHILDREN (slp_node)[phi_idx],
7592 &vec_initial_defs);
7594 else
7596 gcc_assert (slp_node == slp_node_instance->reduc_phis);
7597 stmt_vec_info first = REDUC_GROUP_FIRST_ELEMENT (reduc_stmt_info);
7598 tree neutral_op
7599 = neutral_op_for_slp_reduction (slp_node, vectype_out,
7600 STMT_VINFO_REDUC_CODE (reduc_info),
7601 first != NULL);
7602 get_initial_defs_for_reduction (loop_vinfo, slp_node_instance->reduc_phis,
7603 &vec_initial_defs, vec_num,
7604 first != NULL, neutral_op);
7607 else
7609 /* Get at the scalar def before the loop, that defines the initial
7610 value of the reduction variable. */
7611 tree initial_def = PHI_ARG_DEF_FROM_EDGE (phi,
7612 loop_preheader_edge (loop));
7613 /* Optimize: if initial_def is for REDUC_MAX smaller than the base
7614 and we can't use zero for induc_val, use initial_def. Similarly
7615 for REDUC_MIN and initial_def larger than the base. */
7616 if (STMT_VINFO_REDUC_TYPE (reduc_info) == INTEGER_INDUC_COND_REDUCTION)
7618 tree induc_val = STMT_VINFO_VEC_INDUC_COND_INITIAL_VAL (reduc_info);
7619 if (TREE_CODE (initial_def) == INTEGER_CST
7620 && !integer_zerop (induc_val)
7621 && ((STMT_VINFO_REDUC_CODE (reduc_info) == MAX_EXPR
7622 && tree_int_cst_lt (initial_def, induc_val))
7623 || (STMT_VINFO_REDUC_CODE (reduc_info) == MIN_EXPR
7624 && tree_int_cst_lt (induc_val, initial_def))))
7626 induc_val = initial_def;
7627 /* Communicate we used the initial_def to epilogue
7628 generation. */
7629 STMT_VINFO_VEC_INDUC_COND_INITIAL_VAL (reduc_info) = NULL_TREE;
7631 vec_initial_def = build_vector_from_val (vectype_out, induc_val);
7632 vec_initial_defs.create (ncopies);
7633 for (i = 0; i < ncopies; ++i)
7634 vec_initial_defs.quick_push (vec_initial_def);
7636 else if (nested_cycle)
7638 /* Do not use an adjustment def as that case is not supported
7639 correctly if ncopies is not one. */
7640 vect_get_vec_defs_for_operand (loop_vinfo, reduc_stmt_info,
7641 ncopies, initial_def,
7642 &vec_initial_defs);
7644 else
7646 tree adjustment_def = NULL_TREE;
7647 tree *adjustment_defp = &adjustment_def;
7648 enum tree_code code = STMT_VINFO_REDUC_CODE (reduc_info);
7649 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_double_reduction_def)
7650 adjustment_defp = NULL;
7651 vec_initial_def
7652 = get_initial_def_for_reduction (loop_vinfo, reduc_stmt_info, code,
7653 initial_def, adjustment_defp);
7654 STMT_VINFO_REDUC_EPILOGUE_ADJUSTMENT (reduc_info) = adjustment_def;
7655 vec_initial_defs.create (ncopies);
7656 for (i = 0; i < ncopies; ++i)
7657 vec_initial_defs.quick_push (vec_initial_def);
7661 /* Generate the reduction PHIs upfront. */
7662 for (i = 0; i < vec_num; i++)
7664 tree vec_init_def = vec_initial_defs[i];
7665 for (j = 0; j < ncopies; j++)
7667 /* Create the reduction-phi that defines the reduction
7668 operand. */
7669 gphi *new_phi = create_phi_node (vec_dest, loop->header);
7671 /* Set the loop-entry arg of the reduction-phi. */
7672 if (j != 0 && nested_cycle)
7673 vec_init_def = vec_initial_defs[j];
7674 add_phi_arg (new_phi, vec_init_def, loop_preheader_edge (loop),
7675 UNKNOWN_LOCATION);
7677 /* The loop-latch arg is set in epilogue processing. */
7679 if (slp_node)
7680 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_phi);
7681 else
7683 if (j == 0)
7684 *vec_stmt = new_phi;
7685 STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_phi);
7690 return true;
7693 /* Vectorizes LC PHIs. */
7695 bool
7696 vectorizable_lc_phi (loop_vec_info loop_vinfo,
7697 stmt_vec_info stmt_info, gimple **vec_stmt,
7698 slp_tree slp_node)
7700 if (!loop_vinfo
7701 || !is_a <gphi *> (stmt_info->stmt)
7702 || gimple_phi_num_args (stmt_info->stmt) != 1)
7703 return false;
7705 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
7706 && STMT_VINFO_DEF_TYPE (stmt_info) != vect_double_reduction_def)
7707 return false;
7709 if (!vec_stmt) /* transformation not required. */
7711 /* Deal with copies from externs or constants that are disguised as
7712 loop-closed PHI nodes (PR97886). */
7713 if (slp_node
7714 && !vect_maybe_update_slp_op_vectype (SLP_TREE_CHILDREN (slp_node)[0],
7715 SLP_TREE_VECTYPE (slp_node)))
7717 if (dump_enabled_p ())
7718 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7719 "incompatible vector types for invariants\n");
7720 return false;
7722 STMT_VINFO_TYPE (stmt_info) = lc_phi_info_type;
7723 return true;
7726 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
7727 tree scalar_dest = gimple_phi_result (stmt_info->stmt);
7728 basic_block bb = gimple_bb (stmt_info->stmt);
7729 edge e = single_pred_edge (bb);
7730 tree vec_dest = vect_create_destination_var (scalar_dest, vectype);
7731 auto_vec<tree> vec_oprnds;
7732 vect_get_vec_defs (loop_vinfo, stmt_info, slp_node,
7733 !slp_node ? vect_get_num_copies (loop_vinfo, vectype) : 1,
7734 gimple_phi_arg_def (stmt_info->stmt, 0), &vec_oprnds);
7735 for (unsigned i = 0; i < vec_oprnds.length (); i++)
7737 /* Create the vectorized LC PHI node. */
7738 gphi *new_phi = create_phi_node (vec_dest, bb);
7739 add_phi_arg (new_phi, vec_oprnds[i], e, UNKNOWN_LOCATION);
7740 if (slp_node)
7741 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_phi);
7742 else
7743 STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_phi);
7745 if (!slp_node)
7746 *vec_stmt = STMT_VINFO_VEC_STMTS (stmt_info)[0];
7748 return true;
7751 /* Vectorizes PHIs. */
7753 bool
7754 vectorizable_phi (vec_info *,
7755 stmt_vec_info stmt_info, gimple **vec_stmt,
7756 slp_tree slp_node, stmt_vector_for_cost *cost_vec)
7758 if (!is_a <gphi *> (stmt_info->stmt) || !slp_node)
7759 return false;
7761 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
7762 return false;
7764 tree vectype = SLP_TREE_VECTYPE (slp_node);
7766 if (!vec_stmt) /* transformation not required. */
7768 slp_tree child;
7769 unsigned i;
7770 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (slp_node), i, child)
7771 if (!child)
7773 if (dump_enabled_p ())
7774 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7775 "PHI node with unvectorized backedge def\n");
7776 return false;
7778 else if (!vect_maybe_update_slp_op_vectype (child, vectype))
7780 if (dump_enabled_p ())
7781 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7782 "incompatible vector types for invariants\n");
7783 return false;
7785 /* For single-argument PHIs assume coalescing which means zero cost
7786 for the scalar and the vector PHIs. This avoids artificially
7787 favoring the vector path (but may pessimize it in some cases). */
7788 if (gimple_phi_num_args (as_a <gphi *> (stmt_info->stmt)) > 1)
7789 record_stmt_cost (cost_vec, SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node),
7790 vector_stmt, stmt_info, vectype, 0, vect_body);
7791 STMT_VINFO_TYPE (stmt_info) = phi_info_type;
7792 return true;
7795 tree scalar_dest = gimple_phi_result (stmt_info->stmt);
7796 basic_block bb = gimple_bb (stmt_info->stmt);
7797 tree vec_dest = vect_create_destination_var (scalar_dest, vectype);
7798 auto_vec<gphi *> new_phis;
7799 for (unsigned i = 0; i < gimple_phi_num_args (stmt_info->stmt); ++i)
7801 slp_tree child = SLP_TREE_CHILDREN (slp_node)[i];
7803 /* Skip not yet vectorized defs. */
7804 if (SLP_TREE_DEF_TYPE (child) == vect_internal_def
7805 && SLP_TREE_VEC_STMTS (child).is_empty ())
7806 continue;
7808 auto_vec<tree> vec_oprnds;
7809 vect_get_slp_defs (SLP_TREE_CHILDREN (slp_node)[i], &vec_oprnds);
7810 if (!new_phis.exists ())
7812 new_phis.create (vec_oprnds.length ());
7813 for (unsigned j = 0; j < vec_oprnds.length (); j++)
7815 /* Create the vectorized PHI node. */
7816 new_phis.quick_push (create_phi_node (vec_dest, bb));
7817 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_phis[j]);
7820 edge e = gimple_phi_arg_edge (as_a <gphi *> (stmt_info->stmt), i);
7821 for (unsigned j = 0; j < vec_oprnds.length (); j++)
7822 add_phi_arg (new_phis[j], vec_oprnds[j], e, UNKNOWN_LOCATION);
7824 /* We should have at least one already vectorized child. */
7825 gcc_assert (new_phis.exists ());
7827 return true;
7831 /* Function vect_min_worthwhile_factor.
7833 For a loop where we could vectorize the operation indicated by CODE,
7834 return the minimum vectorization factor that makes it worthwhile
7835 to use generic vectors. */
7836 static unsigned int
7837 vect_min_worthwhile_factor (enum tree_code code)
7839 switch (code)
7841 case PLUS_EXPR:
7842 case MINUS_EXPR:
7843 case NEGATE_EXPR:
7844 return 4;
7846 case BIT_AND_EXPR:
7847 case BIT_IOR_EXPR:
7848 case BIT_XOR_EXPR:
7849 case BIT_NOT_EXPR:
7850 return 2;
7852 default:
7853 return INT_MAX;
7857 /* Return true if VINFO indicates we are doing loop vectorization and if
7858 it is worth decomposing CODE operations into scalar operations for
7859 that loop's vectorization factor. */
7861 bool
7862 vect_worthwhile_without_simd_p (vec_info *vinfo, tree_code code)
7864 loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
7865 unsigned HOST_WIDE_INT value;
7866 return (loop_vinfo
7867 && LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&value)
7868 && value >= vect_min_worthwhile_factor (code));
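/* For illustration (not from the original sources): together with the
   table in vect_min_worthwhile_factor this means a loop-vectorized
   PLUS_EXPR emulated in word mode is only considered worthwhile when
   the constant vectorization factor is at least 4, the cheaper bitwise
   codes need a factor of at least 2, and any other code makes the
   predicate return false since a factor of INT_MAX is never reached.  */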
7871 /* Function vectorizable_induction
7873 Check if STMT_INFO performs an induction computation that can be vectorized.
7874 If VEC_STMT is also passed, vectorize the induction PHI: create a vectorized
7875 phi to replace it, put it in VEC_STMT, and add it to the same basic block.
7876 Return true if STMT_INFO is vectorizable in this way. */
7878 bool
7879 vectorizable_induction (loop_vec_info loop_vinfo,
7880 stmt_vec_info stmt_info,
7881 gimple **vec_stmt, slp_tree slp_node,
7882 stmt_vector_for_cost *cost_vec)
7884 class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
7885 unsigned ncopies;
7886 bool nested_in_vect_loop = false;
7887 class loop *iv_loop;
7888 tree vec_def;
7889 edge pe = loop_preheader_edge (loop);
7890 basic_block new_bb;
7891 tree new_vec, vec_init, vec_step, t;
7892 tree new_name;
7893 gimple *new_stmt;
7894 gphi *induction_phi;
7895 tree induc_def, vec_dest;
7896 tree init_expr, step_expr;
7897 poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
7898 unsigned i;
7899 tree expr;
7900 gimple_stmt_iterator si;
7902 gphi *phi = dyn_cast <gphi *> (stmt_info->stmt);
7903 if (!phi)
7904 return false;
7906 if (!STMT_VINFO_RELEVANT_P (stmt_info))
7907 return false;
7909 /* Make sure it was recognized as induction computation. */
7910 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def)
7911 return false;
7913 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
7914 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
7916 if (slp_node)
7917 ncopies = 1;
7918 else
7919 ncopies = vect_get_num_copies (loop_vinfo, vectype);
7920 gcc_assert (ncopies >= 1);
7922 /* FORNOW. These restrictions should be relaxed. */
7923 if (nested_in_vect_loop_p (loop, stmt_info))
7925 imm_use_iterator imm_iter;
7926 use_operand_p use_p;
7927 gimple *exit_phi;
7928 edge latch_e;
7929 tree loop_arg;
7931 if (ncopies > 1)
7933 if (dump_enabled_p ())
7934 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7935 "multiple types in nested loop.\n");
7936 return false;
7939 exit_phi = NULL;
7940 latch_e = loop_latch_edge (loop->inner);
7941 loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
7942 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
7944 gimple *use_stmt = USE_STMT (use_p);
7945 if (is_gimple_debug (use_stmt))
7946 continue;
7948 if (!flow_bb_inside_loop_p (loop->inner, gimple_bb (use_stmt)))
7950 exit_phi = use_stmt;
7951 break;
7954 if (exit_phi)
7956 stmt_vec_info exit_phi_vinfo = loop_vinfo->lookup_stmt (exit_phi);
7957 if (!(STMT_VINFO_RELEVANT_P (exit_phi_vinfo)
7958 && !STMT_VINFO_LIVE_P (exit_phi_vinfo)))
7960 if (dump_enabled_p ())
7961 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7962 "inner-loop induction only used outside "
7963 "of the outer vectorized loop.\n");
7964 return false;
7968 nested_in_vect_loop = true;
7969 iv_loop = loop->inner;
7971 else
7972 iv_loop = loop;
7973 gcc_assert (iv_loop == (gimple_bb (phi))->loop_father);
7975 if (slp_node && !nunits.is_constant ())
7977 /* The current SLP code creates the step value element-by-element. */
7978 if (dump_enabled_p ())
7979 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7980 "SLP induction not supported for variable-length"
7981 " vectors.\n");
7982 return false;
7985 if (!vec_stmt) /* transformation not required. */
7987 unsigned inside_cost = 0, prologue_cost = 0;
7988 if (slp_node)
7990 /* We eventually need to set a vector type on invariant
7991 arguments. */
7992 unsigned j;
7993 slp_tree child;
7994 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (slp_node), j, child)
7995 if (!vect_maybe_update_slp_op_vectype
7996 (child, SLP_TREE_VECTYPE (slp_node)))
7998 if (dump_enabled_p ())
7999 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8000 "incompatible vector types for "
8001 "invariants\n");
8002 return false;
8004 /* loop cost for vec_loop. */
8005 inside_cost
8006 = record_stmt_cost (cost_vec,
8007 SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node),
8008 vector_stmt, stmt_info, 0, vect_body);
8009 /* prologue cost for vec_init (if not nested) and step. */
8010 prologue_cost = record_stmt_cost (cost_vec, 1 + !nested_in_vect_loop,
8011 scalar_to_vec,
8012 stmt_info, 0, vect_prologue);
8014 else /* if (!slp_node) */
8016 /* loop cost for vec_loop. */
8017 inside_cost = record_stmt_cost (cost_vec, ncopies, vector_stmt,
8018 stmt_info, 0, vect_body);
8019 /* prologue cost for vec_init and vec_step. */
8020 prologue_cost = record_stmt_cost (cost_vec, 2, scalar_to_vec,
8021 stmt_info, 0, vect_prologue);
8023 if (dump_enabled_p ())
8024 dump_printf_loc (MSG_NOTE, vect_location,
8025 "vect_model_induction_cost: inside_cost = %d, "
8026 "prologue_cost = %d .\n", inside_cost,
8027 prologue_cost);
8029 STMT_VINFO_TYPE (stmt_info) = induc_vec_info_type;
8030 DUMP_VECT_SCOPE ("vectorizable_induction");
8031 return true;
8034 /* Transform. */
8036 /* Compute a vector variable, initialized with the first VF values of
8037 the induction variable. E.g., for an iv with IV_PHI='X' and
8038 evolution S, for a vector of 4 units, we want to compute:
8039 [X, X + S, X + 2*S, X + 3*S]. */
8041 if (dump_enabled_p ())
8042 dump_printf_loc (MSG_NOTE, vect_location, "transform induction phi.\n");
8044 step_expr = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_info);
8045 gcc_assert (step_expr != NULL_TREE);
8046 tree step_vectype = get_same_sized_vectype (TREE_TYPE (step_expr), vectype);
8048 pe = loop_preheader_edge (iv_loop);
8049 /* Find the first insertion point in the BB. */
8050 basic_block bb = gimple_bb (phi);
8051 si = gsi_after_labels (bb);
8053 /* For SLP induction we have to generate several IVs as for example
8054 with group size 3 we need
8055 [i0, i1, i2, i0 + S0] [i1 + S1, i2 + S2, i0 + 2*S0, i1 + 2*S1]
8056 [i2 + 2*S2, i0 + 3*S0, i1 + 3*S1, i2 + 3*S2]. */
8057 if (slp_node)
8059 /* Enforced above. */
8060 unsigned int const_nunits = nunits.to_constant ();
8062 /* The initial values are vectorized, but any lanes > group_size
8063 need adjustment. */
8064 slp_tree init_node
8065 = SLP_TREE_CHILDREN (slp_node)[pe->dest_idx];
8067 /* Gather steps. Since we do not vectorize inductions as
8068 cycles we have to reconstruct the step from SCEV data. */
8069 unsigned group_size = SLP_TREE_LANES (slp_node);
8070 tree *steps = XALLOCAVEC (tree, group_size);
8071 tree *inits = XALLOCAVEC (tree, group_size);
8072 stmt_vec_info phi_info;
8073 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (slp_node), i, phi_info)
8075 steps[i] = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (phi_info);
8076 if (!init_node)
8077 inits[i] = gimple_phi_arg_def (as_a<gphi *> (phi_info->stmt),
8078 pe->dest_idx);
8081 /* Now generate the IVs. */
8082 unsigned nvects = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
8083 gcc_assert ((const_nunits * nvects) % group_size == 0);
8084 unsigned nivs;
8085 if (nested_in_vect_loop)
8086 nivs = nvects;
8087 else
8089 /* Compute the number of distinct IVs we need. First reduce
8090 group_size if it is a multiple of const_nunits so we get
8091 one IV for a group_size of 4 but const_nunits 2. */
8092 unsigned group_sizep = group_size;
8093 if (group_sizep % const_nunits == 0)
8094 group_sizep = group_sizep / const_nunits;
8095 nivs = least_common_multiple (group_sizep,
8096 const_nunits) / const_nunits;
8098 tree stept = TREE_TYPE (step_vectype);
8099 tree lupdate_mul = NULL_TREE;
8100 if (!nested_in_vect_loop)
8102 /* The number of iterations covered in one vector iteration. */
8103 unsigned lup_mul = (nvects * const_nunits) / group_size;
8104 lupdate_mul
8105 = build_vector_from_val (step_vectype,
8106 SCALAR_FLOAT_TYPE_P (stept)
8107 ? build_real_from_wide (stept, lup_mul,
8108 UNSIGNED)
8109 : build_int_cstu (stept, lup_mul));
8111 tree peel_mul = NULL_TREE;
8112 gimple_seq init_stmts = NULL;
8113 if (LOOP_VINFO_MASK_SKIP_NITERS (loop_vinfo))
8115 if (SCALAR_FLOAT_TYPE_P (stept))
8116 peel_mul = gimple_build (&init_stmts, FLOAT_EXPR, stept,
8117 LOOP_VINFO_MASK_SKIP_NITERS (loop_vinfo));
8118 else
8119 peel_mul = gimple_convert (&init_stmts, stept,
8120 LOOP_VINFO_MASK_SKIP_NITERS (loop_vinfo));
8121 peel_mul = gimple_build_vector_from_val (&init_stmts,
8122 step_vectype, peel_mul);
8124 unsigned ivn;
8125 auto_vec<tree> vec_steps;
8126 for (ivn = 0; ivn < nivs; ++ivn)
8128 tree_vector_builder step_elts (step_vectype, const_nunits, 1);
8129 tree_vector_builder init_elts (vectype, const_nunits, 1);
8130 tree_vector_builder mul_elts (step_vectype, const_nunits, 1);
8131 for (unsigned eltn = 0; eltn < const_nunits; ++eltn)
8133 /* The scalar steps of the IVs. */
8134 tree elt = steps[(ivn*const_nunits + eltn) % group_size];
8135 elt = gimple_convert (&init_stmts, TREE_TYPE (step_vectype), elt);
8136 step_elts.quick_push (elt);
8137 if (!init_node)
8139 /* The scalar inits of the IVs if not vectorized. */
8140 elt = inits[(ivn*const_nunits + eltn) % group_size];
8141 if (!useless_type_conversion_p (TREE_TYPE (vectype),
8142 TREE_TYPE (elt)))
8143 elt = gimple_build (&init_stmts, VIEW_CONVERT_EXPR,
8144 TREE_TYPE (vectype), elt);
8145 init_elts.quick_push (elt);
8147 /* The number of steps to add to the initial values. */
8148 unsigned mul_elt = (ivn*const_nunits + eltn) / group_size;
8149 mul_elts.quick_push (SCALAR_FLOAT_TYPE_P (stept)
8150 ? build_real_from_wide (stept,
8151 mul_elt, UNSIGNED)
8152 : build_int_cstu (stept, mul_elt));
8154 vec_step = gimple_build_vector (&init_stmts, &step_elts);
8155 vec_steps.safe_push (vec_step);
8156 tree step_mul = gimple_build_vector (&init_stmts, &mul_elts);
8157 if (peel_mul)
8158 step_mul = gimple_build (&init_stmts, PLUS_EXPR, step_vectype,
8159 step_mul, peel_mul);
8160 if (!init_node)
8161 vec_init = gimple_build_vector (&init_stmts, &init_elts);
8163 /* Create the induction-phi that defines the induction-operand. */
8164 vec_dest = vect_get_new_vect_var (vectype, vect_simple_var,
8165 "vec_iv_");
8166 induction_phi = create_phi_node (vec_dest, iv_loop->header);
8167 induc_def = PHI_RESULT (induction_phi);
8169 /* Create the iv update inside the loop */
8170 tree up = vec_step;
8171 if (lupdate_mul)
8172 up = gimple_build (&init_stmts, MULT_EXPR, step_vectype,
8173 vec_step, lupdate_mul);
8174 gimple_seq stmts = NULL;
8175 vec_def = gimple_convert (&stmts, step_vectype, induc_def);
8176 vec_def = gimple_build (&stmts,
8177 PLUS_EXPR, step_vectype, vec_def, up);
8178 vec_def = gimple_convert (&stmts, vectype, vec_def);
8179 gsi_insert_seq_before (&si, stmts, GSI_SAME_STMT);
8180 add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
8181 UNKNOWN_LOCATION);
8183 if (init_node)
8184 vec_init = vect_get_slp_vect_def (init_node, ivn);
8185 if (!nested_in_vect_loop
8186 && !integer_zerop (step_mul))
8188 vec_def = gimple_convert (&init_stmts, step_vectype, vec_init);
8189 up = gimple_build (&init_stmts, MULT_EXPR, step_vectype,
8190 vec_step, step_mul);
8191 vec_def = gimple_build (&init_stmts, PLUS_EXPR, step_vectype,
8192 vec_def, up);
8193 vec_init = gimple_convert (&init_stmts, vectype, vec_def);
8196 /* Set the arguments of the phi node: */
8197 add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION);
8199 SLP_TREE_VEC_STMTS (slp_node).quick_push (induction_phi);
8201 if (!nested_in_vect_loop)
8203 /* Fill up to the number of vectors we need for the whole group. */
8204 nivs = least_common_multiple (group_size,
8205 const_nunits) / const_nunits;
8206 vec_steps.reserve (nivs-ivn);
8207 for (; ivn < nivs; ++ivn)
8209 SLP_TREE_VEC_STMTS (slp_node)
8210 .quick_push (SLP_TREE_VEC_STMTS (slp_node)[0]);
8211 vec_steps.quick_push (vec_steps[0]);
8215 /* Re-use IVs when we can. We are generating further vector
8216 stmts by adding VF' * stride to the IVs generated above. */
8217 if (ivn < nvects)
8219 unsigned vfp
8220 = least_common_multiple (group_size, const_nunits) / group_size;
8221 tree lupdate_mul
8222 = build_vector_from_val (step_vectype,
8223 SCALAR_FLOAT_TYPE_P (stept)
8224 ? build_real_from_wide (stept,
8225 vfp, UNSIGNED)
8226 : build_int_cstu (stept, vfp));
8227 for (; ivn < nvects; ++ivn)
8229 gimple *iv = SLP_TREE_VEC_STMTS (slp_node)[ivn - nivs];
8230 tree def = gimple_get_lhs (iv);
8231 if (ivn < 2*nivs)
8232 vec_steps[ivn - nivs]
8233 = gimple_build (&init_stmts, MULT_EXPR, step_vectype,
8234 vec_steps[ivn - nivs], lupdate_mul);
8235 gimple_seq stmts = NULL;
8236 def = gimple_convert (&stmts, step_vectype, def);
8237 def = gimple_build (&stmts, PLUS_EXPR, step_vectype,
8238 def, vec_steps[ivn % nivs]);
8239 def = gimple_convert (&stmts, vectype, def);
8240 if (gimple_code (iv) == GIMPLE_PHI)
8241 gsi_insert_seq_before (&si, stmts, GSI_SAME_STMT);
8242 else
8244 gimple_stmt_iterator tgsi = gsi_for_stmt (iv);
8245 gsi_insert_seq_after (&tgsi, stmts, GSI_CONTINUE_LINKING);
8247 SLP_TREE_VEC_STMTS (slp_node)
8248 .quick_push (SSA_NAME_DEF_STMT (def));
8252 new_bb = gsi_insert_seq_on_edge_immediate (pe, init_stmts);
8253 gcc_assert (!new_bb);
8255 return true;
8258 init_expr = PHI_ARG_DEF_FROM_EDGE (phi,
8259 loop_preheader_edge (iv_loop));
8261 gimple_seq stmts = NULL;
8262 if (!nested_in_vect_loop)
8264 /* Convert the initial value to the IV update type. */
8265 tree new_type = TREE_TYPE (step_expr);
8266 init_expr = gimple_convert (&stmts, new_type, init_expr);
8268 /* If we are using the loop mask to "peel" for alignment then we need
8269 to adjust the start value here. */
8270 tree skip_niters = LOOP_VINFO_MASK_SKIP_NITERS (loop_vinfo);
8271 if (skip_niters != NULL_TREE)
8273 if (FLOAT_TYPE_P (vectype))
8274 skip_niters = gimple_build (&stmts, FLOAT_EXPR, new_type,
8275 skip_niters);
8276 else
8277 skip_niters = gimple_convert (&stmts, new_type, skip_niters);
8278 tree skip_step = gimple_build (&stmts, MULT_EXPR, new_type,
8279 skip_niters, step_expr);
8280 init_expr = gimple_build (&stmts, MINUS_EXPR, new_type,
8281 init_expr, skip_step);
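/* A worked example of the adjustment above (an illustration, not from
   the original sources): for an induction starting at 0 with step 1,
   if the loop mask skips the first 3 iterations then init_expr becomes
   0 - 3 * 1 = -3, so that after the masked-off lanes are discarded the
   first active lane still observes the value 0.  */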
8285 if (stmts)
8287 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
8288 gcc_assert (!new_bb);
8291 /* Create the vector that holds the initial_value of the induction. */
8292 if (nested_in_vect_loop)
8294 /* iv_loop is nested in the loop to be vectorized. init_expr had already
8295 been created during vectorization of previous stmts. We obtain it
8296 from the STMT_VINFO_VEC_STMT of the defining stmt. */
8297 auto_vec<tree> vec_inits;
8298 vect_get_vec_defs_for_operand (loop_vinfo, stmt_info, 1,
8299 init_expr, &vec_inits);
8300 vec_init = vec_inits[0];
8301 /* If the initial value is not of proper type, convert it. */
8302 if (!useless_type_conversion_p (vectype, TREE_TYPE (vec_init)))
8304 new_stmt
8305 = gimple_build_assign (vect_get_new_ssa_name (vectype,
8306 vect_simple_var,
8307 "vec_iv_"),
8308 VIEW_CONVERT_EXPR,
8309 build1 (VIEW_CONVERT_EXPR, vectype,
8310 vec_init));
8311 vec_init = gimple_assign_lhs (new_stmt);
8312 new_bb = gsi_insert_on_edge_immediate (loop_preheader_edge (iv_loop),
8313 new_stmt);
8314 gcc_assert (!new_bb);
8317 else
8319 /* iv_loop is the loop to be vectorized. Create:
8320 vec_init = [X, X+S, X+2*S, X+3*S] (S = step_expr, X = init_expr) */
8321 stmts = NULL;
8322 new_name = gimple_convert (&stmts, TREE_TYPE (step_expr), init_expr);
8324 unsigned HOST_WIDE_INT const_nunits;
8325 if (nunits.is_constant (&const_nunits))
8327 tree_vector_builder elts (step_vectype, const_nunits, 1);
8328 elts.quick_push (new_name);
8329 for (i = 1; i < const_nunits; i++)
8331 /* Create: new_name_i = new_name + step_expr */
8332 new_name = gimple_build (&stmts, PLUS_EXPR, TREE_TYPE (new_name),
8333 new_name, step_expr);
8334 elts.quick_push (new_name);
8336 /* Create a vector from [new_name_0, new_name_1, ...,
8337 new_name_nunits-1] */
8338 vec_init = gimple_build_vector (&stmts, &elts);
8340 else if (INTEGRAL_TYPE_P (TREE_TYPE (step_expr)))
8341 /* Build the initial value directly from a VEC_SERIES_EXPR. */
8342 vec_init = gimple_build (&stmts, VEC_SERIES_EXPR, step_vectype,
8343 new_name, step_expr);
8344 else
8346 /* Build:
8347 [base, base, base, ...]
8348 + (vectype) [0, 1, 2, ...] * [step, step, step, ...]. */
8349 gcc_assert (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)));
8350 gcc_assert (flag_associative_math);
8351 tree index = build_index_vector (step_vectype, 0, 1);
8352 tree base_vec = gimple_build_vector_from_val (&stmts, step_vectype,
8353 new_name);
8354 tree step_vec = gimple_build_vector_from_val (&stmts, step_vectype,
8355 step_expr);
8356 vec_init = gimple_build (&stmts, FLOAT_EXPR, step_vectype, index);
8357 vec_init = gimple_build (&stmts, MULT_EXPR, step_vectype,
8358 vec_init, step_vec);
8359 vec_init = gimple_build (&stmts, PLUS_EXPR, step_vectype,
8360 vec_init, base_vec);
8362 vec_init = gimple_convert (&stmts, vectype, vec_init);
8364 if (stmts)
8366 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
8367 gcc_assert (!new_bb);
8372 /* Create the vector that holds the step of the induction. */
8373 if (nested_in_vect_loop)
8374 /* iv_loop is nested in the loop to be vectorized. Generate:
8375 vec_step = [S, S, S, S] */
8376 new_name = step_expr;
8377 else
8379 /* iv_loop is the loop to be vectorized. Generate:
8380 vec_step = [VF*S, VF*S, VF*S, VF*S] */
8381 gimple_seq seq = NULL;
8382 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
8384 expr = build_int_cst (integer_type_node, vf);
8385 expr = gimple_build (&seq, FLOAT_EXPR, TREE_TYPE (step_expr), expr);
8387 else
8388 expr = build_int_cst (TREE_TYPE (step_expr), vf);
8389 new_name = gimple_build (&seq, MULT_EXPR, TREE_TYPE (step_expr),
8390 expr, step_expr);
8391 if (seq)
8393 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
8394 gcc_assert (!new_bb);
8398 t = unshare_expr (new_name);
8399 gcc_assert (CONSTANT_CLASS_P (new_name)
8400 || TREE_CODE (new_name) == SSA_NAME);
8401 new_vec = build_vector_from_val (step_vectype, t);
8402 vec_step = vect_init_vector (loop_vinfo, stmt_info,
8403 new_vec, step_vectype, NULL);
8406 /* Create the following def-use cycle:
8407 loop prolog:
8408 vec_init = ...
8409 vec_step = ...
8410 loop:
8411 vec_iv = PHI <vec_init, vec_loop>
8413 STMT
8415 vec_loop = vec_iv + vec_step; */
8417 /* Create the induction-phi that defines the induction-operand. */
8418 vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_");
8419 induction_phi = create_phi_node (vec_dest, iv_loop->header);
8420 induc_def = PHI_RESULT (induction_phi);
8422 /* Create the iv update inside the loop */
8423 stmts = NULL;
8424 vec_def = gimple_convert (&stmts, step_vectype, induc_def);
8425 vec_def = gimple_build (&stmts, PLUS_EXPR, step_vectype, vec_def, vec_step);
8426 vec_def = gimple_convert (&stmts, vectype, vec_def);
8427 gsi_insert_seq_before (&si, stmts, GSI_SAME_STMT);
8428 new_stmt = SSA_NAME_DEF_STMT (vec_def);
8430 /* Set the arguments of the phi node: */
8431 add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION);
8432 add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
8433 UNKNOWN_LOCATION);
8435 STMT_VINFO_VEC_STMTS (stmt_info).safe_push (induction_phi);
8436 *vec_stmt = induction_phi;
8438 /* In case the vectorization factor (VF) is bigger than the number
8439 of elements that we can fit in a vectype (nunits), we have to generate
8440 more than one vector stmt, i.e. we need to "unroll" the
8441 vector stmt by a factor VF/nunits. For more details see the
8442 documentation in vectorizable_operation. */
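  /* For illustration only (the numbers are arbitrary): with VF = 8 and
     nunits = 4 we get ncopies = 2.  The first copy is
	 [X, X+S, X+2*S, X+3*S]	   (S = step_expr, X = init_expr)
     and the code below derives the second copy by adding a step vector
     built from nunits * S:
	 [X, X+S, X+2*S, X+3*S] + [4*S, 4*S, 4*S, 4*S]
	   = [X+4*S, X+5*S, X+6*S, X+7*S]
     while the PHI update created above still advances each copy by
     VF * S = 8*S per loop iteration.  */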
8444 if (ncopies > 1)
8446 gimple_seq seq = NULL;
8447 /* FORNOW. This restriction should be relaxed. */
8448 gcc_assert (!nested_in_vect_loop);
8450 /* Create the vector that holds the step of the induction. */
8451 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
8453 expr = build_int_cst (integer_type_node, nunits);
8454 expr = gimple_build (&seq, FLOAT_EXPR, TREE_TYPE (step_expr), expr);
8456 else
8457 expr = build_int_cst (TREE_TYPE (step_expr), nunits);
8458 new_name = gimple_build (&seq, MULT_EXPR, TREE_TYPE (step_expr),
8459 expr, step_expr);
8460 if (seq)
8462 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
8463 gcc_assert (!new_bb);
8466 t = unshare_expr (new_name);
8467 gcc_assert (CONSTANT_CLASS_P (new_name)
8468 || TREE_CODE (new_name) == SSA_NAME);
8469 new_vec = build_vector_from_val (step_vectype, t);
8470 vec_step = vect_init_vector (loop_vinfo, stmt_info,
8471 new_vec, step_vectype, NULL);
8473 vec_def = induc_def;
8474 for (i = 1; i < ncopies; i++)
8476 /* vec_i = vec_prev + vec_step */
8477 gimple_seq stmts = NULL;
8478 vec_def = gimple_convert (&stmts, step_vectype, vec_def);
8479 vec_def = gimple_build (&stmts,
8480 PLUS_EXPR, step_vectype, vec_def, vec_step);
8481 vec_def = gimple_convert (&stmts, vectype, vec_def);
8483 gsi_insert_seq_before (&si, stmts, GSI_SAME_STMT);
8484 new_stmt = SSA_NAME_DEF_STMT (vec_def);
8485 STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt);
8489 if (dump_enabled_p ())
8490 dump_printf_loc (MSG_NOTE, vect_location,
8491 "transform induction: created def-use cycle: %G%G",
8492 induction_phi, SSA_NAME_DEF_STMT (vec_def));
8494 return true;
8497 /* Function vectorizable_live_operation.
8499 STMT_INFO computes a value that is used outside the loop. Check if
8500 it can be supported. */
8502 bool
8503 vectorizable_live_operation (vec_info *vinfo,
8504 stmt_vec_info stmt_info,
8505 gimple_stmt_iterator *gsi,
8506 slp_tree slp_node, slp_instance slp_node_instance,
8507 int slp_index, bool vec_stmt_p,
8508 stmt_vector_for_cost *cost_vec)
8510 loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
8511 imm_use_iterator imm_iter;
8512 tree lhs, lhs_type, bitsize;
8513 tree vectype = (slp_node
8514 ? SLP_TREE_VECTYPE (slp_node)
8515 : STMT_VINFO_VECTYPE (stmt_info));
8516 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
8517 int ncopies;
8518 gimple *use_stmt;
8519 auto_vec<tree> vec_oprnds;
8520 int vec_entry = 0;
8521 poly_uint64 vec_index = 0;
8523 gcc_assert (STMT_VINFO_LIVE_P (stmt_info));
8525 /* If a stmt of a reduction is live, vectorize it via
8526 vect_create_epilog_for_reduction. vectorizable_reduction assessed
8527 validity so just trigger the transform here. */
8528 if (STMT_VINFO_REDUC_DEF (vect_orig_stmt (stmt_info)))
8530 if (!vec_stmt_p)
8531 return true;
8532 if (slp_node)
8534 /* For reduction chains the meta-info is attached to
8535 the group leader. */
8536 if (REDUC_GROUP_FIRST_ELEMENT (stmt_info))
8537 stmt_info = REDUC_GROUP_FIRST_ELEMENT (stmt_info);
8538 /* For SLP reductions we vectorize the epilogue for
8539 all involved stmts together. */
8540 else if (slp_index != 0)
8541 return true;
8542 else
8543 /* For SLP reductions the meta-info is attached to
8544 the representative. */
8545 stmt_info = SLP_TREE_REPRESENTATIVE (slp_node);
8547 stmt_vec_info reduc_info = info_for_reduction (loop_vinfo, stmt_info);
8548 gcc_assert (reduc_info->is_reduc_info);
8549 if (STMT_VINFO_REDUC_TYPE (reduc_info) == FOLD_LEFT_REDUCTION
8550 || STMT_VINFO_REDUC_TYPE (reduc_info) == EXTRACT_LAST_REDUCTION)
8551 return true;
8552 vect_create_epilog_for_reduction (loop_vinfo, stmt_info, slp_node,
8553 slp_node_instance);
8554 return true;
8557 /* If STMT is not relevant and it is a simple assignment and its inputs are
8558 invariant then it can remain in place, unvectorized. The original last
8559 scalar value that it computes will be used. */
8560 if (!STMT_VINFO_RELEVANT_P (stmt_info))
8562 gcc_assert (is_simple_and_all_uses_invariant (stmt_info, loop_vinfo));
8563 if (dump_enabled_p ())
8564 dump_printf_loc (MSG_NOTE, vect_location,
8565 "statement is simple and uses invariant. Leaving in "
8566 "place.\n");
8567 return true;
8570 if (slp_node)
8571 ncopies = 1;
8572 else
8573 ncopies = vect_get_num_copies (loop_vinfo, vectype);
8575 if (slp_node)
8577 gcc_assert (slp_index >= 0);
8579 /* Get the last occurrence of the scalar index from the concatenation of
8580 all the slp vectors. Calculate which slp vector it is and the index
8581 within. */
8582 int num_scalar = SLP_TREE_LANES (slp_node);
8583 int num_vec = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
8584 poly_uint64 pos = (num_vec * nunits) - num_scalar + slp_index;
8586 /* Calculate which vector contains the result, and which lane of
8587 that vector we need. */
8588 if (!can_div_trunc_p (pos, nunits, &vec_entry, &vec_index))
8590 if (dump_enabled_p ())
8591 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8592 "Cannot determine which vector holds the"
8593 " final result.\n");
8594 return false;
8598 if (!vec_stmt_p)
8600 /* No transformation required. */
8601 if (loop_vinfo && LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo))
8603 if (!direct_internal_fn_supported_p (IFN_EXTRACT_LAST, vectype,
8604 OPTIMIZE_FOR_SPEED))
8606 if (dump_enabled_p ())
8607 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8608 "can't operate on partial vectors "
8609 "because the target doesn't support extract "
8610 "last reduction.\n");
8611 LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo) = false;
8613 else if (slp_node)
8615 if (dump_enabled_p ())
8616 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8617 "can't operate on partial vectors "
8618 "because an SLP statement is live after "
8619 "the loop.\n");
8620 LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo) = false;
8622 else if (ncopies > 1)
8624 if (dump_enabled_p ())
8625 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8626 "can't operate on partial vectors "
8627 "because ncopies is greater than 1.\n");
8628 LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo) = false;
8630 else
8632 gcc_assert (ncopies == 1 && !slp_node);
8633 vect_record_loop_mask (loop_vinfo,
8634 &LOOP_VINFO_MASKS (loop_vinfo),
8635 1, vectype, NULL);
8638 /* ??? Enable for loop costing as well. */
8639 if (!loop_vinfo)
8640 record_stmt_cost (cost_vec, 1, vec_to_scalar, stmt_info, NULL_TREE,
8641 0, vect_epilogue);
8642 return true;
8645 /* Use the lhs of the original scalar statement. */
8646 gimple *stmt = vect_orig_stmt (stmt_info)->stmt;
8647 if (dump_enabled_p ())
8648 dump_printf_loc (MSG_NOTE, vect_location, "extracting lane for live "
8649 "stmt %G", stmt);
8651 lhs = gimple_get_lhs (stmt);
8652 lhs_type = TREE_TYPE (lhs);
8654 bitsize = vector_element_bits_tree (vectype);
8656 /* Get the vectorized lhs of STMT and the lane to use (counted in bits). */
8657 tree vec_lhs, bitstart;
8658 gimple *vec_stmt;
8659 if (slp_node)
8661 gcc_assert (!loop_vinfo || !LOOP_VINFO_FULLY_MASKED_P (loop_vinfo));
8663 /* Get the correct slp vectorized stmt. */
8664 vec_stmt = SLP_TREE_VEC_STMTS (slp_node)[vec_entry];
8665 vec_lhs = gimple_get_lhs (vec_stmt);
8667 /* Get entry to use. */
8668 bitstart = bitsize_int (vec_index);
8669 bitstart = int_const_binop (MULT_EXPR, bitsize, bitstart);
8671 else
8673 /* For multiple copies, get the last copy. */
8674 vec_stmt = STMT_VINFO_VEC_STMTS (stmt_info).last ();
8675 vec_lhs = gimple_get_lhs (vec_stmt);
8677 /* Get the last lane in the vector. */
8678 bitstart = int_const_binop (MULT_EXPR, bitsize, bitsize_int (nunits - 1));
8681 if (loop_vinfo)
8683 /* To ensure that the VEC_LHS used for lane extraction satisfies the
8684 loop-closed PHI requirement, insert one PHI node for it. It looks like:
8685 loop;
8687 # lhs' = PHI <lhs>
8689 loop;
8691 # vec_lhs' = PHI <vec_lhs>
8692 new_tree = lane_extract <vec_lhs', ...>;
8693 lhs' = new_tree; */
8695 class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
8696 basic_block exit_bb = single_exit (loop)->dest;
8697 gcc_assert (single_pred_p (exit_bb));
8699 tree vec_lhs_phi = copy_ssa_name (vec_lhs);
8700 gimple *phi = create_phi_node (vec_lhs_phi, exit_bb);
8701 SET_PHI_ARG_DEF (phi, single_exit (loop)->dest_idx, vec_lhs);
8703 gimple_seq stmts = NULL;
8704 tree new_tree;
8705 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
8707 /* Emit:
8709 SCALAR_RES = EXTRACT_LAST <VEC_LHS, MASK>
8711 where VEC_LHS is the vectorized live-out result and MASK is
8712 the loop mask for the final iteration. */
8713 gcc_assert (ncopies == 1 && !slp_node);
8714 tree scalar_type = TREE_TYPE (STMT_VINFO_VECTYPE (stmt_info));
8715 tree mask = vect_get_loop_mask (gsi, &LOOP_VINFO_MASKS (loop_vinfo),
8716 1, vectype, 0);
8717 tree scalar_res = gimple_build (&stmts, CFN_EXTRACT_LAST, scalar_type,
8718 mask, vec_lhs_phi);
8720 /* Convert the extracted vector element to the scalar type. */
8721 new_tree = gimple_convert (&stmts, lhs_type, scalar_res);
8723 else
8725 tree bftype = TREE_TYPE (vectype);
8726 if (VECTOR_BOOLEAN_TYPE_P (vectype))
8727 bftype = build_nonstandard_integer_type (tree_to_uhwi (bitsize), 1);
8728 new_tree = build3 (BIT_FIELD_REF, bftype,
8729 vec_lhs_phi, bitsize, bitstart);
8730 new_tree = force_gimple_operand (fold_convert (lhs_type, new_tree),
8731 &stmts, true, NULL_TREE);
8734 if (stmts)
8736 gimple_stmt_iterator exit_gsi = gsi_after_labels (exit_bb);
8737 gsi_insert_seq_before (&exit_gsi, stmts, GSI_SAME_STMT);
8739 /* Remove existing phi from lhs and create one copy from new_tree. */
8740 tree lhs_phi = NULL_TREE;
8741 gimple_stmt_iterator gsi;
8742 for (gsi = gsi_start_phis (exit_bb);
8743 !gsi_end_p (gsi); gsi_next (&gsi))
8745 gimple *phi = gsi_stmt (gsi);
8746 if ((gimple_phi_arg_def (phi, 0) == lhs))
8748 remove_phi_node (&gsi, false);
8749 lhs_phi = gimple_phi_result (phi);
8750 gimple *copy = gimple_build_assign (lhs_phi, new_tree);
8751 gsi_insert_before (&exit_gsi, copy, GSI_SAME_STMT);
8752 break;
8757 /* Replace use of lhs with newly computed result. If the use stmt is a
8758 single arg PHI, just replace all uses of PHI result. It's necessary
8759 because lcssa PHI defining lhs may be before newly inserted stmt. */
8760 use_operand_p use_p;
8761 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, lhs)
8762 if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))
8763 && !is_gimple_debug (use_stmt))
8765 if (gimple_code (use_stmt) == GIMPLE_PHI
8766 && gimple_phi_num_args (use_stmt) == 1)
8768 replace_uses_by (gimple_phi_result (use_stmt), new_tree);
8770 else
8772 FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
8773 SET_USE (use_p, new_tree);
8775 update_stmt (use_stmt);
8778 else
8780 /* For basic-block vectorization simply insert the lane-extraction. */
8781 tree bftype = TREE_TYPE (vectype);
8782 if (VECTOR_BOOLEAN_TYPE_P (vectype))
8783 bftype = build_nonstandard_integer_type (tree_to_uhwi (bitsize), 1);
8784 tree new_tree = build3 (BIT_FIELD_REF, bftype,
8785 vec_lhs, bitsize, bitstart);
8786 gimple_seq stmts = NULL;
8787 new_tree = force_gimple_operand (fold_convert (lhs_type, new_tree),
8788 &stmts, true, NULL_TREE);
8789 if (TREE_CODE (new_tree) == SSA_NAME
8790 && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
8791 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_tree) = 1;
8792 if (is_a <gphi *> (vec_stmt))
8794 gimple_stmt_iterator si = gsi_after_labels (gimple_bb (vec_stmt));
8795 gsi_insert_seq_before (&si, stmts, GSI_SAME_STMT);
8797 else
8799 gimple_stmt_iterator si = gsi_for_stmt (vec_stmt);
8800 gsi_insert_seq_after (&si, stmts, GSI_SAME_STMT);
8803 /* Replace use of lhs with newly computed result. If the use stmt is a
8804 single arg PHI, just replace all uses of PHI result. It's necessary
8805 because lcssa PHI defining lhs may be before newly inserted stmt. */
8806 use_operand_p use_p;
8807 stmt_vec_info use_stmt_info;
8808 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, lhs)
8809 if (!is_gimple_debug (use_stmt)
8810 && (!(use_stmt_info = vinfo->lookup_stmt (use_stmt))
8811 || !PURE_SLP_STMT (vect_stmt_to_vectorize (use_stmt_info))))
8813 /* ??? This can happen when the live lane ends up being
8814 used in a vector construction code-generated by an
8815 external SLP node (and code-generation for that already
8816 happened). See gcc.dg/vect/bb-slp-47.c.
8817 Doing this is what would happen if that vector CTOR
8818 were not code-generated yet so it is not too bad.
8819 ??? In fact we'd likely want to avoid this situation
8820 in the first place. */
8821 if (TREE_CODE (new_tree) == SSA_NAME
8822 && !SSA_NAME_IS_DEFAULT_DEF (new_tree)
8823 && gimple_code (use_stmt) != GIMPLE_PHI
8824 && !vect_stmt_dominates_stmt_p (SSA_NAME_DEF_STMT (new_tree),
8825 use_stmt))
8827 enum tree_code code = gimple_assign_rhs_code (use_stmt);
8828 gcc_assert (code == CONSTRUCTOR
8829 || code == VIEW_CONVERT_EXPR
8830 || CONVERT_EXPR_CODE_P (code));
8831 if (dump_enabled_p ())
8832 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8833 "Using original scalar computation for "
8834 "live lane because use preceeds vector "
8835 "def\n");
8836 continue;
8838 /* ??? It can also happen that we end up pulling a def into
8839 a loop where replacing out-of-loop uses would require
8840 a new LC SSA PHI node. Retain the original scalar in
8841 those cases as well. PR98064. */
8842 if (TREE_CODE (new_tree) == SSA_NAME
8843 && !SSA_NAME_IS_DEFAULT_DEF (new_tree)
8844 && (gimple_bb (use_stmt)->loop_father
8845 != gimple_bb (vec_stmt)->loop_father)
8846 && !flow_loop_nested_p (gimple_bb (vec_stmt)->loop_father,
8847 gimple_bb (use_stmt)->loop_father))
8849 if (dump_enabled_p ())
8850 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8851 "Using original scalar computation for "
8852 "live lane because there is an out-of-loop "
8853 "definition for it\n");
8854 continue;
8856 FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
8857 SET_USE (use_p, new_tree);
8858 update_stmt (use_stmt);
8862 return true;
8865 /* Kill any debug uses outside LOOP of SSA names defined in STMT_INFO. */
8867 static void
8868 vect_loop_kill_debug_uses (class loop *loop, stmt_vec_info stmt_info)
8870 ssa_op_iter op_iter;
8871 imm_use_iterator imm_iter;
8872 def_operand_p def_p;
8873 gimple *ustmt;
8875 FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt_info->stmt, op_iter, SSA_OP_DEF)
8877 FOR_EACH_IMM_USE_STMT (ustmt, imm_iter, DEF_FROM_PTR (def_p))
8879 basic_block bb;
8881 if (!is_gimple_debug (ustmt))
8882 continue;
8884 bb = gimple_bb (ustmt);
8886 if (!flow_bb_inside_loop_p (loop, bb))
8888 if (gimple_debug_bind_p (ustmt))
8890 if (dump_enabled_p ())
8891 dump_printf_loc (MSG_NOTE, vect_location,
8892 "killing debug use\n");
8894 gimple_debug_bind_reset_value (ustmt);
8895 update_stmt (ustmt);
8897 else
8898 gcc_unreachable ();
8904 /* Given loop represented by LOOP_VINFO, return true if computation of
8905 LOOP_VINFO_NITERS (= LOOP_VINFO_NITERSM1 + 1) doesn't overflow, false
8906 otherwise. */
8908 static bool
8909 loop_niters_no_overflow (loop_vec_info loop_vinfo)
8911 /* Constant case. */
8912 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
8914 tree cst_niters = LOOP_VINFO_NITERS (loop_vinfo);
8915 tree cst_nitersm1 = LOOP_VINFO_NITERSM1 (loop_vinfo);
8917 gcc_assert (TREE_CODE (cst_niters) == INTEGER_CST);
8918 gcc_assert (TREE_CODE (cst_nitersm1) == INTEGER_CST);
8919 if (wi::to_widest (cst_nitersm1) < wi::to_widest (cst_niters))
8920 return true;
8923 widest_int max;
8924 class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
8925 /* Check the upper bound of loop niters. */
8926 if (get_max_loop_iterations (loop, &max))
8928 tree type = TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo));
8929 signop sgn = TYPE_SIGN (type);
8930 widest_int type_max = widest_int::from (wi::max_value (type), sgn);
8931 if (max < type_max)
8932 return true;
8934 return false;
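/* For example (purely illustrative): if the type of LOOP_VINFO_NITERS is
   a 32-bit unsigned type and NITERSM1 is 0xfffffffe, then NITERS is
   0xffffffff, NITERSM1 < NITERS holds and there is no overflow.  If
   instead NITERSM1 were 0xffffffff, NITERS would have wrapped to 0 and
   the constant check above would not succeed.  Likewise the non-constant
   path only succeeds when the recorded upper bound on the iteration
   count is strictly smaller than the maximum value of the niters type.  */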
8937 /* Return a mask type with half the number of elements as OLD_TYPE,
8938 given that it should have mode NEW_MODE. */
8940 tree
8941 vect_halve_mask_nunits (tree old_type, machine_mode new_mode)
8943 poly_uint64 nunits = exact_div (TYPE_VECTOR_SUBPARTS (old_type), 2);
8944 return build_truth_vector_type_for_mode (nunits, new_mode);
8947 /* Return a mask type with twice as many elements as OLD_TYPE,
8948 given that it should have mode NEW_MODE. */
8950 tree
8951 vect_double_mask_nunits (tree old_type, machine_mode new_mode)
8953 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (old_type) * 2;
8954 return build_truth_vector_type_for_mode (nunits, new_mode);
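/* A possible illustration (target-dependent, given only as a sketch):
   starting from a mask type with 16 lanes, vect_halve_mask_nunits yields
   an 8-lane mask type in NEW_MODE, and applying vect_double_mask_nunits
   to that 8-lane type (with a suitable mode) gives back a 16-lane mask
   type.  Both functions simply recompute TYPE_VECTOR_SUBPARTS and let
   build_truth_vector_type_for_mode build the type.  */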
8957 /* Record that a fully-masked version of LOOP_VINFO would need MASKS to
8958 contain a sequence of NVECTORS masks that each control a vector of type
8959 VECTYPE. If SCALAR_MASK is nonnull, the fully-masked loop would AND
8960 these vector masks with the vector version of SCALAR_MASK. */
8962 void
8963 vect_record_loop_mask (loop_vec_info loop_vinfo, vec_loop_masks *masks,
8964 unsigned int nvectors, tree vectype, tree scalar_mask)
8966 gcc_assert (nvectors != 0);
8967 if (masks->length () < nvectors)
8968 masks->safe_grow_cleared (nvectors, true);
8969 rgroup_controls *rgm = &(*masks)[nvectors - 1];
8970 /* The number of scalars per iteration and the number of vectors are
8971 both compile-time constants. */
8972 unsigned int nscalars_per_iter
8973 = exact_div (nvectors * TYPE_VECTOR_SUBPARTS (vectype),
8974 LOOP_VINFO_VECT_FACTOR (loop_vinfo)).to_constant ();
8976 if (scalar_mask)
8978 scalar_cond_masked_key cond (scalar_mask, nvectors);
8979 loop_vinfo->scalar_cond_masked_set.add (cond);
8982 if (rgm->max_nscalars_per_iter < nscalars_per_iter)
8984 rgm->max_nscalars_per_iter = nscalars_per_iter;
8985 rgm->type = truth_type_for (vectype);
8986 rgm->factor = 1;
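/* A worked example with made-up numbers: if the vectorization factor is 8
   and a statement needs NVECTORS = 2 masks for 8-element vectors, then
   nscalars_per_iter = 2 * 8 / 8 = 2, i.e. the rgroup controls two scalar
   values per original iteration.  The entry (*masks)[1] then records a
   max_nscalars_per_iter of 2 together with the truth type of VECTYPE.  */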
8990 /* Given a complete set of masks MASKS, extract mask number INDEX
8991 for an rgroup that operates on NVECTORS vectors of type VECTYPE,
8992 where 0 <= INDEX < NVECTORS. Insert any set-up statements before GSI.
8994 See the comment above vec_loop_masks for more details about the mask
8995 arrangement. */
8997 tree
8998 vect_get_loop_mask (gimple_stmt_iterator *gsi, vec_loop_masks *masks,
8999 unsigned int nvectors, tree vectype, unsigned int index)
9001 rgroup_controls *rgm = &(*masks)[nvectors - 1];
9002 tree mask_type = rgm->type;
9004 /* Populate the rgroup's mask array, if this is the first time we've
9005 used it. */
9006 if (rgm->controls.is_empty ())
9008 rgm->controls.safe_grow_cleared (nvectors, true);
9009 for (unsigned int i = 0; i < nvectors; ++i)
9011 tree mask = make_temp_ssa_name (mask_type, NULL, "loop_mask");
9012 /* Provide a dummy definition until the real one is available. */
9013 SSA_NAME_DEF_STMT (mask) = gimple_build_nop ();
9014 rgm->controls[i] = mask;
9018 tree mask = rgm->controls[index];
9019 if (maybe_ne (TYPE_VECTOR_SUBPARTS (mask_type),
9020 TYPE_VECTOR_SUBPARTS (vectype)))
9022 /* A loop mask for data type X can be reused for data type Y
9023 if X has N times more elements than Y and if Y's elements
9024 are N times bigger than X's. In this case each sequence
9025 of N elements in the loop mask will be all-zero or all-one.
9026 We can then view-convert the mask so that each sequence of
9027 N elements is replaced by a single element. */
9028 gcc_assert (multiple_p (TYPE_VECTOR_SUBPARTS (mask_type),
9029 TYPE_VECTOR_SUBPARTS (vectype)));
9030 gimple_seq seq = NULL;
9031 mask_type = truth_type_for (vectype);
9032 mask = gimple_build (&seq, VIEW_CONVERT_EXPR, mask_type, mask);
9033 if (seq)
9034 gsi_insert_seq_before (gsi, seq, GSI_SAME_STMT);
9036 return mask;
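/* As an illustration (the element counts are only an example): a mask
   created for an rgroup of 16 one-byte elements can be reused for a
   vector of 8 two-byte elements.  Each pair of adjacent lanes in the
   16-lane mask is known to be all-zero or all-one, so the
   VIEW_CONVERT_EXPR above reinterprets it as the 8-lane truth type for
   VECTYPE without changing which elements are active.  */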
9039 /* Record that LOOP_VINFO would need LENS to contain a sequence of NVECTORS
9040 lengths for controlling an operation on VECTYPE. The operation splits
9041 each element of VECTYPE into FACTOR separate subelements, measuring the
9042 length as a number of these subelements. */
9044 void
9045 vect_record_loop_len (loop_vec_info loop_vinfo, vec_loop_lens *lens,
9046 unsigned int nvectors, tree vectype, unsigned int factor)
9048 gcc_assert (nvectors != 0);
9049 if (lens->length () < nvectors)
9050 lens->safe_grow_cleared (nvectors, true);
9051 rgroup_controls *rgl = &(*lens)[nvectors - 1];
9053 /* The number of scalars per iteration, the number of bytes occupied by
9054 a scalar, and the number of vectors are all compile-time constants. */
9055 unsigned int nscalars_per_iter
9056 = exact_div (nvectors * TYPE_VECTOR_SUBPARTS (vectype),
9057 LOOP_VINFO_VECT_FACTOR (loop_vinfo)).to_constant ();
9059 if (rgl->max_nscalars_per_iter < nscalars_per_iter)
9061 /* For now, we only support cases in which all loads and stores fall back
9062 to VnQI or none do. */
9063 gcc_assert (!rgl->max_nscalars_per_iter
9064 || (rgl->factor == 1 && factor == 1)
9065 || (rgl->max_nscalars_per_iter * rgl->factor
9066 == nscalars_per_iter * factor));
9067 rgl->max_nscalars_per_iter = nscalars_per_iter;
9068 rgl->type = vectype;
9069 rgl->factor = factor;
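/* For instance (a sketch only): if a load on 8-element halfword vectors
   has to fall back to byte-based lengths, it is recorded with FACTOR = 2;
   each halfword counts as two subelements, so a full vector corresponds
   to a length of 16.  An operation that measures its length directly in
   elements would instead use FACTOR = 1.  */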
9073 /* Given a complete set of length LENS, extract length number INDEX for an
9074 rgroup that operates on NVECTORS vectors, where 0 <= INDEX < NVECTORS. */
9076 tree
9077 vect_get_loop_len (loop_vec_info loop_vinfo, vec_loop_lens *lens,
9078 unsigned int nvectors, unsigned int index)
9080 rgroup_controls *rgl = &(*lens)[nvectors - 1];
9082 /* Populate the rgroup's len array, if this is the first time we've
9083 used it. */
9084 if (rgl->controls.is_empty ())
9086 rgl->controls.safe_grow_cleared (nvectors, true);
9087 for (unsigned int i = 0; i < nvectors; ++i)
9089 tree len_type = LOOP_VINFO_RGROUP_COMPARE_TYPE (loop_vinfo);
9090 gcc_assert (len_type != NULL_TREE);
9091 tree len = make_temp_ssa_name (len_type, NULL, "loop_len");
9093 /* Provide a dummy definition until the real one is available. */
9094 SSA_NAME_DEF_STMT (len) = gimple_build_nop ();
9095 rgl->controls[i] = len;
9099 return rgl->controls[index];
9102 /* Scale the profile counters of LOOP, which is vectorized by factor VF,
9103 according to the estimated number of iterations of the vector loop. */
9105 static void
9106 scale_profile_for_vect_loop (class loop *loop, unsigned vf)
9108 edge preheader = loop_preheader_edge (loop);
9109 /* Reduce loop iterations by the vectorization factor. */
9110 gcov_type new_est_niter = niter_for_unrolled_loop (loop, vf);
9111 profile_count freq_h = loop->header->count, freq_e = preheader->count ();
9113 if (freq_h.nonzero_p ())
9115 profile_probability p;
9117 /* Avoid dropping loop body profile counter to 0 because of zero count
9118 in loop's preheader. */
9119 if (!(freq_e == profile_count::zero ()))
9120 freq_e = freq_e.force_nonzero ();
9121 p = freq_e.apply_scale (new_est_niter + 1, 1).probability_in (freq_h);
9122 scale_loop_frequencies (loop, p);
9125 edge exit_e = single_exit (loop);
9126 exit_e->probability = profile_probability::always ()
9127 .apply_scale (1, new_est_niter + 1);
9129 edge exit_l = single_pred_edge (loop->latch);
9130 profile_probability prob = exit_l->probability;
9131 exit_l->probability = exit_e->probability.invert ();
9132 if (prob.initialized_p () && exit_l->probability.initialized_p ())
9133 scale_bbs_frequencies (&loop->latch, 1, exit_l->probability / prob);
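/* Rough illustration, ignoring the corrections niter_for_unrolled_loop
   applies: if the scalar loop was expected to iterate ~100 times and VF
   is 4, NEW_EST_NITER is ~24, the exit edge gets probability
   1/(24 + 1) = 4%, and the body is rescaled so that the header count is
   about 25 times the preheader count.  */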
9136 /* For a vectorized stmt DEF_STMT_INFO, set the latch-edge arguments of
9137 all vectorized PHIs whose scalar latch value is defined by it. */
9139 static void
9140 maybe_set_vectorized_backedge_value (loop_vec_info loop_vinfo,
9141 stmt_vec_info def_stmt_info)
9143 tree def = gimple_get_lhs (vect_orig_stmt (def_stmt_info)->stmt);
9144 if (!def || TREE_CODE (def) != SSA_NAME)
9145 return;
9146 stmt_vec_info phi_info;
9147 imm_use_iterator iter;
9148 use_operand_p use_p;
9149 FOR_EACH_IMM_USE_FAST (use_p, iter, def)
9150 if (gphi *phi = dyn_cast <gphi *> (USE_STMT (use_p)))
9151 if (gimple_bb (phi)->loop_father->header == gimple_bb (phi)
9152 && (phi_info = loop_vinfo->lookup_stmt (phi))
9153 && STMT_VINFO_RELEVANT_P (phi_info)
9154 && VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (phi_info))
9155 && STMT_VINFO_REDUC_TYPE (phi_info) != FOLD_LEFT_REDUCTION
9156 && STMT_VINFO_REDUC_TYPE (phi_info) != EXTRACT_LAST_REDUCTION)
9158 loop_p loop = gimple_bb (phi)->loop_father;
9159 edge e = loop_latch_edge (loop);
9160 if (PHI_ARG_DEF_FROM_EDGE (phi, e) == def)
9162 vec<gimple *> &phi_defs = STMT_VINFO_VEC_STMTS (phi_info);
9163 vec<gimple *> &latch_defs = STMT_VINFO_VEC_STMTS (def_stmt_info);
9164 gcc_assert (phi_defs.length () == latch_defs.length ());
9165 for (unsigned i = 0; i < phi_defs.length (); ++i)
9166 add_phi_arg (as_a <gphi *> (phi_defs[i]),
9167 gimple_get_lhs (latch_defs[i]), e,
9168 gimple_phi_arg_location (phi, e->dest_idx));
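/* Sketch of the effect (the SSA names are invented for the example): if
   the scalar loop has
       x_1 = PHI <x_0(preheader), x_2(latch)>
   and x_2 is defined by DEF_STMT_INFO, then once x_2's vector
   definitions exist, each vectorized PHI created for x_1 receives the
   corresponding vector definition of x_2 as its latch argument.  */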
9173 /* Vectorize STMT_INFO if relevant, inserting any new instructions before GSI.
9174 When vectorizing STMT_INFO as a store, set *SEEN_STORE to its
9175 stmt_vec_info. */
9177 static bool
9178 vect_transform_loop_stmt (loop_vec_info loop_vinfo, stmt_vec_info stmt_info,
9179 gimple_stmt_iterator *gsi, stmt_vec_info *seen_store)
9181 class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
9182 poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
9184 if (dump_enabled_p ())
9185 dump_printf_loc (MSG_NOTE, vect_location,
9186 "------>vectorizing statement: %G", stmt_info->stmt);
9188 if (MAY_HAVE_DEBUG_BIND_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
9189 vect_loop_kill_debug_uses (loop, stmt_info);
9191 if (!STMT_VINFO_RELEVANT_P (stmt_info)
9192 && !STMT_VINFO_LIVE_P (stmt_info))
9193 return false;
9195 if (STMT_VINFO_VECTYPE (stmt_info))
9197 poly_uint64 nunits
9198 = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
9199 if (!STMT_SLP_TYPE (stmt_info)
9200 && maybe_ne (nunits, vf)
9201 && dump_enabled_p ())
9202 /* For SLP, VF is set according to the unrolling factor rather than
9203 to the vector size, hence this message is not valid for SLP. */
9204 dump_printf_loc (MSG_NOTE, vect_location, "multiple-types.\n");
9207 /* Pure SLP statements have already been vectorized. We still need
9208 to apply loop vectorization to hybrid SLP statements. */
9209 if (PURE_SLP_STMT (stmt_info))
9210 return false;
9212 if (dump_enabled_p ())
9213 dump_printf_loc (MSG_NOTE, vect_location, "transform statement.\n");
9215 if (vect_transform_stmt (loop_vinfo, stmt_info, gsi, NULL, NULL))
9216 *seen_store = stmt_info;
9218 return true;
9221 /* Helper function to pass to simplify_replace_tree so that trees present
9222 in the hash_map are replaced by their corresponding values. */
9224 static tree
9225 find_in_mapping (tree t, void *context)
9227 hash_map<tree,tree>* mapping = (hash_map<tree, tree>*) context;
9229 tree *value = mapping->get (t);
9230 return value ? *value : t;
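/* This is used by update_epilogue_loop_vinfo below, e.g.
       simplify_replace_tree (op, NULL_TREE, NULL_TREE,
			      &find_in_mapping, &mapping, false);
   so any tree recorded as a key in MAPPING is replaced by its mapped
   value, while all other trees are returned unchanged.  */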
9233 /* Update EPILOGUE's loop_vec_info. EPILOGUE was constructed as a copy of the
9234 original loop that has now been vectorized.
9236 The inits of the data_references need to be advanced with the number of
9237 iterations of the main loop. This has been computed in vect_do_peeling and
9238 is stored in parameter ADVANCE. We first restore the data_references'
9239 initial offsets with the values recorded in ORIG_DRS_INIT.
9241 Since the loop_vec_info of this EPILOGUE was constructed for the original
9242 loop, its stmt_vec_infos all point to the original statements. These need
9243 to be updated to point to their corresponding copies as well as the SSA_NAMES
9244 in their PATTERN_DEF_SEQs and RELATED_STMTs.
9246 The data_reference's connections also need to be updated. Their
9247 corresponding dr_vec_info need to be reconnected to the EPILOGUE's
9248 stmt_vec_infos, their statements need to point to their corresponding copy,
9249 if they are gather loads or scatter stores then their reference needs to be
9250 updated to point to its corresponding copy and finally we set
9251 'base_misaligned' to false as we have already peeled for alignment in the
9252 prologue of the main loop. */
9254 static void
9255 update_epilogue_loop_vinfo (class loop *epilogue, tree advance)
9257 loop_vec_info epilogue_vinfo = loop_vec_info_for_loop (epilogue);
9258 auto_vec<gimple *> stmt_worklist;
9259 hash_map<tree,tree> mapping;
9260 gimple *orig_stmt, *new_stmt;
9261 gimple_stmt_iterator epilogue_gsi;
9262 gphi_iterator epilogue_phi_gsi;
9263 stmt_vec_info stmt_vinfo = NULL, related_vinfo;
9264 basic_block *epilogue_bbs = get_loop_body (epilogue);
9265 unsigned i;
9267 free (LOOP_VINFO_BBS (epilogue_vinfo));
9268 LOOP_VINFO_BBS (epilogue_vinfo) = epilogue_bbs;
9270 /* Advance data_reference's with the number of iterations of the previous
9271 loop and its prologue. */
9272 vect_update_inits_of_drs (epilogue_vinfo, advance, PLUS_EXPR);
9275 /* The EPILOGUE loop is a copy of the original loop so they share the same
9276 gimple UIDs. In this loop we update the loop_vec_info of the EPILOGUE to
9277 point to the copied statements. We also create a mapping from each LHS
9278 in the original loop to the corresponding LHS in the EPILOGUE, and create
9279 worklists to update the STMT_VINFO_PATTERN_DEF_SEQs and STMT_VINFO_RELATED_STMTs.
9280 for (unsigned i = 0; i < epilogue->num_nodes; ++i)
9282 for (epilogue_phi_gsi = gsi_start_phis (epilogue_bbs[i]);
9283 !gsi_end_p (epilogue_phi_gsi); gsi_next (&epilogue_phi_gsi))
9285 new_stmt = epilogue_phi_gsi.phi ();
9287 gcc_assert (gimple_uid (new_stmt) > 0);
9288 stmt_vinfo
9289 = epilogue_vinfo->stmt_vec_infos[gimple_uid (new_stmt) - 1];
9291 orig_stmt = STMT_VINFO_STMT (stmt_vinfo);
9292 STMT_VINFO_STMT (stmt_vinfo) = new_stmt;
9294 mapping.put (gimple_phi_result (orig_stmt),
9295 gimple_phi_result (new_stmt));
9296 /* PHI nodes can not have patterns or related statements. */
9297 gcc_assert (STMT_VINFO_PATTERN_DEF_SEQ (stmt_vinfo) == NULL
9298 && STMT_VINFO_RELATED_STMT (stmt_vinfo) == NULL);
9301 for (epilogue_gsi = gsi_start_bb (epilogue_bbs[i]);
9302 !gsi_end_p (epilogue_gsi); gsi_next (&epilogue_gsi))
9304 new_stmt = gsi_stmt (epilogue_gsi);
9305 if (is_gimple_debug (new_stmt))
9306 continue;
9308 gcc_assert (gimple_uid (new_stmt) > 0);
9309 stmt_vinfo
9310 = epilogue_vinfo->stmt_vec_infos[gimple_uid (new_stmt) - 1];
9312 orig_stmt = STMT_VINFO_STMT (stmt_vinfo);
9313 STMT_VINFO_STMT (stmt_vinfo) = new_stmt;
9315 if (tree old_lhs = gimple_get_lhs (orig_stmt))
9316 mapping.put (old_lhs, gimple_get_lhs (new_stmt));
9318 if (STMT_VINFO_PATTERN_DEF_SEQ (stmt_vinfo))
9320 gimple_seq seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_vinfo);
9321 for (gimple_stmt_iterator gsi = gsi_start (seq);
9322 !gsi_end_p (gsi); gsi_next (&gsi))
9323 stmt_worklist.safe_push (gsi_stmt (gsi));
9326 related_vinfo = STMT_VINFO_RELATED_STMT (stmt_vinfo);
9327 if (related_vinfo != NULL && related_vinfo != stmt_vinfo)
9329 gimple *stmt = STMT_VINFO_STMT (related_vinfo);
9330 stmt_worklist.safe_push (stmt);
9331 /* Set BB such that the assert in
9332 'get_initial_def_for_reduction' is able to determine that
9333 the BB of the related stmt is inside this loop. */
9334 gimple_set_bb (stmt,
9335 gimple_bb (new_stmt));
9336 related_vinfo = STMT_VINFO_RELATED_STMT (related_vinfo);
9337 gcc_assert (related_vinfo == NULL
9338 || related_vinfo == stmt_vinfo);
9343 /* The PATTERN_DEF_SEQs and RELATED_STMTs in the epilogue were constructed
9344 using the original main loop and thus need to be updated to refer to the
9345 cloned variables used in the epilogue. */
9346 for (unsigned i = 0; i < stmt_worklist.length (); ++i)
9348 gimple *stmt = stmt_worklist[i];
9349 tree *new_op;
9351 for (unsigned j = 1; j < gimple_num_ops (stmt); ++j)
9353 tree op = gimple_op (stmt, j);
9354 if ((new_op = mapping.get(op)))
9355 gimple_set_op (stmt, j, *new_op);
9356 else
9358 /* PR92429: The last argument of simplify_replace_tree disables
9359 folding when replacing arguments. This is required as
9360 otherwise you might end up with different statements than the
9361 ones analyzed in vect_loop_analyze, leading to different
9362 vectorization. */
9363 op = simplify_replace_tree (op, NULL_TREE, NULL_TREE,
9364 &find_in_mapping, &mapping, false);
9365 gimple_set_op (stmt, j, op);
9370 struct data_reference *dr;
9371 vec<data_reference_p> datarefs = LOOP_VINFO_DATAREFS (epilogue_vinfo);
9372 FOR_EACH_VEC_ELT (datarefs, i, dr)
9374 orig_stmt = DR_STMT (dr);
9375 gcc_assert (gimple_uid (orig_stmt) > 0);
9376 stmt_vinfo = epilogue_vinfo->stmt_vec_infos[gimple_uid (orig_stmt) - 1];
9377 /* Data references for gather loads and scatter stores do not use the
9378 updated offset we set using ADVANCE. Instead we have to make sure the
9379 references in these data references point to the corresponding copy of
9380 the original in the epilogue. */
9381 if (STMT_VINFO_MEMORY_ACCESS_TYPE (vect_stmt_to_vectorize (stmt_vinfo))
9382 == VMAT_GATHER_SCATTER)
9384 DR_REF (dr)
9385 = simplify_replace_tree (DR_REF (dr), NULL_TREE, NULL_TREE,
9386 &find_in_mapping, &mapping);
9387 DR_BASE_ADDRESS (dr)
9388 = simplify_replace_tree (DR_BASE_ADDRESS (dr), NULL_TREE, NULL_TREE,
9389 &find_in_mapping, &mapping);
9391 DR_STMT (dr) = STMT_VINFO_STMT (stmt_vinfo);
9392 stmt_vinfo->dr_aux.stmt = stmt_vinfo;
9393 /* The vector size of the epilogue is smaller than that of the main loop,
9394 so the alignment is either the same or lower. This means the DR will
9395 by definition be aligned. */
9396 STMT_VINFO_DR_INFO (stmt_vinfo)->base_misaligned = false;
9399 epilogue_vinfo->shared->datarefs_copy.release ();
9400 epilogue_vinfo->shared->save_datarefs ();
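/* A small illustration (the SSA names are made up): if the main loop
   contains
     _23 = a[i_5];
   and its copy in the epilogue is
     _57 = a[i_9];
   then MAPPING records _23 -> _57 (and i_5 -> i_9 via the PHI results),
   so the pattern statements and gather/scatter DR_REFs processed above
   are rewritten to refer to the epilogue's SSA names.  */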
9403 /* Function vect_transform_loop.
9405 The analysis phase has determined that the loop is vectorizable.
9406 Vectorize the loop - created vectorized stmts to replace the scalar
9407 stmts in the loop, and update the loop exit condition.
9408 Returns scalar epilogue loop if any. */
9410 class loop *
9411 vect_transform_loop (loop_vec_info loop_vinfo, gimple *loop_vectorized_call)
9413 class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
9414 class loop *epilogue = NULL;
9415 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
9416 int nbbs = loop->num_nodes;
9417 int i;
9418 tree niters_vector = NULL_TREE;
9419 tree step_vector = NULL_TREE;
9420 tree niters_vector_mult_vf = NULL_TREE;
9421 poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
9422 unsigned int lowest_vf = constant_lower_bound (vf);
9423 gimple *stmt;
9424 bool check_profitability = false;
9425 unsigned int th;
9427 DUMP_VECT_SCOPE ("vec_transform_loop");
9429 loop_vinfo->shared->check_datarefs ();
9431 /* Use the more conservative vectorization threshold. If the number
9432 of iterations is constant, assume the cost check has been performed
9433 by our caller. If the threshold makes all loops profitable that
9434 run at least the (estimated) vectorization factor number of times,
9435 checking is pointless, too. */
9436 th = LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo);
9437 if (vect_apply_runtime_profitability_check_p (loop_vinfo))
9439 if (dump_enabled_p ())
9440 dump_printf_loc (MSG_NOTE, vect_location,
9441 "Profitability threshold is %d loop iterations.\n",
9442 th);
9443 check_profitability = true;
9446 /* Make sure there exists a single-predecessor exit bb. Do this before
9447 versioning. */
9448 edge e = single_exit (loop);
9449 if (! single_pred_p (e->dest))
9451 split_loop_exit_edge (e, true);
9452 if (dump_enabled_p ())
9453 dump_printf (MSG_NOTE, "split exit edge\n");
9456 /* Version the loop first, if required, so the profitability check
9457 comes first. */
9459 if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
9461 class loop *sloop
9462 = vect_loop_versioning (loop_vinfo, loop_vectorized_call);
9463 sloop->force_vectorize = false;
9464 check_profitability = false;
9467 /* Make sure there exists a single-predecessor exit bb also on the
9468 scalar loop copy. Do this after versioning but before peeling
9469 so the CFG structure is fine for both the scalar and the if-converted
9470 loop, letting slpeel_duplicate_current_defs_from_edges see matched
9471 loop-closed PHI nodes on the exit.
9472 if (LOOP_VINFO_SCALAR_LOOP (loop_vinfo))
9474 e = single_exit (LOOP_VINFO_SCALAR_LOOP (loop_vinfo));
9475 if (! single_pred_p (e->dest))
9477 split_loop_exit_edge (e, true);
9478 if (dump_enabled_p ())
9479 dump_printf (MSG_NOTE, "split exit edge of scalar loop\n");
9483 tree niters = vect_build_loop_niters (loop_vinfo);
9484 LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = niters;
9485 tree nitersm1 = unshare_expr (LOOP_VINFO_NITERSM1 (loop_vinfo));
9486 bool niters_no_overflow = loop_niters_no_overflow (loop_vinfo);
9487 tree advance;
9488 drs_init_vec orig_drs_init;
9490 epilogue = vect_do_peeling (loop_vinfo, niters, nitersm1, &niters_vector,
9491 &step_vector, &niters_vector_mult_vf, th,
9492 check_profitability, niters_no_overflow,
9493 &advance);
9495 if (LOOP_VINFO_SCALAR_LOOP (loop_vinfo)
9496 && LOOP_VINFO_SCALAR_LOOP_SCALING (loop_vinfo).initialized_p ())
9497 scale_loop_frequencies (LOOP_VINFO_SCALAR_LOOP (loop_vinfo),
9498 LOOP_VINFO_SCALAR_LOOP_SCALING (loop_vinfo));
9500 if (niters_vector == NULL_TREE)
9502 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
9503 && !LOOP_VINFO_USING_PARTIAL_VECTORS_P (loop_vinfo)
9504 && known_eq (lowest_vf, vf))
9506 niters_vector
9507 = build_int_cst (TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo)),
9508 LOOP_VINFO_INT_NITERS (loop_vinfo) / lowest_vf);
9509 step_vector = build_one_cst (TREE_TYPE (niters));
9511 else if (vect_use_loop_mask_for_alignment_p (loop_vinfo))
9512 vect_gen_vector_loop_niters (loop_vinfo, niters, &niters_vector,
9513 &step_vector, niters_no_overflow);
9514 else
9515 /* vect_do_peeling subtracted the number of peeled prologue
9516 iterations from LOOP_VINFO_NITERS. */
9517 vect_gen_vector_loop_niters (loop_vinfo, LOOP_VINFO_NITERS (loop_vinfo),
9518 &niters_vector, &step_vector,
9519 niters_no_overflow);
9522 /* 1) Make sure the loop header has exactly two entries
9523 2) Make sure we have a preheader basic block. */
9525 gcc_assert (EDGE_COUNT (loop->header->preds) == 2);
9527 split_edge (loop_preheader_edge (loop));
9529 if (vect_use_loop_mask_for_alignment_p (loop_vinfo))
9530 /* This will deal with any possible peeling. */
9531 vect_prepare_for_masked_peels (loop_vinfo);
9533 /* Schedule the SLP instances first, then handle loop vectorization
9534 below. */
9535 if (!loop_vinfo->slp_instances.is_empty ())
9537 DUMP_VECT_SCOPE ("scheduling SLP instances");
9538 vect_schedule_slp (loop_vinfo, LOOP_VINFO_SLP_INSTANCES (loop_vinfo));
9541 /* FORNOW: the vectorizer supports only loops whose body consists
9542 of one basic block (header + empty latch). When the vectorizer
9543 supports more involved loop forms, the order in which the BBs are
9544 traversed needs to be reconsidered. */
9546 for (i = 0; i < nbbs; i++)
9548 basic_block bb = bbs[i];
9549 stmt_vec_info stmt_info;
9551 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
9552 gsi_next (&si))
9554 gphi *phi = si.phi ();
9555 if (dump_enabled_p ())
9556 dump_printf_loc (MSG_NOTE, vect_location,
9557 "------>vectorizing phi: %G", phi);
9558 stmt_info = loop_vinfo->lookup_stmt (phi);
9559 if (!stmt_info)
9560 continue;
9562 if (MAY_HAVE_DEBUG_BIND_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
9563 vect_loop_kill_debug_uses (loop, stmt_info);
9565 if (!STMT_VINFO_RELEVANT_P (stmt_info)
9566 && !STMT_VINFO_LIVE_P (stmt_info))
9567 continue;
9569 if (STMT_VINFO_VECTYPE (stmt_info)
9570 && (maybe_ne
9571 (TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info)), vf))
9572 && dump_enabled_p ())
9573 dump_printf_loc (MSG_NOTE, vect_location, "multiple-types.\n");
9575 if ((STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def
9576 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def
9577 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_double_reduction_def
9578 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
9579 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_internal_def)
9580 && ! PURE_SLP_STMT (stmt_info))
9582 if (dump_enabled_p ())
9583 dump_printf_loc (MSG_NOTE, vect_location, "transform phi.\n");
9584 vect_transform_stmt (loop_vinfo, stmt_info, NULL, NULL, NULL);
9588 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
9589 gsi_next (&si))
9591 gphi *phi = si.phi ();
9592 stmt_info = loop_vinfo->lookup_stmt (phi);
9593 if (!stmt_info)
9594 continue;
9596 if (!STMT_VINFO_RELEVANT_P (stmt_info)
9597 && !STMT_VINFO_LIVE_P (stmt_info))
9598 continue;
9600 if ((STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def
9601 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def
9602 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_double_reduction_def
9603 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
9604 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_internal_def)
9605 && ! PURE_SLP_STMT (stmt_info))
9606 maybe_set_vectorized_backedge_value (loop_vinfo, stmt_info);
9609 for (gimple_stmt_iterator si = gsi_start_bb (bb);
9610 !gsi_end_p (si);)
9612 stmt = gsi_stmt (si);
9613 /* During vectorization remove existing clobber stmts. */
9614 if (gimple_clobber_p (stmt))
9616 unlink_stmt_vdef (stmt);
9617 gsi_remove (&si, true);
9618 release_defs (stmt);
9620 else
9622 /* Ignore vector stmts created in the outer loop. */
9623 stmt_info = loop_vinfo->lookup_stmt (stmt);
9625 /* vector stmts created in the outer-loop during vectorization of
9626 stmts in an inner-loop may not have a stmt_info, and do not
9627 need to be vectorized. */
9628 stmt_vec_info seen_store = NULL;
9629 if (stmt_info)
9631 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
9633 gimple *def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
9634 for (gimple_stmt_iterator subsi = gsi_start (def_seq);
9635 !gsi_end_p (subsi); gsi_next (&subsi))
9637 stmt_vec_info pat_stmt_info
9638 = loop_vinfo->lookup_stmt (gsi_stmt (subsi));
9639 vect_transform_loop_stmt (loop_vinfo, pat_stmt_info,
9640 &si, &seen_store);
9642 stmt_vec_info pat_stmt_info
9643 = STMT_VINFO_RELATED_STMT (stmt_info);
9644 if (vect_transform_loop_stmt (loop_vinfo, pat_stmt_info,
9645 &si, &seen_store))
9646 maybe_set_vectorized_backedge_value (loop_vinfo,
9647 pat_stmt_info);
9649 else
9651 if (vect_transform_loop_stmt (loop_vinfo, stmt_info, &si,
9652 &seen_store))
9653 maybe_set_vectorized_backedge_value (loop_vinfo,
9654 stmt_info);
9657 gsi_next (&si);
9658 if (seen_store)
9660 if (STMT_VINFO_GROUPED_ACCESS (seen_store))
9661 /* Interleaving. The vectorization of the
9662 interleaving chain was completed -
9663 free all the stores in the chain. */
9664 vect_remove_stores (loop_vinfo,
9665 DR_GROUP_FIRST_ELEMENT (seen_store));
9666 else
9667 /* Free the attached stmt_vec_info and remove the stmt. */
9668 loop_vinfo->remove_stmt (stmt_info);
9673 /* Stub out scalar statements that must not survive vectorization.
9674 Doing this here helps with grouped statements, or statements that
9675 are involved in patterns. */
9676 for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
9677 !gsi_end_p (gsi); gsi_next (&gsi))
9679 gcall *call = dyn_cast <gcall *> (gsi_stmt (gsi));
9680 if (!call || !gimple_call_internal_p (call))
9681 continue;
9682 internal_fn ifn = gimple_call_internal_fn (call);
9683 if (ifn == IFN_MASK_LOAD)
9685 tree lhs = gimple_get_lhs (call);
9686 if (!VECTOR_TYPE_P (TREE_TYPE (lhs)))
9688 tree zero = build_zero_cst (TREE_TYPE (lhs));
9689 gimple *new_stmt = gimple_build_assign (lhs, zero);
9690 gsi_replace (&gsi, new_stmt, true);
9693 else if (conditional_internal_fn_code (ifn) != ERROR_MARK)
9695 tree lhs = gimple_get_lhs (call);
9696 if (!VECTOR_TYPE_P (TREE_TYPE (lhs)))
9698 tree else_arg
9699 = gimple_call_arg (call, gimple_call_num_args (call) - 1);
9700 gimple *new_stmt = gimple_build_assign (lhs, else_arg);
9701 gsi_replace (&gsi, new_stmt, true);
9705 } /* BBs in loop */
9707 /* The vectorization factor is always > 1, so if we use an IV increment
9708 of 1, a zero NITERS becomes a nonzero NITERS_VECTOR. */
9709 if (integer_onep (step_vector))
9710 niters_no_overflow = true;
9711 vect_set_loop_condition (loop, loop_vinfo, niters_vector, step_vector,
9712 niters_vector_mult_vf, !niters_no_overflow);
9714 unsigned int assumed_vf = vect_vf_for_cost (loop_vinfo);
9715 scale_profile_for_vect_loop (loop, assumed_vf);
9717 /* True if the final iteration might not handle a full vector's
9718 worth of scalar iterations. */
9719 bool final_iter_may_be_partial
9720 = LOOP_VINFO_USING_PARTIAL_VECTORS_P (loop_vinfo);
9721 /* The minimum number of iterations performed by the epilogue. This
9722 is 1 when peeling for gaps because we always need a final scalar
9723 iteration. */
9724 int min_epilogue_iters = LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) ? 1 : 0;
9725 /* +1 to convert latch counts to loop iteration counts,
9726 -min_epilogue_iters to remove iterations that cannot be performed
9727 by the vector code. */
9728 int bias_for_lowest = 1 - min_epilogue_iters;
9729 int bias_for_assumed = bias_for_lowest;
9730 int alignment_npeels = LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
9731 if (alignment_npeels && LOOP_VINFO_USING_PARTIAL_VECTORS_P (loop_vinfo))
9733 /* When the amount of peeling is known at compile time, the first
9734 iteration will have exactly alignment_npeels active elements.
9735 In the worst case it will have at least one. */
9736 int min_first_active = (alignment_npeels > 0 ? alignment_npeels : 1);
9737 bias_for_lowest += lowest_vf - min_first_active;
9738 bias_for_assumed += assumed_vf - min_first_active;
9740 /* In these calculations the "- 1" converts loop iteration counts
9741 back to latch counts. */
9742 if (loop->any_upper_bound)
9744 loop_vec_info main_vinfo = LOOP_VINFO_ORIG_LOOP_INFO (loop_vinfo);
9745 loop->nb_iterations_upper_bound
9746 = (final_iter_may_be_partial
9747 ? wi::udiv_ceil (loop->nb_iterations_upper_bound + bias_for_lowest,
9748 lowest_vf) - 1
9749 : wi::udiv_floor (loop->nb_iterations_upper_bound + bias_for_lowest,
9750 lowest_vf) - 1);
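	  /* Worked example with invented numbers: without partial vectors or
	     peeling for gaps, BIAS_FOR_LOWEST is 1.  A scalar latch bound of
	     99 (at most 100 iterations) and a LOWEST_VF of 4 gives
	     floor ((99 + 1) / 4) - 1 = 24, i.e. at most 25 vector
	     iterations.  */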
9751 if (main_vinfo)
9753 unsigned int bound;
9754 poly_uint64 main_iters
9755 = upper_bound (LOOP_VINFO_VECT_FACTOR (main_vinfo),
9756 LOOP_VINFO_COST_MODEL_THRESHOLD (main_vinfo));
9757 main_iters
9758 = upper_bound (main_iters,
9759 LOOP_VINFO_VERSIONING_THRESHOLD (main_vinfo));
9760 if (can_div_away_from_zero_p (main_iters,
9761 LOOP_VINFO_VECT_FACTOR (loop_vinfo),
9762 &bound))
9763 loop->nb_iterations_upper_bound
9764 = wi::umin ((widest_int) (bound - 1),
9765 loop->nb_iterations_upper_bound);
9768 if (loop->any_likely_upper_bound)
9769 loop->nb_iterations_likely_upper_bound
9770 = (final_iter_may_be_partial
9771 ? wi::udiv_ceil (loop->nb_iterations_likely_upper_bound
9772 + bias_for_lowest, lowest_vf) - 1
9773 : wi::udiv_floor (loop->nb_iterations_likely_upper_bound
9774 + bias_for_lowest, lowest_vf) - 1);
9775 if (loop->any_estimate)
9776 loop->nb_iterations_estimate
9777 = (final_iter_may_be_partial
9778 ? wi::udiv_ceil (loop->nb_iterations_estimate + bias_for_assumed,
9779 assumed_vf) - 1
9780 : wi::udiv_floor (loop->nb_iterations_estimate + bias_for_assumed,
9781 assumed_vf) - 1);
9783 if (dump_enabled_p ())
9785 if (!LOOP_VINFO_EPILOGUE_P (loop_vinfo))
9787 dump_printf_loc (MSG_NOTE, vect_location,
9788 "LOOP VECTORIZED\n");
9789 if (loop->inner)
9790 dump_printf_loc (MSG_NOTE, vect_location,
9791 "OUTER LOOP VECTORIZED\n");
9792 dump_printf (MSG_NOTE, "\n");
9794 else
9795 dump_printf_loc (MSG_NOTE, vect_location,
9796 "LOOP EPILOGUE VECTORIZED (MODE=%s)\n",
9797 GET_MODE_NAME (loop_vinfo->vector_mode));
9800 /* Loops vectorized with a variable factor won't benefit from
9801 unrolling/peeling. */
9802 if (!vf.is_constant ())
9804 loop->unroll = 1;
9805 if (dump_enabled_p ())
9806 dump_printf_loc (MSG_NOTE, vect_location, "Disabling unrolling due to"
9807 " variable-length vectorization factor\n");
9809 /* Free SLP instances here because otherwise stmt reference counting
9810 won't work. */
9811 slp_instance instance;
9812 FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), i, instance)
9813 vect_free_slp_instance (instance);
9814 LOOP_VINFO_SLP_INSTANCES (loop_vinfo).release ();
9815 /* Clear the safelen field since its value is invalid after vectorization:
9816 the vectorized loop can have loop-carried dependencies. */
9817 loop->safelen = 0;
9819 if (epilogue)
9821 update_epilogue_loop_vinfo (epilogue, advance);
9823 epilogue->simduid = loop->simduid;
9824 epilogue->force_vectorize = loop->force_vectorize;
9825 epilogue->dont_vectorize = false;
9828 return epilogue;
9831 /* The code below performs a simple optimization - it reverts
9832 if-conversion for masked stores: if the mask of a store is zero, do not
9833 perform the store and, if possible, do not perform the producers of the
9834 stored values either. For example,
9835 for (i=0; i<n; i++)
9836 if (c[i])
9838 p1[i] += 1;
9839 p2[i] = p3[i] +2;
9841 this transformation will produce the following semi-hammock:
9843 if (!mask__ifc__42.18_165 == { 0, 0, 0, 0, 0, 0, 0, 0 })
9845 vect__11.19_170 = MASK_LOAD (vectp_p1.20_168, 0B, mask__ifc__42.18_165);
9846 vect__12.22_172 = vect__11.19_170 + vect_cst__171;
9847 MASK_STORE (vectp_p1.23_175, 0B, mask__ifc__42.18_165, vect__12.22_172);
9848 vect__18.25_182 = MASK_LOAD (vectp_p3.26_180, 0B, mask__ifc__42.18_165);
9849 vect__19.28_184 = vect__18.25_182 + vect_cst__183;
9850 MASK_STORE (vectp_p2.29_187, 0B, mask__ifc__42.18_165, vect__19.28_184);
9854 void
9855 optimize_mask_stores (class loop *loop)
9857 basic_block *bbs = get_loop_body (loop);
9858 unsigned nbbs = loop->num_nodes;
9859 unsigned i;
9860 basic_block bb;
9861 class loop *bb_loop;
9862 gimple_stmt_iterator gsi;
9863 gimple *stmt;
9864 auto_vec<gimple *> worklist;
9865 auto_purge_vect_location sentinel;
9867 vect_location = find_loop_location (loop);
9868 /* Pick up all masked stores in loop if any. */
9869 for (i = 0; i < nbbs; i++)
9871 bb = bbs[i];
9872 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
9873 gsi_next (&gsi))
9875 stmt = gsi_stmt (gsi);
9876 if (gimple_call_internal_p (stmt, IFN_MASK_STORE))
9877 worklist.safe_push (stmt);
9881 free (bbs);
9882 if (worklist.is_empty ())
9883 return;
9885 /* Loop has masked stores. */
9886 while (!worklist.is_empty ())
9888 gimple *last, *last_store;
9889 edge e, efalse;
9890 tree mask;
9891 basic_block store_bb, join_bb;
9892 gimple_stmt_iterator gsi_to;
9893 tree vdef, new_vdef;
9894 gphi *phi;
9895 tree vectype;
9896 tree zero;
9898 last = worklist.pop ();
9899 mask = gimple_call_arg (last, 2);
9900 bb = gimple_bb (last);
9901 /* Create then_bb and the if-then structure in the CFG; then_bb belongs
9902 to the same loop as if_bb. That loop can be different from LOOP when a
9903 two-level loop nest is vectorized and the mask_store belongs to the
9904 inner one. */
9905 e = split_block (bb, last);
9906 bb_loop = bb->loop_father;
9907 gcc_assert (loop == bb_loop || flow_loop_nested_p (loop, bb_loop));
9908 join_bb = e->dest;
9909 store_bb = create_empty_bb (bb);
9910 add_bb_to_loop (store_bb, bb_loop);
9911 e->flags = EDGE_TRUE_VALUE;
9912 efalse = make_edge (bb, store_bb, EDGE_FALSE_VALUE);
9913 /* Put STORE_BB on the unlikely path, matching EFALSE's probability. */
9914 efalse->probability = profile_probability::unlikely ();
9915 store_bb->count = efalse->count ();
9916 make_single_succ_edge (store_bb, join_bb, EDGE_FALLTHRU);
9917 if (dom_info_available_p (CDI_DOMINATORS))
9918 set_immediate_dominator (CDI_DOMINATORS, store_bb, bb);
9919 if (dump_enabled_p ())
9920 dump_printf_loc (MSG_NOTE, vect_location,
9921 "Create new block %d to sink mask stores.",
9922 store_bb->index);
9923 /* Create vector comparison with boolean result. */
9924 vectype = TREE_TYPE (mask);
9925 zero = build_zero_cst (vectype);
9926 stmt = gimple_build_cond (EQ_EXPR, mask, zero, NULL_TREE, NULL_TREE);
9927 gsi = gsi_last_bb (bb);
9928 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
9929 /* Create new PHI node for vdef of the last masked store:
9930 .MEM_2 = VDEF <.MEM_1>
9931 will be converted to
9932 .MEM_3 = VDEF <.MEM_1>
9933 and new PHI node will be created in join bb
9934 .MEM_2 = PHI <.MEM_1, .MEM_3>
9936 vdef = gimple_vdef (last);
9937 new_vdef = make_ssa_name (gimple_vop (cfun), last);
9938 gimple_set_vdef (last, new_vdef);
9939 phi = create_phi_node (vdef, join_bb);
9940 add_phi_arg (phi, new_vdef, EDGE_SUCC (store_bb, 0), UNKNOWN_LOCATION);
9942 /* Put all masked stores with the same mask to STORE_BB if possible. */
9943 while (true)
9945 gimple_stmt_iterator gsi_from;
9946 gimple *stmt1 = NULL;
9948 /* Move masked store to STORE_BB. */
9949 last_store = last;
9950 gsi = gsi_for_stmt (last);
9951 gsi_from = gsi;
9952 /* Shift GSI to the previous stmt for further traversal. */
9953 gsi_prev (&gsi);
9954 gsi_to = gsi_start_bb (store_bb);
9955 gsi_move_before (&gsi_from, &gsi_to);
9957 /* Set GSI_TO to the start of the now non-empty block. */
9957 gsi_to = gsi_start_bb (store_bb);
9958 if (dump_enabled_p ())
9959 dump_printf_loc (MSG_NOTE, vect_location,
9960 "Move stmt to created bb\n%G", last);
9961 /* Move all stored value producers if possible. */
9962 while (!gsi_end_p (gsi))
9964 tree lhs;
9965 imm_use_iterator imm_iter;
9966 use_operand_p use_p;
9967 bool res;
9969 /* Skip debug statements. */
9970 if (is_gimple_debug (gsi_stmt (gsi)))
9972 gsi_prev (&gsi);
9973 continue;
9975 stmt1 = gsi_stmt (gsi);
9976 /* Do not consider statements writing to memory or having
9977 a volatile operand. */
9978 if (gimple_vdef (stmt1)
9979 || gimple_has_volatile_ops (stmt1))
9980 break;
9981 gsi_from = gsi;
9982 gsi_prev (&gsi);
9983 lhs = gimple_get_lhs (stmt1);
9984 if (!lhs)
9985 break;
9987 /* LHS of vectorized stmt must be SSA_NAME. */
9988 if (TREE_CODE (lhs) != SSA_NAME)
9989 break;
9991 if (!VECTOR_TYPE_P (TREE_TYPE (lhs)))
9993 /* Remove dead scalar statement. */
9994 if (has_zero_uses (lhs))
9996 gsi_remove (&gsi_from, true);
9997 continue;
10001 /* Check that LHS does not have uses outside of STORE_BB. */
10002 res = true;
10003 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
10005 gimple *use_stmt;
10006 use_stmt = USE_STMT (use_p);
10007 if (is_gimple_debug (use_stmt))
10008 continue;
10009 if (gimple_bb (use_stmt) != store_bb)
10011 res = false;
10012 break;
10015 if (!res)
10016 break;
10018 if (gimple_vuse (stmt1)
10019 && gimple_vuse (stmt1) != gimple_vuse (last_store))
10020 break;
10022 /* Can move STMT1 to STORE_BB. */
10023 if (dump_enabled_p ())
10024 dump_printf_loc (MSG_NOTE, vect_location,
10025 "Move stmt to created bb\n%G", stmt1);
10026 gsi_move_before (&gsi_from, &gsi_to);
10027 /* Shift GSI_TO for further insertion. */
10028 gsi_prev (&gsi_to);
10030 /* Put other masked stores with the same mask to STORE_BB. */
10031 if (worklist.is_empty ()
10032 || gimple_call_arg (worklist.last (), 2) != mask
10033 || worklist.last () != stmt1)
10034 break;
10035 last = worklist.pop ();
10037 add_phi_arg (phi, gimple_vuse (last_store), e, UNKNOWN_LOCATION);
10041 /* Decide whether it is possible to use a zero-based induction variable
10042 when vectorizing LOOP_VINFO with partial vectors. If it is, return
10043 the value that the induction variable must be able to hold in order
10044 to ensure that the rgroups eventually have no active vector elements.
10045 Return -1 otherwise. */
10047 widest_int
10048 vect_iv_limit_for_partial_vectors (loop_vec_info loop_vinfo)
10050 tree niters_skip = LOOP_VINFO_MASK_SKIP_NITERS (loop_vinfo);
10051 class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
10052 unsigned HOST_WIDE_INT max_vf = vect_max_vf (loop_vinfo);
10054 /* Calculate the value that the induction variable must be able
10055 to hit in order to ensure that we end the loop with an all-false mask.
10056 This involves adding the maximum number of inactive trailing scalar
10057 iterations. */
10058 widest_int iv_limit = -1;
10059 if (max_loop_iterations (loop, &iv_limit))
10061 if (niters_skip)
10063 /* Add the maximum number of skipped iterations to the
10064 maximum iteration count. */
10065 if (TREE_CODE (niters_skip) == INTEGER_CST)
10066 iv_limit += wi::to_widest (niters_skip);
10067 else
10068 iv_limit += max_vf - 1;
10070 else if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo))
10071 /* Make a conservatively-correct assumption. */
10072 iv_limit += max_vf - 1;
10074 /* IV_LIMIT is the maximum number of latch iterations, which is also
10075 the maximum in-range IV value. Round this value down to the previous
10076 vector alignment boundary and then add an extra full iteration. */
10077 poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
10078 iv_limit = (iv_limit & -(int) known_alignment (vf)) + max_vf;
10080 return iv_limit;
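/* A worked example with arbitrary numbers: for a constant VF of 4
   (max_vf = 4, known_alignment (vf) = 4), no skipped iterations and no
   peeling for alignment, a maximum latch count of 1002 gives
   (1002 & -4) + 4 = 1000 + 4 = 1004, so the IV must be able to hold
   1004 for the rgroups to end with no active elements.  */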
10083 /* For the given rgroup_controls RGC, check whether an induction variable
10084 would ever hit a value that produces a set of all-false masks or zero
10085 lengths before wrapping around. Return true if it's possible to wrap
10086 around before hitting the desirable value, otherwise return false. */
10088 bool
10089 vect_rgroup_iv_might_wrap_p (loop_vec_info loop_vinfo, rgroup_controls *rgc)
10091 widest_int iv_limit = vect_iv_limit_for_partial_vectors (loop_vinfo);
10093 if (iv_limit == -1)
10094 return true;
10096 tree compare_type = LOOP_VINFO_RGROUP_COMPARE_TYPE (loop_vinfo);
10097 unsigned int compare_precision = TYPE_PRECISION (compare_type);
10098 unsigned nitems = rgc->max_nscalars_per_iter * rgc->factor;
10100 if (wi::min_precision (iv_limit * nitems, UNSIGNED) > compare_precision)
10101 return true;
10103 return false;
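/* Continuing the example above (the numbers are illustrative): with
   IV_LIMIT = 1004 and NITEMS = 2, the IV must count up to 2008, which
   needs 11 bits; an unsigned 32-bit compare type is therefore safe and
   we return false.  With a 16-bit compare type and a product needing 17
   bits we would return true instead.  */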