Avoid is_constant calls in vectorizable_bswap
[official-gcc.git] / gcc / tree-vect-loop.c
blob d5e35521cefb9dd13b0059fe11e9ec0dc3d6caca
1 /* Loop Vectorization
2 Copyright (C) 2003-2018 Free Software Foundation, Inc.
3 Contributed by Dorit Naishlos <dorit@il.ibm.com> and
4 Ira Rosen <irar@il.ibm.com>
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "backend.h"
26 #include "target.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "gimple.h"
30 #include "cfghooks.h"
31 #include "tree-pass.h"
32 #include "ssa.h"
33 #include "optabs-tree.h"
34 #include "diagnostic-core.h"
35 #include "fold-const.h"
36 #include "stor-layout.h"
37 #include "cfganal.h"
38 #include "gimplify.h"
39 #include "gimple-iterator.h"
40 #include "gimplify-me.h"
41 #include "tree-ssa-loop-ivopts.h"
42 #include "tree-ssa-loop-manip.h"
43 #include "tree-ssa-loop-niter.h"
44 #include "tree-ssa-loop.h"
45 #include "cfgloop.h"
46 #include "params.h"
47 #include "tree-scalar-evolution.h"
48 #include "tree-vectorizer.h"
49 #include "gimple-fold.h"
50 #include "cgraph.h"
51 #include "tree-cfg.h"
52 #include "tree-if-conv.h"
53 #include "internal-fn.h"
54 #include "tree-vector-builder.h"
55 #include "vec-perm-indices.h"
56 #include "tree-eh.h"
58 /* Loop Vectorization Pass.
60 This pass tries to vectorize loops.
62 For example, the vectorizer transforms the following simple loop:
64 short a[N]; short b[N]; short c[N]; int i;
66 for (i=0; i<N; i++){
67 a[i] = b[i] + c[i];
70 as if it were manually vectorized by rewriting the source code into:
72 typedef int __attribute__((mode(V8HI))) v8hi;
73 short a[N]; short b[N]; short c[N]; int i;
74 v8hi *pa = (v8hi*)a, *pb = (v8hi*)b, *pc = (v8hi*)c;
75 v8hi va, vb, vc;
77 for (i=0; i<N/8; i++){
78 vb = pb[i];
79 vc = pc[i];
80 va = vb + vc;
81 pa[i] = va;
84 The main entry to this pass is vectorize_loops(), in which
85 the vectorizer applies a set of analyses on a given set of loops,
86 followed by the actual vectorization transformation for the loops that
87 had successfully passed the analysis phase.
88 Throughout this pass we make a distinction between two types of
89 data: scalars (which are represented by SSA_NAMES), and memory references
90 ("data-refs"). These two types of data require different handling both
91 during analysis and transformation. The types of data-refs that the
92 vectorizer currently supports are ARRAY_REFS whose base is an array DECL
93 (not a pointer), and INDIRECT_REFS through pointers; both array and pointer
94 accesses are required to have a simple (consecutive) access pattern.
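 For instance (an illustrative case, not part of the original text): an
 access such as a[i] in the loop body has a unit-stride, consecutive
 pattern and fits this model, whereas an access such as a[2*i] is strided
 and does not match the simple consecutive pattern described above.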
96 Analysis phase:
97 ===============
98 The driver for the analysis phase is vect_analyze_loop().
99 It applies a set of analyses, some of which rely on the scalar evolution
100 analyzer (scev) developed by Sebastian Pop.
102 During the analysis phase the vectorizer records some information
103 per stmt in a "stmt_vec_info" struct which is attached to each stmt in the
104 loop, as well as general information about the loop as a whole, which is
105 recorded in a "loop_vec_info" struct attached to each loop.
107 Transformation phase:
108 =====================
109 The loop transformation phase scans all the stmts in the loop, and
110 creates a vector stmt (or a sequence of stmts) for each scalar stmt S in
111 the loop that needs to be vectorized. It inserts the vector code sequence
112 just before the scalar stmt S, and records a pointer to the vector code
113 in STMT_VINFO_VEC_STMT (stmt_info) (stmt_info is the stmt_vec_info struct
114 attached to S). This pointer will be used for the vectorization of following
115 stmts which use the def of stmt S. Stmt S is removed if it writes to memory;
116 otherwise, we rely on dead code elimination for removing it.
118 For example, say stmt S1 was vectorized into stmt VS1:
120 VS1: vb = px[i];
121 S1: b = x[i]; STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
122 S2: a = b;
124 To vectorize stmt S2, the vectorizer first finds the stmt that defines
125 the operand 'b' (S1), and gets the relevant vector def 'vb' from the
126 vector stmt VS1 pointed to by STMT_VINFO_VEC_STMT (stmt_info (S1)). The
127 resulting sequence would be:
129 VS1: vb = px[i];
130 S1: b = x[i]; STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
131 VS2: va = vb;
132 S2: a = b; STMT_VINFO_VEC_STMT (stmt_info (S2)) = VS2
134 Operands that are not SSA_NAMEs are data-refs that appear in
135 load/store operations (like 'x[i]' in S1), and are handled differently.
137 Target modeling:
138 =================
139 Currently the only target specific information that is used is the
140 size of the vector (in bytes) - "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD".
141 Targets that can support different sizes of vectors will, for now, need
142 to specify one value for "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD". More
143 flexibility will be added in the future.
145 Since we only vectorize operations whose vector form can be
146 expressed using existing tree codes, to verify that an operation is
147 supported, the vectorizer checks the relevant optab at the relevant
148 machine_mode (e.g., optab_handler (add_optab, V8HImode)). If
149 the value found is CODE_FOR_nothing, then there's no target support, and
150 we can't vectorize the stmt.
152 For additional information on this project see:
153 http://gcc.gnu.org/projects/tree-ssa/vectorization.html
156 static void vect_estimate_min_profitable_iters (loop_vec_info, int *, int *);
158 /* Subroutine of vect_determine_vf_for_stmt that handles only one
159 statement. VECTYPE_MAYBE_SET_P is true if STMT_VINFO_VECTYPE
160 may already be set for general statements (not just data refs). */
162 static bool
163 vect_determine_vf_for_stmt_1 (stmt_vec_info stmt_info,
164 bool vectype_maybe_set_p,
165 poly_uint64 *vf,
166 vec<stmt_vec_info > *mask_producers)
168 gimple *stmt = stmt_info->stmt;
170 if ((!STMT_VINFO_RELEVANT_P (stmt_info)
171 && !STMT_VINFO_LIVE_P (stmt_info))
172 || gimple_clobber_p (stmt))
174 if (dump_enabled_p ())
175 dump_printf_loc (MSG_NOTE, vect_location, "skip.\n");
176 return true;
179 tree stmt_vectype, nunits_vectype;
180 if (!vect_get_vector_types_for_stmt (stmt_info, &stmt_vectype,
181 &nunits_vectype))
182 return false;
184 if (stmt_vectype)
186 if (STMT_VINFO_VECTYPE (stmt_info))
187 /* The only case when a vectype had already been set is for stmts
188 that contain a data ref, or for "pattern-stmts" (stmts generated
189 by the vectorizer to represent/replace a certain idiom). */
190 gcc_assert ((STMT_VINFO_DATA_REF (stmt_info)
191 || vectype_maybe_set_p)
192 && STMT_VINFO_VECTYPE (stmt_info) == stmt_vectype);
193 else if (stmt_vectype == boolean_type_node)
194 mask_producers->safe_push (stmt_info);
195 else
196 STMT_VINFO_VECTYPE (stmt_info) = stmt_vectype;
199 if (nunits_vectype)
200 vect_update_max_nunits (vf, nunits_vectype);
202 return true;
205 /* Subroutine of vect_determine_vectorization_factor. Set the vector
206 types of STMT_INFO and all attached pattern statements and update
207 the vectorization factor VF accordingly. If some of the statements
208 produce a mask result whose vector type can only be calculated later,
209 add them to MASK_PRODUCERS. Return true on success or false if
210 something prevented vectorization. */
212 static bool
213 vect_determine_vf_for_stmt (stmt_vec_info stmt_info, poly_uint64 *vf,
214 vec<stmt_vec_info > *mask_producers)
216 vec_info *vinfo = stmt_info->vinfo;
217 if (dump_enabled_p ())
219 dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: ");
220 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0);
222 if (!vect_determine_vf_for_stmt_1 (stmt_info, false, vf, mask_producers))
223 return false;
225 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
226 && STMT_VINFO_RELATED_STMT (stmt_info))
228 gimple *pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
229 stmt_info = STMT_VINFO_RELATED_STMT (stmt_info);
231 /* If a pattern statement has def stmts, analyze them too. */
232 for (gimple_stmt_iterator si = gsi_start (pattern_def_seq);
233 !gsi_end_p (si); gsi_next (&si))
235 stmt_vec_info def_stmt_info = vinfo->lookup_stmt (gsi_stmt (si));
236 if (dump_enabled_p ())
238 dump_printf_loc (MSG_NOTE, vect_location,
239 "==> examining pattern def stmt: ");
240 dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
241 def_stmt_info->stmt, 0);
243 if (!vect_determine_vf_for_stmt_1 (def_stmt_info, true,
244 vf, mask_producers))
245 return false;
248 if (dump_enabled_p ())
250 dump_printf_loc (MSG_NOTE, vect_location,
251 "==> examining pattern statement: ");
252 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0);
254 if (!vect_determine_vf_for_stmt_1 (stmt_info, true, vf, mask_producers))
255 return false;
258 return true;
261 /* Function vect_determine_vectorization_factor
263 Determine the vectorization factor (VF). VF is the number of data elements
264 that are operated upon in parallel in a single iteration of the vectorized
265 loop. For example, when vectorizing a loop that operates on 4-byte elements,
266 on a target with a vector size (VS) of 16 bytes, the VF is set to 4, since 4
267 elements can fit in a single vector register.
269 We currently support vectorization of loops in which all types operated upon
270 are of the same size. Therefore this function currently sets VF according to
271 the size of the types operated upon, and fails if there are multiple sizes
272 in the loop.
274 VF is also the factor by which the loop iterations are strip-mined, e.g.:
275 original loop:
276 for (i=0; i<N; i++){
277 a[i] = b[i] + c[i];
280 vectorized loop:
281 for (i=0; i<N; i+=VF){
282 a[i:VF] = b[i:VF] + c[i:VF];
286 static bool
287 vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
289 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
290 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
291 unsigned nbbs = loop->num_nodes;
292 poly_uint64 vectorization_factor = 1;
293 tree scalar_type = NULL_TREE;
294 gphi *phi;
295 tree vectype;
296 stmt_vec_info stmt_info;
297 unsigned i;
298 auto_vec<stmt_vec_info> mask_producers;
300 DUMP_VECT_SCOPE ("vect_determine_vectorization_factor");
302 for (i = 0; i < nbbs; i++)
304 basic_block bb = bbs[i];
306 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
307 gsi_next (&si))
309 phi = si.phi ();
310 stmt_info = loop_vinfo->lookup_stmt (phi);
311 if (dump_enabled_p ())
313 dump_printf_loc (MSG_NOTE, vect_location, "==> examining phi: ");
314 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
317 gcc_assert (stmt_info);
319 if (STMT_VINFO_RELEVANT_P (stmt_info)
320 || STMT_VINFO_LIVE_P (stmt_info))
322 gcc_assert (!STMT_VINFO_VECTYPE (stmt_info));
323 scalar_type = TREE_TYPE (PHI_RESULT (phi));
325 if (dump_enabled_p ())
327 dump_printf_loc (MSG_NOTE, vect_location,
328 "get vectype for scalar type: ");
329 dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
330 dump_printf (MSG_NOTE, "\n");
333 vectype = get_vectype_for_scalar_type (scalar_type);
334 if (!vectype)
336 if (dump_enabled_p ())
338 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
339 "not vectorized: unsupported "
340 "data-type ");
341 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
342 scalar_type);
343 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
345 return false;
347 STMT_VINFO_VECTYPE (stmt_info) = vectype;
349 if (dump_enabled_p ())
351 dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
352 dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
353 dump_printf (MSG_NOTE, "\n");
356 if (dump_enabled_p ())
358 dump_printf_loc (MSG_NOTE, vect_location, "nunits = ");
359 dump_dec (MSG_NOTE, TYPE_VECTOR_SUBPARTS (vectype));
360 dump_printf (MSG_NOTE, "\n");
363 vect_update_max_nunits (&vectorization_factor, vectype);
367 for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
368 gsi_next (&si))
370 stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
371 if (!vect_determine_vf_for_stmt (stmt_info, &vectorization_factor,
372 &mask_producers))
373 return false;
377 /* TODO: Analyze cost. Decide if worth while to vectorize. */
378 if (dump_enabled_p ())
380 dump_printf_loc (MSG_NOTE, vect_location, "vectorization factor = ");
381 dump_dec (MSG_NOTE, vectorization_factor);
382 dump_printf (MSG_NOTE, "\n");
385 if (known_le (vectorization_factor, 1U))
387 if (dump_enabled_p ())
388 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
389 "not vectorized: unsupported data-type\n");
390 return false;
392 LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;
394 for (i = 0; i < mask_producers.length (); i++)
396 stmt_info = mask_producers[i];
397 tree mask_type = vect_get_mask_type_for_stmt (stmt_info);
398 if (!mask_type)
399 return false;
400 STMT_VINFO_VECTYPE (stmt_info) = mask_type;
403 return true;
407 /* Function vect_is_simple_iv_evolution.
409 FORNOW: A simple evolution of an induction variable in the loop is
410 considered a polynomial evolution. */
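/* For illustration (a hypothetical IV, not taken from the surrounding code):
   for  i_1 = PHI <0(preheader), i_2(latch)>;  i_2 = i_1 + 4;  scev computes
   the access function {0, +, 4}_loop, so *INIT is 0 and *STEP is 4.  */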
412 static bool
413 vect_is_simple_iv_evolution (unsigned loop_nb, tree access_fn, tree * init,
414 tree * step)
416 tree init_expr;
417 tree step_expr;
418 tree evolution_part = evolution_part_in_loop_num (access_fn, loop_nb);
419 basic_block bb;
421 /* When there is no evolution in this loop, the evolution function
422 is not "simple". */
423 if (evolution_part == NULL_TREE)
424 return false;
426 /* When the evolution is a polynomial of degree >= 2
427 the evolution function is not "simple". */
428 if (tree_is_chrec (evolution_part))
429 return false;
431 step_expr = evolution_part;
432 init_expr = unshare_expr (initial_condition_in_loop_num (access_fn, loop_nb));
434 if (dump_enabled_p ())
436 dump_printf_loc (MSG_NOTE, vect_location, "step: ");
437 dump_generic_expr (MSG_NOTE, TDF_SLIM, step_expr);
438 dump_printf (MSG_NOTE, ", init: ");
439 dump_generic_expr (MSG_NOTE, TDF_SLIM, init_expr);
440 dump_printf (MSG_NOTE, "\n");
443 *init = init_expr;
444 *step = step_expr;
446 if (TREE_CODE (step_expr) != INTEGER_CST
447 && (TREE_CODE (step_expr) != SSA_NAME
448 || ((bb = gimple_bb (SSA_NAME_DEF_STMT (step_expr)))
449 && flow_bb_inside_loop_p (get_loop (cfun, loop_nb), bb))
450 || (!INTEGRAL_TYPE_P (TREE_TYPE (step_expr))
451 && (!SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr))
452 || !flag_associative_math)))
453 && (TREE_CODE (step_expr) != REAL_CST
454 || !flag_associative_math))
456 if (dump_enabled_p ())
457 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
458 "step unknown.\n");
459 return false;
462 return true;
465 /* Return true if PHI, described by STMT_INFO, is the inner PHI in
466 what we are assuming is a double reduction. For example, given
467 a structure like this:
469 outer1:
470 x_1 = PHI <x_4(outer2), ...>;
473 inner:
474 x_2 = PHI <x_1(outer1), ...>;
476 x_3 = ...;
479 outer2:
480 x_4 = PHI <x_3(inner)>;
483 outer loop analysis would treat x_1 as a double reduction phi and
484 this function would then return true for x_2. */
486 static bool
487 vect_inner_phi_in_double_reduction_p (stmt_vec_info stmt_info, gphi *phi)
489 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
490 use_operand_p use_p;
491 ssa_op_iter op_iter;
492 FOR_EACH_PHI_ARG (use_p, phi, op_iter, SSA_OP_USE)
493 if (stmt_vec_info def_info = loop_vinfo->lookup_def (USE_FROM_PTR (use_p)))
494 if (STMT_VINFO_DEF_TYPE (def_info) == vect_double_reduction_def)
495 return true;
496 return false;
499 /* Function vect_analyze_scalar_cycles_1.
501 Examine the cross iteration def-use cycles of scalar variables
502 in LOOP. LOOP_VINFO represents the loop that is now being
503 considered for vectorization (can be LOOP, or an outer-loop
504 enclosing LOOP). */
506 static void
507 vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
509 basic_block bb = loop->header;
510 tree init, step;
511 auto_vec<stmt_vec_info, 64> worklist;
512 gphi_iterator gsi;
513 bool double_reduc;
515 DUMP_VECT_SCOPE ("vect_analyze_scalar_cycles");
517 /* First - identify all inductions. Reduction detection assumes that all the
518 inductions have been identified; therefore, this order must not be
519 changed. */
520 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
522 gphi *phi = gsi.phi ();
523 tree access_fn = NULL;
524 tree def = PHI_RESULT (phi);
525 stmt_vec_info stmt_vinfo = loop_vinfo->lookup_stmt (phi);
527 if (dump_enabled_p ())
529 dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: ");
530 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
533 /* Skip virtual phi's. The data dependences that are associated with
534 virtual defs/uses (i.e., memory accesses) are analyzed elsewhere. */
535 if (virtual_operand_p (def))
536 continue;
538 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_unknown_def_type;
540 /* Analyze the evolution function. */
541 access_fn = analyze_scalar_evolution (loop, def);
542 if (access_fn)
544 STRIP_NOPS (access_fn);
545 if (dump_enabled_p ())
547 dump_printf_loc (MSG_NOTE, vect_location,
548 "Access function of PHI: ");
549 dump_generic_expr (MSG_NOTE, TDF_SLIM, access_fn);
550 dump_printf (MSG_NOTE, "\n");
552 STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo)
553 = initial_condition_in_loop_num (access_fn, loop->num);
554 STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo)
555 = evolution_part_in_loop_num (access_fn, loop->num);
558 if (!access_fn
559 || vect_inner_phi_in_double_reduction_p (stmt_vinfo, phi)
560 || !vect_is_simple_iv_evolution (loop->num, access_fn, &init, &step)
561 || (LOOP_VINFO_LOOP (loop_vinfo) != loop
562 && TREE_CODE (step) != INTEGER_CST))
564 worklist.safe_push (stmt_vinfo);
565 continue;
568 gcc_assert (STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo)
569 != NULL_TREE);
570 gcc_assert (STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo) != NULL_TREE);
572 if (dump_enabled_p ())
573 dump_printf_loc (MSG_NOTE, vect_location, "Detected induction.\n");
574 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_induction_def;
578 /* Second - identify all reductions and nested cycles. */
579 while (worklist.length () > 0)
581 stmt_vec_info stmt_vinfo = worklist.pop ();
582 gphi *phi = as_a <gphi *> (stmt_vinfo->stmt);
583 tree def = PHI_RESULT (phi);
585 if (dump_enabled_p ())
587 dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: ");
588 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
591 gcc_assert (!virtual_operand_p (def)
592 && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_unknown_def_type);
594 stmt_vec_info reduc_stmt_info
595 = vect_force_simple_reduction (loop_vinfo, stmt_vinfo,
596 &double_reduc, false);
597 if (reduc_stmt_info)
599 if (double_reduc)
601 if (dump_enabled_p ())
602 dump_printf_loc (MSG_NOTE, vect_location,
603 "Detected double reduction.\n");
605 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_double_reduction_def;
606 STMT_VINFO_DEF_TYPE (reduc_stmt_info)
607 = vect_double_reduction_def;
609 else
611 if (loop != LOOP_VINFO_LOOP (loop_vinfo))
613 if (dump_enabled_p ())
614 dump_printf_loc (MSG_NOTE, vect_location,
615 "Detected vectorizable nested cycle.\n");
617 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_nested_cycle;
618 STMT_VINFO_DEF_TYPE (reduc_stmt_info) = vect_nested_cycle;
620 else
622 if (dump_enabled_p ())
623 dump_printf_loc (MSG_NOTE, vect_location,
624 "Detected reduction.\n");
626 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_reduction_def;
627 STMT_VINFO_DEF_TYPE (reduc_stmt_info) = vect_reduction_def;
628 /* Store the reduction cycles for possible vectorization in
629 loop-aware SLP if it was not detected as reduction
630 chain. */
631 if (! REDUC_GROUP_FIRST_ELEMENT (reduc_stmt_info))
632 LOOP_VINFO_REDUCTIONS (loop_vinfo).safe_push
633 (reduc_stmt_info);
637 else
638 if (dump_enabled_p ())
639 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
640 "Unknown def-use cycle pattern.\n");
645 /* Function vect_analyze_scalar_cycles.
647 Examine the cross iteration def-use cycles of scalar variables, by
648 analyzing the loop-header PHIs of scalar variables. Classify each
649 cycle as one of the following: invariant, induction, reduction, unknown.
650 We do that for the loop represented by LOOP_VINFO, and also for its
651 inner-loop, if it exists.
652 Examples for scalar cycles:
654 Example1: reduction:
656 loop1:
657 for (i=0; i<N; i++)
658 sum += a[i];
660 Example2: induction:
662 loop2:
663 for (i=0; i<N; i++)
664 a[i] = i; */
666 static void
667 vect_analyze_scalar_cycles (loop_vec_info loop_vinfo)
669 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
671 vect_analyze_scalar_cycles_1 (loop_vinfo, loop);
673 /* When vectorizing an outer-loop, the inner-loop is executed sequentially.
674 Reductions in such inner-loop therefore have different properties than
675 the reductions in the nest that gets vectorized:
676 1. When vectorized, they are executed in the same order as in the original
677 scalar loop, so we can't change the order of computation when
678 vectorizing them.
679 2. FIXME: Inner-loop reductions can be used in the inner-loop, so the
680 current checks are too strict. */
682 if (loop->inner)
683 vect_analyze_scalar_cycles_1 (loop_vinfo, loop->inner);
686 /* Transfer group and reduction information from STMT_INFO to its
687 pattern stmt. */
689 static void
690 vect_fixup_reduc_chain (stmt_vec_info stmt_info)
692 stmt_vec_info firstp = STMT_VINFO_RELATED_STMT (stmt_info);
693 stmt_vec_info stmtp;
694 gcc_assert (!REDUC_GROUP_FIRST_ELEMENT (firstp)
695 && REDUC_GROUP_FIRST_ELEMENT (stmt_info));
696 REDUC_GROUP_SIZE (firstp) = REDUC_GROUP_SIZE (stmt_info);
699 stmtp = STMT_VINFO_RELATED_STMT (stmt_info);
700 REDUC_GROUP_FIRST_ELEMENT (stmtp) = firstp;
701 stmt_info = REDUC_GROUP_NEXT_ELEMENT (stmt_info);
702 if (stmt_info)
703 REDUC_GROUP_NEXT_ELEMENT (stmtp)
704 = STMT_VINFO_RELATED_STMT (stmt_info);
706 while (stmt_info);
707 STMT_VINFO_DEF_TYPE (stmtp) = vect_reduction_def;
710 /* Fixup scalar cycles that now have their stmts detected as patterns. */
712 static void
713 vect_fixup_scalar_cycles_with_patterns (loop_vec_info loop_vinfo)
715 stmt_vec_info first;
716 unsigned i;
718 FOR_EACH_VEC_ELT (LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo), i, first)
719 if (STMT_VINFO_IN_PATTERN_P (first))
721 stmt_vec_info next = REDUC_GROUP_NEXT_ELEMENT (first);
722 while (next)
724 if (! STMT_VINFO_IN_PATTERN_P (next))
725 break;
726 next = REDUC_GROUP_NEXT_ELEMENT (next);
728 /* If not all stmts in the chain are patterns, try to handle
729 the chain without patterns. */
730 if (! next)
732 vect_fixup_reduc_chain (first);
733 LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo)[i]
734 = STMT_VINFO_RELATED_STMT (first);
739 /* Function vect_get_loop_niters.
741 Determine how many iterations the loop is executed and place it
742 in NUMBER_OF_ITERATIONS. Place the number of latch iterations
743 in NUMBER_OF_ITERATIONSM1. Place the condition under which the
744 niter information holds in ASSUMPTIONS.
746 Return the loop exit condition.  */
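/* For illustration (a hypothetical counted loop): for
   for (i = 0; i < n; i++) with n known to be positive, the latch runs
   n - 1 times, so NUMBER_OF_ITERATIONSM1 is n - 1 and NUMBER_OF_ITERATIONS
   (the number of header executions) is n.  */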
749 static gcond *
750 vect_get_loop_niters (struct loop *loop, tree *assumptions,
751 tree *number_of_iterations, tree *number_of_iterationsm1)
753 edge exit = single_exit (loop);
754 struct tree_niter_desc niter_desc;
755 tree niter_assumptions, niter, may_be_zero;
756 gcond *cond = get_loop_exit_condition (loop);
758 *assumptions = boolean_true_node;
759 *number_of_iterationsm1 = chrec_dont_know;
760 *number_of_iterations = chrec_dont_know;
761 DUMP_VECT_SCOPE ("get_loop_niters");
763 if (!exit)
764 return cond;
766 niter = chrec_dont_know;
767 may_be_zero = NULL_TREE;
768 niter_assumptions = boolean_true_node;
769 if (!number_of_iterations_exit_assumptions (loop, exit, &niter_desc, NULL)
770 || chrec_contains_undetermined (niter_desc.niter))
771 return cond;
773 niter_assumptions = niter_desc.assumptions;
774 may_be_zero = niter_desc.may_be_zero;
775 niter = niter_desc.niter;
777 if (may_be_zero && integer_zerop (may_be_zero))
778 may_be_zero = NULL_TREE;
780 if (may_be_zero)
782 if (COMPARISON_CLASS_P (may_be_zero))
784 /* Try to combine may_be_zero with assumptions; this can simplify
785 computation of the niter expression. */
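/* For illustration (hypothetical expressions): if may_be_zero is the
   comparison n_5 <= 0, the assumptions are narrowed to
   old_assumptions && !(n_5 <= 0); in the other branch below, niter is
   instead wrapped as n_5 <= 0 ? 0 : niter.  */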
786 if (niter_assumptions && !integer_nonzerop (niter_assumptions))
787 niter_assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
788 niter_assumptions,
789 fold_build1 (TRUTH_NOT_EXPR,
790 boolean_type_node,
791 may_be_zero));
792 else
793 niter = fold_build3 (COND_EXPR, TREE_TYPE (niter), may_be_zero,
794 build_int_cst (TREE_TYPE (niter), 0),
795 rewrite_to_non_trapping_overflow (niter));
797 may_be_zero = NULL_TREE;
799 else if (integer_nonzerop (may_be_zero))
801 *number_of_iterationsm1 = build_int_cst (TREE_TYPE (niter), 0);
802 *number_of_iterations = build_int_cst (TREE_TYPE (niter), 1);
803 return cond;
805 else
806 return cond;
809 *assumptions = niter_assumptions;
810 *number_of_iterationsm1 = niter;
812 /* We want the number of loop header executions, which is the number
813 of latch executions plus one.
814 ??? For UINT_MAX latch executions this number overflows to zero
815 for loops like do { n++; } while (n != 0); */
816 if (niter && !chrec_contains_undetermined (niter))
817 niter = fold_build2 (PLUS_EXPR, TREE_TYPE (niter), unshare_expr (niter),
818 build_int_cst (TREE_TYPE (niter), 1));
819 *number_of_iterations = niter;
821 return cond;
824 /* Function bb_in_loop_p
826 Used as predicate for dfs order traversal of the loop bbs. */
828 static bool
829 bb_in_loop_p (const_basic_block bb, const void *data)
831 const struct loop *const loop = (const struct loop *)data;
832 if (flow_bb_inside_loop_p (loop, bb))
833 return true;
834 return false;
838 /* Create and initialize a new loop_vec_info struct for LOOP_IN, as well as
839 stmt_vec_info structs for all the stmts in LOOP_IN. */
841 _loop_vec_info::_loop_vec_info (struct loop *loop_in, vec_info_shared *shared)
842 : vec_info (vec_info::loop, init_cost (loop_in), shared),
843 loop (loop_in),
844 bbs (XCNEWVEC (basic_block, loop->num_nodes)),
845 num_itersm1 (NULL_TREE),
846 num_iters (NULL_TREE),
847 num_iters_unchanged (NULL_TREE),
848 num_iters_assumptions (NULL_TREE),
849 th (0),
850 versioning_threshold (0),
851 vectorization_factor (0),
852 max_vectorization_factor (0),
853 mask_skip_niters (NULL_TREE),
854 mask_compare_type (NULL_TREE),
855 unaligned_dr (NULL),
856 peeling_for_alignment (0),
857 ptr_mask (0),
858 ivexpr_map (NULL),
859 slp_unrolling_factor (1),
860 single_scalar_iteration_cost (0),
861 vectorizable (false),
862 can_fully_mask_p (true),
863 fully_masked_p (false),
864 peeling_for_gaps (false),
865 peeling_for_niter (false),
866 operands_swapped (false),
867 no_data_dependencies (false),
868 has_mask_store (false),
869 scalar_loop (NULL),
870 orig_loop_info (NULL)
872 /* CHECKME: We want to visit all BBs before their successors (except for
873 latch blocks, for which this assertion wouldn't hold). In the simple
874 case of the loop forms we allow, a dfs order of the BBs would be the same
875 as reversed postorder traversal, so we are safe. */
877 unsigned int nbbs = dfs_enumerate_from (loop->header, 0, bb_in_loop_p,
878 bbs, loop->num_nodes, loop);
879 gcc_assert (nbbs == loop->num_nodes);
881 for (unsigned int i = 0; i < nbbs; i++)
883 basic_block bb = bbs[i];
884 gimple_stmt_iterator si;
886 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
888 gimple *phi = gsi_stmt (si);
889 gimple_set_uid (phi, 0);
890 add_stmt (phi);
893 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
895 gimple *stmt = gsi_stmt (si);
896 gimple_set_uid (stmt, 0);
897 add_stmt (stmt);
902 /* Free all levels of MASKS. */
904 void
905 release_vec_loop_masks (vec_loop_masks *masks)
907 rgroup_masks *rgm;
908 unsigned int i;
909 FOR_EACH_VEC_ELT (*masks, i, rgm)
910 rgm->masks.release ();
911 masks->release ();
914 /* Free all memory used by the _loop_vec_info, as well as all the
915 stmt_vec_info structs of all the stmts in the loop. */
917 _loop_vec_info::~_loop_vec_info ()
919 int nbbs;
920 gimple_stmt_iterator si;
921 int j;
923 nbbs = loop->num_nodes;
924 for (j = 0; j < nbbs; j++)
926 basic_block bb = bbs[j];
927 for (si = gsi_start_bb (bb); !gsi_end_p (si); )
929 gimple *stmt = gsi_stmt (si);
931 /* We may have broken canonical form by moving a constant
932 into RHS1 of a commutative op. Fix such occurrences. */
933 if (operands_swapped && is_gimple_assign (stmt))
935 enum tree_code code = gimple_assign_rhs_code (stmt);
937 if ((code == PLUS_EXPR
938 || code == POINTER_PLUS_EXPR
939 || code == MULT_EXPR)
940 && CONSTANT_CLASS_P (gimple_assign_rhs1 (stmt)))
941 swap_ssa_operands (stmt,
942 gimple_assign_rhs1_ptr (stmt),
943 gimple_assign_rhs2_ptr (stmt));
944 else if (code == COND_EXPR
945 && CONSTANT_CLASS_P (gimple_assign_rhs2 (stmt)))
947 tree cond_expr = gimple_assign_rhs1 (stmt);
948 enum tree_code cond_code = TREE_CODE (cond_expr);
950 if (TREE_CODE_CLASS (cond_code) == tcc_comparison)
952 bool honor_nans = HONOR_NANS (TREE_OPERAND (cond_expr,
953 0));
954 cond_code = invert_tree_comparison (cond_code,
955 honor_nans);
956 if (cond_code != ERROR_MARK)
958 TREE_SET_CODE (cond_expr, cond_code);
959 swap_ssa_operands (stmt,
960 gimple_assign_rhs2_ptr (stmt),
961 gimple_assign_rhs3_ptr (stmt));
966 gsi_next (&si);
970 free (bbs);
972 release_vec_loop_masks (&masks);
973 delete ivexpr_map;
975 loop->aux = NULL;
978 /* Return an invariant or register for EXPR and emit necessary
979 computations in the LOOP_VINFO loop preheader. */
981 tree
982 cse_and_gimplify_to_preheader (loop_vec_info loop_vinfo, tree expr)
984 if (is_gimple_reg (expr)
985 || is_gimple_min_invariant (expr))
986 return expr;
988 if (! loop_vinfo->ivexpr_map)
989 loop_vinfo->ivexpr_map = new hash_map<tree_operand_hash, tree>;
990 tree &cached = loop_vinfo->ivexpr_map->get_or_insert (expr);
991 if (! cached)
993 gimple_seq stmts = NULL;
994 cached = force_gimple_operand (unshare_expr (expr),
995 &stmts, true, NULL_TREE);
996 if (stmts)
998 edge e = loop_preheader_edge (LOOP_VINFO_LOOP (loop_vinfo));
999 gsi_insert_seq_on_edge_immediate (e, stmts);
1002 return cached;
1005 /* Return true if we can use CMP_TYPE as the comparison type to produce
1006 all masks required to mask LOOP_VINFO. */
1008 static bool
1009 can_produce_all_loop_masks_p (loop_vec_info loop_vinfo, tree cmp_type)
1011 rgroup_masks *rgm;
1012 unsigned int i;
1013 FOR_EACH_VEC_ELT (LOOP_VINFO_MASKS (loop_vinfo), i, rgm)
1014 if (rgm->mask_type != NULL_TREE
1015 && !direct_internal_fn_supported_p (IFN_WHILE_ULT,
1016 cmp_type, rgm->mask_type,
1017 OPTIMIZE_FOR_SPEED))
1018 return false;
1019 return true;
1022 /* Calculate the maximum number of scalars per iteration for every
1023 rgroup in LOOP_VINFO. */
1025 static unsigned int
1026 vect_get_max_nscalars_per_iter (loop_vec_info loop_vinfo)
1028 unsigned int res = 1;
1029 unsigned int i;
1030 rgroup_masks *rgm;
1031 FOR_EACH_VEC_ELT (LOOP_VINFO_MASKS (loop_vinfo), i, rgm)
1032 res = MAX (res, rgm->max_nscalars_per_iter);
1033 return res;
1036 /* Each statement in LOOP_VINFO can be masked where necessary. Check
1037 whether we can actually generate the masks required. Return true if so,
1038 storing the type of the scalar IV in LOOP_VINFO_MASK_COMPARE_TYPE. */
1040 static bool
1041 vect_verify_full_masking (loop_vec_info loop_vinfo)
1043 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1044 unsigned int min_ni_width;
1046 /* Use a normal loop if there are no statements that need masking.
1047 This only happens in rare degenerate cases: it means that the loop
1048 has no loads, no stores, and no live-out values. */
1049 if (LOOP_VINFO_MASKS (loop_vinfo).is_empty ())
1050 return false;
1052 /* Get the maximum number of iterations that is representable
1053 in the counter type. */
1054 tree ni_type = TREE_TYPE (LOOP_VINFO_NITERSM1 (loop_vinfo));
1055 widest_int max_ni = wi::to_widest (TYPE_MAX_VALUE (ni_type)) + 1;
1057 /* Get a more refined estimate for the number of iterations. */
1058 widest_int max_back_edges;
1059 if (max_loop_iterations (loop, &max_back_edges))
1060 max_ni = wi::smin (max_ni, max_back_edges + 1);
1062 /* Account for rgroup masks, in which each bit is replicated N times. */
1063 max_ni *= vect_get_max_nscalars_per_iter (loop_vinfo);
1065 /* Work out how many bits we need to represent the limit. */
1066 min_ni_width = wi::min_precision (max_ni, UNSIGNED);
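/* For illustration (hypothetical numbers): with at most 999 latch
   iterations, max_ni starts at 1000; if the widest rgroup handles 2
   scalars per iteration it becomes 2000, and min_ni_width is 11,
   since 2000 < 2^11.  */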
1068 /* Find a scalar mode for which WHILE_ULT is supported. */
1069 opt_scalar_int_mode cmp_mode_iter;
1070 tree cmp_type = NULL_TREE;
1071 FOR_EACH_MODE_IN_CLASS (cmp_mode_iter, MODE_INT)
1073 unsigned int cmp_bits = GET_MODE_BITSIZE (cmp_mode_iter.require ());
1074 if (cmp_bits >= min_ni_width
1075 && targetm.scalar_mode_supported_p (cmp_mode_iter.require ()))
1077 tree this_type = build_nonstandard_integer_type (cmp_bits, true);
1078 if (this_type
1079 && can_produce_all_loop_masks_p (loop_vinfo, this_type))
1081 /* Although we could stop as soon as we find a valid mode,
1082 it's often better to continue until we hit Pmode, since the
1083 operands to the WHILE are more likely to be reusable in
1084 address calculations. */
1085 cmp_type = this_type;
1086 if (cmp_bits >= GET_MODE_BITSIZE (Pmode))
1087 break;
1092 if (!cmp_type)
1093 return false;
1095 LOOP_VINFO_MASK_COMPARE_TYPE (loop_vinfo) = cmp_type;
1096 return true;
1099 /* Calculate the cost of one scalar iteration of the loop. */
1100 static void
1101 vect_compute_single_scalar_iteration_cost (loop_vec_info loop_vinfo)
1103 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1104 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
1105 int nbbs = loop->num_nodes, factor;
1106 int innerloop_iters, i;
1108 /* Gather costs for statements in the scalar loop. */
1110 /* FORNOW. */
1111 innerloop_iters = 1;
1112 if (loop->inner)
1113 innerloop_iters = 50; /* FIXME */
1115 for (i = 0; i < nbbs; i++)
1117 gimple_stmt_iterator si;
1118 basic_block bb = bbs[i];
1120 if (bb->loop_father == loop->inner)
1121 factor = innerloop_iters;
1122 else
1123 factor = 1;
1125 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
1127 gimple *stmt = gsi_stmt (si);
1128 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (stmt);
1130 if (!is_gimple_assign (stmt) && !is_gimple_call (stmt))
1131 continue;
1133 /* Skip stmts that are not vectorized inside the loop. */
1134 if (stmt_info
1135 && !STMT_VINFO_RELEVANT_P (stmt_info)
1136 && (!STMT_VINFO_LIVE_P (stmt_info)
1137 || !VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
1138 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
1139 continue;
1141 vect_cost_for_stmt kind;
1142 if (STMT_VINFO_DATA_REF (stmt_info))
1144 if (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)))
1145 kind = scalar_load;
1146 else
1147 kind = scalar_store;
1149 else
1150 kind = scalar_stmt;
1152 record_stmt_cost (&LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
1153 factor, kind, stmt_info, 0, vect_prologue);
1157 /* Now accumulate cost. */
1158 void *target_cost_data = init_cost (loop);
1159 stmt_info_for_cost *si;
1160 int j;
1161 FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
1162 j, si)
1163 (void) add_stmt_cost (target_cost_data, si->count,
1164 si->kind, si->stmt_info, si->misalign,
1165 vect_body);
1166 unsigned dummy, body_cost = 0;
1167 finish_cost (target_cost_data, &dummy, &body_cost, &dummy);
1168 destroy_cost_data (target_cost_data);
1169 LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST (loop_vinfo) = body_cost;
1173 /* Function vect_analyze_loop_form_1.
1175 Verify that certain CFG restrictions hold, including:
1176 - the loop has a pre-header
1177 - the loop has a single entry and exit
1178 - the loop exit condition is simple enough
1179 - the number of iterations can be analyzed, i.e., a countable loop. The
1180 niter could be analyzed under some assumptions. */
1182 bool
1183 vect_analyze_loop_form_1 (struct loop *loop, gcond **loop_cond,
1184 tree *assumptions, tree *number_of_iterationsm1,
1185 tree *number_of_iterations, gcond **inner_loop_cond)
1187 DUMP_VECT_SCOPE ("vect_analyze_loop_form");
1189 /* Different restrictions apply when we are considering an inner-most loop,
1190 vs. an outer (nested) loop.
1191 (FORNOW. May want to relax some of these restrictions in the future). */
1193 if (!loop->inner)
1195 /* Inner-most loop. We currently require that the number of BBs is
1196 exactly 2 (the header and latch). Vectorizable inner-most loops
1197 look like this:
1199 (pre-header)
1201 header <--------+
1202 | | |
1203 | +--> latch --+
1205 (exit-bb) */
1207 if (loop->num_nodes != 2)
1209 if (dump_enabled_p ())
1210 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1211 "not vectorized: control flow in loop.\n");
1212 return false;
1215 if (empty_block_p (loop->header))
1217 if (dump_enabled_p ())
1218 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1219 "not vectorized: empty loop.\n");
1220 return false;
1223 else
1225 struct loop *innerloop = loop->inner;
1226 edge entryedge;
1228 /* Nested loop. We currently require that the loop is doubly-nested,
1229 contains a single inner loop, and the number of BBs is exactly 5.
1230 Vectorizable outer-loops look like this:
1232 (pre-header)
1234 header <---+
1236 inner-loop |
1238 tail ------+
1240 (exit-bb)
1242 The inner-loop has the properties expected of inner-most loops
1243 as described above. */
1245 if ((loop->inner)->inner || (loop->inner)->next)
1247 if (dump_enabled_p ())
1248 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1249 "not vectorized: multiple nested loops.\n");
1250 return false;
1253 if (loop->num_nodes != 5)
1255 if (dump_enabled_p ())
1256 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1257 "not vectorized: control flow in loop.\n");
1258 return false;
1261 entryedge = loop_preheader_edge (innerloop);
1262 if (entryedge->src != loop->header
1263 || !single_exit (innerloop)
1264 || single_exit (innerloop)->dest != EDGE_PRED (loop->latch, 0)->src)
1266 if (dump_enabled_p ())
1267 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1268 "not vectorized: unsupported outerloop form.\n");
1269 return false;
1272 /* Analyze the inner-loop. */
1273 tree inner_niterm1, inner_niter, inner_assumptions;
1274 if (! vect_analyze_loop_form_1 (loop->inner, inner_loop_cond,
1275 &inner_assumptions, &inner_niterm1,
1276 &inner_niter, NULL)
1277 /* Don't support analyzing niter under assumptions for inner
1278 loop. */
1279 || !integer_onep (inner_assumptions))
1281 if (dump_enabled_p ())
1282 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1283 "not vectorized: Bad inner loop.\n");
1284 return false;
1287 if (!expr_invariant_in_loop_p (loop, inner_niter))
1289 if (dump_enabled_p ())
1290 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1291 "not vectorized: inner-loop count not"
1292 " invariant.\n");
1293 return false;
1296 if (dump_enabled_p ())
1297 dump_printf_loc (MSG_NOTE, vect_location,
1298 "Considering outer-loop vectorization.\n");
1301 if (!single_exit (loop)
1302 || EDGE_COUNT (loop->header->preds) != 2)
1304 if (dump_enabled_p ())
1306 if (!single_exit (loop))
1307 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1308 "not vectorized: multiple exits.\n");
1309 else if (EDGE_COUNT (loop->header->preds) != 2)
1310 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1311 "not vectorized: too many incoming edges.\n");
1313 return false;
1316 /* We assume that the loop exit condition is at the end of the loop, i.e.,
1317 that the loop is represented as a do-while (with a proper if-guard
1318 before the loop if needed), where the loop header contains all the
1319 executable statements, and the latch is empty. */
1320 if (!empty_block_p (loop->latch)
1321 || !gimple_seq_empty_p (phi_nodes (loop->latch)))
1323 if (dump_enabled_p ())
1324 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1325 "not vectorized: latch block not empty.\n");
1326 return false;
1329 /* Make sure the exit is not abnormal. */
1330 edge e = single_exit (loop);
1331 if (e->flags & EDGE_ABNORMAL)
1333 if (dump_enabled_p ())
1334 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1335 "not vectorized: abnormal loop exit edge.\n");
1336 return false;
1339 *loop_cond = vect_get_loop_niters (loop, assumptions, number_of_iterations,
1340 number_of_iterationsm1);
1341 if (!*loop_cond)
1343 if (dump_enabled_p ())
1344 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1345 "not vectorized: complicated exit condition.\n");
1346 return false;
1349 if (integer_zerop (*assumptions)
1350 || !*number_of_iterations
1351 || chrec_contains_undetermined (*number_of_iterations))
1353 if (dump_enabled_p ())
1354 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1355 "not vectorized: number of iterations cannot be "
1356 "computed.\n");
1357 return false;
1360 if (integer_zerop (*number_of_iterations))
1362 if (dump_enabled_p ())
1363 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1364 "not vectorized: number of iterations = 0.\n");
1365 return false;
1368 return true;
1371 /* Analyze LOOP form and return a loop_vec_info if it is of suitable form. */
1373 loop_vec_info
1374 vect_analyze_loop_form (struct loop *loop, vec_info_shared *shared)
1376 tree assumptions, number_of_iterations, number_of_iterationsm1;
1377 gcond *loop_cond, *inner_loop_cond = NULL;
1379 if (! vect_analyze_loop_form_1 (loop, &loop_cond,
1380 &assumptions, &number_of_iterationsm1,
1381 &number_of_iterations, &inner_loop_cond))
1382 return NULL;
1384 loop_vec_info loop_vinfo = new _loop_vec_info (loop, shared);
1385 LOOP_VINFO_NITERSM1 (loop_vinfo) = number_of_iterationsm1;
1386 LOOP_VINFO_NITERS (loop_vinfo) = number_of_iterations;
1387 LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = number_of_iterations;
1388 if (!integer_onep (assumptions))
1390 /* We consider vectorizing this loop by versioning it under
1391 some assumptions. In order to do this, we need to clear
1392 existing information computed by scev and niter analyzer. */
1393 scev_reset_htab ();
1394 free_numbers_of_iterations_estimates (loop);
1395 /* Also set flag for this loop so that following scev and niter
1396 analysis are done under the assumptions. */
1397 loop_constraint_set (loop, LOOP_C_FINITE);
1398 /* Also record the assumptions for versioning. */
1399 LOOP_VINFO_NITERS_ASSUMPTIONS (loop_vinfo) = assumptions;
1402 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
1404 if (dump_enabled_p ())
1406 dump_printf_loc (MSG_NOTE, vect_location,
1407 "Symbolic number of iterations is ");
1408 dump_generic_expr (MSG_NOTE, TDF_DETAILS, number_of_iterations);
1409 dump_printf (MSG_NOTE, "\n");
1413 stmt_vec_info loop_cond_info = loop_vinfo->lookup_stmt (loop_cond);
1414 STMT_VINFO_TYPE (loop_cond_info) = loop_exit_ctrl_vec_info_type;
1415 if (inner_loop_cond)
1417 stmt_vec_info inner_loop_cond_info
1418 = loop_vinfo->lookup_stmt (inner_loop_cond);
1419 STMT_VINFO_TYPE (inner_loop_cond_info) = loop_exit_ctrl_vec_info_type;
1422 gcc_assert (!loop->aux);
1423 loop->aux = loop_vinfo;
1424 return loop_vinfo;
1429 /* Scan the loop stmts and, depending on whether there are any (non-)SLP
1430 statements, update the vectorization factor. */
1432 static void
1433 vect_update_vf_for_slp (loop_vec_info loop_vinfo)
1435 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1436 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
1437 int nbbs = loop->num_nodes;
1438 poly_uint64 vectorization_factor;
1439 int i;
1441 DUMP_VECT_SCOPE ("vect_update_vf_for_slp");
1443 vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1444 gcc_assert (known_ne (vectorization_factor, 0U));
1446 /* If all the stmts in the loop can be SLPed, we perform only SLP, and
1447 vectorization factor of the loop is the unrolling factor required by
1448 the SLP instances. If that unrolling factor is 1, we say that we
1449 perform pure SLP on the loop - cross-iteration parallelism is not
1450 exploited. */
1451 bool only_slp_in_loop = true;
1452 for (i = 0; i < nbbs; i++)
1454 basic_block bb = bbs[i];
1455 for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
1456 gsi_next (&si))
1458 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
1459 stmt_info = vect_stmt_to_vectorize (stmt_info);
1460 if ((STMT_VINFO_RELEVANT_P (stmt_info)
1461 || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
1462 && !PURE_SLP_STMT (stmt_info))
1463 /* STMT needs both SLP and loop-based vectorization. */
1464 only_slp_in_loop = false;
1468 if (only_slp_in_loop)
1470 dump_printf_loc (MSG_NOTE, vect_location,
1471 "Loop contains only SLP stmts\n");
1472 vectorization_factor = LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo);
1474 else
1476 dump_printf_loc (MSG_NOTE, vect_location,
1477 "Loop contains SLP and non-SLP stmts\n");
1478 /* Both the vectorization factor and unroll factor have the form
1479 current_vector_size * X for some rational X, so they must have
1480 a common multiple. */
1481 vectorization_factor
1482 = force_common_multiple (vectorization_factor,
1483 LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo));
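/* For illustration (hypothetical numbers): a loop vectorization factor
   of 4 combined with an SLP unrolling factor of 6 yields
   force_common_multiple (4, 6) == 12, their least common multiple.  */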
1486 LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;
1487 if (dump_enabled_p ())
1489 dump_printf_loc (MSG_NOTE, vect_location,
1490 "Updating vectorization factor to ");
1491 dump_dec (MSG_NOTE, vectorization_factor);
1492 dump_printf (MSG_NOTE, ".\n");
1496 /* Return true if STMT_INFO describes a double reduction phi and if
1497 the other phi in the reduction is also relevant for vectorization.
1498 This rejects cases such as:
1500 outer1:
1501 x_1 = PHI <x_3(outer2), ...>;
1504 inner:
1505 x_2 = ...;
1508 outer2:
1509 x_3 = PHI <x_2(inner)>;
1511 if nothing in x_2 or elsewhere makes x_1 relevant. */
1513 static bool
1514 vect_active_double_reduction_p (stmt_vec_info stmt_info)
1516 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_double_reduction_def)
1517 return false;
1519 return STMT_VINFO_RELEVANT_P (STMT_VINFO_REDUC_DEF (stmt_info));
1522 /* Function vect_analyze_loop_operations.
1524 Scan the loop stmts and make sure they are all vectorizable. */
1526 static bool
1527 vect_analyze_loop_operations (loop_vec_info loop_vinfo)
1529 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1530 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
1531 int nbbs = loop->num_nodes;
1532 int i;
1533 stmt_vec_info stmt_info;
1534 bool need_to_vectorize = false;
1535 bool ok;
1537 DUMP_VECT_SCOPE ("vect_analyze_loop_operations");
1539 stmt_vector_for_cost cost_vec;
1540 cost_vec.create (2);
1542 for (i = 0; i < nbbs; i++)
1544 basic_block bb = bbs[i];
1546 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
1547 gsi_next (&si))
1549 gphi *phi = si.phi ();
1550 ok = true;
1552 stmt_info = loop_vinfo->lookup_stmt (phi);
1553 if (dump_enabled_p ())
1555 dump_printf_loc (MSG_NOTE, vect_location, "examining phi: ");
1556 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
1558 if (virtual_operand_p (gimple_phi_result (phi)))
1559 continue;
1561 /* Inner-loop loop-closed exit phi in outer-loop vectorization
1562 (i.e., a phi in the tail of the outer-loop). */
1563 if (! is_loop_header_bb_p (bb))
1565 /* FORNOW: we currently don't support the case that these phis
1566 are not used in the outerloop (unless it is double reduction,
1567 i.e., this phi is vect_reduction_def), because this case
1568 requires us to actually do something here. */
1569 if (STMT_VINFO_LIVE_P (stmt_info)
1570 && !vect_active_double_reduction_p (stmt_info))
1572 if (dump_enabled_p ())
1573 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1574 "Unsupported loop-closed phi in "
1575 "outer-loop.\n");
1576 return false;
1579 /* If PHI is used in the outer loop, we check that its operand
1580 is defined in the inner loop. */
1581 if (STMT_VINFO_RELEVANT_P (stmt_info))
1583 tree phi_op;
1585 if (gimple_phi_num_args (phi) != 1)
1586 return false;
1588 phi_op = PHI_ARG_DEF (phi, 0);
1589 stmt_vec_info op_def_info = loop_vinfo->lookup_def (phi_op);
1590 if (!op_def_info)
1591 return false;
1593 if (STMT_VINFO_RELEVANT (op_def_info) != vect_used_in_outer
1594 && (STMT_VINFO_RELEVANT (op_def_info)
1595 != vect_used_in_outer_by_reduction))
1596 return false;
1599 continue;
1602 gcc_assert (stmt_info);
1604 if ((STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope
1605 || STMT_VINFO_LIVE_P (stmt_info))
1606 && STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def)
1608 /* A scalar-dependence cycle that we don't support. */
1609 if (dump_enabled_p ())
1610 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1611 "not vectorized: scalar dependence cycle.\n");
1612 return false;
1615 if (STMT_VINFO_RELEVANT_P (stmt_info))
1617 need_to_vectorize = true;
1618 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def
1619 && ! PURE_SLP_STMT (stmt_info))
1620 ok = vectorizable_induction (stmt_info, NULL, NULL, NULL,
1621 &cost_vec);
1622 else if ((STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def
1623 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle)
1624 && ! PURE_SLP_STMT (stmt_info))
1625 ok = vectorizable_reduction (stmt_info, NULL, NULL, NULL, NULL,
1626 &cost_vec);
1629 /* SLP PHIs are tested by vect_slp_analyze_node_operations. */
1630 if (ok
1631 && STMT_VINFO_LIVE_P (stmt_info)
1632 && !PURE_SLP_STMT (stmt_info))
1633 ok = vectorizable_live_operation (stmt_info, NULL, NULL, -1, NULL,
1634 &cost_vec);
1636 if (!ok)
1638 if (dump_enabled_p ())
1640 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1641 "not vectorized: relevant phi not "
1642 "supported: ");
1643 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, phi, 0);
1645 return false;
1649 for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
1650 gsi_next (&si))
1652 gimple *stmt = gsi_stmt (si);
1653 if (!gimple_clobber_p (stmt)
1654 && !vect_analyze_stmt (loop_vinfo->lookup_stmt (stmt),
1655 &need_to_vectorize,
1656 NULL, NULL, &cost_vec))
1657 return false;
1659 } /* bbs */
1661 add_stmt_costs (loop_vinfo->target_cost_data, &cost_vec);
1662 cost_vec.release ();
1664 /* All operations in the loop are either irrelevant (deal with loop
1665 control, or dead), or only used outside the loop and can be moved
1666 out of the loop (e.g. invariants, inductions). The loop can be
1667 optimized away by scalar optimizations. We're better off not
1668 touching this loop. */
1669 if (!need_to_vectorize)
1671 if (dump_enabled_p ())
1672 dump_printf_loc (MSG_NOTE, vect_location,
1673 "All the computation can be taken out of the loop.\n");
1674 if (dump_enabled_p ())
1675 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1676 "not vectorized: redundant loop. no profit to "
1677 "vectorize.\n");
1678 return false;
1681 return true;
1684 /* Analyze the cost of the loop described by LOOP_VINFO. Decide if it
1685 is worthwhile to vectorize. Return 1 if definitely yes, 0 if
1686 definitely no, or -1 if it's worth retrying. */
1688 static int
1689 vect_analyze_loop_costing (loop_vec_info loop_vinfo)
1691 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1692 unsigned int assumed_vf = vect_vf_for_cost (loop_vinfo);
1694 /* Only fully-masked loops can have iteration counts less than the
1695 vectorization factor. */
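/* For illustration (hypothetical numbers): with an assumed VF of 4, a loop
   known to execute at most 3 iterations is rejected below unless it can
   be fully masked.  */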
1696 if (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
1698 HOST_WIDE_INT max_niter;
1700 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
1701 max_niter = LOOP_VINFO_INT_NITERS (loop_vinfo);
1702 else
1703 max_niter = max_stmt_executions_int (loop);
1705 if (max_niter != -1
1706 && (unsigned HOST_WIDE_INT) max_niter < assumed_vf)
1708 if (dump_enabled_p ())
1709 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1710 "not vectorized: iteration count smaller than "
1711 "vectorization factor.\n");
1712 return 0;
1716 int min_profitable_iters, min_profitable_estimate;
1717 vect_estimate_min_profitable_iters (loop_vinfo, &min_profitable_iters,
1718 &min_profitable_estimate);
1720 if (min_profitable_iters < 0)
1722 if (dump_enabled_p ())
1723 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1724 "not vectorized: vectorization not profitable.\n");
1725 if (dump_enabled_p ())
1726 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1727 "not vectorized: vector version will never be "
1728 "profitable.\n");
1729 return -1;
1732 int min_scalar_loop_bound = (PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND)
1733 * assumed_vf);
1735 /* Use the cost model only if it is more conservative than user specified
1736 threshold. */
1737 unsigned int th = (unsigned) MAX (min_scalar_loop_bound,
1738 min_profitable_iters);
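/* For illustration (hypothetical numbers): with --param min-vect-loop-bound=8
   and an assumed VF of 4, min_scalar_loop_bound is 32; if the cost model
   reports min_profitable_iters of 20, TH becomes MAX (32, 20) == 32.  */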
1740 LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo) = th;
1742 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
1743 && LOOP_VINFO_INT_NITERS (loop_vinfo) < th)
1745 if (dump_enabled_p ())
1746 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1747 "not vectorized: vectorization not profitable.\n");
1748 if (dump_enabled_p ())
1749 dump_printf_loc (MSG_NOTE, vect_location,
1750 "not vectorized: iteration count smaller than user "
1751 "specified loop bound parameter or minimum profitable "
1752 "iterations (whichever is more conservative).\n");
1753 return 0;
1756 HOST_WIDE_INT estimated_niter = estimated_stmt_executions_int (loop);
1757 if (estimated_niter == -1)
1758 estimated_niter = likely_max_stmt_executions_int (loop);
1759 if (estimated_niter != -1
1760 && ((unsigned HOST_WIDE_INT) estimated_niter
1761 < MAX (th, (unsigned) min_profitable_estimate)))
1763 if (dump_enabled_p ())
1764 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1765 "not vectorized: estimated iteration count too "
1766 "small.\n");
1767 if (dump_enabled_p ())
1768 dump_printf_loc (MSG_NOTE, vect_location,
1769 "not vectorized: estimated iteration count smaller "
1770 "than specified loop bound parameter or minimum "
1771 "profitable iterations (whichever is more "
1772 "conservative).\n");
1773 return -1;
1776 return 1;
1779 static bool
1780 vect_get_datarefs_in_loop (loop_p loop, basic_block *bbs,
1781 vec<data_reference_p> *datarefs,
1782 unsigned int *n_stmts)
1784 *n_stmts = 0;
1785 for (unsigned i = 0; i < loop->num_nodes; i++)
1786 for (gimple_stmt_iterator gsi = gsi_start_bb (bbs[i]);
1787 !gsi_end_p (gsi); gsi_next (&gsi))
1789 gimple *stmt = gsi_stmt (gsi);
1790 if (is_gimple_debug (stmt))
1791 continue;
1792 ++(*n_stmts);
1793 if (!vect_find_stmt_data_reference (loop, stmt, datarefs))
1795 if (is_gimple_call (stmt) && loop->safelen)
1797 tree fndecl = gimple_call_fndecl (stmt), op;
1798 if (fndecl != NULL_TREE)
1800 cgraph_node *node = cgraph_node::get (fndecl);
1801 if (node != NULL && node->simd_clones != NULL)
1803 unsigned int j, n = gimple_call_num_args (stmt);
1804 for (j = 0; j < n; j++)
1806 op = gimple_call_arg (stmt, j);
1807 if (DECL_P (op)
1808 || (REFERENCE_CLASS_P (op)
1809 && get_base_address (op)))
1810 break;
1812 op = gimple_call_lhs (stmt);
1813 /* Ignore #pragma omp declare simd functions
1814 if they don't have data references in the
1815 call stmt itself. */
1816 if (j == n
1817 && !(op
1818 && (DECL_P (op)
1819 || (REFERENCE_CLASS_P (op)
1820 && get_base_address (op)))))
1821 continue;
1825 return false;
1827 /* If dependence analysis will give up due to the limit on the
1828 number of datarefs, stop here and fail fatally. */
1829 if (datarefs->length ()
1830 > (unsigned)PARAM_VALUE (PARAM_LOOP_MAX_DATAREFS_FOR_DATADEPS))
1831 return false;
1833 return true;
1836 /* Function vect_analyze_loop_2.
1838 Apply a set of analyses on LOOP, and create a loop_vec_info struct
1839 for it. The different analyses will record information in the
1840 loop_vec_info struct. */
1841 static bool
1842 vect_analyze_loop_2 (loop_vec_info loop_vinfo, bool &fatal, unsigned *n_stmts)
1844 bool ok;
1845 int res;
1846 unsigned int max_vf = MAX_VECTORIZATION_FACTOR;
1847 poly_uint64 min_vf = 2;
1849 /* The first group of checks is independent of the vector size. */
1850 fatal = true;
1852 /* Find all data references in the loop (which correspond to vdefs/vuses)
1853 and analyze their evolution in the loop. */
1855 loop_p loop = LOOP_VINFO_LOOP (loop_vinfo);
1857 /* Gather the data references and count stmts in the loop. */
1858 if (!LOOP_VINFO_DATAREFS (loop_vinfo).exists ())
1860 if (!vect_get_datarefs_in_loop (loop, LOOP_VINFO_BBS (loop_vinfo),
1861 &LOOP_VINFO_DATAREFS (loop_vinfo),
1862 n_stmts))
1864 if (dump_enabled_p ())
1865 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1866 "not vectorized: loop contains function "
1867 "calls or data references that cannot "
1868 "be analyzed\n");
1869 return false;
1871 loop_vinfo->shared->save_datarefs ();
1873 else
1874 loop_vinfo->shared->check_datarefs ();
1876 /* Analyze the data references and also adjust the minimal
1877 vectorization factor according to the loads and stores. */
1879 ok = vect_analyze_data_refs (loop_vinfo, &min_vf);
1880 if (!ok)
1882 if (dump_enabled_p ())
1883 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1884 "bad data references.\n");
1885 return false;
1888 /* Classify all cross-iteration scalar data-flow cycles.
1889 Cross-iteration cycles caused by virtual phis are analyzed separately. */
1890 vect_analyze_scalar_cycles (loop_vinfo);
1892 vect_pattern_recog (loop_vinfo);
1894 vect_fixup_scalar_cycles_with_patterns (loop_vinfo);
1896 /* Analyze the access patterns of the data-refs in the loop (consecutive,
1897 complex, etc.). FORNOW: Only handle consecutive access pattern. */
1899 ok = vect_analyze_data_ref_accesses (loop_vinfo);
1900 if (!ok)
1902 if (dump_enabled_p ())
1903 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1904 "bad data access.\n");
1905 return false;
1908 /* Data-flow analysis to detect stmts that do not need to be vectorized. */
1910 ok = vect_mark_stmts_to_be_vectorized (loop_vinfo);
1911 if (!ok)
1913 if (dump_enabled_p ())
1914 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1915 "unexpected pattern.\n");
1916 return false;
1919 /* The rest of the analysis below depends on the vector size in some way, so failures from here on are not fatal. */
1920 fatal = false;
1922 /* Analyze data dependences between the data-refs in the loop
1923 and adjust the maximum vectorization factor according to
1924 the dependences.
1925 FORNOW: fail at the first data dependence that we encounter. */
1927 ok = vect_analyze_data_ref_dependences (loop_vinfo, &max_vf);
1928 if (!ok
1929 || (max_vf != MAX_VECTORIZATION_FACTOR
1930 && maybe_lt (max_vf, min_vf)))
1932 if (dump_enabled_p ())
1933 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1934 "bad data dependence.\n");
1935 return false;
1937 LOOP_VINFO_MAX_VECT_FACTOR (loop_vinfo) = max_vf;
1939 ok = vect_determine_vectorization_factor (loop_vinfo);
1940 if (!ok)
1942 if (dump_enabled_p ())
1943 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1944 "can't determine vectorization factor.\n");
1945 return false;
1947 if (max_vf != MAX_VECTORIZATION_FACTOR
1948 && maybe_lt (max_vf, LOOP_VINFO_VECT_FACTOR (loop_vinfo)))
1950 if (dump_enabled_p ())
1951 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1952 "bad data dependence.\n");
1953 return false;
1956 /* Compute the scalar iteration cost. */
1957 vect_compute_single_scalar_iteration_cost (loop_vinfo);
1959 poly_uint64 saved_vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1960 unsigned th;
1962 /* Check the SLP opportunities in the loop, analyze and build SLP trees. */
1963 ok = vect_analyze_slp (loop_vinfo, *n_stmts);
1964 if (!ok)
1965 return false;
1967 /* If there are any SLP instances mark them as pure_slp. */
1968 bool slp = vect_make_slp_decision (loop_vinfo);
1969 if (slp)
1971 /* Find stmts that need to be both vectorized and SLPed. */
1972 vect_detect_hybrid_slp (loop_vinfo);
1974 /* Update the vectorization factor based on the SLP decision. */
1975 vect_update_vf_for_slp (loop_vinfo);
1978 bool saved_can_fully_mask_p = LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo);
1980 /* We don't expect to have to roll back to anything other than an empty
1981 set of rgroups. */
1982 gcc_assert (LOOP_VINFO_MASKS (loop_vinfo).is_empty ());
1984 /* This is the point where we can re-start analysis with SLP forced off. */
1985 start_over:
1987 /* Now the vectorization factor is final. */
1988 poly_uint64 vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1989 gcc_assert (known_ne (vectorization_factor, 0U));
1991 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) && dump_enabled_p ())
1993 dump_printf_loc (MSG_NOTE, vect_location,
1994 "vectorization_factor = ");
1995 dump_dec (MSG_NOTE, vectorization_factor);
1996 dump_printf (MSG_NOTE, ", niters = %wd\n",
1997 LOOP_VINFO_INT_NITERS (loop_vinfo));
2000 HOST_WIDE_INT max_niter
2001 = likely_max_stmt_executions_int (LOOP_VINFO_LOOP (loop_vinfo));
2003 /* Analyze the alignment of the data-refs in the loop.
2004 Fail if a data reference is found that cannot be vectorized. */
2006 ok = vect_analyze_data_refs_alignment (loop_vinfo);
2007 if (!ok)
2009 if (dump_enabled_p ())
2010 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2011 "bad data alignment.\n");
2012 return false;
2015 /* Prune the list of ddrs to be tested at run-time by versioning for alias.
2016 It is important to call pruning after vect_analyze_data_ref_accesses,
2017 since we use grouping information gathered by interleaving analysis. */
2018 ok = vect_prune_runtime_alias_test_list (loop_vinfo);
2019 if (!ok)
2020 return false;
2022 /* Do not invoke vect_enhance_data_refs_alignment for epilogue
2023 vectorization. */
2024 if (!LOOP_VINFO_EPILOGUE_P (loop_vinfo))
2026 /* This pass will decide on using loop versioning and/or loop peeling in
2027 order to enhance the alignment of data references in the loop. */
2028 ok = vect_enhance_data_refs_alignment (loop_vinfo);
2029 if (!ok)
2031 if (dump_enabled_p ())
2032 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2033 "bad data alignment.\n");
2034 return false;
2038 if (slp)
2040 /* Analyze operations in the SLP instances. Note this may
2041 remove unsupported SLP instances which makes the above
2042 SLP kind detection invalid. */
2043 unsigned old_size = LOOP_VINFO_SLP_INSTANCES (loop_vinfo).length ();
2044 vect_slp_analyze_operations (loop_vinfo);
2045 if (LOOP_VINFO_SLP_INSTANCES (loop_vinfo).length () != old_size)
2046 goto again;
2049 /* Scan all the remaining operations in the loop that are not subject
2050 to SLP and make sure they are vectorizable. */
2051 ok = vect_analyze_loop_operations (loop_vinfo);
2052 if (!ok)
2054 if (dump_enabled_p ())
2055 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2056 "bad operation or unsupported loop bound.\n");
2057 return false;
2060 /* Decide whether to use a fully-masked loop for this vectorization
2061 factor. */
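/* In a fully-masked loop every vector iteration runs under a loop mask, so
 the final, possibly partial, iteration is handled by masking off the excess
 lanes instead of by peeling a scalar epilogue.  */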
2062 LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
2063 = (LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo)
2064 && vect_verify_full_masking (loop_vinfo));
2065 if (dump_enabled_p ())
2067 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
2068 dump_printf_loc (MSG_NOTE, vect_location,
2069 "using a fully-masked loop.\n");
2070 else
2071 dump_printf_loc (MSG_NOTE, vect_location,
2072 "not using a fully-masked loop.\n");
2075 /* If an epilog loop is required because of data accesses with gaps,
2076 one additional iteration needs to be peeled. Check if there are
2077 enough iterations for vectorization. */
2078 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
2079 && LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
2080 && !LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
2082 poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2083 tree scalar_niters = LOOP_VINFO_NITERSM1 (loop_vinfo);
2085 if (known_lt (wi::to_widest (scalar_niters), vf))
2087 if (dump_enabled_p ())
2088 dump_printf_loc (MSG_NOTE, vect_location,
2089 "loop has no enough iterations to support"
2090 " peeling for gaps.\n");
2091 return false;
2095 /* Check that the costings of the loop make vectorizing worthwhile. */
2096 res = vect_analyze_loop_costing (loop_vinfo);
2097 if (res < 0)
2098 goto again;
2099 if (!res)
2101 if (dump_enabled_p ())
2102 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2103 "Loop costings not worthwhile.\n");
2104 return false;
2107 /* Decide whether we need to create an epilogue loop to handle
2108 remaining scalar iterations. */
2109 th = LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo);
2111 unsigned HOST_WIDE_INT const_vf;
2112 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
2113 /* The main loop handles all iterations. */
2114 LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = false;
2115 else if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
2116 && LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) > 0)
2118 if (!multiple_p (LOOP_VINFO_INT_NITERS (loop_vinfo)
2119 - LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo),
2120 LOOP_VINFO_VECT_FACTOR (loop_vinfo)))
2121 LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = true;
2123 else if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo)
2124 || !LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&const_vf)
2125 || ((tree_ctz (LOOP_VINFO_NITERS (loop_vinfo))
2126 < (unsigned) exact_log2 (const_vf))
2127 /* In case of versioning, check if the maximum number of
2128 iterations is greater than th. If they are identical,
2129 the epilogue is unnecessary. */
2130 && (!LOOP_REQUIRES_VERSIONING (loop_vinfo)
2131 || ((unsigned HOST_WIDE_INT) max_niter
2132 > (th / const_vf) * const_vf))))
2133 LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = true;
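/* For example, a loop with a known iteration count of 100, no alignment
 peeling or versioning, and a constant VF of 8 has tree_ctz (100) == 2,
 which is smaller than log2 (8) == 3, so the 100 % 8 == 4 leftover
 iterations need a scalar epilogue.  */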
2135 /* If an epilogue loop is required make sure we can create one. */
2136 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
2137 || LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo))
2139 if (dump_enabled_p ())
2140 dump_printf_loc (MSG_NOTE, vect_location, "epilog loop required\n");
2141 if (!vect_can_advance_ivs_p (loop_vinfo)
2142 || !slpeel_can_duplicate_loop_p (LOOP_VINFO_LOOP (loop_vinfo),
2143 single_exit (LOOP_VINFO_LOOP
2144 (loop_vinfo))))
2146 if (dump_enabled_p ())
2147 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2148 "not vectorized: can't create required "
2149 "epilog loop\n");
2150 goto again;
2154 /* During peeling, we need to check whether the number of loop iterations
2155 is enough for both the peeled prolog loop and the vector loop. This check
2156 can be merged along with threshold check of loop versioning, so
2157 increase threshold for this case if necessary. */
2158 if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
2160 poly_uint64 niters_th = 0;
2162 if (!vect_use_loop_mask_for_alignment_p (loop_vinfo))
2164 /* Niters for peeled prolog loop. */
2165 if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0)
2167 dr_vec_info *dr_info = LOOP_VINFO_UNALIGNED_DR (loop_vinfo);
2168 tree vectype = STMT_VINFO_VECTYPE (dr_info->stmt);
2169 niters_th += TYPE_VECTOR_SUBPARTS (vectype) - 1;
2171 else
2172 niters_th += LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
2175 /* Niters for at least one iteration of vectorized loop. */
2176 if (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
2177 niters_th += LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2178 /* One additional iteration because of peeling for gap. */
2179 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo))
2180 niters_th += 1;
2181 LOOP_VINFO_VERSIONING_THRESHOLD (loop_vinfo) = niters_th;
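/* For instance, for a loop that is not fully masked, with unknown peeling
 for alignment on a 4-lane vector type, VF 4 and peeling for gaps, NITERS_TH
 becomes (4 - 1) + 4 + 1 = 8.  */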
2184 gcc_assert (known_eq (vectorization_factor,
2185 LOOP_VINFO_VECT_FACTOR (loop_vinfo)));
2187 /* Ok to vectorize! */
2188 return true;
2190 again:
2191 /* Try again with SLP forced off but if we didn't do any SLP there is
2192 no point in re-trying. */
2193 if (!slp)
2194 return false;
2196 /* If there are reduction chains re-trying will fail anyway. */
2197 if (! LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo).is_empty ())
2198 return false;
2200 /* Likewise if the grouped loads or stores in the SLP cannot be handled
2201 via interleaving or lane instructions. */
2202 slp_instance instance;
2203 slp_tree node;
2204 unsigned i, j;
2205 FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), i, instance)
2207 stmt_vec_info vinfo;
2208 vinfo = SLP_TREE_SCALAR_STMTS (SLP_INSTANCE_TREE (instance))[0];
2209 if (! STMT_VINFO_GROUPED_ACCESS (vinfo))
2210 continue;
2211 vinfo = DR_GROUP_FIRST_ELEMENT (vinfo);
2212 unsigned int size = DR_GROUP_SIZE (vinfo);
2213 tree vectype = STMT_VINFO_VECTYPE (vinfo);
2214 if (! vect_store_lanes_supported (vectype, size, false)
2215 && ! known_eq (TYPE_VECTOR_SUBPARTS (vectype), 1U)
2216 && ! vect_grouped_store_supported (vectype, size))
2217 return false;
2218 FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (instance), j, node)
2220 vinfo = SLP_TREE_SCALAR_STMTS (node)[0];
2221 vinfo = DR_GROUP_FIRST_ELEMENT (vinfo);
2222 bool single_element_p = !DR_GROUP_NEXT_ELEMENT (vinfo);
2223 size = DR_GROUP_SIZE (vinfo);
2224 vectype = STMT_VINFO_VECTYPE (vinfo);
2225 if (! vect_load_lanes_supported (vectype, size, false)
2226 && ! vect_grouped_load_supported (vectype, single_element_p,
2227 size))
2228 return false;
2232 if (dump_enabled_p ())
2233 dump_printf_loc (MSG_NOTE, vect_location,
2234 "re-trying with SLP disabled\n");
2236 /* Roll back state appropriately. No SLP this time. */
2237 slp = false;
2238 /* Restore the vectorization factor as it was without SLP. */
2239 LOOP_VINFO_VECT_FACTOR (loop_vinfo) = saved_vectorization_factor;
2240 /* Free the SLP instances. */
2241 FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), j, instance)
2242 vect_free_slp_instance (instance, false);
2243 LOOP_VINFO_SLP_INSTANCES (loop_vinfo).release ();
2244 /* Reset SLP type to loop_vect on all stmts. */
2245 for (i = 0; i < LOOP_VINFO_LOOP (loop_vinfo)->num_nodes; ++i)
2247 basic_block bb = LOOP_VINFO_BBS (loop_vinfo)[i];
2248 for (gimple_stmt_iterator si = gsi_start_phis (bb);
2249 !gsi_end_p (si); gsi_next (&si))
2251 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
2252 STMT_SLP_TYPE (stmt_info) = loop_vect;
2254 for (gimple_stmt_iterator si = gsi_start_bb (bb);
2255 !gsi_end_p (si); gsi_next (&si))
2257 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
2258 STMT_SLP_TYPE (stmt_info) = loop_vect;
2259 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
2261 gimple *pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
2262 stmt_info = STMT_VINFO_RELATED_STMT (stmt_info);
2263 STMT_SLP_TYPE (stmt_info) = loop_vect;
2264 for (gimple_stmt_iterator pi = gsi_start (pattern_def_seq);
2265 !gsi_end_p (pi); gsi_next (&pi))
2266 STMT_SLP_TYPE (loop_vinfo->lookup_stmt (gsi_stmt (pi)))
2267 = loop_vect;
2271 /* Free optimized alias test DDRS. */
2272 LOOP_VINFO_LOWER_BOUNDS (loop_vinfo).truncate (0);
2273 LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo).release ();
2274 LOOP_VINFO_CHECK_UNEQUAL_ADDRS (loop_vinfo).release ();
2275 /* Reset target cost data. */
2276 destroy_cost_data (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo));
2277 LOOP_VINFO_TARGET_COST_DATA (loop_vinfo)
2278 = init_cost (LOOP_VINFO_LOOP (loop_vinfo));
2279 /* Reset accumulated rgroup information. */
2280 release_vec_loop_masks (&LOOP_VINFO_MASKS (loop_vinfo));
2281 /* Reset assorted flags. */
2282 LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = false;
2283 LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = false;
2284 LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo) = 0;
2285 LOOP_VINFO_VERSIONING_THRESHOLD (loop_vinfo) = 0;
2286 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = saved_can_fully_mask_p;
2288 goto start_over;
2291 /* Function vect_analyze_loop.
2293 Apply a set of analyses on LOOP, and create a loop_vec_info struct
2294 for it. The different analyses will record information in the
2295 loop_vec_info struct. If ORIG_LOOP_VINFO is not NULL, the epilogue of the
2296 already-vectorized loop it describes must be vectorized. */
2297 loop_vec_info
2298 vect_analyze_loop (struct loop *loop, loop_vec_info orig_loop_vinfo,
2299 vec_info_shared *shared)
2301 loop_vec_info loop_vinfo;
2302 auto_vector_sizes vector_sizes;
2304 /* Autodetect first vector size we try. */
2305 current_vector_size = 0;
2306 targetm.vectorize.autovectorize_vector_sizes (&vector_sizes);
2307 unsigned int next_size = 0;
2309 DUMP_VECT_SCOPE ("analyze_loop_nest");
2311 if (loop_outer (loop)
2312 && loop_vec_info_for_loop (loop_outer (loop))
2313 && LOOP_VINFO_VECTORIZABLE_P (loop_vec_info_for_loop (loop_outer (loop))))
2315 if (dump_enabled_p ())
2316 dump_printf_loc (MSG_NOTE, vect_location,
2317 "outer-loop already vectorized.\n");
2318 return NULL;
2321 if (!find_loop_nest (loop, &shared->loop_nest))
2323 if (dump_enabled_p ())
2324 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2325 "not vectorized: loop nest containing two "
2326 "or more consecutive inner loops cannot be "
2327 "vectorized\n");
2328 return NULL;
2331 unsigned n_stmts = 0;
2332 poly_uint64 autodetected_vector_size = 0;
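/* Retry the analysis below with each vector size the target supports,
 starting from the autodetected one, until it succeeds or a fatal failure
 stops the retries.  */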
2333 while (1)
2335 /* Check the CFG characteristics of the loop (nesting, entry/exit). */
2336 loop_vinfo = vect_analyze_loop_form (loop, shared);
2337 if (!loop_vinfo)
2339 if (dump_enabled_p ())
2340 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2341 "bad loop form.\n");
2342 return NULL;
2345 bool fatal = false;
2347 if (orig_loop_vinfo)
2348 LOOP_VINFO_ORIG_LOOP_INFO (loop_vinfo) = orig_loop_vinfo;
2350 if (vect_analyze_loop_2 (loop_vinfo, fatal, &n_stmts))
2352 LOOP_VINFO_VECTORIZABLE_P (loop_vinfo) = 1;
2354 return loop_vinfo;
2357 delete loop_vinfo;
2359 if (next_size == 0)
2360 autodetected_vector_size = current_vector_size;
2362 if (next_size < vector_sizes.length ()
2363 && known_eq (vector_sizes[next_size], autodetected_vector_size))
2364 next_size += 1;
2366 if (fatal
2367 || next_size == vector_sizes.length ()
2368 || known_eq (current_vector_size, 0U))
2369 return NULL;
2371 /* Try the next biggest vector size. */
2372 current_vector_size = vector_sizes[next_size++];
2373 if (dump_enabled_p ())
2375 dump_printf_loc (MSG_NOTE, vect_location,
2376 "***** Re-trying analysis with "
2377 "vector size ");
2378 dump_dec (MSG_NOTE, current_vector_size);
2379 dump_printf (MSG_NOTE, "\n");
2384 /* Return true if there is an in-order reduction function for CODE, storing
2385 it in *REDUC_FN if so. */
2387 static bool
2388 fold_left_reduction_fn (tree_code code, internal_fn *reduc_fn)
2390 switch (code)
2392 case PLUS_EXPR:
2393 *reduc_fn = IFN_FOLD_LEFT_PLUS;
2394 return true;
2396 default:
2397 return false;
2401 /* Function reduction_fn_for_scalar_code
2403 Input:
2404 CODE - tree_code of a reduction operation.
2406 Output:
2407 REDUC_FN - the corresponding internal function to be used to reduce the
2408 vector of partial results into a single scalar result, or IFN_LAST
2409 if the operation is a supported reduction operation, but does not have
2410 such an internal function.
2412 Return FALSE if CODE currently cannot be vectorized as reduction. */
2414 static bool
2415 reduction_fn_for_scalar_code (enum tree_code code, internal_fn *reduc_fn)
2417 switch (code)
2419 case MAX_EXPR:
2420 *reduc_fn = IFN_REDUC_MAX;
2421 return true;
2423 case MIN_EXPR:
2424 *reduc_fn = IFN_REDUC_MIN;
2425 return true;
2427 case PLUS_EXPR:
2428 *reduc_fn = IFN_REDUC_PLUS;
2429 return true;
2431 case BIT_AND_EXPR:
2432 *reduc_fn = IFN_REDUC_AND;
2433 return true;
2435 case BIT_IOR_EXPR:
2436 *reduc_fn = IFN_REDUC_IOR;
2437 return true;
2439 case BIT_XOR_EXPR:
2440 *reduc_fn = IFN_REDUC_XOR;
2441 return true;
2443 case MULT_EXPR:
2444 case MINUS_EXPR:
2445 *reduc_fn = IFN_LAST;
2446 return true;
2448 default:
2449 return false;
2453 /* If there is a neutral value X such that SLP reduction NODE would not
2454 be affected by the introduction of additional X elements, return that X,
2455 otherwise return null. CODE is the code of the reduction. REDUC_CHAIN
2456 is true if the SLP statements perform a single reduction, false if each
2457 statement performs an independent reduction. */
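/* For example, zero is neutral for PLUS_EXPR and BIT_IOR_EXPR, one for
 MULT_EXPR, and an all-ones constant for BIT_AND_EXPR: padding the vectors
 with those values leaves the reduction result unchanged.  */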
2459 static tree
2460 neutral_op_for_slp_reduction (slp_tree slp_node, tree_code code,
2461 bool reduc_chain)
2463 vec<stmt_vec_info> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
2464 stmt_vec_info stmt_vinfo = stmts[0];
2465 tree vector_type = STMT_VINFO_VECTYPE (stmt_vinfo);
2466 tree scalar_type = TREE_TYPE (vector_type);
2467 struct loop *loop = gimple_bb (stmt_vinfo->stmt)->loop_father;
2468 gcc_assert (loop);
2470 switch (code)
2472 case WIDEN_SUM_EXPR:
2473 case DOT_PROD_EXPR:
2474 case SAD_EXPR:
2475 case PLUS_EXPR:
2476 case MINUS_EXPR:
2477 case BIT_IOR_EXPR:
2478 case BIT_XOR_EXPR:
2479 return build_zero_cst (scalar_type);
2481 case MULT_EXPR:
2482 return build_one_cst (scalar_type);
2484 case BIT_AND_EXPR:
2485 return build_all_ones_cst (scalar_type);
2487 case MAX_EXPR:
2488 case MIN_EXPR:
2489 /* For MIN/MAX the initial values are neutral. A reduction chain
2490 has only a single initial value, so that value is neutral for
2491 all statements. */
2492 if (reduc_chain)
2493 return PHI_ARG_DEF_FROM_EDGE (stmt_vinfo->stmt,
2494 loop_preheader_edge (loop));
2495 return NULL_TREE;
2497 default:
2498 return NULL_TREE;
2502 /* Error reporting helper for vect_is_simple_reduction below. GIMPLE statement
2503 STMT is printed with a message MSG. */
2505 static void
2506 report_vect_op (dump_flags_t msg_type, gimple *stmt, const char *msg)
2508 dump_printf_loc (msg_type, vect_location, "%s", msg);
2509 dump_gimple_stmt (msg_type, TDF_SLIM, stmt, 0);
2512 /* DEF_STMT_INFO occurs in a loop that contains a potential reduction
2513 operation. Return true if the results of DEF_STMT_INFO are something
2514 that can be accumulated by such a reduction. */
2516 static bool
2517 vect_valid_reduction_input_p (stmt_vec_info def_stmt_info)
2519 return (is_gimple_assign (def_stmt_info->stmt)
2520 || is_gimple_call (def_stmt_info->stmt)
2521 || STMT_VINFO_DEF_TYPE (def_stmt_info) == vect_induction_def
2522 || (gimple_code (def_stmt_info->stmt) == GIMPLE_PHI
2523 && STMT_VINFO_DEF_TYPE (def_stmt_info) == vect_internal_def
2524 && !is_loop_header_bb_p (gimple_bb (def_stmt_info->stmt))));
2527 /* Detect SLP reduction of the form:
2529 #a1 = phi <a5, a0>
2530 a2 = operation (a1)
2531 a3 = operation (a2)
2532 a4 = operation (a3)
2533 a5 = operation (a4)
2535 #a = phi <a5>
2537 PHI is the reduction phi node (#a1 = phi <a5, a0> above)
2538 FIRST_STMT is the first reduction stmt in the chain
2539 (a2 = operation (a1)).
2541 Return TRUE if a reduction chain was detected. */
2543 static bool
2544 vect_is_slp_reduction (loop_vec_info loop_info, gimple *phi,
2545 gimple *first_stmt)
2547 struct loop *loop = (gimple_bb (phi))->loop_father;
2548 struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
2549 enum tree_code code;
2550 gimple *loop_use_stmt = NULL;
2551 stmt_vec_info use_stmt_info, current_stmt_info = NULL;
2552 tree lhs;
2553 imm_use_iterator imm_iter;
2554 use_operand_p use_p;
2555 int nloop_uses, size = 0, n_out_of_loop_uses;
2556 bool found = false;
2558 if (loop != vect_loop)
2559 return false;
2561 lhs = PHI_RESULT (phi);
2562 code = gimple_assign_rhs_code (first_stmt);
2563 while (1)
2565 nloop_uses = 0;
2566 n_out_of_loop_uses = 0;
2567 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
2569 gimple *use_stmt = USE_STMT (use_p);
2570 if (is_gimple_debug (use_stmt))
2571 continue;
2573 /* Check if we got back to the reduction phi. */
2574 if (use_stmt == phi)
2576 loop_use_stmt = use_stmt;
2577 found = true;
2578 break;
2581 if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
2583 loop_use_stmt = use_stmt;
2584 nloop_uses++;
2586 else
2587 n_out_of_loop_uses++;
2589 /* There can be either a single use in the loop or two uses in
2590 phi nodes. */
2591 if (nloop_uses > 1 || (n_out_of_loop_uses && nloop_uses))
2592 return false;
2595 if (found)
2596 break;
2598 /* We reached a statement with no loop uses. */
2599 if (nloop_uses == 0)
2600 return false;
2602 /* This is a loop exit phi, and we haven't reached the reduction phi. */
2603 if (gimple_code (loop_use_stmt) == GIMPLE_PHI)
2604 return false;
2606 if (!is_gimple_assign (loop_use_stmt)
2607 || code != gimple_assign_rhs_code (loop_use_stmt)
2608 || !flow_bb_inside_loop_p (loop, gimple_bb (loop_use_stmt)))
2609 return false;
2611 /* Insert USE_STMT into reduction chain. */
2612 use_stmt_info = loop_info->lookup_stmt (loop_use_stmt);
2613 if (current_stmt_info)
2615 REDUC_GROUP_NEXT_ELEMENT (current_stmt_info) = use_stmt_info;
2616 REDUC_GROUP_FIRST_ELEMENT (use_stmt_info)
2617 = REDUC_GROUP_FIRST_ELEMENT (current_stmt_info);
2619 else
2620 REDUC_GROUP_FIRST_ELEMENT (use_stmt_info) = use_stmt_info;
2622 lhs = gimple_assign_lhs (loop_use_stmt);
2623 current_stmt_info = use_stmt_info;
2624 size++;
2627 if (!found || loop_use_stmt != phi || size < 2)
2628 return false;
2630 /* Swap the operands, if needed, to make the reduction operand be the second
2631 operand. */
2632 lhs = PHI_RESULT (phi);
2633 stmt_vec_info next_stmt_info = REDUC_GROUP_FIRST_ELEMENT (current_stmt_info);
2634 while (next_stmt_info)
2636 gassign *next_stmt = as_a <gassign *> (next_stmt_info->stmt);
2637 if (gimple_assign_rhs2 (next_stmt) == lhs)
2639 tree op = gimple_assign_rhs1 (next_stmt);
2640 stmt_vec_info def_stmt_info = loop_info->lookup_def (op);
2642 /* Check that the other def is either defined in the loop
2643 ("vect_internal_def"), or it's an induction (defined by a
2644 loop-header phi-node). */
2645 if (def_stmt_info
2646 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt_info->stmt))
2647 && vect_valid_reduction_input_p (def_stmt_info))
2649 lhs = gimple_assign_lhs (next_stmt);
2650 next_stmt_info = REDUC_GROUP_NEXT_ELEMENT (next_stmt_info);
2651 continue;
2654 return false;
2656 else
2658 tree op = gimple_assign_rhs2 (next_stmt);
2659 stmt_vec_info def_stmt_info = loop_info->lookup_def (op);
2661 /* Check that the other def is either defined in the loop
2662 ("vect_internal_def"), or it's an induction (defined by a
2663 loop-header phi-node). */
2664 if (def_stmt_info
2665 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt_info->stmt))
2666 && vect_valid_reduction_input_p (def_stmt_info))
2668 if (dump_enabled_p ())
2670 dump_printf_loc (MSG_NOTE, vect_location, "swapping oprnds: ");
2671 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, next_stmt, 0);
2674 swap_ssa_operands (next_stmt,
2675 gimple_assign_rhs1_ptr (next_stmt),
2676 gimple_assign_rhs2_ptr (next_stmt));
2677 update_stmt (next_stmt);
2679 if (CONSTANT_CLASS_P (gimple_assign_rhs1 (next_stmt)))
2680 LOOP_VINFO_OPERANDS_SWAPPED (loop_info) = true;
2682 else
2683 return false;
2686 lhs = gimple_assign_lhs (next_stmt);
2687 next_stmt_info = REDUC_GROUP_NEXT_ELEMENT (next_stmt_info);
2690 /* Save the chain for further analysis in SLP detection. */
2691 stmt_vec_info first_stmt_info
2692 = REDUC_GROUP_FIRST_ELEMENT (current_stmt_info);
2693 LOOP_VINFO_REDUCTION_CHAINS (loop_info).safe_push (first_stmt_info);
2694 REDUC_GROUP_SIZE (first_stmt_info) = size;
2696 return true;
2699 /* Return true if we need an in-order reduction for operation CODE
2700 on type TYPE. NEED_WRAPPING_INTEGRAL_OVERFLOW is true if integer
2701 overflow must wrap. */
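/* For example, a float PLUS_EXPR reduction compiled without
 -fassociative-math must be evaluated strictly in order, whereas float
 MIN_EXPR and MAX_EXPR reductions never need that.  */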
2703 static bool
2704 needs_fold_left_reduction_p (tree type, tree_code code,
2705 bool need_wrapping_integral_overflow)
2707 /* CHECKME: check for !flag_finite_math_only too? */
2708 if (SCALAR_FLOAT_TYPE_P (type))
2709 switch (code)
2711 case MIN_EXPR:
2712 case MAX_EXPR:
2713 return false;
2715 default:
2716 return !flag_associative_math;
2719 if (INTEGRAL_TYPE_P (type))
2721 if (!operation_no_trapping_overflow (type, code))
2722 return true;
2723 if (need_wrapping_integral_overflow
2724 && !TYPE_OVERFLOW_WRAPS (type)
2725 && operation_can_overflow (code))
2726 return true;
2727 return false;
2730 if (SAT_FIXED_POINT_TYPE_P (type))
2731 return true;
2733 return false;
2736 /* Return true if the reduction PHI in LOOP with latch arg LOOP_ARG and
2737 reduction operation CODE has a handled computation expression. */
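/* For example, given a1 = PHI <a0 (preheader), a4 (latch)> and CODE
 PLUS_EXPR, the chain a2 = a1 + x; a3 = a2 + y; a4 = a3 + z is a handled
 path; a statement with a different code or an operand with multiple uses
 makes the path invalid (a MINUS_EXPR mixed into a PLUS_EXPR reduction is
 fine as long as the reduction value is not negated overall).  */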
2739 bool
2740 check_reduction_path (dump_user_location_t loc, loop_p loop, gphi *phi,
2741 tree loop_arg, enum tree_code code)
2743 auto_vec<std::pair<ssa_op_iter, use_operand_p> > path;
2744 auto_bitmap visited;
2745 tree lookfor = PHI_RESULT (phi);
2746 ssa_op_iter curri;
2747 use_operand_p curr = op_iter_init_phiuse (&curri, phi, SSA_OP_USE);
2748 while (USE_FROM_PTR (curr) != loop_arg)
2749 curr = op_iter_next_use (&curri);
2750 curri.i = curri.numops;
2753 path.safe_push (std::make_pair (curri, curr));
2754 tree use = USE_FROM_PTR (curr);
2755 if (use == lookfor)
2756 break;
2757 gimple *def = SSA_NAME_DEF_STMT (use);
2758 if (gimple_nop_p (def)
2759 || ! flow_bb_inside_loop_p (loop, gimple_bb (def)))
2761 pop:
2764 std::pair<ssa_op_iter, use_operand_p> x = path.pop ();
2765 curri = x.first;
2766 curr = x.second;
2768 curr = op_iter_next_use (&curri);
2769 /* Skip already visited or non-SSA operands (from iterating
2770 over PHI args). */
2771 while (curr != NULL_USE_OPERAND_P
2772 && (TREE_CODE (USE_FROM_PTR (curr)) != SSA_NAME
2773 || ! bitmap_set_bit (visited,
2774 SSA_NAME_VERSION
2775 (USE_FROM_PTR (curr)))));
2777 while (curr == NULL_USE_OPERAND_P && ! path.is_empty ());
2778 if (curr == NULL_USE_OPERAND_P)
2779 break;
2781 else
2783 if (gimple_code (def) == GIMPLE_PHI)
2784 curr = op_iter_init_phiuse (&curri, as_a <gphi *>(def), SSA_OP_USE);
2785 else
2786 curr = op_iter_init_use (&curri, def, SSA_OP_USE);
2787 while (curr != NULL_USE_OPERAND_P
2788 && (TREE_CODE (USE_FROM_PTR (curr)) != SSA_NAME
2789 || ! bitmap_set_bit (visited,
2790 SSA_NAME_VERSION
2791 (USE_FROM_PTR (curr)))))
2792 curr = op_iter_next_use (&curri);
2793 if (curr == NULL_USE_OPERAND_P)
2794 goto pop;
2797 while (1);
2798 if (dump_file && (dump_flags & TDF_DETAILS))
2800 dump_printf_loc (MSG_NOTE, loc, "reduction path: ");
2801 unsigned i;
2802 std::pair<ssa_op_iter, use_operand_p> *x;
2803 FOR_EACH_VEC_ELT (path, i, x)
2805 dump_generic_expr (MSG_NOTE, TDF_SLIM, USE_FROM_PTR (x->second));
2806 dump_printf (MSG_NOTE, " ");
2808 dump_printf (MSG_NOTE, "\n");
2811 /* Check whether the reduction path detected is valid. */
2812 bool fail = path.length () == 0;
2813 bool neg = false;
2814 for (unsigned i = 1; i < path.length (); ++i)
2816 gimple *use_stmt = USE_STMT (path[i].second);
2817 tree op = USE_FROM_PTR (path[i].second);
2818 if (! has_single_use (op)
2819 || ! is_gimple_assign (use_stmt))
2821 fail = true;
2822 break;
2824 if (gimple_assign_rhs_code (use_stmt) != code)
2826 if (code == PLUS_EXPR
2827 && gimple_assign_rhs_code (use_stmt) == MINUS_EXPR)
2829 /* Track whether we negate the reduction value each iteration. */
2830 if (gimple_assign_rhs2 (use_stmt) == op)
2831 neg = ! neg;
2833 else
2835 fail = true;
2836 break;
2840 return ! fail && ! neg;
2844 /* Function vect_is_simple_reduction
2846 (1) Detect a cross-iteration def-use cycle that represents a simple
2847 reduction computation. We look for the following pattern:
2849 loop_header:
2850 a1 = phi < a0, a2 >
2851 a3 = ...
2852 a2 = operation (a3, a1)
2856 a3 = ...
2857 loop_header:
2858 a1 = phi < a0, a2 >
2859 a2 = operation (a3, a1)
2861 such that:
2862 1. operation is commutative and associative and it is safe to
2863 change the order of the computation
2864 2. no uses for a2 in the loop (a2 is used out of the loop)
2865 3. no uses of a1 in the loop besides the reduction operation
2866 4. no uses of a1 outside the loop.
2868 Conditions 1,4 are tested here.
2869 Conditions 2,3 are tested in vect_mark_stmts_to_be_vectorized.
2871 (2) Detect a cross-iteration def-use cycle in nested loops, i.e.,
2872 nested cycles.
2874 (3) Detect cycles of phi nodes in outer-loop vectorization, i.e., double
2875 reductions:
2877 a1 = phi < a0, a2 >
2878 inner loop (def of a3)
2879 a2 = phi < a3 >
2881 (4) Detect condition expressions, i.e.:
2882 for (int i = 0; i < N; i++)
2883 if (a[i] < val)
2884 ret_val = a[i];
2888 static stmt_vec_info
2889 vect_is_simple_reduction (loop_vec_info loop_info, stmt_vec_info phi_info,
2890 bool *double_reduc,
2891 bool need_wrapping_integral_overflow,
2892 enum vect_reduction_type *v_reduc_type)
2894 gphi *phi = as_a <gphi *> (phi_info->stmt);
2895 struct loop *loop = (gimple_bb (phi))->loop_father;
2896 struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
2897 gimple *phi_use_stmt = NULL;
2898 enum tree_code orig_code, code;
2899 tree op1, op2, op3 = NULL_TREE, op4 = NULL_TREE;
2900 tree type;
2901 int nloop_uses;
2902 tree name;
2903 imm_use_iterator imm_iter;
2904 use_operand_p use_p;
2905 bool phi_def;
2907 *double_reduc = false;
2908 *v_reduc_type = TREE_CODE_REDUCTION;
2910 tree phi_name = PHI_RESULT (phi);
2911 /* ??? If there are no uses of the PHI result the inner loop reduction
2912 won't be detected as possibly double-reduction by vectorizable_reduction
2913 because that tries to walk the PHI arg from the preheader edge which
2914 can be constant. See PR60382. */
2915 if (has_zero_uses (phi_name))
2916 return NULL;
2917 nloop_uses = 0;
2918 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, phi_name)
2920 gimple *use_stmt = USE_STMT (use_p);
2921 if (is_gimple_debug (use_stmt))
2922 continue;
2924 if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
2926 if (dump_enabled_p ())
2927 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2928 "intermediate value used outside loop.\n");
2930 return NULL;
2933 nloop_uses++;
2934 if (nloop_uses > 1)
2936 if (dump_enabled_p ())
2937 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2938 "reduction value used in loop.\n");
2939 return NULL;
2942 phi_use_stmt = use_stmt;
2945 edge latch_e = loop_latch_edge (loop);
2946 tree loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
2947 if (TREE_CODE (loop_arg) != SSA_NAME)
2949 if (dump_enabled_p ())
2951 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2952 "reduction: not ssa_name: ");
2953 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, loop_arg);
2954 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
2956 return NULL;
2959 stmt_vec_info def_stmt_info = loop_info->lookup_def (loop_arg);
2960 if (!def_stmt_info
2961 || !flow_bb_inside_loop_p (loop, gimple_bb (def_stmt_info->stmt)))
2962 return NULL;
2964 if (gassign *def_stmt = dyn_cast <gassign *> (def_stmt_info->stmt))
2966 name = gimple_assign_lhs (def_stmt);
2967 phi_def = false;
2969 else if (gphi *def_stmt = dyn_cast <gphi *> (def_stmt_info->stmt))
2971 name = PHI_RESULT (def_stmt);
2972 phi_def = true;
2974 else
2976 if (dump_enabled_p ())
2978 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2979 "reduction: unhandled reduction operation: ");
2980 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
2981 def_stmt_info->stmt, 0);
2983 return NULL;
2986 nloop_uses = 0;
2987 auto_vec<gphi *, 3> lcphis;
2988 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
2990 gimple *use_stmt = USE_STMT (use_p);
2991 if (is_gimple_debug (use_stmt))
2992 continue;
2993 if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
2994 nloop_uses++;
2995 else
2996 /* We can have more than one loop-closed PHI. */
2997 lcphis.safe_push (as_a <gphi *> (use_stmt));
2998 if (nloop_uses > 1)
3000 if (dump_enabled_p ())
3001 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3002 "reduction used in loop.\n");
3003 return NULL;
3007 /* If DEF_STMT is a phi node itself, we expect it to have a single argument
3008 defined in the inner loop. */
3009 if (phi_def)
3011 gphi *def_stmt = as_a <gphi *> (def_stmt_info->stmt);
3012 op1 = PHI_ARG_DEF (def_stmt, 0);
3014 if (gimple_phi_num_args (def_stmt) != 1
3015 || TREE_CODE (op1) != SSA_NAME)
3017 if (dump_enabled_p ())
3018 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3019 "unsupported phi node definition.\n");
3021 return NULL;
3024 gimple *def1 = SSA_NAME_DEF_STMT (op1);
3025 if (gimple_bb (def1)
3026 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
3027 && loop->inner
3028 && flow_bb_inside_loop_p (loop->inner, gimple_bb (def1))
3029 && is_gimple_assign (def1)
3030 && is_a <gphi *> (phi_use_stmt)
3031 && flow_bb_inside_loop_p (loop->inner, gimple_bb (phi_use_stmt)))
3033 if (dump_enabled_p ())
3034 report_vect_op (MSG_NOTE, def_stmt,
3035 "detected double reduction: ");
3037 *double_reduc = true;
3038 return def_stmt_info;
3041 return NULL;
3044 /* If we are vectorizing an inner reduction, we execute it in the
3045 original order only when we are not dealing with a double
3046 reduction. */
3047 bool check_reduction = true;
3048 if (flow_loop_nested_p (vect_loop, loop))
3050 gphi *lcphi;
3051 unsigned i;
3052 check_reduction = false;
3053 FOR_EACH_VEC_ELT (lcphis, i, lcphi)
3054 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, gimple_phi_result (lcphi))
3056 gimple *use_stmt = USE_STMT (use_p);
3057 if (is_gimple_debug (use_stmt))
3058 continue;
3059 if (! flow_bb_inside_loop_p (vect_loop, gimple_bb (use_stmt)))
3060 check_reduction = true;
3064 gassign *def_stmt = as_a <gassign *> (def_stmt_info->stmt);
3065 bool nested_in_vect_loop = flow_loop_nested_p (vect_loop, loop);
3066 code = orig_code = gimple_assign_rhs_code (def_stmt);
3068 /* We can handle "res -= x[i]", which is non-associative by
3069 simply rewriting this into "res += -x[i]". Avoid changing
3070 gimple instruction for the first simple tests and only do this
3071 if we're allowed to change code at all. */
3072 if (code == MINUS_EXPR && gimple_assign_rhs2 (def_stmt) != phi_name)
3073 code = PLUS_EXPR;
3075 if (code == COND_EXPR)
3077 if (! nested_in_vect_loop)
3078 *v_reduc_type = COND_REDUCTION;
3080 op3 = gimple_assign_rhs1 (def_stmt);
3081 if (COMPARISON_CLASS_P (op3))
3083 op4 = TREE_OPERAND (op3, 1);
3084 op3 = TREE_OPERAND (op3, 0);
3086 if (op3 == phi_name || op4 == phi_name)
3088 if (dump_enabled_p ())
3089 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
3090 "reduction: condition depends on previous"
3091 " iteration: ");
3092 return NULL;
3095 op1 = gimple_assign_rhs2 (def_stmt);
3096 op2 = gimple_assign_rhs3 (def_stmt);
3098 else if (!commutative_tree_code (code) || !associative_tree_code (code))
3100 if (dump_enabled_p ())
3101 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
3102 "reduction: not commutative/associative: ");
3103 return NULL;
3105 else if (get_gimple_rhs_class (code) == GIMPLE_BINARY_RHS)
3107 op1 = gimple_assign_rhs1 (def_stmt);
3108 op2 = gimple_assign_rhs2 (def_stmt);
3110 else
3112 if (dump_enabled_p ())
3113 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
3114 "reduction: not handled operation: ");
3115 return NULL;
3118 if (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op2) != SSA_NAME)
3120 if (dump_enabled_p ())
3121 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
3122 "reduction: both uses not ssa_names: ");
3124 return NULL;
3127 type = TREE_TYPE (gimple_assign_lhs (def_stmt));
3128 if ((TREE_CODE (op1) == SSA_NAME
3129 && !types_compatible_p (type,TREE_TYPE (op1)))
3130 || (TREE_CODE (op2) == SSA_NAME
3131 && !types_compatible_p (type, TREE_TYPE (op2)))
3132 || (op3 && TREE_CODE (op3) == SSA_NAME
3133 && !types_compatible_p (type, TREE_TYPE (op3)))
3134 || (op4 && TREE_CODE (op4) == SSA_NAME
3135 && !types_compatible_p (type, TREE_TYPE (op4))))
3137 if (dump_enabled_p ())
3139 dump_printf_loc (MSG_NOTE, vect_location,
3140 "reduction: multiple types: operation type: ");
3141 dump_generic_expr (MSG_NOTE, TDF_SLIM, type);
3142 dump_printf (MSG_NOTE, ", operands types: ");
3143 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3144 TREE_TYPE (op1));
3145 dump_printf (MSG_NOTE, ",");
3146 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3147 TREE_TYPE (op2));
3148 if (op3)
3150 dump_printf (MSG_NOTE, ",");
3151 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3152 TREE_TYPE (op3));
3155 if (op4)
3157 dump_printf (MSG_NOTE, ",");
3158 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3159 TREE_TYPE (op4));
3161 dump_printf (MSG_NOTE, "\n");
3164 return NULL;
3167 /* Check whether it's ok to change the order of the computation.
3168 Generally, when vectorizing a reduction we change the order of the
3169 computation. This may change the behavior of the program in some
3170 cases, so we need to check that this is ok. One exception is when
3171 vectorizing an outer-loop: the inner-loop is executed sequentially,
3172 and therefore vectorizing reductions in the inner-loop during
3173 outer-loop vectorization is safe. */
3174 if (check_reduction
3175 && *v_reduc_type == TREE_CODE_REDUCTION
3176 && needs_fold_left_reduction_p (type, code,
3177 need_wrapping_integral_overflow))
3178 *v_reduc_type = FOLD_LEFT_REDUCTION;
3180 /* Reduction is safe. We're dealing with one of the following:
3181 1) integer arithmetic and no trapv
3182 2) floating point arithmetic, and special flags permit this optimization
3183 3) nested cycle (i.e., outer loop vectorization). */
3184 stmt_vec_info def1_info = loop_info->lookup_def (op1);
3185 stmt_vec_info def2_info = loop_info->lookup_def (op2);
3186 if (code != COND_EXPR && !def1_info && !def2_info)
3188 if (dump_enabled_p ())
3189 report_vect_op (MSG_NOTE, def_stmt, "reduction: no defs for operands: ");
3190 return NULL;
3193 /* Check that one def is the reduction def, defined by PHI,
3194 the other def is either defined in the loop ("vect_internal_def"),
3195 or it's an induction (defined by a loop-header phi-node). */
3197 if (def2_info
3198 && def2_info->stmt == phi
3199 && (code == COND_EXPR
3200 || !def1_info
3201 || !flow_bb_inside_loop_p (loop, gimple_bb (def1_info->stmt))
3202 || vect_valid_reduction_input_p (def1_info)))
3204 if (dump_enabled_p ())
3205 report_vect_op (MSG_NOTE, def_stmt, "detected reduction: ");
3206 return def_stmt_info;
3209 if (def1_info
3210 && def1_info->stmt == phi
3211 && (code == COND_EXPR
3212 || !def2_info
3213 || !flow_bb_inside_loop_p (loop, gimple_bb (def2_info->stmt))
3214 || vect_valid_reduction_input_p (def2_info)))
3216 if (! nested_in_vect_loop && orig_code != MINUS_EXPR)
3218 /* Check if we can swap operands (just for simplicity - so that
3219 the rest of the code can assume that the reduction variable
3220 is always the last (second) argument). */
3221 if (code == COND_EXPR)
3223 /* Swap cond_expr by inverting the condition. */
3224 tree cond_expr = gimple_assign_rhs1 (def_stmt);
3225 enum tree_code invert_code = ERROR_MARK;
3226 enum tree_code cond_code = TREE_CODE (cond_expr);
3228 if (TREE_CODE_CLASS (cond_code) == tcc_comparison)
3230 bool honor_nans = HONOR_NANS (TREE_OPERAND (cond_expr, 0));
3231 invert_code = invert_tree_comparison (cond_code, honor_nans);
3233 if (invert_code != ERROR_MARK)
3235 TREE_SET_CODE (cond_expr, invert_code);
3236 swap_ssa_operands (def_stmt,
3237 gimple_assign_rhs2_ptr (def_stmt),
3238 gimple_assign_rhs3_ptr (def_stmt));
3240 else
3242 if (dump_enabled_p ())
3243 report_vect_op (MSG_NOTE, def_stmt,
3244 "detected reduction: cannot swap operands "
3245 "for cond_expr");
3246 return NULL;
3249 else
3250 swap_ssa_operands (def_stmt, gimple_assign_rhs1_ptr (def_stmt),
3251 gimple_assign_rhs2_ptr (def_stmt));
3253 if (dump_enabled_p ())
3254 report_vect_op (MSG_NOTE, def_stmt,
3255 "detected reduction: need to swap operands: ");
3257 if (CONSTANT_CLASS_P (gimple_assign_rhs1 (def_stmt)))
3258 LOOP_VINFO_OPERANDS_SWAPPED (loop_info) = true;
3260 else
3262 if (dump_enabled_p ())
3263 report_vect_op (MSG_NOTE, def_stmt, "detected reduction: ");
3266 return def_stmt_info;
3269 /* Try to find SLP reduction chain. */
3270 if (! nested_in_vect_loop
3271 && code != COND_EXPR
3272 && orig_code != MINUS_EXPR
3273 && vect_is_slp_reduction (loop_info, phi, def_stmt))
3275 if (dump_enabled_p ())
3276 report_vect_op (MSG_NOTE, def_stmt,
3277 "reduction: detected reduction chain: ");
3279 return def_stmt_info;
3282 /* Dissolve a group possibly half-built by vect_is_slp_reduction. */
3283 stmt_vec_info first = REDUC_GROUP_FIRST_ELEMENT (def_stmt_info);
3284 while (first)
3286 stmt_vec_info next = REDUC_GROUP_NEXT_ELEMENT (first);
3287 REDUC_GROUP_FIRST_ELEMENT (first) = NULL;
3288 REDUC_GROUP_NEXT_ELEMENT (first) = NULL;
3289 first = next;
3292 /* Look for the expression computing loop_arg from loop PHI result. */
3293 if (check_reduction_path (vect_location, loop, phi, loop_arg, code))
3294 return def_stmt_info;
3296 if (dump_enabled_p ())
3298 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
3299 "reduction: unknown pattern: ");
3302 return NULL;
3305 /* Wrapper around vect_is_simple_reduction, which will modify code
3306 in-place if it enables detection of more reductions. Arguments
3307 as there. */
3309 stmt_vec_info
3310 vect_force_simple_reduction (loop_vec_info loop_info, stmt_vec_info phi_info,
3311 bool *double_reduc,
3312 bool need_wrapping_integral_overflow)
3314 enum vect_reduction_type v_reduc_type;
3315 stmt_vec_info def_info
3316 = vect_is_simple_reduction (loop_info, phi_info, double_reduc,
3317 need_wrapping_integral_overflow,
3318 &v_reduc_type);
3319 if (def_info)
3321 STMT_VINFO_REDUC_TYPE (phi_info) = v_reduc_type;
3322 STMT_VINFO_REDUC_DEF (phi_info) = def_info;
3323 STMT_VINFO_REDUC_TYPE (def_info) = v_reduc_type;
3324 STMT_VINFO_REDUC_DEF (def_info) = phi_info;
3326 return def_info;
3329 /* Calculate cost of peeling the loop PEEL_ITERS_PROLOGUE times. */
3330 int
3331 vect_get_known_peeling_cost (loop_vec_info loop_vinfo, int peel_iters_prologue,
3332 int *peel_iters_epilogue,
3333 stmt_vector_for_cost *scalar_cost_vec,
3334 stmt_vector_for_cost *prologue_cost_vec,
3335 stmt_vector_for_cost *epilogue_cost_vec)
3337 int retval = 0;
3338 int assumed_vf = vect_vf_for_cost (loop_vinfo);
3340 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
3342 *peel_iters_epilogue = assumed_vf / 2;
3343 if (dump_enabled_p ())
3344 dump_printf_loc (MSG_NOTE, vect_location,
3345 "cost model: epilogue peel iters set to vf/2 "
3346 "because loop iterations are unknown .\n");
3348 /* If peeled iterations are known but the number of scalar loop
3349 iterations is unknown, count a taken branch per peeled loop. */
3350 retval = record_stmt_cost (prologue_cost_vec, 1, cond_branch_taken,
3351 NULL, 0, vect_prologue);
3352 retval = record_stmt_cost (prologue_cost_vec, 1, cond_branch_taken,
3353 NULL, 0, vect_epilogue);
3355 else
3357 int niters = LOOP_VINFO_INT_NITERS (loop_vinfo);
3358 peel_iters_prologue = niters < peel_iters_prologue ?
3359 niters : peel_iters_prologue;
3360 *peel_iters_epilogue = (niters - peel_iters_prologue) % assumed_vf;
3361 /* If we need to peel for gaps, but no peeling is required, we have to
3362 peel VF iterations. */
3363 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) && !*peel_iters_epilogue)
3364 *peel_iters_epilogue = assumed_vf;
3367 stmt_info_for_cost *si;
3368 int j;
3369 if (peel_iters_prologue)
3370 FOR_EACH_VEC_ELT (*scalar_cost_vec, j, si)
3371 retval += record_stmt_cost (prologue_cost_vec,
3372 si->count * peel_iters_prologue,
3373 si->kind, si->stmt_info, si->misalign,
3374 vect_prologue);
3375 if (*peel_iters_epilogue)
3376 FOR_EACH_VEC_ELT (*scalar_cost_vec, j, si)
3377 retval += record_stmt_cost (epilogue_cost_vec,
3378 si->count * *peel_iters_epilogue,
3379 si->kind, si->stmt_info, si->misalign,
3380 vect_epilogue);
3382 return retval;
3385 /* Function vect_estimate_min_profitable_iters
3387 Return the number of iterations required for the vector version of the
3388 loop to be profitable relative to the cost of the scalar version of the
3389 loop.
3391 *RET_MIN_PROFITABLE_NITERS is a cost model profitability threshold
3392 of iterations for vectorization. -1 value means loop vectorization
3393 is not profitable. This returned value may be used for dynamic
3394 profitability check.
3396 *RET_MIN_PROFITABLE_ESTIMATE is a profitability threshold to be used
3397 for static check against estimated number of iterations. */
3399 static void
3400 vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo,
3401 int *ret_min_profitable_niters,
3402 int *ret_min_profitable_estimate)
3404 int min_profitable_iters;
3405 int min_profitable_estimate;
3406 int peel_iters_prologue;
3407 int peel_iters_epilogue;
3408 unsigned vec_inside_cost = 0;
3409 int vec_outside_cost = 0;
3410 unsigned vec_prologue_cost = 0;
3411 unsigned vec_epilogue_cost = 0;
3412 int scalar_single_iter_cost = 0;
3413 int scalar_outside_cost = 0;
3414 int assumed_vf = vect_vf_for_cost (loop_vinfo);
3415 int npeel = LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
3416 void *target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
3418 /* Cost model disabled. */
3419 if (unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
3421 dump_printf_loc (MSG_NOTE, vect_location, "cost model disabled.\n");
3422 *ret_min_profitable_niters = 0;
3423 *ret_min_profitable_estimate = 0;
3424 return;
3427 /* Requires loop versioning tests to handle misalignment. */
3428 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
3430 /* FIXME: Make cost depend on complexity of individual check. */
3431 unsigned len = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).length ();
3432 (void) add_stmt_cost (target_cost_data, len, vector_stmt, NULL, 0,
3433 vect_prologue);
3434 dump_printf (MSG_NOTE,
3435 "cost model: Adding cost of checks for loop "
3436 "versioning to treat misalignment.\n");
3439 /* Requires loop versioning with alias checks. */
3440 if (LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
3442 /* FIXME: Make cost depend on complexity of individual check. */
3443 unsigned len = LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo).length ();
3444 (void) add_stmt_cost (target_cost_data, len, vector_stmt, NULL, 0,
3445 vect_prologue);
3446 len = LOOP_VINFO_CHECK_UNEQUAL_ADDRS (loop_vinfo).length ();
3447 if (len)
3448 /* Count LEN - 1 ANDs and LEN comparisons. */
3449 (void) add_stmt_cost (target_cost_data, len * 2 - 1, scalar_stmt,
3450 NULL, 0, vect_prologue);
3451 len = LOOP_VINFO_LOWER_BOUNDS (loop_vinfo).length ();
3452 if (len)
3454 /* Count LEN - 1 ANDs and LEN comparisons. */
3455 unsigned int nstmts = len * 2 - 1;
3456 /* +1 for each bias that needs adding. */
3457 for (unsigned int i = 0; i < len; ++i)
3458 if (!LOOP_VINFO_LOWER_BOUNDS (loop_vinfo)[i].unsigned_p)
3459 nstmts += 1;
3460 (void) add_stmt_cost (target_cost_data, nstmts, scalar_stmt,
3461 NULL, 0, vect_prologue);
3463 dump_printf (MSG_NOTE,
3464 "cost model: Adding cost of checks for loop "
3465 "versioning aliasing.\n");
3468 /* Requires loop versioning with niter checks. */
3469 if (LOOP_REQUIRES_VERSIONING_FOR_NITERS (loop_vinfo))
3471 /* FIXME: Make cost depend on complexity of individual check. */
3472 (void) add_stmt_cost (target_cost_data, 1, vector_stmt, NULL, 0,
3473 vect_prologue);
3474 dump_printf (MSG_NOTE,
3475 "cost model: Adding cost of checks for loop "
3476 "versioning niters.\n");
3479 if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
3480 (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken, NULL, 0,
3481 vect_prologue);
3483 /* Count statements in scalar loop. Using this as scalar cost for a single
3484 iteration for now.
3486 TODO: Add outer loop support.
3488 TODO: Consider assigning different costs to different scalar
3489 statements. */
3491 scalar_single_iter_cost
3492 = LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST (loop_vinfo);
3494 /* Add additional cost for the peeled instructions in prologue and epilogue
3495 loop. (For fully-masked loops there will be no peeling.)
3497 FORNOW: If we don't know the value of peel_iters for prologue or epilogue
3498 at compile-time - we assume it's vf/2 (the worst would be vf-1).
3500 TODO: Build an expression that represents peel_iters for prologue and
3501 epilogue to be used in a run-time test. */
3503 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
3505 peel_iters_prologue = 0;
3506 peel_iters_epilogue = 0;
3508 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo))
3510 /* We need to peel exactly one iteration. */
3511 peel_iters_epilogue += 1;
3512 stmt_info_for_cost *si;
3513 int j;
3514 FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
3515 j, si)
3516 (void) add_stmt_cost (target_cost_data, si->count,
3517 si->kind, si->stmt_info, si->misalign,
3518 vect_epilogue);
3521 else if (npeel < 0)
3523 peel_iters_prologue = assumed_vf / 2;
3524 dump_printf (MSG_NOTE, "cost model: "
3525 "prologue peel iters set to vf/2.\n");
3527 /* If peeling for alignment is unknown, loop bound of main loop becomes
3528 unknown. */
3529 peel_iters_epilogue = assumed_vf / 2;
3530 dump_printf (MSG_NOTE, "cost model: "
3531 "epilogue peel iters set to vf/2 because "
3532 "peeling for alignment is unknown.\n");
3534 /* If peeled iterations are unknown, count a taken branch and a not taken
3535 branch per peeled loop. Even if scalar loop iterations are known,
3536 vector iterations are not known since peeled prologue iterations are
3537 not known. Hence guards remain the same. */
3538 (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken,
3539 NULL, 0, vect_prologue);
3540 (void) add_stmt_cost (target_cost_data, 1, cond_branch_not_taken,
3541 NULL, 0, vect_prologue);
3542 (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken,
3543 NULL, 0, vect_epilogue);
3544 (void) add_stmt_cost (target_cost_data, 1, cond_branch_not_taken,
3545 NULL, 0, vect_epilogue);
3546 stmt_info_for_cost *si;
3547 int j;
3548 FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo), j, si)
3550 (void) add_stmt_cost (target_cost_data,
3551 si->count * peel_iters_prologue,
3552 si->kind, si->stmt_info, si->misalign,
3553 vect_prologue);
3554 (void) add_stmt_cost (target_cost_data,
3555 si->count * peel_iters_epilogue,
3556 si->kind, si->stmt_info, si->misalign,
3557 vect_epilogue);
3560 else
3562 stmt_vector_for_cost prologue_cost_vec, epilogue_cost_vec;
3563 stmt_info_for_cost *si;
3564 int j;
3565 void *data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
3567 prologue_cost_vec.create (2);
3568 epilogue_cost_vec.create (2);
3569 peel_iters_prologue = npeel;
3571 (void) vect_get_known_peeling_cost (loop_vinfo, peel_iters_prologue,
3572 &peel_iters_epilogue,
3573 &LOOP_VINFO_SCALAR_ITERATION_COST
3574 (loop_vinfo),
3575 &prologue_cost_vec,
3576 &epilogue_cost_vec);
3578 FOR_EACH_VEC_ELT (prologue_cost_vec, j, si)
3579 (void) add_stmt_cost (data, si->count, si->kind, si->stmt_info,
3580 si->misalign, vect_prologue);
3582 FOR_EACH_VEC_ELT (epilogue_cost_vec, j, si)
3583 (void) add_stmt_cost (data, si->count, si->kind, si->stmt_info,
3584 si->misalign, vect_epilogue);
3586 prologue_cost_vec.release ();
3587 epilogue_cost_vec.release ();
3590 /* FORNOW: The scalar outside cost is incremented in one of the
3591 following ways:
3593 1. The vectorizer checks for alignment and aliasing and generates
3594 a condition that allows dynamic vectorization. A cost model
3595 check is ANDED with the versioning condition. Hence scalar code
3596 path now has the added cost of the versioning check.
3598 if (cost > th & versioning_check)
3599 jmp to vector code
3601 Hence run-time scalar is incremented by not-taken branch cost.
3603 2. The vectorizer then checks if a prologue is required. If the
3604 cost model check was not done before during versioning, it has to
3605 be done before the prologue check.
3607 if (cost <= th)
3608 prologue = scalar_iters
3609 if (prologue == 0)
3610 jmp to vector code
3611 else
3612 execute prologue
3613 if (prologue == num_iters)
3614 go to exit
3616 Hence the run-time scalar cost is incremented by a taken branch,
3617 plus a not-taken branch, plus a taken branch cost.
3619 3. The vectorizer then checks if an epilogue is required. If the
3620 cost model check was not done before during prologue check, it
3621 has to be done with the epilogue check.
3623 if (prologue == 0)
3624 jmp to vector code
3625 else
3626 execute prologue
3627 if (prologue == num_iters)
3628 go to exit
3629 vector code:
3630 if ((cost <= th) | (scalar_iters-prologue-epilogue == 0))
3631 jmp to epilogue
3633 Hence the run-time scalar cost should be incremented by 2 taken
3634 branches.
3636 TODO: The back end may reorder the BBs differently and reverse
3637 conditions/branch directions. Change the estimates below to
3638 something more reasonable. */
3640 /* If the number of iterations is known and we do not do versioning, we can
3641 decide whether to vectorize at compile time. Hence the scalar version
3642 does not carry cost model guard costs. */
3643 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
3644 || LOOP_REQUIRES_VERSIONING (loop_vinfo))
3646 /* Cost model check occurs at versioning. */
3647 if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
3648 scalar_outside_cost += vect_get_stmt_cost (cond_branch_not_taken);
3649 else
3651 /* Cost model check occurs at prologue generation. */
3652 if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0)
3653 scalar_outside_cost += 2 * vect_get_stmt_cost (cond_branch_taken)
3654 + vect_get_stmt_cost (cond_branch_not_taken);
3655 /* Cost model check occurs at epilogue generation. */
3656 else
3657 scalar_outside_cost += 2 * vect_get_stmt_cost (cond_branch_taken);
3661 /* Complete the target-specific cost calculations. */
3662 finish_cost (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo), &vec_prologue_cost,
3663 &vec_inside_cost, &vec_epilogue_cost);
3665 vec_outside_cost = (int)(vec_prologue_cost + vec_epilogue_cost);
3667 if (dump_enabled_p ())
3669 dump_printf_loc (MSG_NOTE, vect_location, "Cost model analysis: \n");
3670 dump_printf (MSG_NOTE, " Vector inside of loop cost: %d\n",
3671 vec_inside_cost);
3672 dump_printf (MSG_NOTE, " Vector prologue cost: %d\n",
3673 vec_prologue_cost);
3674 dump_printf (MSG_NOTE, " Vector epilogue cost: %d\n",
3675 vec_epilogue_cost);
3676 dump_printf (MSG_NOTE, " Scalar iteration cost: %d\n",
3677 scalar_single_iter_cost);
3678 dump_printf (MSG_NOTE, " Scalar outside cost: %d\n",
3679 scalar_outside_cost);
3680 dump_printf (MSG_NOTE, " Vector outside cost: %d\n",
3681 vec_outside_cost);
3682 dump_printf (MSG_NOTE, " prologue iterations: %d\n",
3683 peel_iters_prologue);
3684 dump_printf (MSG_NOTE, " epilogue iterations: %d\n",
3685 peel_iters_epilogue);
3688 /* Calculate number of iterations required to make the vector version
3689 profitable, relative to the loop bodies only. The following condition
3690 must hold true:
3691 SIC * niters + SOC > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC
3692 where
3693 SIC = scalar iteration cost, VIC = vector iteration cost,
3694 VOC = vector outside cost, VF = vectorization factor,
3695 PL_ITERS = prologue iterations, EP_ITERS= epilogue iterations
3696 SOC = scalar outside cost for run time cost model check. */
3698 if ((scalar_single_iter_cost * assumed_vf) > (int) vec_inside_cost)
3700 min_profitable_iters = ((vec_outside_cost - scalar_outside_cost)
3701 * assumed_vf
3702 - vec_inside_cost * peel_iters_prologue
3703 - vec_inside_cost * peel_iters_epilogue);
3704 if (min_profitable_iters <= 0)
3705 min_profitable_iters = 0;
3706 else
3708 min_profitable_iters /= ((scalar_single_iter_cost * assumed_vf)
3709 - vec_inside_cost);
3711 if ((scalar_single_iter_cost * assumed_vf * min_profitable_iters)
3712 <= (((int) vec_inside_cost * min_profitable_iters)
3713 + (((int) vec_outside_cost - scalar_outside_cost)
3714 * assumed_vf)))
3715 min_profitable_iters++;
3718 /* vector version will never be profitable. */
3719 else
3721 if (LOOP_VINFO_LOOP (loop_vinfo)->force_vectorize)
3722 warning_at (vect_location.get_location_t (), OPT_Wopenmp_simd,
3723 "vectorization did not happen for a simd loop");
3725 if (dump_enabled_p ())
3726 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3727 "cost model: the vector iteration cost = %d "
3728 "divided by the scalar iteration cost = %d "
3729 "is greater or equal to the vectorization factor = %d"
3730 ".\n",
3731 vec_inside_cost, scalar_single_iter_cost, assumed_vf);
3732 *ret_min_profitable_niters = -1;
3733 *ret_min_profitable_estimate = -1;
3734 return;
3737 dump_printf (MSG_NOTE,
3738 " Calculated minimum iters for profitability: %d\n",
3739 min_profitable_iters);
3741 if (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
3742 && min_profitable_iters < (assumed_vf + peel_iters_prologue))
3743 /* We want the vectorized loop to execute at least once. */
3744 min_profitable_iters = assumed_vf + peel_iters_prologue;
3746 if (dump_enabled_p ())
3747 dump_printf_loc (MSG_NOTE, vect_location,
3748 " Runtime profitability threshold = %d\n",
3749 min_profitable_iters);
3751 *ret_min_profitable_niters = min_profitable_iters;
3753 /* Calculate number of iterations required to make the vector version
3754 profitable, relative to the loop bodies only.
3756 The non-vectorized variant costs SIC * niters and it must win over the vector
3757 variant on the expected loop trip count. The following condition must hold true:
3758 SIC * niters > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC + SOC */
3760 if (vec_outside_cost <= 0)
3761 min_profitable_estimate = 0;
3762 else
3764 min_profitable_estimate = ((vec_outside_cost + scalar_outside_cost)
3765 * assumed_vf
3766 - vec_inside_cost * peel_iters_prologue
3767 - vec_inside_cost * peel_iters_epilogue)
3768 / ((scalar_single_iter_cost * assumed_vf)
3769 - vec_inside_cost);
3771 min_profitable_estimate = MAX (min_profitable_estimate, min_profitable_iters);
3772 if (dump_enabled_p ())
3773 dump_printf_loc (MSG_NOTE, vect_location,
3774 " Static estimate profitability threshold = %d\n",
3775 min_profitable_estimate);
3777 *ret_min_profitable_estimate = min_profitable_estimate;
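/* Illustrative aside, not part of the GCC sources: a minimal standalone
   sketch of the runtime threshold arithmetic computed above, using made-up
   cost numbers.  The helper name and all sample values below are
   hypothetical; the sketch only mirrors how min_profitable_iters is derived
   from SIC, VIC, VOC, SOC, VF and the peel iteration counts, and assumes
   SIC * VF > VIC as in the profitable branch above.  */

#include <stdio.h>

static int
example_min_profitable_iters (int sic, int vic, int voc, int soc,
                              int vf, int pl_iters, int ep_iters)
{
  /* Numerator of the rearranged condition
     SIC * niters + SOC > VIC * ((niters - PL - EP) / VF) + VOC.  */
  int numer = (voc - soc) * vf - vic * pl_iters - vic * ep_iters;
  if (numer <= 0)
    return 0;
  int iters = numer / (sic * vf - vic);
  /* Single rounding increment, as in the code above: bump the result if the
     scalar cost at ITERS does not yet exceed the vector cost.  */
  if (sic * vf * iters <= vic * iters + (voc - soc) * vf)
    iters++;
  return iters;
}

int
main (void)
{
  /* Hypothetical costs: SIC = 4, VIC = 6, VOC = 20, SOC = 2, VF = 4,
     one prologue and one epilogue peel iteration.  Prints 7.  */
  printf ("threshold = %d\n",
          example_min_profitable_iters (4, 6, 20, 2, 4, 1, 1));
  return 0;
}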
3780 /* Writes into SEL a mask for a vec_perm, equivalent to a vec_shr by OFFSET
3781 vector elements (not bits) for a vector with NELT elements. */
3782 static void
3783 calc_vec_perm_mask_for_shift (unsigned int offset, unsigned int nelt,
3784 vec_perm_builder *sel)
3786 /* The encoding is a single stepped pattern. Any wrap-around is handled
3787 by vec_perm_indices. */
3788 sel->new_vector (nelt, 1, 3);
3789 for (unsigned int i = 0; i < 3; i++)
3790 sel->quick_push (i + offset);
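/* Illustrative aside, not part of the GCC sources: the three elements pushed
   above are a single stepped pattern that vec_perm_indices extends to the
   full vector length, i.e. element I of the mask is I + OFFSET.  The
   standalone sketch below (hypothetical helper name) prints the expanded
   mask; indices >= NELT select from the second input vector, which the
   shift-based reduction later in this file supplies as a zero vector.  */

#include <stdio.h>

static void
print_shift_mask (unsigned int offset, unsigned int nelt)
{
  for (unsigned int i = 0; i < nelt; i++)
    printf ("%u%s", i + offset, i + 1 < nelt ? "," : "\n");
}

int
main (void)
{
  print_shift_mask (2, 8);  /* prints 2,3,4,5,6,7,8,9 */
  return 0;
}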
3793 /* Checks whether the target supports whole-vector shifts for vectors of mode
3794 MODE. This is the case if _either_ the platform handles vec_shr_optab, _or_
3795 it supports vec_perm_const with masks for all necessary shift amounts. */
3796 static bool
3797 have_whole_vector_shift (machine_mode mode)
3799 if (optab_handler (vec_shr_optab, mode) != CODE_FOR_nothing)
3800 return true;
3802 /* Variable-length vectors should be handled via the optab. */
3803 unsigned int nelt;
3804 if (!GET_MODE_NUNITS (mode).is_constant (&nelt))
3805 return false;
3807 vec_perm_builder sel;
3808 vec_perm_indices indices;
3809 for (unsigned int i = nelt / 2; i >= 1; i /= 2)
3811 calc_vec_perm_mask_for_shift (i, nelt, &sel);
3812 indices.new_vector (sel, 2, nelt);
3813 if (!can_vec_perm_const_p (mode, indices, false))
3814 return false;
3816 return true;
3819 /* TODO: Close dependency between vect_model_*_cost and vectorizable_*
3820 functions. Design better to avoid maintenance issues. */
3822 /* Function vect_model_reduction_cost.
3824 Models cost for a reduction operation, including the vector ops
3825 generated within the strip-mine loop, the initial definition before
3826 the loop, and the epilogue code that must be generated. */
3828 static void
3829 vect_model_reduction_cost (stmt_vec_info stmt_info, internal_fn reduc_fn,
3830 int ncopies, stmt_vector_for_cost *cost_vec)
3832 int prologue_cost = 0, epilogue_cost = 0, inside_cost;
3833 enum tree_code code;
3834 optab optab;
3835 tree vectype;
3836 machine_mode mode;
3837 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3838 struct loop *loop = NULL;
3840 if (loop_vinfo)
3841 loop = LOOP_VINFO_LOOP (loop_vinfo);
3843 /* Condition reductions generate two reductions in the loop. */
3844 vect_reduction_type reduction_type
3845 = STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info);
3846 if (reduction_type == COND_REDUCTION)
3847 ncopies *= 2;
3849 vectype = STMT_VINFO_VECTYPE (stmt_info);
3850 mode = TYPE_MODE (vectype);
3851 stmt_vec_info orig_stmt_info = vect_orig_stmt (stmt_info);
3853 code = gimple_assign_rhs_code (orig_stmt_info->stmt);
3855 if (reduction_type == EXTRACT_LAST_REDUCTION
3856 || reduction_type == FOLD_LEFT_REDUCTION)
3858 /* No extra instructions needed in the prologue. */
3859 prologue_cost = 0;
3861 if (reduction_type == EXTRACT_LAST_REDUCTION || reduc_fn != IFN_LAST)
3862 /* Count one reduction-like operation per vector. */
3863 inside_cost = record_stmt_cost (cost_vec, ncopies, vec_to_scalar,
3864 stmt_info, 0, vect_body);
3865 else
3867 /* Use NELEMENTS extracts and NELEMENTS scalar ops. */
3868 unsigned int nelements = ncopies * vect_nunits_for_cost (vectype);
3869 inside_cost = record_stmt_cost (cost_vec, nelements,
3870 vec_to_scalar, stmt_info, 0,
3871 vect_body);
3872 inside_cost += record_stmt_cost (cost_vec, nelements,
3873 scalar_stmt, stmt_info, 0,
3874 vect_body);
3877 else
3879 /* Add in cost for initial definition.
3880 For cond reduction we have four vectors: initial index, step,
3881 initial result of the data reduction, initial value of the index
3882 reduction. */
3883 int prologue_stmts = reduction_type == COND_REDUCTION ? 4 : 1;
3884 prologue_cost += record_stmt_cost (cost_vec, prologue_stmts,
3885 scalar_to_vec, stmt_info, 0,
3886 vect_prologue);
3888 /* Cost of reduction op inside loop. */
3889 inside_cost = record_stmt_cost (cost_vec, ncopies, vector_stmt,
3890 stmt_info, 0, vect_body);
3893 /* Determine cost of epilogue code.
3895 We have a reduction operator that will reduce the vector in one statement.
3896 Also requires scalar extract. */
3898 if (!loop || !nested_in_vect_loop_p (loop, orig_stmt_info))
3900 if (reduc_fn != IFN_LAST)
3902 if (reduction_type == COND_REDUCTION)
3904 /* An EQ stmt and a COND_EXPR stmt. */
3905 epilogue_cost += record_stmt_cost (cost_vec, 2,
3906 vector_stmt, stmt_info, 0,
3907 vect_epilogue);
3908 /* Reduction of the max index and a reduction of the found
3909 values. */
3910 epilogue_cost += record_stmt_cost (cost_vec, 2,
3911 vec_to_scalar, stmt_info, 0,
3912 vect_epilogue);
3913 /* A broadcast of the max value. */
3914 epilogue_cost += record_stmt_cost (cost_vec, 1,
3915 scalar_to_vec, stmt_info, 0,
3916 vect_epilogue);
3918 else
3920 epilogue_cost += record_stmt_cost (cost_vec, 1, vector_stmt,
3921 stmt_info, 0, vect_epilogue);
3922 epilogue_cost += record_stmt_cost (cost_vec, 1,
3923 vec_to_scalar, stmt_info, 0,
3924 vect_epilogue);
3927 else if (reduction_type == COND_REDUCTION)
3929 unsigned estimated_nunits = vect_nunits_for_cost (vectype);
3930 /* Extraction of scalar elements. */
3931 epilogue_cost += record_stmt_cost (cost_vec,
3932 2 * estimated_nunits,
3933 vec_to_scalar, stmt_info, 0,
3934 vect_epilogue);
3935 /* Scalar max reductions via COND_EXPR / MAX_EXPR. */
3936 epilogue_cost += record_stmt_cost (cost_vec,
3937 2 * estimated_nunits - 3,
3938 scalar_stmt, stmt_info, 0,
3939 vect_epilogue);
3941 else if (reduction_type == EXTRACT_LAST_REDUCTION
3942 || reduction_type == FOLD_LEFT_REDUCTION)
3943 /* No extra instructions needed in the epilogue. */
3945 else
3947 int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype));
3948 tree bitsize =
3949 TYPE_SIZE (TREE_TYPE (gimple_assign_lhs (orig_stmt_info->stmt)));
3950 int element_bitsize = tree_to_uhwi (bitsize);
3951 int nelements = vec_size_in_bits / element_bitsize;
3953 if (code == COND_EXPR)
3954 code = MAX_EXPR;
3956 optab = optab_for_tree_code (code, vectype, optab_default);
3958 /* We have a whole vector shift available. */
3959 if (optab != unknown_optab
3960 && VECTOR_MODE_P (mode)
3961 && optab_handler (optab, mode) != CODE_FOR_nothing
3962 && have_whole_vector_shift (mode))
3964 /* Final reduction via vector shifts and the reduction operator.
3965 Also requires scalar extract. */
3966 epilogue_cost += record_stmt_cost (cost_vec,
3967 exact_log2 (nelements) * 2,
3968 vector_stmt, stmt_info, 0,
3969 vect_epilogue);
3970 epilogue_cost += record_stmt_cost (cost_vec, 1,
3971 vec_to_scalar, stmt_info, 0,
3972 vect_epilogue);
3974 else
3975 /* Use extracts and reduction op for final reduction. For N
3976 elements, we have N extracts and N-1 reduction ops. */
3977 epilogue_cost += record_stmt_cost (cost_vec,
3978 nelements + nelements - 1,
3979 vector_stmt, stmt_info, 0,
3980 vect_epilogue);
3984 if (dump_enabled_p ())
3985 dump_printf (MSG_NOTE,
3986 "vect_model_reduction_cost: inside_cost = %d, "
3987 "prologue_cost = %d, epilogue_cost = %d .\n", inside_cost,
3988 prologue_cost, epilogue_cost);
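/* Illustrative aside, not part of the GCC sources: for a COND_REDUCTION with
   no direct reduction IFN, the epilogue cost recorded above is 2 * NUNITS
   vec_to_scalar extracts plus 2 * NUNITS - 3 scalar statements.  The
   hypothetical helper below merely restates that arithmetic for a concrete
   element count.  */

#include <stdio.h>

static void
cond_reduction_epilogue_counts (unsigned int nunits, unsigned int *extracts,
                                unsigned int *scalar_ops)
{
  *extracts = 2 * nunits;        /* index and data element extractions */
  *scalar_ops = 2 * nunits - 3;  /* MAX_EXPR/COND_EXPR chain over the lanes */
}

int
main (void)
{
  unsigned int extracts, scalar_ops;
  cond_reduction_epilogue_counts (4, &extracts, &scalar_ops);
  printf ("extracts = %u, scalar ops = %u\n", extracts, scalar_ops);  /* 8, 5 */
  return 0;
}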
3992 /* Function vect_model_induction_cost.
3994 Models cost for induction operations. */
3996 static void
3997 vect_model_induction_cost (stmt_vec_info stmt_info, int ncopies,
3998 stmt_vector_for_cost *cost_vec)
4000 unsigned inside_cost, prologue_cost;
4002 if (PURE_SLP_STMT (stmt_info))
4003 return;
4005 /* loop cost for vec_loop. */
4006 inside_cost = record_stmt_cost (cost_vec, ncopies, vector_stmt,
4007 stmt_info, 0, vect_body);
4009 /* prologue cost for vec_init and vec_step. */
4010 prologue_cost = record_stmt_cost (cost_vec, 2, scalar_to_vec,
4011 stmt_info, 0, vect_prologue);
4013 if (dump_enabled_p ())
4014 dump_printf_loc (MSG_NOTE, vect_location,
4015 "vect_model_induction_cost: inside_cost = %d, "
4016 "prologue_cost = %d .\n", inside_cost, prologue_cost);
4021 /* Function get_initial_def_for_reduction
4023 Input:
4024 STMT_VINFO - a stmt that performs a reduction operation in the loop.
4025 INIT_VAL - the initial value of the reduction variable
4027 Output:
4028 ADJUSTMENT_DEF - a tree that holds a value to be added to the final result
4029 of the reduction (used for adjusting the epilog - see below).
4030 Return a vector variable, initialized according to the operation that
4031 STMT_VINFO performs. This vector will be used as the initial value
4032 of the vector of partial results.
4034 Option1 (adjust in epilog): Initialize the vector as follows:
4035 add/bit or/xor: [0,0,...,0,0]
4036 mult/bit and: [1,1,...,1,1]
4037 min/max/cond_expr: [init_val,init_val,..,init_val,init_val]
4038 and when necessary (e.g. add/mult case) let the caller know
4039 that it needs to adjust the result by init_val.
4041 Option2: Initialize the vector as follows:
4042 add/bit or/xor: [init_val,0,0,...,0]
4043 mult/bit and: [init_val,1,1,...,1]
4044 min/max/cond_expr: [init_val,init_val,...,init_val]
4045 and no adjustments are needed.
4047 For example, for the following code:
4049 s = init_val;
4050 for (i=0;i<n;i++)
4051 s = s + a[i];
4053 STMT_VINFO is 's = s + a[i]', and the reduction variable is 's'.
4054 For a vector of 4 units, we want to return either [0,0,0,init_val],
4055 or [0,0,0,0] and let the caller know that it needs to adjust
4056 the result at the end by 'init_val'.
4058 FORNOW, we use Option1 (the 'adjust in epilog' scheme) when ADJUSTMENT_DEF
4059 is not NULL, because this way the initialization vector is simpler (same
4060 element in all entries), and Option2 otherwise.
4062 A cost model should help decide between these two schemes. */
4064 tree
4065 get_initial_def_for_reduction (stmt_vec_info stmt_vinfo, tree init_val,
4066 tree *adjustment_def)
4068 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
4069 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
4070 tree scalar_type = TREE_TYPE (init_val);
4071 tree vectype = get_vectype_for_scalar_type (scalar_type);
4072 enum tree_code code = gimple_assign_rhs_code (stmt_vinfo->stmt);
4073 tree def_for_init;
4074 tree init_def;
4075 REAL_VALUE_TYPE real_init_val = dconst0;
4076 int int_init_val = 0;
4077 gimple_seq stmts = NULL;
4079 gcc_assert (vectype);
4081 gcc_assert (POINTER_TYPE_P (scalar_type) || INTEGRAL_TYPE_P (scalar_type)
4082 || SCALAR_FLOAT_TYPE_P (scalar_type));
4084 gcc_assert (nested_in_vect_loop_p (loop, stmt_vinfo)
4085 || loop == (gimple_bb (stmt_vinfo->stmt))->loop_father);
4087 vect_reduction_type reduction_type
4088 = STMT_VINFO_VEC_REDUCTION_TYPE (stmt_vinfo);
4090 switch (code)
4092 case WIDEN_SUM_EXPR:
4093 case DOT_PROD_EXPR:
4094 case SAD_EXPR:
4095 case PLUS_EXPR:
4096 case MINUS_EXPR:
4097 case BIT_IOR_EXPR:
4098 case BIT_XOR_EXPR:
4099 case MULT_EXPR:
4100 case BIT_AND_EXPR:
4102 /* ADJUSTMENT_DEF is NULL when called from
4103 vect_create_epilog_for_reduction to vectorize double reduction. */
4104 if (adjustment_def)
4105 *adjustment_def = init_val;
4107 if (code == MULT_EXPR)
4109 real_init_val = dconst1;
4110 int_init_val = 1;
4113 if (code == BIT_AND_EXPR)
4114 int_init_val = -1;
4116 if (SCALAR_FLOAT_TYPE_P (scalar_type))
4117 def_for_init = build_real (scalar_type, real_init_val);
4118 else
4119 def_for_init = build_int_cst (scalar_type, int_init_val);
4121 if (adjustment_def)
4122 /* Option1: the first element is '0' or '1' as well. */
4123 init_def = gimple_build_vector_from_val (&stmts, vectype,
4124 def_for_init);
4125 else if (!TYPE_VECTOR_SUBPARTS (vectype).is_constant ())
4127 /* Option2 (variable length): the first element is INIT_VAL. */
4128 init_def = gimple_build_vector_from_val (&stmts, vectype,
4129 def_for_init);
4130 init_def = gimple_build (&stmts, CFN_VEC_SHL_INSERT,
4131 vectype, init_def, init_val);
4133 else
4135 /* Option2: the first element is INIT_VAL. */
4136 tree_vector_builder elts (vectype, 1, 2);
4137 elts.quick_push (init_val);
4138 elts.quick_push (def_for_init);
4139 init_def = gimple_build_vector (&stmts, &elts);
4142 break;
4144 case MIN_EXPR:
4145 case MAX_EXPR:
4146 case COND_EXPR:
4148 if (adjustment_def)
4150 *adjustment_def = NULL_TREE;
4151 if (reduction_type != COND_REDUCTION
4152 && reduction_type != EXTRACT_LAST_REDUCTION)
4154 init_def = vect_get_vec_def_for_operand (init_val, stmt_vinfo);
4155 break;
4158 init_val = gimple_convert (&stmts, TREE_TYPE (vectype), init_val);
4159 init_def = gimple_build_vector_from_val (&stmts, vectype, init_val);
4161 break;
4163 default:
4164 gcc_unreachable ();
4167 if (stmts)
4168 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
4169 return init_def;
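/* Illustrative aside, not part of the GCC sources: a standalone sketch of
   the two initialization schemes documented above, for a 4-lane sum
   reduction with init_val = 5.  Option1 fills every lane with the neutral
   element and asks the caller to add init_val back in the epilogue;
   Option2 keeps init_val in one lane (shown here as lane 0) and needs no
   adjustment.  The array-based helpers are hypothetical stand-ins for the
   vectors built with gimple_build_vector*.  */

#include <stdio.h>

#define NLANES 4

static void
option1_sum_init (int init_val, int vec[NLANES], int *adjustment)
{
  for (int i = 0; i < NLANES; i++)
    vec[i] = 0;               /* neutral element of addition in every lane */
  *adjustment = init_val;     /* added back to the reduced result later */
}

static void
option2_sum_init (int init_val, int vec[NLANES])
{
  vec[0] = init_val;          /* initial value kept in one lane */
  for (int i = 1; i < NLANES; i++)
    vec[i] = 0;               /* neutral element elsewhere */
}

int
main (void)
{
  int v1[NLANES], v2[NLANES], adj;
  option1_sum_init (5, v1, &adj);
  option2_sum_init (5, v2);
  printf ("option1: {%d,%d,%d,%d}, adjust by %d\n",
          v1[0], v1[1], v1[2], v1[3], adj);
  printf ("option2: {%d,%d,%d,%d}\n", v2[0], v2[1], v2[2], v2[3]);
  return 0;
}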
4172 /* Get at the initial defs for the reduction PHIs in SLP_NODE.
4173 NUMBER_OF_VECTORS is the number of vector defs to create.
4174 If NEUTRAL_OP is nonnull, introducing extra elements of that
4175 value will not change the result. */
4177 static void
4178 get_initial_defs_for_reduction (slp_tree slp_node,
4179 vec<tree> *vec_oprnds,
4180 unsigned int number_of_vectors,
4181 bool reduc_chain, tree neutral_op)
4183 vec<stmt_vec_info> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
4184 stmt_vec_info stmt_vinfo = stmts[0];
4185 unsigned HOST_WIDE_INT nunits;
4186 unsigned j, number_of_places_left_in_vector;
4187 tree vector_type;
4188 tree vop;
4189 int group_size = stmts.length ();
4190 unsigned int vec_num, i;
4191 unsigned number_of_copies = 1;
4192 vec<tree> voprnds;
4193 voprnds.create (number_of_vectors);
4194 struct loop *loop;
4195 auto_vec<tree, 16> permute_results;
4197 vector_type = STMT_VINFO_VECTYPE (stmt_vinfo);
4199 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def);
4201 loop = (gimple_bb (stmt_vinfo->stmt))->loop_father;
4202 gcc_assert (loop);
4203 edge pe = loop_preheader_edge (loop);
4205 gcc_assert (!reduc_chain || neutral_op);
4207 /* NUMBER_OF_COPIES is the number of times we need to use the same values in
4208 created vectors. It is greater than 1 if unrolling is performed.
4210 For example, we have two scalar operands, s1 and s2 (e.g., group of
4211 strided accesses of size two), while NUNITS is four (i.e., four scalars
4212 of this type can be packed in a vector). The output vector will contain
4213 two copies of each scalar operand: {s1, s2, s1, s2}. (NUMBER_OF_COPIES
4214 will be 2).
4216 If REDUC_GROUP_SIZE > NUNITS, the scalars will be split into several
4217 vectors containing the operands.
4219 For example, NUNITS is four as before, and the group size is 8
4220 (s1, s2, ..., s8). We will create two vectors {s1, s2, s3, s4} and
4221 {s5, s6, s7, s8}. */
4223 if (!TYPE_VECTOR_SUBPARTS (vector_type).is_constant (&nunits))
4224 nunits = group_size;
4226 number_of_copies = nunits * number_of_vectors / group_size;
4228 number_of_places_left_in_vector = nunits;
4229 bool constant_p = true;
4230 tree_vector_builder elts (vector_type, nunits, 1);
4231 elts.quick_grow (nunits);
4232 for (j = 0; j < number_of_copies; j++)
4234 for (i = group_size - 1; stmts.iterate (i, &stmt_vinfo); i--)
4236 tree op;
4237 /* Get the def before the loop. In a reduction chain we have only
4238 one initial value. */
4239 if ((j != (number_of_copies - 1)
4240 || (reduc_chain && i != 0))
4241 && neutral_op)
4242 op = neutral_op;
4243 else
4244 op = PHI_ARG_DEF_FROM_EDGE (stmt_vinfo->stmt, pe);
4246 /* Create 'vect_ = {op0,op1,...,opn}'. */
4247 number_of_places_left_in_vector--;
4248 elts[number_of_places_left_in_vector] = op;
4249 if (!CONSTANT_CLASS_P (op))
4250 constant_p = false;
4252 if (number_of_places_left_in_vector == 0)
4254 gimple_seq ctor_seq = NULL;
4255 tree init;
4256 if (constant_p && !neutral_op
4257 ? multiple_p (TYPE_VECTOR_SUBPARTS (vector_type), nunits)
4258 : known_eq (TYPE_VECTOR_SUBPARTS (vector_type), nunits))
4259 /* Build the vector directly from ELTS. */
4260 init = gimple_build_vector (&ctor_seq, &elts);
4261 else if (neutral_op)
4263 /* Build a vector of the neutral value and shift the
4264 other elements into place. */
4265 init = gimple_build_vector_from_val (&ctor_seq, vector_type,
4266 neutral_op);
4267 int k = nunits;
4268 while (k > 0 && elts[k - 1] == neutral_op)
4269 k -= 1;
4270 while (k > 0)
4272 k -= 1;
4273 init = gimple_build (&ctor_seq, CFN_VEC_SHL_INSERT,
4274 vector_type, init, elts[k]);
4277 else
4279 /* First time round, duplicate ELTS to fill the
4280 required number of vectors, then cherry pick the
4281 appropriate result for each iteration. */
4282 if (vec_oprnds->is_empty ())
4283 duplicate_and_interleave (&ctor_seq, vector_type, elts,
4284 number_of_vectors,
4285 permute_results);
4286 init = permute_results[number_of_vectors - j - 1];
4288 if (ctor_seq != NULL)
4289 gsi_insert_seq_on_edge_immediate (pe, ctor_seq);
4290 voprnds.quick_push (init);
4292 number_of_places_left_in_vector = nunits;
4293 elts.new_vector (vector_type, nunits, 1);
4294 elts.quick_grow (nunits);
4295 constant_p = true;
4300 /* Since the vectors are created in the reverse order, we should invert
4301 them. */
4302 vec_num = voprnds.length ();
4303 for (j = vec_num; j != 0; j--)
4305 vop = voprnds[j - 1];
4306 vec_oprnds->quick_push (vop);
4309 voprnds.release ();
4311 /* In case that VF is greater than the unrolling factor needed for the SLP
4312 group of stmts, NUMBER_OF_VECTORS to be created is greater than
4313 NUMBER_OF_SCALARS/NUNITS or NUNITS/NUMBER_OF_SCALARS, and hence we have
4314 to replicate the vectors. */
4315 tree neutral_vec = NULL;
4316 while (number_of_vectors > vec_oprnds->length ())
4318 if (neutral_op)
4320 if (!neutral_vec)
4322 gimple_seq ctor_seq = NULL;
4323 neutral_vec = gimple_build_vector_from_val
4324 (&ctor_seq, vector_type, neutral_op);
4325 if (ctor_seq != NULL)
4326 gsi_insert_seq_on_edge_immediate (pe, ctor_seq);
4328 vec_oprnds->quick_push (neutral_vec);
4330 else
4332 for (i = 0; vec_oprnds->iterate (i, &vop) && i < vec_num; i++)
4333 vec_oprnds->quick_push (vop);
4339 /* Function vect_create_epilog_for_reduction
4341 Create code at the loop-epilog to finalize the result of a reduction
4342 computation.
4344 VECT_DEFS is list of vector of partial results, i.e., the lhs's of vector
4345 reduction statements.
4346 STMT_INFO is the scalar reduction stmt that is being vectorized.
4347 NCOPIES is > 1 in case the vectorization factor (VF) is bigger than the
4348 number of elements that we can fit in a vectype (nunits). In this case
4349 we have to generate more than one vector stmt - i.e - we need to "unroll"
4350 the vector stmt by a factor VF/nunits. For more details see documentation
4351 in vectorizable_operation.
4352 REDUC_FN is the internal function for the epilog reduction.
4353 REDUCTION_PHIS is a list of the phi-nodes that carry the reduction
4354 computation.
4355 REDUC_INDEX is the index of the operand in the right hand side of the
4356 statement that is defined by REDUCTION_PHI.
4357 DOUBLE_REDUC is TRUE if double reduction phi nodes should be handled.
4358 SLP_NODE is an SLP node containing a group of reduction statements. The
4359 first one in this group is STMT_INFO.
4360 INDUC_VAL is for INTEGER_INDUC_COND_REDUCTION the value to use for the case
4361 when the COND_EXPR is never true in the loop. For MAX_EXPR, it needs to
4362 be smaller than any value of the IV in the loop, for MIN_EXPR larger than
4363 any value of the IV in the loop.
4364 INDUC_CODE is the code for epilog reduction if INTEGER_INDUC_COND_REDUCTION.
4365 NEUTRAL_OP is the value given by neutral_op_for_slp_reduction; it is
4366 null if this is not an SLP reduction
4368 This function:
4369 1. Creates the reduction def-use cycles: sets the arguments for
4370 REDUCTION_PHIS:
4371 The loop-entry argument is the vectorized initial-value of the reduction.
4372 The loop-latch argument is taken from VECT_DEFS - the vector of partial
4373 sums.
4374 2. "Reduces" each vector of partial results VECT_DEFS into a single result,
4375 by calling the function specified by REDUC_FN if available, or by
4376 other means (whole-vector shifts or a scalar loop).
4377 The function also creates a new phi node at the loop exit to preserve
4378 loop-closed form, as illustrated below.
4380 The flow at the entry to this function:
4382 loop:
4383 vec_def = phi <null, null> # REDUCTION_PHI
4384 VECT_DEF = vector_stmt # vectorized form of STMT_INFO
4385 s_loop = scalar_stmt # (scalar) STMT_INFO
4386 loop_exit:
4387 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
4388 use <s_out0>
4389 use <s_out0>
4391 The above is transformed by this function into:
4393 loop:
4394 vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI
4395 VECT_DEF = vector_stmt # vectorized form of STMT_INFO
4396 s_loop = scalar_stmt # (scalar) STMT_INFO
4397 loop_exit:
4398 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
4399 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
4400 v_out2 = reduce <v_out1>
4401 s_out3 = extract_field <v_out2, 0>
4402 s_out4 = adjust_result <s_out3>
4403 use <s_out4>
4404 use <s_out4>
4407 static void
4408 vect_create_epilog_for_reduction (vec<tree> vect_defs,
4409 stmt_vec_info stmt_info,
4410 gimple *reduc_def_stmt,
4411 int ncopies, internal_fn reduc_fn,
4412 vec<stmt_vec_info> reduction_phis,
4413 bool double_reduc,
4414 slp_tree slp_node,
4415 slp_instance slp_node_instance,
4416 tree induc_val, enum tree_code induc_code,
4417 tree neutral_op)
4419 stmt_vec_info prev_phi_info;
4420 tree vectype;
4421 machine_mode mode;
4422 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4423 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo), *outer_loop = NULL;
4424 basic_block exit_bb;
4425 tree scalar_dest;
4426 tree scalar_type;
4427 gimple *new_phi = NULL, *phi;
4428 stmt_vec_info phi_info;
4429 gimple_stmt_iterator exit_gsi;
4430 tree vec_dest;
4431 tree new_temp = NULL_TREE, new_dest, new_name, new_scalar_dest;
4432 gimple *epilog_stmt = NULL;
4433 enum tree_code code = gimple_assign_rhs_code (stmt_info->stmt);
4434 gimple *exit_phi;
4435 tree bitsize;
4436 tree adjustment_def = NULL;
4437 tree vec_initial_def = NULL;
4438 tree expr, def, initial_def = NULL;
4439 tree orig_name, scalar_result;
4440 imm_use_iterator imm_iter, phi_imm_iter;
4441 use_operand_p use_p, phi_use_p;
4442 gimple *use_stmt;
4443 stmt_vec_info reduction_phi_info = NULL;
4444 bool nested_in_vect_loop = false;
4445 auto_vec<gimple *> new_phis;
4446 auto_vec<stmt_vec_info> inner_phis;
4447 int j, i;
4448 auto_vec<tree> scalar_results;
4449 unsigned int group_size = 1, k, ratio;
4450 auto_vec<tree> vec_initial_defs;
4451 auto_vec<gimple *> phis;
4452 bool slp_reduc = false;
4453 bool direct_slp_reduc;
4454 tree new_phi_result;
4455 stmt_vec_info inner_phi = NULL;
4456 tree induction_index = NULL_TREE;
4458 if (slp_node)
4459 group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();
4461 if (nested_in_vect_loop_p (loop, stmt_info))
4463 outer_loop = loop;
4464 loop = loop->inner;
4465 nested_in_vect_loop = true;
4466 gcc_assert (!slp_node);
4469 vectype = STMT_VINFO_VECTYPE (stmt_info);
4470 gcc_assert (vectype);
4471 mode = TYPE_MODE (vectype);
4473 /* 1. Create the reduction def-use cycle:
4474 Set the arguments of REDUCTION_PHIS, i.e., transform
4476 loop:
4477 vec_def = phi <null, null> # REDUCTION_PHI
4478 VECT_DEF = vector_stmt # vectorized form of STMT
4481 into:
4483 loop:
4484 vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI
4485 VECT_DEF = vector_stmt # vectorized form of STMT
4488 (in case of SLP, do it for all the phis). */
4490 /* Get the loop-entry arguments. */
4491 enum vect_def_type initial_def_dt = vect_unknown_def_type;
4492 if (slp_node)
4494 unsigned vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
4495 vec_initial_defs.reserve (vec_num);
4496 get_initial_defs_for_reduction (slp_node_instance->reduc_phis,
4497 &vec_initial_defs, vec_num,
4498 REDUC_GROUP_FIRST_ELEMENT (stmt_info),
4499 neutral_op);
4501 else
4503 /* Get at the scalar def before the loop, that defines the initial value
4504 of the reduction variable. */
4505 initial_def = PHI_ARG_DEF_FROM_EDGE (reduc_def_stmt,
4506 loop_preheader_edge (loop));
4507 /* Optimize: for REDUC_MAX, if initial_def is smaller than the base
4508 and we can't use zero for induc_val, use initial_def. Similarly
4509 for REDUC_MIN with initial_def larger than the base. */
4510 if (TREE_CODE (initial_def) == INTEGER_CST
4511 && (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
4512 == INTEGER_INDUC_COND_REDUCTION)
4513 && !integer_zerop (induc_val)
4514 && ((induc_code == MAX_EXPR
4515 && tree_int_cst_lt (initial_def, induc_val))
4516 || (induc_code == MIN_EXPR
4517 && tree_int_cst_lt (induc_val, initial_def))))
4518 induc_val = initial_def;
4520 if (double_reduc)
4521 /* In case of double reduction we only create a vector variable
4522 to be put in the reduction phi node. The actual statement
4523 creation is done later in this function. */
4524 vec_initial_def = vect_create_destination_var (initial_def, vectype);
4525 else if (nested_in_vect_loop)
4527 /* Do not use an adjustment def as that case is not supported
4528 correctly if ncopies is not one. */
4529 vect_is_simple_use (initial_def, loop_vinfo, &initial_def_dt);
4530 vec_initial_def = vect_get_vec_def_for_operand (initial_def,
4531 stmt_info);
4533 else
4534 vec_initial_def
4535 = get_initial_def_for_reduction (stmt_info, initial_def,
4536 &adjustment_def);
4537 vec_initial_defs.create (1);
4538 vec_initial_defs.quick_push (vec_initial_def);
4541 /* Set phi nodes arguments. */
4542 FOR_EACH_VEC_ELT (reduction_phis, i, phi_info)
4544 tree vec_init_def = vec_initial_defs[i];
4545 tree def = vect_defs[i];
4546 for (j = 0; j < ncopies; j++)
4548 if (j != 0)
4550 phi_info = STMT_VINFO_RELATED_STMT (phi_info);
4551 if (nested_in_vect_loop)
4552 vec_init_def
4553 = vect_get_vec_def_for_stmt_copy (loop_vinfo, vec_init_def);
4556 /* Set the loop-entry arg of the reduction-phi. */
4558 gphi *phi = as_a <gphi *> (phi_info->stmt);
4559 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
4560 == INTEGER_INDUC_COND_REDUCTION)
4562 /* Initialise the reduction phi to zero. This prevents non-zero
4563 initial values from interfering with the reduction op. */
4564 gcc_assert (ncopies == 1);
4565 gcc_assert (i == 0);
4567 tree vec_init_def_type = TREE_TYPE (vec_init_def);
4568 tree induc_val_vec
4569 = build_vector_from_val (vec_init_def_type, induc_val);
4571 add_phi_arg (phi, induc_val_vec, loop_preheader_edge (loop),
4572 UNKNOWN_LOCATION);
4574 else
4575 add_phi_arg (phi, vec_init_def, loop_preheader_edge (loop),
4576 UNKNOWN_LOCATION);
4578 /* Set the loop-latch arg for the reduction-phi. */
4579 if (j > 0)
4580 def = vect_get_vec_def_for_stmt_copy (loop_vinfo, def);
4582 add_phi_arg (phi, def, loop_latch_edge (loop), UNKNOWN_LOCATION);
4584 if (dump_enabled_p ())
4586 dump_printf_loc (MSG_NOTE, vect_location,
4587 "transform reduction: created def-use cycle: ");
4588 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
4589 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, SSA_NAME_DEF_STMT (def), 0);
4594 /* For cond reductions we want to create a new vector (INDEX_COND_EXPR)
4595 which is updated with the current index of the loop for every match of
4596 the original loop's cond_expr (VEC_STMT). This results in a vector
4597 containing the last time the condition passed for that vector lane.
4598 The first match will be a 1 to allow 0 to be used for non-matching
4599 indexes. If there are no matches at all then the vector will be all
4600 zeroes. */
4601 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION)
4603 tree indx_before_incr, indx_after_incr;
4604 poly_uint64 nunits_out = TYPE_VECTOR_SUBPARTS (vectype);
4606 gimple *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info)->stmt;
4607 gcc_assert (gimple_assign_rhs_code (vec_stmt) == VEC_COND_EXPR);
4609 int scalar_precision
4610 = GET_MODE_PRECISION (SCALAR_TYPE_MODE (TREE_TYPE (vectype)));
4611 tree cr_index_scalar_type = make_unsigned_type (scalar_precision);
4612 tree cr_index_vector_type = build_vector_type
4613 (cr_index_scalar_type, TYPE_VECTOR_SUBPARTS (vectype));
4615 /* First we create a simple vector induction variable which starts
4616 with the values {1,2,3,...} (SERIES_VECT) and increments by the
4617 vector size (STEP). */
4619 /* Create a {1,2,3,...} vector. */
4620 tree series_vect = build_index_vector (cr_index_vector_type, 1, 1);
4622 /* Create a vector of the step value. */
4623 tree step = build_int_cst (cr_index_scalar_type, nunits_out);
4624 tree vec_step = build_vector_from_val (cr_index_vector_type, step);
4626 /* Create an induction variable. */
4627 gimple_stmt_iterator incr_gsi;
4628 bool insert_after;
4629 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
4630 create_iv (series_vect, vec_step, NULL_TREE, loop, &incr_gsi,
4631 insert_after, &indx_before_incr, &indx_after_incr);
4633 /* Next create a new phi node vector (NEW_PHI_TREE) which starts
4634 filled with zeros (VEC_ZERO). */
4636 /* Create a vector of 0s. */
4637 tree zero = build_zero_cst (cr_index_scalar_type);
4638 tree vec_zero = build_vector_from_val (cr_index_vector_type, zero);
4640 /* Create a vector phi node. */
4641 tree new_phi_tree = make_ssa_name (cr_index_vector_type);
4642 new_phi = create_phi_node (new_phi_tree, loop->header);
4643 loop_vinfo->add_stmt (new_phi);
4644 add_phi_arg (as_a <gphi *> (new_phi), vec_zero,
4645 loop_preheader_edge (loop), UNKNOWN_LOCATION);
4647 /* Now take the condition from the loop's original cond_expr
4648 (VEC_STMT) and produce a new cond_expr (INDEX_COND_EXPR) which for
4649 every match uses values from the induction variable
4650 (INDEX_BEFORE_INCR) otherwise uses values from the phi node
4651 (NEW_PHI_TREE).
4652 Finally, we update the phi (NEW_PHI_TREE) to take the value of
4653 the new cond_expr (INDEX_COND_EXPR). */
4655 /* Duplicate the condition from vec_stmt. */
4656 tree ccompare = unshare_expr (gimple_assign_rhs1 (vec_stmt));
4658 /* Create a conditional, where the condition is taken from vec_stmt
4659 (CCOMPARE), the then-value is the induction index (INDEX_BEFORE_INCR)
4660 and the else-value is the phi (NEW_PHI_TREE). */
4661 tree index_cond_expr = build3 (VEC_COND_EXPR, cr_index_vector_type,
4662 ccompare, indx_before_incr,
4663 new_phi_tree);
4664 induction_index = make_ssa_name (cr_index_vector_type);
4665 gimple *index_condition = gimple_build_assign (induction_index,
4666 index_cond_expr);
4667 gsi_insert_before (&incr_gsi, index_condition, GSI_SAME_STMT);
4668 stmt_vec_info index_vec_info = loop_vinfo->add_stmt (index_condition);
4669 STMT_VINFO_VECTYPE (index_vec_info) = cr_index_vector_type;
4671 /* Update the phi with the vec cond. */
4672 add_phi_arg (as_a <gphi *> (new_phi), induction_index,
4673 loop_latch_edge (loop), UNKNOWN_LOCATION);
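/* Worked example (editorial comment, not GCC code): with a 4-lane vector,
   SERIES_VECT = {1,2,3,4} and STEP = 4, so INDEX_BEFORE_INCR is {1,2,3,4}
   in the first vector iteration and {5,6,7,8} in the second.  If lane 2
   matches only in the first iteration and lane 0 only in the second,
   NEW_PHI_TREE evolves {0,0,0,0} -> {0,0,3,0} -> {5,0,3,0}; lanes that
   never match stay 0.  */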
4676 /* 2. Create epilog code.
4677 The reduction epilog code operates across the elements of the vector
4678 of partial results computed by the vectorized loop.
4679 The reduction epilog code consists of:
4681 step 1: compute the scalar result in a vector (v_out2)
4682 step 2: extract the scalar result (s_out3) from the vector (v_out2)
4683 step 3: adjust the scalar result (s_out3) if needed.
4685 Step 1 can be accomplished using one of the following three schemes:
4686 (scheme 1) using reduc_fn, if available.
4687 (scheme 2) using whole-vector shifts, if available.
4688 (scheme 3) using a scalar loop. In this case steps 1+2 above are
4689 combined.
4691 The overall epilog code looks like this:
4693 s_out0 = phi <s_loop> # original EXIT_PHI
4694 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
4695 v_out2 = reduce <v_out1> # step 1
4696 s_out3 = extract_field <v_out2, 0> # step 2
4697 s_out4 = adjust_result <s_out3> # step 3
4699 (step 3 is optional, and steps 1 and 2 may be combined).
4700 Lastly, the uses of s_out0 are replaced by s_out4. */
4703 /* 2.1 Create new loop-exit-phis to preserve loop-closed form:
4704 v_out1 = phi <VECT_DEF>
4705 Store them in NEW_PHIS. */
4707 exit_bb = single_exit (loop)->dest;
4708 prev_phi_info = NULL;
4709 new_phis.create (vect_defs.length ());
4710 FOR_EACH_VEC_ELT (vect_defs, i, def)
4712 for (j = 0; j < ncopies; j++)
4714 tree new_def = copy_ssa_name (def);
4715 phi = create_phi_node (new_def, exit_bb);
4716 stmt_vec_info phi_info = loop_vinfo->add_stmt (phi);
4717 if (j == 0)
4718 new_phis.quick_push (phi);
4719 else
4721 def = vect_get_vec_def_for_stmt_copy (loop_vinfo, def);
4722 STMT_VINFO_RELATED_STMT (prev_phi_info) = phi_info;
4725 SET_PHI_ARG_DEF (phi, single_exit (loop)->dest_idx, def);
4726 prev_phi_info = phi_info;
4730 /* The epilogue is created for the outer-loop, i.e., for the loop being
4731 vectorized. Create exit phis for the outer loop. */
4732 if (double_reduc)
4734 loop = outer_loop;
4735 exit_bb = single_exit (loop)->dest;
4736 inner_phis.create (vect_defs.length ());
4737 FOR_EACH_VEC_ELT (new_phis, i, phi)
4739 stmt_vec_info phi_info = loop_vinfo->lookup_stmt (phi);
4740 tree new_result = copy_ssa_name (PHI_RESULT (phi));
4741 gphi *outer_phi = create_phi_node (new_result, exit_bb);
4742 SET_PHI_ARG_DEF (outer_phi, single_exit (loop)->dest_idx,
4743 PHI_RESULT (phi));
4744 prev_phi_info = loop_vinfo->add_stmt (outer_phi);
4745 inner_phis.quick_push (phi_info);
4746 new_phis[i] = outer_phi;
4747 while (STMT_VINFO_RELATED_STMT (phi_info))
4749 phi_info = STMT_VINFO_RELATED_STMT (phi_info);
4750 new_result = copy_ssa_name (PHI_RESULT (phi_info->stmt));
4751 outer_phi = create_phi_node (new_result, exit_bb);
4752 SET_PHI_ARG_DEF (outer_phi, single_exit (loop)->dest_idx,
4753 PHI_RESULT (phi_info->stmt));
4754 stmt_vec_info outer_phi_info = loop_vinfo->add_stmt (outer_phi);
4755 STMT_VINFO_RELATED_STMT (prev_phi_info) = outer_phi_info;
4756 prev_phi_info = outer_phi_info;
4761 exit_gsi = gsi_after_labels (exit_bb);
4763 /* 2.2 Get the relevant tree-code to use in the epilog for schemes 2,3
4764 (i.e. when reduc_fn is not available) and in the final adjustment
4765 code (if needed). Also get the original scalar reduction variable as
4766 defined in the loop. In case STMT is a "pattern-stmt" (i.e. - it
4767 represents a reduction pattern), the tree-code and scalar-def are
4768 taken from the original stmt that the pattern-stmt (STMT) replaces.
4769 Otherwise (it is a regular reduction) - the tree-code and scalar-def
4770 are taken from STMT. */
4772 stmt_vec_info orig_stmt_info = vect_orig_stmt (stmt_info);
4773 if (orig_stmt_info != stmt_info)
4775 /* Reduction pattern */
4776 gcc_assert (STMT_VINFO_IN_PATTERN_P (orig_stmt_info));
4777 gcc_assert (STMT_VINFO_RELATED_STMT (orig_stmt_info) == stmt_info);
4780 code = gimple_assign_rhs_code (orig_stmt_info->stmt);
4781 /* For MINUS_EXPR the initial vector is [init_val,0,...,0], therefore,
4782 partial results are added and not subtracted. */
4783 if (code == MINUS_EXPR)
4784 code = PLUS_EXPR;
4786 scalar_dest = gimple_assign_lhs (orig_stmt_info->stmt);
4787 scalar_type = TREE_TYPE (scalar_dest);
4788 scalar_results.create (group_size);
4789 new_scalar_dest = vect_create_destination_var (scalar_dest, NULL);
4790 bitsize = TYPE_SIZE (scalar_type);
4792 /* In case this is a reduction in an inner-loop while vectorizing an outer
4793 loop - we don't need to extract a single scalar result at the end of the
4794 inner-loop (unless it is double reduction, i.e., the use of reduction is
4795 outside the outer-loop). The final vector of partial results will be used
4796 in the vectorized outer-loop, or reduced to a scalar result at the end of
4797 the outer-loop. */
4798 if (nested_in_vect_loop && !double_reduc)
4799 goto vect_finalize_reduction;
4801 /* SLP reduction without reduction chain, e.g.,
4802 # a1 = phi <a2, a0>
4803 # b1 = phi <b2, b0>
4804 a2 = operation (a1)
4805 b2 = operation (b1) */
4806 slp_reduc = (slp_node && !REDUC_GROUP_FIRST_ELEMENT (stmt_info));
4808 /* True if we should implement SLP_REDUC using native reduction operations
4809 instead of scalar operations. */
4810 direct_slp_reduc = (reduc_fn != IFN_LAST
4811 && slp_reduc
4812 && !TYPE_VECTOR_SUBPARTS (vectype).is_constant ());
4814 /* In case of reduction chain, e.g.,
4815 # a1 = phi <a3, a0>
4816 a2 = operation (a1)
4817 a3 = operation (a2),
4819 we may end up with more than one vector result. Here we reduce them to
4820 one vector. */
4821 if (REDUC_GROUP_FIRST_ELEMENT (stmt_info) || direct_slp_reduc)
4823 tree first_vect = PHI_RESULT (new_phis[0]);
4824 gassign *new_vec_stmt = NULL;
4825 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4826 for (k = 1; k < new_phis.length (); k++)
4828 gimple *next_phi = new_phis[k];
4829 tree second_vect = PHI_RESULT (next_phi);
4830 tree tem = make_ssa_name (vec_dest, new_vec_stmt);
4831 new_vec_stmt = gimple_build_assign (tem, code,
4832 first_vect, second_vect);
4833 gsi_insert_before (&exit_gsi, new_vec_stmt, GSI_SAME_STMT);
4834 first_vect = tem;
4837 new_phi_result = first_vect;
4838 if (new_vec_stmt)
4840 new_phis.truncate (0);
4841 new_phis.safe_push (new_vec_stmt);
4844 /* Likewise if we couldn't use a single def-use cycle. */
4845 else if (ncopies > 1)
4847 gcc_assert (new_phis.length () == 1);
4848 tree first_vect = PHI_RESULT (new_phis[0]);
4849 gassign *new_vec_stmt = NULL;
4850 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4851 stmt_vec_info next_phi_info = loop_vinfo->lookup_stmt (new_phis[0]);
4852 for (int k = 1; k < ncopies; ++k)
4854 next_phi_info = STMT_VINFO_RELATED_STMT (next_phi_info);
4855 tree second_vect = PHI_RESULT (next_phi_info->stmt);
4856 tree tem = make_ssa_name (vec_dest, new_vec_stmt);
4857 new_vec_stmt = gimple_build_assign (tem, code,
4858 first_vect, second_vect);
4859 gsi_insert_before (&exit_gsi, new_vec_stmt, GSI_SAME_STMT);
4860 first_vect = tem;
4862 new_phi_result = first_vect;
4863 new_phis.truncate (0);
4864 new_phis.safe_push (new_vec_stmt);
4866 else
4867 new_phi_result = PHI_RESULT (new_phis[0]);
4869 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION
4870 && reduc_fn != IFN_LAST)
4872 /* For condition reductions, we have a vector (NEW_PHI_RESULT) containing
4873 various data values where the condition matched and another vector
4874 (INDUCTION_INDEX) containing all the indexes of those matches. We
4875 need to extract the last matching index (which will be the index with
4876 highest value) and use this to index into the data vector.
4877 For the case where there were no matches, the data vector will contain
4878 all default values and the index vector will be all zeros. */
4880 /* Get various versions of the type of the vector of indexes. */
4881 tree index_vec_type = TREE_TYPE (induction_index);
4882 gcc_checking_assert (TYPE_UNSIGNED (index_vec_type));
4883 tree index_scalar_type = TREE_TYPE (index_vec_type);
4884 tree index_vec_cmp_type = build_same_sized_truth_vector_type
4885 (index_vec_type);
4887 /* Get an unsigned integer version of the type of the data vector. */
4888 int scalar_precision
4889 = GET_MODE_PRECISION (SCALAR_TYPE_MODE (scalar_type));
4890 tree scalar_type_unsigned = make_unsigned_type (scalar_precision);
4891 tree vectype_unsigned = build_vector_type
4892 (scalar_type_unsigned, TYPE_VECTOR_SUBPARTS (vectype));
4894 /* First we need to create a vector (ZERO_VEC) of zeros and another
4895 vector (MAX_INDEX_VEC) filled with the last matching index, which we
4896 can create using a MAX reduction and then expanding.
4897 In the case where the loop never made any matches, the max index will
4898 be zero. */
4900 /* Vector of {0, 0, 0,...}. */
4901 tree zero_vec = make_ssa_name (vectype);
4902 tree zero_vec_rhs = build_zero_cst (vectype);
4903 gimple *zero_vec_stmt = gimple_build_assign (zero_vec, zero_vec_rhs);
4904 gsi_insert_before (&exit_gsi, zero_vec_stmt, GSI_SAME_STMT);
4906 /* Find maximum value from the vector of found indexes. */
4907 tree max_index = make_ssa_name (index_scalar_type);
4908 gcall *max_index_stmt = gimple_build_call_internal (IFN_REDUC_MAX,
4909 1, induction_index);
4910 gimple_call_set_lhs (max_index_stmt, max_index);
4911 gsi_insert_before (&exit_gsi, max_index_stmt, GSI_SAME_STMT);
4913 /* Vector of {max_index, max_index, max_index,...}. */
4914 tree max_index_vec = make_ssa_name (index_vec_type);
4915 tree max_index_vec_rhs = build_vector_from_val (index_vec_type,
4916 max_index);
4917 gimple *max_index_vec_stmt = gimple_build_assign (max_index_vec,
4918 max_index_vec_rhs);
4919 gsi_insert_before (&exit_gsi, max_index_vec_stmt, GSI_SAME_STMT);
4921 /* Next we compare the new vector (MAX_INDEX_VEC) full of max indexes
4922 with the vector (INDUCTION_INDEX) of found indexes, choosing values
4923 from the data vector (NEW_PHI_RESULT) for matches, 0 (ZERO_VEC)
4924 otherwise. Only one value should match, resulting in a vector
4925 (VEC_COND) with one data value and the rest zeros.
4926 In the case where the loop never made any matches, every index will
4927 match, resulting in a vector with all data values (which will all be
4928 the default value). */
4930 /* Compare the max index vector to the vector of found indexes to find
4931 the position of the max value. */
4932 tree vec_compare = make_ssa_name (index_vec_cmp_type);
4933 gimple *vec_compare_stmt = gimple_build_assign (vec_compare, EQ_EXPR,
4934 induction_index,
4935 max_index_vec);
4936 gsi_insert_before (&exit_gsi, vec_compare_stmt, GSI_SAME_STMT);
4938 /* Use the compare to choose either values from the data vector or
4939 zero. */
4940 tree vec_cond = make_ssa_name (vectype);
4941 gimple *vec_cond_stmt = gimple_build_assign (vec_cond, VEC_COND_EXPR,
4942 vec_compare, new_phi_result,
4943 zero_vec);
4944 gsi_insert_before (&exit_gsi, vec_cond_stmt, GSI_SAME_STMT);
4946 /* Finally we need to extract the data value from the vector (VEC_COND)
4947 into a scalar (MATCHED_DATA_REDUC). Logically we want to do an OR
4948 reduction, but because this doesn't exist, we can use a MAX reduction
4949 instead. The data value might be signed or a float so we need to cast
4950 it first.
4951 In the case where the loop never made any matches, the data values are
4952 all identical, and so will reduce down correctly. */
4954 /* Make the matched data values unsigned. */
4955 tree vec_cond_cast = make_ssa_name (vectype_unsigned);
4956 tree vec_cond_cast_rhs = build1 (VIEW_CONVERT_EXPR, vectype_unsigned,
4957 vec_cond);
4958 gimple *vec_cond_cast_stmt = gimple_build_assign (vec_cond_cast,
4959 VIEW_CONVERT_EXPR,
4960 vec_cond_cast_rhs);
4961 gsi_insert_before (&exit_gsi, vec_cond_cast_stmt, GSI_SAME_STMT);
4963 /* Reduce down to a scalar value. */
4964 tree data_reduc = make_ssa_name (scalar_type_unsigned);
4965 gcall *data_reduc_stmt = gimple_build_call_internal (IFN_REDUC_MAX,
4966 1, vec_cond_cast);
4967 gimple_call_set_lhs (data_reduc_stmt, data_reduc);
4968 gsi_insert_before (&exit_gsi, data_reduc_stmt, GSI_SAME_STMT);
4970 /* Convert the reduced value back to the result type and set as the
4971 result. */
4972 gimple_seq stmts = NULL;
4973 new_temp = gimple_build (&stmts, VIEW_CONVERT_EXPR, scalar_type,
4974 data_reduc);
4975 gsi_insert_seq_before (&exit_gsi, stmts, GSI_SAME_STMT);
4976 scalar_results.safe_push (new_temp);
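/* Worked example (editorial comment, not GCC code): suppose
   NEW_PHI_RESULT = {d0,d1,d2,d3} and INDUCTION_INDEX = {5,0,3,0}, i.e.
   lane 0 matched last.  IFN_REDUC_MAX over the indexes gives 5, the
   comparison against the broadcast max selects only lane 0, the VEC_COND
   yields {d0,0,0,0}, and the final unsigned IFN_REDUC_MAX therefore
   reduces to d0, which is converted back to SCALAR_TYPE below.  */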
4978 else if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION
4979 && reduc_fn == IFN_LAST)
4981 /* Condition reduction without supported IFN_REDUC_MAX. Generate
4982 idx = 0;
4983 idx_val = induction_index[0];
4984 val = data_reduc[0];
4985 for (idx = 0, val = init, i = 0; i < nelts; ++i)
4986 if (induction_index[i] > idx_val)
4987 val = data_reduc[i], idx_val = induction_index[i];
4988 return val; */
4990 tree data_eltype = TREE_TYPE (TREE_TYPE (new_phi_result));
4991 tree idx_eltype = TREE_TYPE (TREE_TYPE (induction_index));
4992 unsigned HOST_WIDE_INT el_size = tree_to_uhwi (TYPE_SIZE (idx_eltype));
4993 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (TREE_TYPE (induction_index));
4994 /* Enforced by vectorizable_reduction, which ensures we have target
4995 support before allowing a conditional reduction on variable-length
4996 vectors. */
4997 unsigned HOST_WIDE_INT v_size = el_size * nunits.to_constant ();
4998 tree idx_val = NULL_TREE, val = NULL_TREE;
4999 for (unsigned HOST_WIDE_INT off = 0; off < v_size; off += el_size)
5001 tree old_idx_val = idx_val;
5002 tree old_val = val;
5003 idx_val = make_ssa_name (idx_eltype);
5004 epilog_stmt = gimple_build_assign (idx_val, BIT_FIELD_REF,
5005 build3 (BIT_FIELD_REF, idx_eltype,
5006 induction_index,
5007 bitsize_int (el_size),
5008 bitsize_int (off)));
5009 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5010 val = make_ssa_name (data_eltype);
5011 epilog_stmt = gimple_build_assign (val, BIT_FIELD_REF,
5012 build3 (BIT_FIELD_REF,
5013 data_eltype,
5014 new_phi_result,
5015 bitsize_int (el_size),
5016 bitsize_int (off)));
5017 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5018 if (off != 0)
5020 tree new_idx_val = idx_val;
5021 tree new_val = val;
5022 if (off != v_size - el_size)
5024 new_idx_val = make_ssa_name (idx_eltype);
5025 epilog_stmt = gimple_build_assign (new_idx_val,
5026 MAX_EXPR, idx_val,
5027 old_idx_val);
5028 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5030 new_val = make_ssa_name (data_eltype);
5031 epilog_stmt = gimple_build_assign (new_val,
5032 COND_EXPR,
5033 build2 (GT_EXPR,
5034 boolean_type_node,
5035 idx_val,
5036 old_idx_val),
5037 val, old_val);
5038 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5039 idx_val = new_idx_val;
5040 val = new_val;
5043 /* Convert the reduced value back to the result type and set as the
5044 result. */
5045 gimple_seq stmts = NULL;
5046 val = gimple_convert (&stmts, scalar_type, val);
5047 gsi_insert_seq_before (&exit_gsi, stmts, GSI_SAME_STMT);
5048 scalar_results.safe_push (val);
5051 /* 2.3 Create the reduction code, using one of the three schemes described
5052 above. In SLP we simply need to extract all the elements from the
5053 vector (without reducing them), so we use scalar shifts. */
5054 else if (reduc_fn != IFN_LAST && !slp_reduc)
5056 tree tmp;
5057 tree vec_elem_type;
5059 /* Case 1: Create:
5060 v_out2 = reduc_expr <v_out1> */
5062 if (dump_enabled_p ())
5063 dump_printf_loc (MSG_NOTE, vect_location,
5064 "Reduce using direct vector reduction.\n");
5066 vec_elem_type = TREE_TYPE (TREE_TYPE (new_phi_result));
5067 if (!useless_type_conversion_p (scalar_type, vec_elem_type))
5069 tree tmp_dest
5070 = vect_create_destination_var (scalar_dest, vec_elem_type);
5071 epilog_stmt = gimple_build_call_internal (reduc_fn, 1,
5072 new_phi_result);
5073 gimple_set_lhs (epilog_stmt, tmp_dest);
5074 new_temp = make_ssa_name (tmp_dest, epilog_stmt);
5075 gimple_set_lhs (epilog_stmt, new_temp);
5076 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5078 epilog_stmt = gimple_build_assign (new_scalar_dest, NOP_EXPR,
5079 new_temp);
5081 else
5083 epilog_stmt = gimple_build_call_internal (reduc_fn, 1,
5084 new_phi_result);
5085 gimple_set_lhs (epilog_stmt, new_scalar_dest);
5088 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
5089 gimple_set_lhs (epilog_stmt, new_temp);
5090 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5092 if ((STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
5093 == INTEGER_INDUC_COND_REDUCTION)
5094 && !operand_equal_p (initial_def, induc_val, 0))
5096 /* Earlier we set the initial value to be a vector of induc_val
5097 values. Check the result and if it is induc_val then replace
5098 with the original initial value, unless induc_val is
5099 the same as initial_def already. */
5100 tree zcompare = build2 (EQ_EXPR, boolean_type_node, new_temp,
5101 induc_val);
5103 tmp = make_ssa_name (new_scalar_dest);
5104 epilog_stmt = gimple_build_assign (tmp, COND_EXPR, zcompare,
5105 initial_def, new_temp);
5106 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5107 new_temp = tmp;
5110 scalar_results.safe_push (new_temp);
5112 else if (direct_slp_reduc)
5114 /* Here we create one vector for each of the REDUC_GROUP_SIZE results,
5115 with the elements for other SLP statements replaced with the
5116 neutral value. We can then do a normal reduction on each vector. */
5118 /* Enforced by vectorizable_reduction. */
5119 gcc_assert (new_phis.length () == 1);
5120 gcc_assert (pow2p_hwi (group_size));
5122 slp_tree orig_phis_slp_node = slp_node_instance->reduc_phis;
5123 vec<stmt_vec_info> orig_phis
5124 = SLP_TREE_SCALAR_STMTS (orig_phis_slp_node);
5125 gimple_seq seq = NULL;
5127 /* Build a vector {0, 1, 2, ...}, with the same number of elements
5128 and the same element size as VECTYPE. */
5129 tree index = build_index_vector (vectype, 0, 1);
5130 tree index_type = TREE_TYPE (index);
5131 tree index_elt_type = TREE_TYPE (index_type);
5132 tree mask_type = build_same_sized_truth_vector_type (index_type);
5134 /* Create a vector that, for each element, identifies which of
5135 the REDUC_GROUP_SIZE results should use it. */
5136 tree index_mask = build_int_cst (index_elt_type, group_size - 1);
5137 index = gimple_build (&seq, BIT_AND_EXPR, index_type, index,
5138 build_vector_from_val (index_type, index_mask));
5140 /* Get a neutral vector value. This is simply a splat of the neutral
5141 scalar value if we have one, otherwise the initial scalar value
5142 is itself a neutral value. */
5143 tree vector_identity = NULL_TREE;
5144 if (neutral_op)
5145 vector_identity = gimple_build_vector_from_val (&seq, vectype,
5146 neutral_op);
5147 for (unsigned int i = 0; i < group_size; ++i)
5149 /* If there's no universal neutral value, we can use the
5150 initial scalar value from the original PHI. This is used
5151 for MIN and MAX reduction, for example. */
5152 if (!neutral_op)
5154 tree scalar_value
5155 = PHI_ARG_DEF_FROM_EDGE (orig_phis[i]->stmt,
5156 loop_preheader_edge (loop));
5157 vector_identity = gimple_build_vector_from_val (&seq, vectype,
5158 scalar_value);
5161 /* Calculate the equivalent of:
5163 sel[j] = (index[j] == i);
5165 which selects the elements of NEW_PHI_RESULT that should
5166 be included in the result. */
5167 tree compare_val = build_int_cst (index_elt_type, i);
5168 compare_val = build_vector_from_val (index_type, compare_val);
5169 tree sel = gimple_build (&seq, EQ_EXPR, mask_type,
5170 index, compare_val);
5172 /* Calculate the equivalent of:
5174 vec = sel ? new_phi_result : vector_identity;
5176 VEC is now suitable for a full vector reduction. */
5177 tree vec = gimple_build (&seq, VEC_COND_EXPR, vectype,
5178 sel, new_phi_result, vector_identity);
5180 /* Do the reduction and convert it to the appropriate type. */
5181 tree scalar = gimple_build (&seq, as_combined_fn (reduc_fn),
5182 TREE_TYPE (vectype), vec);
5183 scalar = gimple_convert (&seq, scalar_type, scalar);
5184 scalar_results.safe_push (scalar);
5186 gsi_insert_seq_before (&exit_gsi, seq, GSI_SAME_STMT);
5188 else
5190 bool reduce_with_shift;
5191 tree vec_temp;
5193 /* COND reductions all do the final reduction with MAX_EXPR
5194 or MIN_EXPR. */
5195 if (code == COND_EXPR)
5197 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
5198 == INTEGER_INDUC_COND_REDUCTION)
5199 code = induc_code;
5200 else
5201 code = MAX_EXPR;
5204 /* See if the target wants to do the final (shift) reduction
5205 in a vector mode of smaller size and first reduce upper/lower
5206 halves against each other. */
5207 enum machine_mode mode1 = mode;
5208 tree vectype1 = vectype;
5209 unsigned sz = tree_to_uhwi (TYPE_SIZE_UNIT (vectype));
5210 unsigned sz1 = sz;
5211 if (!slp_reduc
5212 && (mode1 = targetm.vectorize.split_reduction (mode)) != mode)
5213 sz1 = GET_MODE_SIZE (mode1).to_constant ();
5215 vectype1 = get_vectype_for_scalar_type_and_size (scalar_type, sz1);
5216 reduce_with_shift = have_whole_vector_shift (mode1);
5217 if (!VECTOR_MODE_P (mode1))
5218 reduce_with_shift = false;
5219 else
5221 optab optab = optab_for_tree_code (code, vectype1, optab_default);
5222 if (optab_handler (optab, mode1) == CODE_FOR_nothing)
5223 reduce_with_shift = false;
5226 /* First reduce the vector to the desired vector size we should
5227 do shift reduction on by combining upper and lower halves. */
5228 new_temp = new_phi_result;
5229 while (sz > sz1)
5231 gcc_assert (!slp_reduc);
5232 sz /= 2;
5233 vectype1 = get_vectype_for_scalar_type_and_size (scalar_type, sz);
5235 /* The target has to make sure we support lowpart/highpart
5236 extraction, either via direct vector extract or through
5237 an integer mode punning. */
5238 tree dst1, dst2;
5239 if (convert_optab_handler (vec_extract_optab,
5240 TYPE_MODE (TREE_TYPE (new_temp)),
5241 TYPE_MODE (vectype1))
5242 != CODE_FOR_nothing)
5244 /* Extract sub-vectors directly once vec_extract becomes
5245 a conversion optab. */
5246 dst1 = make_ssa_name (vectype1);
5247 epilog_stmt
5248 = gimple_build_assign (dst1, BIT_FIELD_REF,
5249 build3 (BIT_FIELD_REF, vectype1,
5250 new_temp, TYPE_SIZE (vectype1),
5251 bitsize_int (0)));
5252 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5253 dst2 = make_ssa_name (vectype1);
5254 epilog_stmt
5255 = gimple_build_assign (dst2, BIT_FIELD_REF,
5256 build3 (BIT_FIELD_REF, vectype1,
5257 new_temp, TYPE_SIZE (vectype1),
5258 bitsize_int (sz * BITS_PER_UNIT)));
5259 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5261 else
5263 /* Extract via punning to appropriately sized integer mode
5264 vector. */
5265 tree eltype = build_nonstandard_integer_type (sz * BITS_PER_UNIT,
5266 1);
5267 tree etype = build_vector_type (eltype, 2);
5268 gcc_assert (convert_optab_handler (vec_extract_optab,
5269 TYPE_MODE (etype),
5270 TYPE_MODE (eltype))
5271 != CODE_FOR_nothing);
5272 tree tem = make_ssa_name (etype);
5273 epilog_stmt = gimple_build_assign (tem, VIEW_CONVERT_EXPR,
5274 build1 (VIEW_CONVERT_EXPR,
5275 etype, new_temp));
5276 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5277 new_temp = tem;
5278 tem = make_ssa_name (eltype);
5279 epilog_stmt
5280 = gimple_build_assign (tem, BIT_FIELD_REF,
5281 build3 (BIT_FIELD_REF, eltype,
5282 new_temp, TYPE_SIZE (eltype),
5283 bitsize_int (0)));
5284 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5285 dst1 = make_ssa_name (vectype1);
5286 epilog_stmt = gimple_build_assign (dst1, VIEW_CONVERT_EXPR,
5287 build1 (VIEW_CONVERT_EXPR,
5288 vectype1, tem));
5289 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5290 tem = make_ssa_name (eltype);
5291 epilog_stmt
5292 = gimple_build_assign (tem, BIT_FIELD_REF,
5293 build3 (BIT_FIELD_REF, eltype,
5294 new_temp, TYPE_SIZE (eltype),
5295 bitsize_int (sz * BITS_PER_UNIT)));
5296 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5297 dst2 = make_ssa_name (vectype1);
5298 epilog_stmt = gimple_build_assign (dst2, VIEW_CONVERT_EXPR,
5299 build1 (VIEW_CONVERT_EXPR,
5300 vectype1, tem));
5301 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5304 new_temp = make_ssa_name (vectype1);
5305 epilog_stmt = gimple_build_assign (new_temp, code, dst1, dst2);
5306 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5309 if (reduce_with_shift && !slp_reduc)
5311 int element_bitsize = tree_to_uhwi (bitsize);
5312 /* Enforced by vectorizable_reduction, which disallows SLP reductions
5313 for variable-length vectors and also requires direct target support
5314 for loop reductions. */
5315 int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype1));
5316 int nelements = vec_size_in_bits / element_bitsize;
5317 vec_perm_builder sel;
5318 vec_perm_indices indices;
5320 int elt_offset;
5322 tree zero_vec = build_zero_cst (vectype1);
5323 /* Case 2: Create:
5324 for (offset = nelements/2; offset >= 1; offset/=2)
5326 Create: va' = vec_shift <va, offset>
5327 Create: va = vop <va, va'>
5328 } */
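/* Editor's note, an illustrative expansion of the loop above for
   nelements == 4 and a PLUS reduction (an assumption, not original
   text):

     va' = vec_shift <va, 2>;  va = va + va';   lanes: { a0+a2, a1+a3, x, x }
     va' = vec_shift <va, 1>;  va = va + va';   lanes: { a0+a1+a2+a3, x, x, x }

   so lane 0 of VA ends up holding the full sum, which step 2.4 below
   extracts as the scalar result.  */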
5330 tree rhs;
5332 if (dump_enabled_p ())
5333 dump_printf_loc (MSG_NOTE, vect_location,
5334 "Reduce using vector shifts\n");
5336 mode1 = TYPE_MODE (vectype1);
5337 vec_dest = vect_create_destination_var (scalar_dest, vectype1);
5338 for (elt_offset = nelements / 2;
5339 elt_offset >= 1;
5340 elt_offset /= 2)
5342 calc_vec_perm_mask_for_shift (elt_offset, nelements, &sel);
5343 indices.new_vector (sel, 2, nelements);
5344 tree mask = vect_gen_perm_mask_any (vectype1, indices);
5345 epilog_stmt = gimple_build_assign (vec_dest, VEC_PERM_EXPR,
5346 new_temp, zero_vec, mask);
5347 new_name = make_ssa_name (vec_dest, epilog_stmt);
5348 gimple_assign_set_lhs (epilog_stmt, new_name);
5349 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5351 epilog_stmt = gimple_build_assign (vec_dest, code, new_name,
5352 new_temp);
5353 new_temp = make_ssa_name (vec_dest, epilog_stmt);
5354 gimple_assign_set_lhs (epilog_stmt, new_temp);
5355 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5358 /* 2.4 Extract the final scalar result. Create:
5359 s_out3 = extract_field <v_out2, bitpos> */
5361 if (dump_enabled_p ())
5362 dump_printf_loc (MSG_NOTE, vect_location,
5363 "extract scalar result\n");
5365 rhs = build3 (BIT_FIELD_REF, scalar_type, new_temp,
5366 bitsize, bitsize_zero_node);
5367 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
5368 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
5369 gimple_assign_set_lhs (epilog_stmt, new_temp);
5370 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5371 scalar_results.safe_push (new_temp);
5373 else
5375 /* Case 3: Create:
5376 s = extract_field <v_out2, 0>
5377 for (offset = element_size;
5378 offset < vector_size;
5379 offset += element_size;)
5381 Create: s' = extract_field <v_out2, offset>
5382 Create: s = op <s, s'> // For non SLP cases
5383 } */
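/* Editor's note, an illustrative expansion for a 4-element vector and
   a PLUS reduction (an assumption, not original text; SZ stands for
   the element size in bits):

     s  = BIT_FIELD_REF <v_out2, SZ, 0>;
     s' = BIT_FIELD_REF <v_out2, SZ, SZ>;      s = s + s';
     s' = BIT_FIELD_REF <v_out2, SZ, 2 * SZ>;  s = s + s';
     s' = BIT_FIELD_REF <v_out2, SZ, 3 * SZ>;  s = s + s';  */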
5385 if (dump_enabled_p ())
5386 dump_printf_loc (MSG_NOTE, vect_location,
5387 "Reduce using scalar code.\n");
5389 int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype1));
5390 int element_bitsize = tree_to_uhwi (bitsize);
5391 FOR_EACH_VEC_ELT (new_phis, i, new_phi)
5393 int bit_offset;
5394 if (gimple_code (new_phi) == GIMPLE_PHI)
5395 vec_temp = PHI_RESULT (new_phi);
5396 else
5397 vec_temp = gimple_assign_lhs (new_phi);
5398 tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp, bitsize,
5399 bitsize_zero_node);
5400 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
5401 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
5402 gimple_assign_set_lhs (epilog_stmt, new_temp);
5403 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5405 /* In SLP we don't need to apply reduction operation, so we just
5406 collect s' values in SCALAR_RESULTS. */
5407 if (slp_reduc)
5408 scalar_results.safe_push (new_temp);
5410 for (bit_offset = element_bitsize;
5411 bit_offset < vec_size_in_bits;
5412 bit_offset += element_bitsize)
5414 tree bitpos = bitsize_int (bit_offset);
5415 tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp,
5416 bitsize, bitpos);
5418 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
5419 new_name = make_ssa_name (new_scalar_dest, epilog_stmt);
5420 gimple_assign_set_lhs (epilog_stmt, new_name);
5421 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5423 if (slp_reduc)
5425 /* In SLP we don't need to apply reduction operation, so
5426 we just collect s' values in SCALAR_RESULTS. */
5427 new_temp = new_name;
5428 scalar_results.safe_push (new_name);
5430 else
5432 epilog_stmt = gimple_build_assign (new_scalar_dest, code,
5433 new_name, new_temp);
5434 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
5435 gimple_assign_set_lhs (epilog_stmt, new_temp);
5436 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5441 /* The only case where we need to reduce scalar results in SLP is
5442 unrolling. If the size of SCALAR_RESULTS is greater than
5443 REDUC_GROUP_SIZE, we reduce them by combining elements modulo
5444 REDUC_GROUP_SIZE. */
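/* Editor's illustration of the loop below (an assumption, not original
   text): with REDUC_GROUP_SIZE == 2 and four scalar results
   { r0, r1, r2, r3 }, the iterations combine r2 into r0 and r3 into
   r1, leaving { r0 op r2, r1 op r3 } as the two group results.  */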
5445 if (slp_reduc)
5447 tree res, first_res, new_res;
5448 gimple *new_stmt;
5450 /* Reduce multiple scalar results in case of SLP unrolling. */
5451 for (j = group_size; scalar_results.iterate (j, &res);
5452 j++)
5454 first_res = scalar_results[j % group_size];
5455 new_stmt = gimple_build_assign (new_scalar_dest, code,
5456 first_res, res);
5457 new_res = make_ssa_name (new_scalar_dest, new_stmt);
5458 gimple_assign_set_lhs (new_stmt, new_res);
5459 gsi_insert_before (&exit_gsi, new_stmt, GSI_SAME_STMT);
5460 scalar_results[j % group_size] = new_res;
5463 else
5464 /* Not SLP - we have one scalar to keep in SCALAR_RESULTS. */
5465 scalar_results.safe_push (new_temp);
5468 if ((STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
5469 == INTEGER_INDUC_COND_REDUCTION)
5470 && !operand_equal_p (initial_def, induc_val, 0))
5472 /* Earlier we set the initial value to be a vector of induc_val
5473 values. Check the result and if it is induc_val then replace
5474 it with the original initial value, unless induc_val is
5475 the same as initial_def already. */
5476 tree zcompare = build2 (EQ_EXPR, boolean_type_node, new_temp,
5477 induc_val);
5479 tree tmp = make_ssa_name (new_scalar_dest);
5480 epilog_stmt = gimple_build_assign (tmp, COND_EXPR, zcompare,
5481 initial_def, new_temp);
5482 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5483 scalar_results[0] = tmp;
5487 vect_finalize_reduction:
5489 if (double_reduc)
5490 loop = loop->inner;
5492 /* 2.5 Adjust the final result by the initial value of the reduction
5493 variable. (When such adjustment is not needed, then
5494 'adjustment_def' is zero). For example, if code is PLUS we create:
5495 new_temp = loop_exit_def + adjustment_def */
5497 if (adjustment_def)
5499 gcc_assert (!slp_reduc);
5500 if (nested_in_vect_loop)
5502 new_phi = new_phis[0];
5503 gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) == VECTOR_TYPE);
5504 expr = build2 (code, vectype, PHI_RESULT (new_phi), adjustment_def);
5505 new_dest = vect_create_destination_var (scalar_dest, vectype);
5507 else
5509 new_temp = scalar_results[0];
5510 gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) != VECTOR_TYPE);
5511 expr = build2 (code, scalar_type, new_temp, adjustment_def);
5512 new_dest = vect_create_destination_var (scalar_dest, scalar_type);
5515 epilog_stmt = gimple_build_assign (new_dest, expr);
5516 new_temp = make_ssa_name (new_dest, epilog_stmt);
5517 gimple_assign_set_lhs (epilog_stmt, new_temp);
5518 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5519 if (nested_in_vect_loop)
5521 stmt_vec_info epilog_stmt_info = loop_vinfo->add_stmt (epilog_stmt);
5522 STMT_VINFO_RELATED_STMT (epilog_stmt_info)
5523 = STMT_VINFO_RELATED_STMT (loop_vinfo->lookup_stmt (new_phi));
5525 if (!double_reduc)
5526 scalar_results.quick_push (new_temp);
5527 else
5528 scalar_results[0] = new_temp;
5530 else
5531 scalar_results[0] = new_temp;
5533 new_phis[0] = epilog_stmt;
5536 /* 2.6 Handle the loop-exit phis. Replace the uses of scalar loop-exit
5537 phis with new adjusted scalar results, i.e., replace use <s_out0>
5538 with use <s_out4>.
5540 Transform:
5541 loop_exit:
5542 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
5543 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
5544 v_out2 = reduce <v_out1>
5545 s_out3 = extract_field <v_out2, 0>
5546 s_out4 = adjust_result <s_out3>
5547 use <s_out0>
5548 use <s_out0>
5550 into:
5552 loop_exit:
5553 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
5554 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
5555 v_out2 = reduce <v_out1>
5556 s_out3 = extract_field <v_out2, 0>
5557 s_out4 = adjust_result <s_out3>
5558 use <s_out4>
5559 use <s_out4> */
5562 /* In SLP reduction chain we reduce vector results into one vector if
5563 necessary, hence we set here REDUC_GROUP_SIZE to 1. SCALAR_DEST is the
5564 LHS of the last stmt in the reduction chain, since we are looking for
5565 the loop exit phi node. */
5566 if (REDUC_GROUP_FIRST_ELEMENT (stmt_info))
5568 stmt_vec_info dest_stmt_info
5569 = vect_orig_stmt (SLP_TREE_SCALAR_STMTS (slp_node)[group_size - 1]);
5570 scalar_dest = gimple_assign_lhs (dest_stmt_info->stmt);
5571 group_size = 1;
5574 /* In SLP we may have several statements in NEW_PHIS and REDUCTION_PHIS (in
5575 case that REDUC_GROUP_SIZE is greater than vectorization factor).
5576 Therefore, we need to match SCALAR_RESULTS with corresponding statements.
5577 The first (REDUC_GROUP_SIZE / number of new vector stmts) scalar results
5578 correspond to the first vector stmt, etc.
5579 (RATIO is equal to (REDUC_GROUP_SIZE / number of new vector stmts)). */
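/* Editor's illustration (an assumption, not original text): with
   REDUC_GROUP_SIZE == 4 and two statements in NEW_PHIS, RATIO is 2,
   so scalar results 0 and 1 are matched with NEW_PHIS[0] and scalar
   results 2 and 3 with NEW_PHIS[1] in the loop below.  */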
5580 if (group_size > new_phis.length ())
5582 ratio = group_size / new_phis.length ();
5583 gcc_assert (!(group_size % new_phis.length ()));
5585 else
5586 ratio = 1;
5588 stmt_vec_info epilog_stmt_info = NULL;
5589 for (k = 0; k < group_size; k++)
5591 if (k % ratio == 0)
5593 epilog_stmt_info = loop_vinfo->lookup_stmt (new_phis[k / ratio]);
5594 reduction_phi_info = reduction_phis[k / ratio];
5595 if (double_reduc)
5596 inner_phi = inner_phis[k / ratio];
5599 if (slp_reduc)
5601 stmt_vec_info scalar_stmt_info = SLP_TREE_SCALAR_STMTS (slp_node)[k];
5603 orig_stmt_info = STMT_VINFO_RELATED_STMT (scalar_stmt_info);
5604 /* SLP statements can't participate in patterns. */
5605 gcc_assert (!orig_stmt_info);
5606 scalar_dest = gimple_assign_lhs (scalar_stmt_info->stmt);
5609 phis.create (3);
5610 /* Find the loop-closed-use at the loop exit of the original scalar
5611 result. (The reduction result is expected to have two immediate uses -
5612 one at the latch block, and one at the loop exit). */
5613 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
5614 if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p)))
5615 && !is_gimple_debug (USE_STMT (use_p)))
5616 phis.safe_push (USE_STMT (use_p));
5618 /* While we expect to have found an exit_phi because of loop-closed-ssa
5619 form we can end up without one if the scalar cycle is dead. */
5621 FOR_EACH_VEC_ELT (phis, i, exit_phi)
5623 if (outer_loop)
5625 stmt_vec_info exit_phi_vinfo
5626 = loop_vinfo->lookup_stmt (exit_phi);
5627 gphi *vect_phi;
5629 /* FORNOW. Currently not supporting the case that an inner-loop
5630 reduction is not used in the outer-loop (but only outside the
5631 outer-loop), unless it is a double reduction. */
5632 gcc_assert ((STMT_VINFO_RELEVANT_P (exit_phi_vinfo)
5633 && !STMT_VINFO_LIVE_P (exit_phi_vinfo))
5634 || double_reduc);
5636 if (double_reduc)
5637 STMT_VINFO_VEC_STMT (exit_phi_vinfo) = inner_phi;
5638 else
5639 STMT_VINFO_VEC_STMT (exit_phi_vinfo) = epilog_stmt_info;
5640 if (!double_reduc
5641 || STMT_VINFO_DEF_TYPE (exit_phi_vinfo)
5642 != vect_double_reduction_def)
5643 continue;
5645 /* Handle double reduction:
5647 stmt1: s1 = phi <s0, s2> - double reduction phi (outer loop)
5648 stmt2: s3 = phi <s1, s4> - (regular) reduc phi (inner loop)
5649 stmt3: s4 = use (s3) - (regular) reduc stmt (inner loop)
5650 stmt4: s2 = phi <s4> - double reduction stmt (outer loop)
5652 At that point the regular reduction (stmt2 and stmt3) is
5653 already vectorized, as well as the exit phi node, stmt4.
5654 Here we vectorize the phi node of double reduction, stmt1, and
5655 update all relevant statements. */
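/* Editor's note, a source-level shape that produces the stmt1..stmt4
   pattern above (an illustration, not original text):

     int l = 0;
     for (int i = 0; i < n1; ++i)      outer loop: stmt1 and stmt4
       for (int j = 0; j < n2; ++j)    inner loop: stmt2 and stmt3
         l += a[j];  */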
5657 /* Go through all the uses of s2 to find double reduction phi
5658 node, i.e., stmt1 above. */
5659 orig_name = PHI_RESULT (exit_phi);
5660 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
5662 stmt_vec_info use_stmt_vinfo;
5663 tree vect_phi_init, preheader_arg, vect_phi_res;
5664 basic_block bb = gimple_bb (use_stmt);
5666 /* Check that USE_STMT is really double reduction phi
5667 node. */
5668 if (gimple_code (use_stmt) != GIMPLE_PHI
5669 || gimple_phi_num_args (use_stmt) != 2
5670 || bb->loop_father != outer_loop)
5671 continue;
5672 use_stmt_vinfo = loop_vinfo->lookup_stmt (use_stmt);
5673 if (!use_stmt_vinfo
5674 || STMT_VINFO_DEF_TYPE (use_stmt_vinfo)
5675 != vect_double_reduction_def)
5676 continue;
5678 /* Create vector phi node for double reduction:
5679 vs1 = phi <vs0, vs2>
5680 vs1 was created previously in this function by a call to
5681 vect_get_vec_def_for_operand and is stored in
5682 vec_initial_def;
5683 vs2 is defined by INNER_PHI, the vectorized EXIT_PHI;
5684 vs0 is created here. */
5686 /* Create vector phi node. */
5687 vect_phi = create_phi_node (vec_initial_def, bb);
5688 loop_vec_info_for_loop (outer_loop)->add_stmt (vect_phi);
5690 /* Create vs0 - initial def of the double reduction phi. */
5691 preheader_arg = PHI_ARG_DEF_FROM_EDGE (use_stmt,
5692 loop_preheader_edge (outer_loop));
5693 vect_phi_init = get_initial_def_for_reduction
5694 (stmt_info, preheader_arg, NULL);
5696 /* Update phi node arguments with vs0 and vs2. */
5697 add_phi_arg (vect_phi, vect_phi_init,
5698 loop_preheader_edge (outer_loop),
5699 UNKNOWN_LOCATION);
5700 add_phi_arg (vect_phi, PHI_RESULT (inner_phi->stmt),
5701 loop_latch_edge (outer_loop), UNKNOWN_LOCATION);
5702 if (dump_enabled_p ())
5704 dump_printf_loc (MSG_NOTE, vect_location,
5705 "created double reduction phi node: ");
5706 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vect_phi, 0);
5709 vect_phi_res = PHI_RESULT (vect_phi);
5711 /* Replace the use, i.e., set the correct vs1 in the regular
5712 reduction phi node. FORNOW, NCOPIES is always 1, so the
5713 loop is redundant. */
5714 stmt_vec_info use_info = reduction_phi_info;
5715 for (j = 0; j < ncopies; j++)
5717 edge pr_edge = loop_preheader_edge (loop);
5718 SET_PHI_ARG_DEF (as_a <gphi *> (use_info->stmt),
5719 pr_edge->dest_idx, vect_phi_res);
5720 use_info = STMT_VINFO_RELATED_STMT (use_info);
5726 phis.release ();
5727 if (nested_in_vect_loop)
5729 if (double_reduc)
5730 loop = outer_loop;
5731 else
5732 continue;
5735 phis.create (3);
5736 /* Find the loop-closed-use at the loop exit of the original scalar
5737 result. (The reduction result is expected to have two immediate uses,
5738 one at the latch block, and one at the loop exit). For double
5739 reductions we are looking for exit phis of the outer loop. */
5740 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
5742 if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p))))
5744 if (!is_gimple_debug (USE_STMT (use_p)))
5745 phis.safe_push (USE_STMT (use_p));
5747 else
5749 if (double_reduc && gimple_code (USE_STMT (use_p)) == GIMPLE_PHI)
5751 tree phi_res = PHI_RESULT (USE_STMT (use_p));
5753 FOR_EACH_IMM_USE_FAST (phi_use_p, phi_imm_iter, phi_res)
5755 if (!flow_bb_inside_loop_p (loop,
5756 gimple_bb (USE_STMT (phi_use_p)))
5757 && !is_gimple_debug (USE_STMT (phi_use_p)))
5758 phis.safe_push (USE_STMT (phi_use_p));
5764 FOR_EACH_VEC_ELT (phis, i, exit_phi)
5766 /* Replace the uses: */
5767 orig_name = PHI_RESULT (exit_phi);
5768 scalar_result = scalar_results[k];
5769 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
5770 FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
5771 SET_USE (use_p, scalar_result);
5774 phis.release ();
5778 /* Return a vector of type VECTYPE that is equal to the vector select
5779 operation "MASK ? VEC : IDENTITY". Insert the select statements
5780 before GSI. */
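/* Editor's note (an assumption for illustration): in the fold-left
   reduction path below the identity passed in is a zero vector, so for
   a masked addition the inactive lanes contribute 0 and leave the
   accumulator unchanged.  */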
5782 static tree
5783 merge_with_identity (gimple_stmt_iterator *gsi, tree mask, tree vectype,
5784 tree vec, tree identity)
5786 tree cond = make_temp_ssa_name (vectype, NULL, "cond");
5787 gimple *new_stmt = gimple_build_assign (cond, VEC_COND_EXPR,
5788 mask, vec, identity);
5789 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
5790 return cond;
5793 /* Successively apply CODE to each element of VECTOR_RHS, in left-to-right
5794 order, starting with LHS. Insert the extraction statements before GSI and
5795 associate the new scalar SSA names with variable SCALAR_DEST.
5796 Return the SSA name for the result. */
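/* Editor's illustration of the expansion (an assumption, not original
   text): for a V4SF VECTOR_RHS and PLUS_EXPR this emits, in order,

     s0 = BIT_FIELD_REF <rhs, 32, 0>;    lhs = lhs + s0;
     s1 = BIT_FIELD_REF <rhs, 32, 32>;   lhs = lhs + s1;
     s2 = BIT_FIELD_REF <rhs, 32, 64>;   lhs = lhs + s2;
     s3 = BIT_FIELD_REF <rhs, 32, 96>;   lhs = lhs + s3;

   preserving the original left-to-right association of the scalar
   loop.  */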
5798 static tree
5799 vect_expand_fold_left (gimple_stmt_iterator *gsi, tree scalar_dest,
5800 tree_code code, tree lhs, tree vector_rhs)
5802 tree vectype = TREE_TYPE (vector_rhs);
5803 tree scalar_type = TREE_TYPE (vectype);
5804 tree bitsize = TYPE_SIZE (scalar_type);
5805 unsigned HOST_WIDE_INT vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype));
5806 unsigned HOST_WIDE_INT element_bitsize = tree_to_uhwi (bitsize);
5808 for (unsigned HOST_WIDE_INT bit_offset = 0;
5809 bit_offset < vec_size_in_bits;
5810 bit_offset += element_bitsize)
5812 tree bitpos = bitsize_int (bit_offset);
5813 tree rhs = build3 (BIT_FIELD_REF, scalar_type, vector_rhs,
5814 bitsize, bitpos);
5816 gassign *stmt = gimple_build_assign (scalar_dest, rhs);
5817 rhs = make_ssa_name (scalar_dest, stmt);
5818 gimple_assign_set_lhs (stmt, rhs);
5819 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
5821 stmt = gimple_build_assign (scalar_dest, code, lhs, rhs);
5822 tree new_name = make_ssa_name (scalar_dest, stmt);
5823 gimple_assign_set_lhs (stmt, new_name);
5824 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
5825 lhs = new_name;
5827 return lhs;
5830 /* Perform an in-order reduction (FOLD_LEFT_REDUCTION). STMT_INFO is the
5831 statement that sets the live-out value. REDUC_DEF_STMT is the phi
5832 statement. CODE is the operation performed by STMT_INFO and OPS are
5833 its scalar operands. REDUC_INDEX is the index of the operand in
5834 OPS that is set by REDUC_DEF_STMT. REDUC_FN is the function that
5835 implements in-order reduction, or IFN_LAST if we should open-code it.
5836 VECTYPE_IN is the type of the vector input. MASKS specifies the masks
5837 that should be used to control the operation in a fully-masked loop. */
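/* Editor's note, a typical use case (an assumption, not original text):
   a floating-point accumulation compiled without -ffast-math, e.g.

     double s = 0.0;
     for (int i = 0; i < n; ++i)
       s += a[i];

   may not be reassociated, so each vector of A is folded into S in
   element order, either through REDUC_FN or through the open-coded
   expansion in vect_expand_fold_left above.  */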
5839 static bool
5840 vectorize_fold_left_reduction (stmt_vec_info stmt_info,
5841 gimple_stmt_iterator *gsi,
5842 stmt_vec_info *vec_stmt, slp_tree slp_node,
5843 gimple *reduc_def_stmt,
5844 tree_code code, internal_fn reduc_fn,
5845 tree ops[3], tree vectype_in,
5846 int reduc_index, vec_loop_masks *masks)
5848 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5849 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
5850 tree vectype_out = STMT_VINFO_VECTYPE (stmt_info);
5851 stmt_vec_info new_stmt_info = NULL;
5853 int ncopies;
5854 if (slp_node)
5855 ncopies = 1;
5856 else
5857 ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
5859 gcc_assert (!nested_in_vect_loop_p (loop, stmt_info));
5860 gcc_assert (ncopies == 1);
5861 gcc_assert (TREE_CODE_LENGTH (code) == binary_op);
5862 gcc_assert (reduc_index == (code == MINUS_EXPR ? 0 : 1));
5863 gcc_assert (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
5864 == FOLD_LEFT_REDUCTION);
5866 if (slp_node)
5867 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (vectype_out),
5868 TYPE_VECTOR_SUBPARTS (vectype_in)));
5870 tree op0 = ops[1 - reduc_index];
5872 int group_size = 1;
5873 stmt_vec_info scalar_dest_def_info;
5874 auto_vec<tree> vec_oprnds0;
5875 if (slp_node)
5877 vect_get_vec_defs (op0, NULL_TREE, stmt_info, &vec_oprnds0, NULL,
5878 slp_node);
5879 group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();
5880 scalar_dest_def_info = SLP_TREE_SCALAR_STMTS (slp_node)[group_size - 1];
5882 else
5884 tree loop_vec_def0 = vect_get_vec_def_for_operand (op0, stmt_info);
5885 vec_oprnds0.create (1);
5886 vec_oprnds0.quick_push (loop_vec_def0);
5887 scalar_dest_def_info = stmt_info;
5890 tree scalar_dest = gimple_assign_lhs (scalar_dest_def_info->stmt);
5891 tree scalar_type = TREE_TYPE (scalar_dest);
5892 tree reduc_var = gimple_phi_result (reduc_def_stmt);
5894 int vec_num = vec_oprnds0.length ();
5895 gcc_assert (vec_num == 1 || slp_node);
5896 tree vec_elem_type = TREE_TYPE (vectype_out);
5897 gcc_checking_assert (useless_type_conversion_p (scalar_type, vec_elem_type));
5899 tree vector_identity = NULL_TREE;
5900 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
5901 vector_identity = build_zero_cst (vectype_out);
5903 tree scalar_dest_var = vect_create_destination_var (scalar_dest, NULL);
5904 int i;
5905 tree def0;
5906 FOR_EACH_VEC_ELT (vec_oprnds0, i, def0)
5908 gimple *new_stmt;
5909 tree mask = NULL_TREE;
5910 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
5911 mask = vect_get_loop_mask (gsi, masks, vec_num, vectype_in, i);
5913 /* Handle MINUS by adding the negative. */
5914 if (reduc_fn != IFN_LAST && code == MINUS_EXPR)
5916 tree negated = make_ssa_name (vectype_out);
5917 new_stmt = gimple_build_assign (negated, NEGATE_EXPR, def0);
5918 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
5919 def0 = negated;
5922 if (mask)
5923 def0 = merge_with_identity (gsi, mask, vectype_out, def0,
5924 vector_identity);
5926 /* On the first iteration the input is simply the scalar phi
5927 result, and for subsequent iterations it is the output of
5928 the preceding operation. */
5929 if (reduc_fn != IFN_LAST)
5931 new_stmt = gimple_build_call_internal (reduc_fn, 2, reduc_var, def0);
5932 /* For chained SLP reductions the output of the previous reduction
5933 operation serves as the input of the next. For the final statement
5934 the output cannot be a temporary - we reuse the original
5935 scalar destination of the last statement. */
5936 if (i != vec_num - 1)
5938 gimple_set_lhs (new_stmt, scalar_dest_var);
5939 reduc_var = make_ssa_name (scalar_dest_var, new_stmt);
5940 gimple_set_lhs (new_stmt, reduc_var);
5943 else
5945 reduc_var = vect_expand_fold_left (gsi, scalar_dest_var, code,
5946 reduc_var, def0);
5947 new_stmt = SSA_NAME_DEF_STMT (reduc_var);
5948 /* Remove the statement, so that we can use the same code paths
5949 as for statements that we've just created. */
5950 gimple_stmt_iterator tmp_gsi = gsi_for_stmt (new_stmt);
5951 gsi_remove (&tmp_gsi, false);
5954 if (i == vec_num - 1)
5956 gimple_set_lhs (new_stmt, scalar_dest);
5957 new_stmt_info = vect_finish_replace_stmt (scalar_dest_def_info,
5958 new_stmt);
5960 else
5961 new_stmt_info = vect_finish_stmt_generation (scalar_dest_def_info,
5962 new_stmt, gsi);
5964 if (slp_node)
5965 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
5968 if (!slp_node)
5969 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
5971 return true;
5974 /* Function is_nonwrapping_integer_induction.
5976 Check if STMT_VINFO (which is part of loop LOOP) both increments and
5977 does not cause overflow. */
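/* Editor's worked example (an assumption, not original text): for a
   32-bit unsigned IV with base 0, step 4 and at most 100 iterations,
   max_loop_value is 0 + 4 * 100 = 400, which needs only 9 bits, so the
   induction cannot wrap and the function returns true.  */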
5979 static bool
5980 is_nonwrapping_integer_induction (stmt_vec_info stmt_vinfo, struct loop *loop)
5982 gphi *phi = as_a <gphi *> (stmt_vinfo->stmt);
5983 tree base = STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo);
5984 tree step = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo);
5985 tree lhs_type = TREE_TYPE (gimple_phi_result (phi));
5986 widest_int ni, max_loop_value, lhs_max;
5987 wi::overflow_type overflow = wi::OVF_NONE;
5989 /* Make sure the loop is integer based. */
5990 if (TREE_CODE (base) != INTEGER_CST
5991 || TREE_CODE (step) != INTEGER_CST)
5992 return false;
5994 /* Check that the max size of the loop will not wrap. */
5996 if (TYPE_OVERFLOW_UNDEFINED (lhs_type))
5997 return true;
5999 if (! max_stmt_executions (loop, &ni))
6000 return false;
6002 max_loop_value = wi::mul (wi::to_widest (step), ni, TYPE_SIGN (lhs_type),
6003 &overflow);
6004 if (overflow)
6005 return false;
6007 max_loop_value = wi::add (wi::to_widest (base), max_loop_value,
6008 TYPE_SIGN (lhs_type), &overflow);
6009 if (overflow)
6010 return false;
6012 return (wi::min_precision (max_loop_value, TYPE_SIGN (lhs_type))
6013 <= TYPE_PRECISION (lhs_type));
6016 /* Function vectorizable_reduction.
6018 Check if STMT_INFO performs a reduction operation that can be vectorized.
6019 If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
6020 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
6021 Return true if STMT_INFO is vectorizable in this way.
6023 This function also handles reduction idioms (patterns) that have been
6024 recognized in advance during vect_pattern_recog. In this case, STMT_INFO
6025 may be of this form:
6026 X = pattern_expr (arg0, arg1, ..., X)
6027 and its STMT_VINFO_RELATED_STMT points to the last stmt in the original
6028 sequence that had been detected and replaced by the pattern-stmt
6029 (STMT_INFO).
6031 This function also handles reduction of condition expressions, for example:
6032 for (int i = 0; i < N; i++)
6033 if (a[i] < value)
6034 last = a[i];
6035 This is handled by vectorising the loop and creating an additional vector
6036 containing the loop indexes for which "a[i] < value" was true. In the
6037 function epilogue this is reduced to a single max value and then used to
6038 index into the vector of results.
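   As an added editorial illustration (the 1-based index encoding is an
   assumption): for N == 4, a == {9, 3, 7, 1} and value == 5, the
   condition holds for i == 1 and i == 3, so the masked index vector is
   {0, 2, 0, 4}; its maximum, 4, identifies the lane written by the
   last matching iteration, giving last == a[3] == 1, the same result
   as the scalar loop.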
6040 In some cases of reduction patterns, the type of the reduction variable X is
6041 different than the type of the other arguments of STMT_INFO.
6042 In such cases, the vectype that is used when transforming STMT_INFO into
6043 a vector stmt is different than the vectype that is used to determine the
6044 vectorization factor, because it consists of a different number of elements
6045 than the actual number of elements that are being operated upon in parallel.
6047 For example, consider an accumulation of shorts into an int accumulator.
6048 On some targets it's possible to vectorize this pattern operating on 8
6049 shorts at a time (hence, the vectype for purposes of determining the
6050 vectorization factor should be V8HI); on the other hand, the vectype that
6051 is used to create the vector form is actually V4SI (the type of the result).
6053 Upon entry to this function, STMT_VINFO_VECTYPE records the vectype that
6054 indicates the actual level of parallelism (V8HI in the example), so
6055 that the right vectorization factor is derived. This vectype
6056 corresponds to the type of arguments to the reduction stmt, and should *NOT*
6057 be used to create the vectorized stmt. The right vectype for the vectorized
6058 stmt is obtained from the type of the result X:
6059 get_vectype_for_scalar_type (TREE_TYPE (X))
6061 This means that, contrary to "regular" reductions (or "regular" stmts in
6062 general), the following equation:
6063 STMT_VINFO_VECTYPE == get_vectype_for_scalar_type (TREE_TYPE (X))
6064 does *NOT* necessarily hold for reduction patterns. */
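/* Editor's note, a concrete source form of the short-into-int
   accumulation described above (an illustration, not original text):

     short b[N];
     int sum = 0;
     for (int i = 0; i < N; ++i)
       sum += b[i];

   Here the pattern stmt is a WIDEN_SUM, STMT_VINFO_VECTYPE is V8HI
   (on a 128-bit target, eight shorts fix the vectorization factor) and
   the vectorized stmt itself produces a V4SI accumulator.  */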
6066 bool
6067 vectorizable_reduction (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
6068 stmt_vec_info *vec_stmt, slp_tree slp_node,
6069 slp_instance slp_node_instance,
6070 stmt_vector_for_cost *cost_vec)
6072 tree vec_dest;
6073 tree scalar_dest;
6074 tree vectype_out = STMT_VINFO_VECTYPE (stmt_info);
6075 tree vectype_in = NULL_TREE;
6076 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
6077 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
6078 enum tree_code code, orig_code;
6079 internal_fn reduc_fn;
6080 machine_mode vec_mode;
6081 int op_type;
6082 optab optab;
6083 tree new_temp = NULL_TREE;
6084 enum vect_def_type dt, cond_reduc_dt = vect_unknown_def_type;
6085 stmt_vec_info cond_stmt_vinfo = NULL;
6086 enum tree_code cond_reduc_op_code = ERROR_MARK;
6087 tree scalar_type;
6088 bool is_simple_use;
6089 int i;
6090 int ncopies;
6091 int epilog_copies;
6092 stmt_vec_info prev_stmt_info, prev_phi_info;
6093 bool single_defuse_cycle = false;
6094 stmt_vec_info new_stmt_info = NULL;
6095 int j;
6096 tree ops[3];
6097 enum vect_def_type dts[3];
6098 bool nested_cycle = false, found_nested_cycle_def = false;
6099 bool double_reduc = false;
6100 basic_block def_bb;
6101 struct loop * def_stmt_loop;
6102 tree def_arg;
6103 auto_vec<tree> vec_oprnds0;
6104 auto_vec<tree> vec_oprnds1;
6105 auto_vec<tree> vec_oprnds2;
6106 auto_vec<tree> vect_defs;
6107 auto_vec<stmt_vec_info> phis;
6108 int vec_num;
6109 tree def0, tem;
6110 tree cr_index_scalar_type = NULL_TREE, cr_index_vector_type = NULL_TREE;
6111 tree cond_reduc_val = NULL_TREE;
6113 /* Make sure it was already recognized as a reduction computation. */
6114 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_reduction_def
6115 && STMT_VINFO_DEF_TYPE (stmt_info) != vect_nested_cycle)
6116 return false;
6118 if (nested_in_vect_loop_p (loop, stmt_info))
6120 loop = loop->inner;
6121 nested_cycle = true;
6124 if (REDUC_GROUP_FIRST_ELEMENT (stmt_info))
6125 gcc_assert (slp_node
6126 && REDUC_GROUP_FIRST_ELEMENT (stmt_info) == stmt_info);
6128 if (gphi *phi = dyn_cast <gphi *> (stmt_info->stmt))
6130 tree phi_result = gimple_phi_result (phi);
6131 /* Analysis is fully done on the reduction stmt invocation. */
6132 if (! vec_stmt)
6134 if (slp_node)
6135 slp_node_instance->reduc_phis = slp_node;
6137 STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
6138 return true;
6141 if (STMT_VINFO_REDUC_TYPE (stmt_info) == FOLD_LEFT_REDUCTION)
6142 /* Leave the scalar phi in place. Note that checking
6143 STMT_VINFO_VEC_REDUCTION_TYPE (as below) only works
6144 for reductions involving a single statement. */
6145 return true;
6147 stmt_vec_info reduc_stmt_info = STMT_VINFO_REDUC_DEF (stmt_info);
6148 reduc_stmt_info = vect_stmt_to_vectorize (reduc_stmt_info);
6150 if (STMT_VINFO_VEC_REDUCTION_TYPE (reduc_stmt_info)
6151 == EXTRACT_LAST_REDUCTION)
6152 /* Leave the scalar phi in place. */
6153 return true;
6155 gassign *reduc_stmt = as_a <gassign *> (reduc_stmt_info->stmt);
6156 for (unsigned k = 1; k < gimple_num_ops (reduc_stmt); ++k)
6158 tree op = gimple_op (reduc_stmt, k);
6159 if (op == phi_result)
6160 continue;
6161 if (k == 1
6162 && gimple_assign_rhs_code (reduc_stmt) == COND_EXPR)
6163 continue;
6164 if (!vectype_in
6165 || (GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (vectype_in)))
6166 < GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (op)))))
6167 vectype_in = get_vectype_for_scalar_type (TREE_TYPE (op));
6168 break;
6170 gcc_assert (vectype_in);
6172 if (slp_node)
6173 ncopies = 1;
6174 else
6175 ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
6177 stmt_vec_info use_stmt_info;
6178 if (ncopies > 1
6179 && STMT_VINFO_RELEVANT (reduc_stmt_info) <= vect_used_only_live
6180 && (use_stmt_info = loop_vinfo->lookup_single_use (phi_result))
6181 && vect_stmt_to_vectorize (use_stmt_info) == reduc_stmt_info)
6182 single_defuse_cycle = true;
6184 /* Create the destination vector */
6185 scalar_dest = gimple_assign_lhs (reduc_stmt);
6186 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
6188 if (slp_node)
6189 /* The size vect_schedule_slp_instance computes is off for us. */
6190 vec_num = vect_get_num_vectors
6191 (LOOP_VINFO_VECT_FACTOR (loop_vinfo)
6192 * SLP_TREE_SCALAR_STMTS (slp_node).length (),
6193 vectype_in);
6194 else
6195 vec_num = 1;
6197 /* Generate the reduction PHIs upfront. */
6198 prev_phi_info = NULL;
6199 for (j = 0; j < ncopies; j++)
6201 if (j == 0 || !single_defuse_cycle)
6203 for (i = 0; i < vec_num; i++)
6205 /* Create the reduction-phi that defines the reduction
6206 operand. */
6207 gimple *new_phi = create_phi_node (vec_dest, loop->header);
6208 stmt_vec_info new_phi_info = loop_vinfo->add_stmt (new_phi);
6210 if (slp_node)
6211 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_phi_info);
6212 else
6214 if (j == 0)
6215 STMT_VINFO_VEC_STMT (stmt_info)
6216 = *vec_stmt = new_phi_info;
6217 else
6218 STMT_VINFO_RELATED_STMT (prev_phi_info) = new_phi_info;
6219 prev_phi_info = new_phi_info;
6225 return true;
6228 /* 1. Is vectorizable reduction? */
6229 /* Not supportable if the reduction variable is used in the loop, unless
6230 it's a reduction chain. */
6231 if (STMT_VINFO_RELEVANT (stmt_info) > vect_used_in_outer
6232 && !REDUC_GROUP_FIRST_ELEMENT (stmt_info))
6233 return false;
6235 /* Reductions that are not used even in an enclosing outer-loop,
6236 are expected to be "live" (used out of the loop). */
6237 if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_scope
6238 && !STMT_VINFO_LIVE_P (stmt_info))
6239 return false;
6241 /* 2. Has this been recognized as a reduction pattern?
6243 Check if STMT represents a pattern that has been recognized
6244 in earlier analysis stages. For stmts that represent a pattern,
6245 the STMT_VINFO_RELATED_STMT field records the last stmt in
6246 the original sequence that constitutes the pattern. */
6248 stmt_vec_info orig_stmt_info = STMT_VINFO_RELATED_STMT (stmt_info);
6249 if (orig_stmt_info)
6251 gcc_assert (STMT_VINFO_IN_PATTERN_P (orig_stmt_info));
6252 gcc_assert (!STMT_VINFO_IN_PATTERN_P (stmt_info));
6255 /* 3. Check the operands of the operation. The first operands are defined
6256 inside the loop body. The last operand is the reduction variable,
6257 which is defined by the loop-header-phi. */
6259 gassign *stmt = as_a <gassign *> (stmt_info->stmt);
6261 /* Flatten RHS. */
6262 switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
6264 case GIMPLE_BINARY_RHS:
6265 code = gimple_assign_rhs_code (stmt);
6266 op_type = TREE_CODE_LENGTH (code);
6267 gcc_assert (op_type == binary_op);
6268 ops[0] = gimple_assign_rhs1 (stmt);
6269 ops[1] = gimple_assign_rhs2 (stmt);
6270 break;
6272 case GIMPLE_TERNARY_RHS:
6273 code = gimple_assign_rhs_code (stmt);
6274 op_type = TREE_CODE_LENGTH (code);
6275 gcc_assert (op_type == ternary_op);
6276 ops[0] = gimple_assign_rhs1 (stmt);
6277 ops[1] = gimple_assign_rhs2 (stmt);
6278 ops[2] = gimple_assign_rhs3 (stmt);
6279 break;
6281 case GIMPLE_UNARY_RHS:
6282 return false;
6284 default:
6285 gcc_unreachable ();
6288 if (code == COND_EXPR && slp_node)
6289 return false;
6291 scalar_dest = gimple_assign_lhs (stmt);
6292 scalar_type = TREE_TYPE (scalar_dest);
6293 if (!POINTER_TYPE_P (scalar_type) && !INTEGRAL_TYPE_P (scalar_type)
6294 && !SCALAR_FLOAT_TYPE_P (scalar_type))
6295 return false;
6297 /* Do not try to vectorize bit-precision reductions. */
6298 if (!type_has_mode_precision_p (scalar_type))
6299 return false;
6301 /* All uses but the last are expected to be defined in the loop.
6302 The last use is the reduction variable. In case of nested cycle this
6303 assumption is not true: we use reduc_index to record the index of the
6304 reduction variable. */
6305 stmt_vec_info reduc_def_info = NULL;
6306 int reduc_index = -1;
6307 for (i = 0; i < op_type; i++)
6309 /* The condition of COND_EXPR is checked in vectorizable_condition(). */
6310 if (i == 0 && code == COND_EXPR)
6311 continue;
6313 stmt_vec_info def_stmt_info;
6314 is_simple_use = vect_is_simple_use (ops[i], loop_vinfo, &dts[i], &tem,
6315 &def_stmt_info);
6316 dt = dts[i];
6317 gcc_assert (is_simple_use);
6318 if (dt == vect_reduction_def)
6320 reduc_def_info = def_stmt_info;
6321 reduc_index = i;
6322 continue;
6324 else if (tem)
6326 /* To properly compute ncopies we are interested in the widest
6327 input type in case we're looking at a widening accumulation. */
6328 if (!vectype_in
6329 || (GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (vectype_in)))
6330 < GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (tem)))))
6331 vectype_in = tem;
6334 if (dt != vect_internal_def
6335 && dt != vect_external_def
6336 && dt != vect_constant_def
6337 && dt != vect_induction_def
6338 && !(dt == vect_nested_cycle && nested_cycle))
6339 return false;
6341 if (dt == vect_nested_cycle)
6343 found_nested_cycle_def = true;
6344 reduc_def_info = def_stmt_info;
6345 reduc_index = i;
6348 if (i == 1 && code == COND_EXPR)
6350 /* Record how value of COND_EXPR is defined. */
6351 if (dt == vect_constant_def)
6353 cond_reduc_dt = dt;
6354 cond_reduc_val = ops[i];
6356 if (dt == vect_induction_def
6357 && def_stmt_info
6358 && is_nonwrapping_integer_induction (def_stmt_info, loop))
6360 cond_reduc_dt = dt;
6361 cond_stmt_vinfo = def_stmt_info;
6366 if (!vectype_in)
6367 vectype_in = vectype_out;
6369 /* When vectorizing a reduction chain w/o SLP the reduction PHI is not
6370 directly used in stmt. */
6371 if (reduc_index == -1)
6373 if (STMT_VINFO_REDUC_TYPE (stmt_info) == FOLD_LEFT_REDUCTION)
6375 if (dump_enabled_p ())
6376 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6377 "in-order reduction chain without SLP.\n");
6378 return false;
6381 if (orig_stmt_info)
6382 reduc_def_info = STMT_VINFO_REDUC_DEF (orig_stmt_info);
6383 else
6384 reduc_def_info = STMT_VINFO_REDUC_DEF (stmt_info);
6387 if (! reduc_def_info)
6388 return false;
6390 gphi *reduc_def_phi = dyn_cast <gphi *> (reduc_def_info->stmt);
6391 if (!reduc_def_phi)
6392 return false;
6394 if (!(reduc_index == -1
6395 || dts[reduc_index] == vect_reduction_def
6396 || dts[reduc_index] == vect_nested_cycle
6397 || ((dts[reduc_index] == vect_internal_def
6398 || dts[reduc_index] == vect_external_def
6399 || dts[reduc_index] == vect_constant_def
6400 || dts[reduc_index] == vect_induction_def)
6401 && nested_cycle && found_nested_cycle_def)))
6403 /* For pattern recognized stmts, orig_stmt might be a reduction,
6404 but some helper statements for the pattern might not, or
6405 might be COND_EXPRs with reduction uses in the condition. */
6406 gcc_assert (orig_stmt_info);
6407 return false;
6410 /* PHIs should not participate in patterns. */
6411 gcc_assert (!STMT_VINFO_RELATED_STMT (reduc_def_info));
6412 enum vect_reduction_type v_reduc_type
6413 = STMT_VINFO_REDUC_TYPE (reduc_def_info);
6414 stmt_vec_info tmp = STMT_VINFO_REDUC_DEF (reduc_def_info);
6416 STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) = v_reduc_type;
6417 /* If we have a condition reduction, see if we can simplify it further. */
6418 if (v_reduc_type == COND_REDUCTION)
6420 /* TODO: We can't yet handle reduction chains, since we need to treat
6421 each COND_EXPR in the chain specially, not just the last one.
6422 E.g. for:
6424 x_1 = PHI <x_3, ...>
6425 x_2 = a_2 ? ... : x_1;
6426 x_3 = a_3 ? ... : x_2;
6428 we're interested in the last element in x_3 for which a_2 || a_3
6429 is true, whereas the current reduction chain handling would
6430 vectorize x_2 as a normal VEC_COND_EXPR and only treat x_3
6431 as a reduction operation. */
6432 if (reduc_index == -1)
6434 if (dump_enabled_p ())
6435 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6436 "conditional reduction chains not supported\n");
6437 return false;
6440 /* vect_is_simple_reduction ensured that operand 2 is the
6441 loop-carried operand. */
6442 gcc_assert (reduc_index == 2);
6444 /* Loop peeling modifies the initial value of the reduction PHI, which
6445 makes the reduction stmt to be transformed differ from the
6446 original stmt analyzed. We need to record the reduction code for a
6447 CONST_COND_REDUCTION type reduction at analysis stage, so that
6448 it can be used directly at transform stage. */
6449 if (STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info) == MAX_EXPR
6450 || STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info) == MIN_EXPR)
6452 /* Also set the reduction type to CONST_COND_REDUCTION. */
6453 gcc_assert (cond_reduc_dt == vect_constant_def);
6454 STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) = CONST_COND_REDUCTION;
6456 else if (direct_internal_fn_supported_p (IFN_FOLD_EXTRACT_LAST,
6457 vectype_in, OPTIMIZE_FOR_SPEED))
6459 if (dump_enabled_p ())
6460 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6461 "optimizing condition reduction with"
6462 " FOLD_EXTRACT_LAST.\n");
6463 STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) = EXTRACT_LAST_REDUCTION;
6465 else if (cond_reduc_dt == vect_induction_def)
6467 tree base
6468 = STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (cond_stmt_vinfo);
6469 tree step = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (cond_stmt_vinfo);
6471 gcc_assert (TREE_CODE (base) == INTEGER_CST
6472 && TREE_CODE (step) == INTEGER_CST);
6473 cond_reduc_val = NULL_TREE;
6474 /* Find a suitable value, for MAX_EXPR below base, for MIN_EXPR
6475 above base; punt if base is the minimum value of the type for
6476 MAX_EXPR or maximum value of the type for MIN_EXPR for now. */
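/* Editor's worked example (an assumption, not original text): for an
   induction with base 10 and step -1 the code picks MIN_EXPR and
   cond_reduc_val = 11 (base + 1).  Since the IV never exceeds 10, a
   final reduction result of 11 can only mean that no iteration
   matched, which the epilogue uses to restore the original initial
   value.  */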
6477 if (tree_int_cst_sgn (step) == -1)
6479 cond_reduc_op_code = MIN_EXPR;
6480 if (tree_int_cst_sgn (base) == -1)
6481 cond_reduc_val = build_int_cst (TREE_TYPE (base), 0);
6482 else if (tree_int_cst_lt (base,
6483 TYPE_MAX_VALUE (TREE_TYPE (base))))
6484 cond_reduc_val
6485 = int_const_binop (PLUS_EXPR, base, integer_one_node);
6487 else
6489 cond_reduc_op_code = MAX_EXPR;
6490 if (tree_int_cst_sgn (base) == 1)
6491 cond_reduc_val = build_int_cst (TREE_TYPE (base), 0);
6492 else if (tree_int_cst_lt (TYPE_MIN_VALUE (TREE_TYPE (base)),
6493 base))
6494 cond_reduc_val
6495 = int_const_binop (MINUS_EXPR, base, integer_one_node);
6497 if (cond_reduc_val)
6499 if (dump_enabled_p ())
6500 dump_printf_loc (MSG_NOTE, vect_location,
6501 "condition expression based on "
6502 "integer induction.\n");
6503 STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
6504 = INTEGER_INDUC_COND_REDUCTION;
6507 else if (cond_reduc_dt == vect_constant_def)
6509 enum vect_def_type cond_initial_dt;
6510 gimple *def_stmt = SSA_NAME_DEF_STMT (ops[reduc_index]);
6511 tree cond_initial_val
6512 = PHI_ARG_DEF_FROM_EDGE (def_stmt, loop_preheader_edge (loop));
6514 gcc_assert (cond_reduc_val != NULL_TREE);
6515 vect_is_simple_use (cond_initial_val, loop_vinfo, &cond_initial_dt);
6516 if (cond_initial_dt == vect_constant_def
6517 && types_compatible_p (TREE_TYPE (cond_initial_val),
6518 TREE_TYPE (cond_reduc_val)))
6520 tree e = fold_binary (LE_EXPR, boolean_type_node,
6521 cond_initial_val, cond_reduc_val);
6522 if (e && (integer_onep (e) || integer_zerop (e)))
6524 if (dump_enabled_p ())
6525 dump_printf_loc (MSG_NOTE, vect_location,
6526 "condition expression based on "
6527 "compile time constant.\n");
6528 /* Record reduction code at analysis stage. */
6529 STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info)
6530 = integer_onep (e) ? MAX_EXPR : MIN_EXPR;
6531 STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
6532 = CONST_COND_REDUCTION;
6538 if (orig_stmt_info)
6539 gcc_assert (tmp == orig_stmt_info
6540 || REDUC_GROUP_FIRST_ELEMENT (tmp) == orig_stmt_info);
6541 else
6542 /* We changed STMT to be the first stmt in reduction chain, hence we
6543 check that in this case the first element in the chain is STMT. */
6544 gcc_assert (tmp == stmt_info
6545 || REDUC_GROUP_FIRST_ELEMENT (tmp) == stmt_info);
6547 if (STMT_VINFO_LIVE_P (reduc_def_info))
6548 return false;
6550 if (slp_node)
6551 ncopies = 1;
6552 else
6553 ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
6555 gcc_assert (ncopies >= 1);
6557 vec_mode = TYPE_MODE (vectype_in);
6558 poly_uint64 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
6560 if (code == COND_EXPR)
6562 /* Only call during the analysis stage, otherwise we'll lose
6563 STMT_VINFO_TYPE. */
6564 if (!vec_stmt && !vectorizable_condition (stmt_info, gsi, NULL,
6565 ops[reduc_index], 0, NULL,
6566 cost_vec))
6568 if (dump_enabled_p ())
6569 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6570 "unsupported condition in reduction\n");
6571 return false;
6574 else
6576 /* 4. Supportable by target? */
6578 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR
6579 || code == LROTATE_EXPR || code == RROTATE_EXPR)
6581 /* Shifts and rotates are only supported by vectorizable_shifts,
6582 not vectorizable_reduction. */
6583 if (dump_enabled_p ())
6584 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6585 "unsupported shift or rotation.\n");
6586 return false;
6589 /* 4.1. check support for the operation in the loop */
6590 optab = optab_for_tree_code (code, vectype_in, optab_default);
6591 if (!optab)
6593 if (dump_enabled_p ())
6594 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6595 "no optab.\n");
6597 return false;
6600 if (optab_handler (optab, vec_mode) == CODE_FOR_nothing)
6602 if (dump_enabled_p ())
6603 dump_printf (MSG_NOTE, "op not supported by target.\n");
6605 if (maybe_ne (GET_MODE_SIZE (vec_mode), UNITS_PER_WORD)
6606 || !vect_worthwhile_without_simd_p (loop_vinfo, code))
6607 return false;
6609 if (dump_enabled_p ())
6610 dump_printf (MSG_NOTE, "proceeding using word mode.\n");
6613 /* Worthwhile without SIMD support? */
6614 if (!VECTOR_MODE_P (TYPE_MODE (vectype_in))
6615 && !vect_worthwhile_without_simd_p (loop_vinfo, code))
6617 if (dump_enabled_p ())
6618 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6619 "not worthwhile without SIMD support.\n");
6621 return false;
6625 /* 4.2. Check support for the epilog operation.
6627 If STMT represents a reduction pattern, then the type of the
6628 reduction variable may be different than the type of the rest
6629 of the arguments. For example, consider the case of accumulation
6630 of shorts into an int accumulator. The original code:
6631 S1: int_a = (int) short_a;
6632 orig_stmt-> S2: int_acc = plus <int_a ,int_acc>;
6634 was replaced with:
6635 STMT: int_acc = widen_sum <short_a, int_acc>
6637 This means that:
6638 1. The tree-code that is used to create the vector operation in the
6639 epilog code (that reduces the partial results) is not the
6640 tree-code of STMT, but is rather the tree-code of the original
6641 stmt from the pattern that STMT is replacing. I.e, in the example
6642 above we want to use 'widen_sum' in the loop, but 'plus' in the
6643 epilog.
6644 2. The type (mode) we use to check available target support
6645 for the vector operation to be created in the *epilog*, is
6646 determined by the type of the reduction variable (in the example
6647 above we'd check this: optab_handler (plus_optab, vect_int_mode)).
6648 However the type (mode) we use to check available target support
6649 for the vector operation to be created *inside the loop*, is
6650 determined by the type of the other arguments to STMT (in the
6651 example we'd check this: optab_handler (widen_sum_optab,
6652 vect_short_mode)).
6654 This is contrary to "regular" reductions, in which the types of all
6655 the arguments are the same as the type of the reduction variable.
6656 For "regular" reductions we can therefore use the same vector type
6657 (and also the same tree-code) when generating the epilog code and
6658 when generating the code inside the loop. */
6660 vect_reduction_type reduction_type
6661 = STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info);
6662 if (orig_stmt_info
6663 && (reduction_type == TREE_CODE_REDUCTION
6664 || reduction_type == FOLD_LEFT_REDUCTION))
6666 /* This is a reduction pattern: get the vectype from the type of the
6667 reduction variable, and get the tree-code from orig_stmt. */
6668 orig_code = gimple_assign_rhs_code (orig_stmt_info->stmt);
6669 gcc_assert (vectype_out);
6670 vec_mode = TYPE_MODE (vectype_out);
6672 else
6674 /* Regular reduction: the same vectype and tree-code as used for
6675 the vector code inside the loop can be used for the epilog code. */
6676 orig_code = code;
6678 if (code == MINUS_EXPR)
6679 orig_code = PLUS_EXPR;
6681 /* For simple condition reductions, replace with the actual expression
6682 we want to base our reduction around. */
6683 if (reduction_type == CONST_COND_REDUCTION)
6685 orig_code = STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info);
6686 gcc_assert (orig_code == MAX_EXPR || orig_code == MIN_EXPR);
6688 else if (reduction_type == INTEGER_INDUC_COND_REDUCTION)
6689 orig_code = cond_reduc_op_code;
6692 if (nested_cycle)
6694 def_bb = gimple_bb (reduc_def_phi);
6695 def_stmt_loop = def_bb->loop_father;
6696 def_arg = PHI_ARG_DEF_FROM_EDGE (reduc_def_phi,
6697 loop_preheader_edge (def_stmt_loop));
6698 stmt_vec_info def_arg_stmt_info = loop_vinfo->lookup_def (def_arg);
6699 if (def_arg_stmt_info
6700 && (STMT_VINFO_DEF_TYPE (def_arg_stmt_info)
6701 == vect_double_reduction_def))
6702 double_reduc = true;
6705 reduc_fn = IFN_LAST;
6707 if (reduction_type == TREE_CODE_REDUCTION
6708 || reduction_type == FOLD_LEFT_REDUCTION
6709 || reduction_type == INTEGER_INDUC_COND_REDUCTION
6710 || reduction_type == CONST_COND_REDUCTION)
6712 if (reduction_type == FOLD_LEFT_REDUCTION
6713 ? fold_left_reduction_fn (orig_code, &reduc_fn)
6714 : reduction_fn_for_scalar_code (orig_code, &reduc_fn))
6716 if (reduc_fn != IFN_LAST
6717 && !direct_internal_fn_supported_p (reduc_fn, vectype_out,
6718 OPTIMIZE_FOR_SPEED))
6720 if (dump_enabled_p ())
6721 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6722 "reduc op not supported by target.\n");
6724 reduc_fn = IFN_LAST;
6727 else
6729 if (!nested_cycle || double_reduc)
6731 if (dump_enabled_p ())
6732 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6733 "no reduc code for scalar code.\n");
6735 return false;
6739 else if (reduction_type == COND_REDUCTION)
6741 int scalar_precision
6742 = GET_MODE_PRECISION (SCALAR_TYPE_MODE (scalar_type));
6743 cr_index_scalar_type = make_unsigned_type (scalar_precision);
6744 cr_index_vector_type = build_vector_type (cr_index_scalar_type,
6745 nunits_out);
6747 if (direct_internal_fn_supported_p (IFN_REDUC_MAX, cr_index_vector_type,
6748 OPTIMIZE_FOR_SPEED))
6749 reduc_fn = IFN_REDUC_MAX;
6752 if (reduction_type != EXTRACT_LAST_REDUCTION
6753 && (!nested_cycle || double_reduc)
6754 && reduc_fn == IFN_LAST
6755 && !nunits_out.is_constant ())
6757 if (dump_enabled_p ())
6758 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6759 "missing target support for reduction on"
6760 " variable-length vectors.\n");
6761 return false;
6764 if ((double_reduc || reduction_type != TREE_CODE_REDUCTION)
6765 && ncopies > 1)
6767 if (dump_enabled_p ())
6768 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6769 "multiple types in double reduction or condition "
6770 "reduction.\n");
6771 return false;
6774 /* For SLP reductions, see if there is a neutral value we can use. */
6775 tree neutral_op = NULL_TREE;
6776 if (slp_node)
6777 neutral_op = neutral_op_for_slp_reduction
6778 (slp_node_instance->reduc_phis, code,
6779 REDUC_GROUP_FIRST_ELEMENT (stmt_info) != NULL);
6781 if (double_reduc && reduction_type == FOLD_LEFT_REDUCTION)
6783 /* We can't support in-order reductions of code such as this:
6785 for (int i = 0; i < n1; ++i)
6786 for (int j = 0; j < n2; ++j)
6787 l += a[j];
6789 since GCC effectively transforms the loop when vectorizing:
6791 for (int i = 0; i < n1 / VF; ++i)
6792 for (int j = 0; j < n2; ++j)
6793 for (int k = 0; k < VF; ++k)
6794 l += a[j];
6796 which is a reassociation of the original operation. */
6797 if (dump_enabled_p ())
6798 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6799 "in-order double reduction not supported.\n");
6801 return false;
6804 if (reduction_type == FOLD_LEFT_REDUCTION
6805 && slp_node
6806 && !REDUC_GROUP_FIRST_ELEMENT (stmt_info))
6808 /* We cannot use in-order reductions in this case because there is
6809 an implicit reassociation of the operations involved. */
6810 if (dump_enabled_p ())
6811 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6812 "in-order unchained SLP reductions not supported.\n");
6813 return false;
6816 /* For double reductions, and for SLP reductions with a neutral value,
6817 we construct a variable-length initial vector by loading a vector
6818 full of the neutral value and then shift-and-inserting the start
6819 values into the low-numbered elements. */
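/* Editor's sketch of that construction (an assumption, not original
   text): for a PLUS reduction with start value S the initial vector is
   built roughly as

     init = { 0, 0, 0, ... };           splat of the neutral value
     init = VEC_SHL_INSERT <init, S>;   giving { S, 0, 0, ... }

   which is why IFN_VEC_SHL_INSERT support is required below.  */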
6820 if ((double_reduc || neutral_op)
6821 && !nunits_out.is_constant ()
6822 && !direct_internal_fn_supported_p (IFN_VEC_SHL_INSERT,
6823 vectype_out, OPTIMIZE_FOR_SPEED))
6825 if (dump_enabled_p ())
6826 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6827 "reduction on variable-length vectors requires"
6828 " target support for a vector-shift-and-insert"
6829 " operation.\n");
6830 return false;
6833 /* Check extra constraints for variable-length unchained SLP reductions. */
6834 if (STMT_SLP_TYPE (stmt_info)
6835 && !REDUC_GROUP_FIRST_ELEMENT (stmt_info)
6836 && !nunits_out.is_constant ())
6838 /* We checked above that we could build the initial vector when
6839 there's a neutral element value. Check here for the case in
6840 which each SLP statement has its own initial value and in which
6841 that value needs to be repeated for every instance of the
6842 statement within the initial vector. */
6843 unsigned int group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();
6844 scalar_mode elt_mode = SCALAR_TYPE_MODE (TREE_TYPE (vectype_out));
6845 if (!neutral_op
6846 && !can_duplicate_and_interleave_p (group_size, elt_mode))
6848 if (dump_enabled_p ())
6849 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6850 "unsupported form of SLP reduction for"
6851 " variable-length vectors: cannot build"
6852 " initial vector.\n");
6853 return false;
6855 /* The epilogue code relies on the number of elements being a multiple
6856 of the group size. The duplicate-and-interleave approach to setting
6857 up the initial vector does too. */
6858 if (!multiple_p (nunits_out, group_size))
6860 if (dump_enabled_p ())
6861 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6862 "unsupported form of SLP reduction for"
6863 " variable-length vectors: the vector size"
6864 " is not a multiple of the number of results.\n");
6865 return false;
6869 /* In case of widening multiplication by a constant, we update the type
6870 of the constant to be the type of the other operand. We check that the
6871 constant fits the type in the pattern recognition pass. */
6872 if (code == DOT_PROD_EXPR
6873 && !types_compatible_p (TREE_TYPE (ops[0]), TREE_TYPE (ops[1])))
6875 if (TREE_CODE (ops[0]) == INTEGER_CST)
6876 ops[0] = fold_convert (TREE_TYPE (ops[1]), ops[0]);
6877 else if (TREE_CODE (ops[1]) == INTEGER_CST)
6878 ops[1] = fold_convert (TREE_TYPE (ops[0]), ops[1]);
6879 else
6881 if (dump_enabled_p ())
6882 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6883 "invalid types in dot-prod\n");
6885 return false;
6889 if (reduction_type == COND_REDUCTION)
6891 widest_int ni;
6893 if (! max_loop_iterations (loop, &ni))
6895 if (dump_enabled_p ())
6896 dump_printf_loc (MSG_NOTE, vect_location,
6897 "loop count not known, cannot create cond "
6898 "reduction.\n");
6899 return false;
6901 /* Convert backedges to iterations. */
6902 ni += 1;
6904 /* The additional index will have the same type as the condition. Check
6905 that the loop iteration count fits into this type less one (because
6906 we use up the zero slot for when there are no matches). */
6907 tree max_index = TYPE_MAX_VALUE (cr_index_scalar_type);
6908 if (wi::geu_p (ni, wi::to_widest (max_index)))
6910 if (dump_enabled_p ())
6911 dump_printf_loc (MSG_NOTE, vect_location,
6912 "loop size is greater than data size.\n");
6913 return false;
6917 /* In case the vectorization factor (VF) is bigger than the number
6918 of elements that we can fit in a vectype (nunits), we have to generate
6919 more than one vector stmt - i.e., we need to "unroll" the
6920 vector stmt by a factor VF/nunits. For more details see documentation
6921 in vectorizable_operation. */
6923 /* If the reduction is used in an outer loop we need to generate
6924 VF intermediate results, like so (e.g. for ncopies=2):
6925 r0 = phi (init, r0)
6926 r1 = phi (init, r1)
6927 r0 = x0 + r0;
6928 r1 = x1 + r1;
6929 (i.e. we generate VF results in 2 registers).
6930 In this case we have a separate def-use cycle for each copy, and therefore
6931 for each copy we get the vector def for the reduction variable from the
6932 respective phi node created for this copy.
6934 Otherwise (the reduction is unused in the loop nest), we can combine
6935 together intermediate results, like so (e.g. for ncopies=2):
6936 r = phi (init, r)
6937 r = x0 + r;
6938 r = x1 + r;
6939 (i.e. we generate VF/2 results in a single register).
6940 In this case for each copy we get the vector def for the reduction variable
6941 from the vectorized reduction operation generated in the previous iteration.
6943 This only works when we see both the reduction PHI and its only consumer
6944 in vectorizable_reduction and there are no intermediate stmts
6945 participating. */
6946 stmt_vec_info use_stmt_info;
6947 tree reduc_phi_result = gimple_phi_result (reduc_def_phi);
6948 if (ncopies > 1
6949 && (STMT_VINFO_RELEVANT (stmt_info) <= vect_used_only_live)
6950 && (use_stmt_info = loop_vinfo->lookup_single_use (reduc_phi_result))
6951 && vect_stmt_to_vectorize (use_stmt_info) == stmt_info)
6953 single_defuse_cycle = true;
6954 epilog_copies = 1;
6956 else
6957 epilog_copies = ncopies;
6959 /* If the reduction stmt is one of the patterns that have lane
6960 reduction embedded we cannot handle the case of ! single_defuse_cycle. */
6961 if ((ncopies > 1
6962 && ! single_defuse_cycle)
6963 && (code == DOT_PROD_EXPR
6964 || code == WIDEN_SUM_EXPR
6965 || code == SAD_EXPR))
6967 if (dump_enabled_p ())
6968 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6969 "multi def-use cycle not possible for lane-reducing "
6970 "reduction operation\n");
6971 return false;
6974 if (slp_node)
6975 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
6976 else
6977 vec_num = 1;
6979 internal_fn cond_fn = get_conditional_internal_fn (code);
6980 vec_loop_masks *masks = &LOOP_VINFO_MASKS (loop_vinfo);
6982 if (!vec_stmt) /* transformation not required. */
6984 vect_model_reduction_cost (stmt_info, reduc_fn, ncopies, cost_vec);
6985 if (loop_vinfo && LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo))
6987 if (reduction_type != FOLD_LEFT_REDUCTION
6988 && (cond_fn == IFN_LAST
6989 || !direct_internal_fn_supported_p (cond_fn, vectype_in,
6990 OPTIMIZE_FOR_SPEED)))
6992 if (dump_enabled_p ())
6993 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6994 "can't use a fully-masked loop because no"
6995 " conditional operation is available.\n");
6996 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
6998 else if (reduc_index == -1)
7000 if (dump_enabled_p ())
7001 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7002 "can't use a fully-masked loop for chained"
7003 " reductions.\n");
7004 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
7006 else
7007 vect_record_loop_mask (loop_vinfo, masks, ncopies * vec_num,
7008 vectype_in);
7010 if (dump_enabled_p ()
7011 && reduction_type == FOLD_LEFT_REDUCTION)
7012 dump_printf_loc (MSG_NOTE, vect_location,
7013 "using an in-order (fold-left) reduction.\n");
7014 STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
7015 return true;
7018 /* Transform. */
7020 if (dump_enabled_p ())
7021 dump_printf_loc (MSG_NOTE, vect_location, "transform reduction.\n");
7023 /* FORNOW: Multiple types are not supported for condition. */
7024 if (code == COND_EXPR)
7025 gcc_assert (ncopies == 1);
7027 bool masked_loop_p = LOOP_VINFO_FULLY_MASKED_P (loop_vinfo);
7029 if (reduction_type == FOLD_LEFT_REDUCTION)
7030 return vectorize_fold_left_reduction
7031 (stmt_info, gsi, vec_stmt, slp_node, reduc_def_phi, code,
7032 reduc_fn, ops, vectype_in, reduc_index, masks);
7034 if (reduction_type == EXTRACT_LAST_REDUCTION)
7036 gcc_assert (!slp_node);
7037 return vectorizable_condition (stmt_info, gsi, vec_stmt,
7038 NULL, reduc_index, NULL, NULL);
7041 /* Create the destination vector */
7042 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
7044 prev_stmt_info = NULL;
7045 prev_phi_info = NULL;
7046 if (!slp_node)
7048 vec_oprnds0.create (1);
7049 vec_oprnds1.create (1);
7050 if (op_type == ternary_op)
7051 vec_oprnds2.create (1);
7054 phis.create (vec_num);
7055 vect_defs.create (vec_num);
7056 if (!slp_node)
7057 vect_defs.quick_push (NULL_TREE);
7059 if (slp_node)
7060 phis.splice (SLP_TREE_VEC_STMTS (slp_node_instance->reduc_phis));
7061 else
7062 phis.quick_push (STMT_VINFO_VEC_STMT (reduc_def_info));
7064 for (j = 0; j < ncopies; j++)
7066 if (code == COND_EXPR)
7068 gcc_assert (!slp_node);
7069 vectorizable_condition (stmt_info, gsi, vec_stmt,
7070 PHI_RESULT (phis[0]->stmt),
7071 reduc_index, NULL, NULL);
7072 /* Multiple types are not supported for condition. */
7073 break;
7076 /* Handle uses. */
7077 if (j == 0)
7079 if (slp_node)
7081 /* Get vec defs for all the operands except the reduction index,
7082 ensuring the ordering of the ops in the vector is kept. */
7083 auto_vec<tree, 3> slp_ops;
7084 auto_vec<vec<tree>, 3> vec_defs;
7086 slp_ops.quick_push (ops[0]);
7087 slp_ops.quick_push (ops[1]);
7088 if (op_type == ternary_op)
7089 slp_ops.quick_push (ops[2]);
7091 vect_get_slp_defs (slp_ops, slp_node, &vec_defs);
7093 vec_oprnds0.safe_splice (vec_defs[0]);
7094 vec_defs[0].release ();
7095 vec_oprnds1.safe_splice (vec_defs[1]);
7096 vec_defs[1].release ();
7097 if (op_type == ternary_op)
7099 vec_oprnds2.safe_splice (vec_defs[2]);
7100 vec_defs[2].release ();
7103 else
7105 vec_oprnds0.quick_push
7106 (vect_get_vec_def_for_operand (ops[0], stmt_info));
7107 vec_oprnds1.quick_push
7108 (vect_get_vec_def_for_operand (ops[1], stmt_info));
7109 if (op_type == ternary_op)
7110 vec_oprnds2.quick_push
7111 (vect_get_vec_def_for_operand (ops[2], stmt_info));
7114 else
7116 if (!slp_node)
7118 gcc_assert (reduc_index != -1 || ! single_defuse_cycle);
7120 if (single_defuse_cycle && reduc_index == 0)
7121 vec_oprnds0[0] = gimple_get_lhs (new_stmt_info->stmt);
7122 else
7123 vec_oprnds0[0]
7124 = vect_get_vec_def_for_stmt_copy (loop_vinfo,
7125 vec_oprnds0[0]);
7126 if (single_defuse_cycle && reduc_index == 1)
7127 vec_oprnds1[0] = gimple_get_lhs (new_stmt_info->stmt);
7128 else
7129 vec_oprnds1[0]
7130 = vect_get_vec_def_for_stmt_copy (loop_vinfo,
7131 vec_oprnds1[0]);
7132 if (op_type == ternary_op)
7134 if (single_defuse_cycle && reduc_index == 2)
7135 vec_oprnds2[0] = gimple_get_lhs (new_stmt_info->stmt);
7136 else
7137 vec_oprnds2[0]
7138 = vect_get_vec_def_for_stmt_copy (loop_vinfo,
7139 vec_oprnds2[0]);
7144 FOR_EACH_VEC_ELT (vec_oprnds0, i, def0)
7146 tree vop[3] = { def0, vec_oprnds1[i], NULL_TREE };
7147 if (masked_loop_p)
7149 /* Make sure that the reduction accumulator is vop[0]. */
7150 if (reduc_index == 1)
7152 gcc_assert (commutative_tree_code (code));
7153 std::swap (vop[0], vop[1]);
7155 tree mask = vect_get_loop_mask (gsi, masks, vec_num * ncopies,
7156 vectype_in, i * ncopies + j);
7157 gcall *call = gimple_build_call_internal (cond_fn, 4, mask,
7158 vop[0], vop[1],
7159 vop[0]);
7160 new_temp = make_ssa_name (vec_dest, call);
7161 gimple_call_set_lhs (call, new_temp);
7162 gimple_call_set_nothrow (call, true);
7163 new_stmt_info
7164 = vect_finish_stmt_generation (stmt_info, call, gsi);
7166 else
7168 if (op_type == ternary_op)
7169 vop[2] = vec_oprnds2[i];
7171 gassign *new_stmt = gimple_build_assign (vec_dest, code,
7172 vop[0], vop[1], vop[2]);
7173 new_temp = make_ssa_name (vec_dest, new_stmt);
7174 gimple_assign_set_lhs (new_stmt, new_temp);
7175 new_stmt_info
7176 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
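 /* Illustrative GIMPLE for the two alternatives above, assuming a
    PLUS_EXPR reduction (cond_fn == IFN_COND_ADD), accumulator ACC and
    input X:
      masked:    tmp = .COND_ADD (loop_mask, ACC, X, ACC);
      unmasked:  tmp = ACC + X;
    i.e. with masking, inactive lanes keep the old accumulator value.  */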
7179 if (slp_node)
7181 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
7182 vect_defs.quick_push (new_temp);
7184 else
7185 vect_defs[0] = new_temp;
7188 if (slp_node)
7189 continue;
7191 if (j == 0)
7192 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
7193 else
7194 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
7196 prev_stmt_info = new_stmt_info;
7199 /* Finalize the reduction-phi (set its arguments) and create the
7200 epilog reduction code. */
7201 if ((!single_defuse_cycle || code == COND_EXPR) && !slp_node)
7202 vect_defs[0] = gimple_get_lhs ((*vec_stmt)->stmt);
7204 vect_create_epilog_for_reduction (vect_defs, stmt_info, reduc_def_phi,
7205 epilog_copies, reduc_fn, phis,
7206 double_reduc, slp_node, slp_node_instance,
7207 cond_reduc_val, cond_reduc_op_code,
7208 neutral_op);
7210 return true;
7213 /* Function vect_min_worthwhile_factor.
7215 For a loop where we could vectorize the operation indicated by CODE,
7216 return the minimum vectorization factor that makes it worthwhile
7217 to use generic vectors. */
7218 static unsigned int
7219 vect_min_worthwhile_factor (enum tree_code code)
7221 switch (code)
7223 case PLUS_EXPR:
7224 case MINUS_EXPR:
7225 case NEGATE_EXPR:
7226 return 4;
7228 case BIT_AND_EXPR:
7229 case BIT_IOR_EXPR:
7230 case BIT_XOR_EXPR:
7231 case BIT_NOT_EXPR:
7232 return 2;
7234 default:
7235 return INT_MAX;
7239 /* Return true if VINFO indicates we are doing loop vectorization and if
7240 it is worth decomposing CODE operations into scalar operations for
7241 that loop's vectorization factor. */
7243 bool
7244 vect_worthwhile_without_simd_p (vec_info *vinfo, tree_code code)
7246 loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
7247 unsigned HOST_WIDE_INT value;
7248 return (loop_vinfo
7249 && LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&value)
7250 && value >= vect_min_worthwhile_factor (code));
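 /* For example (illustrative values): with a constant vectorization
    factor of 4, a PLUS_EXPR loop qualifies (4 >= 4), whereas with a
    factor of 2 only the bitwise codes above do (2 >= 2).  */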
7253 /* Function vectorizable_induction
7255 Check if STMT_INFO performs an induction computation that can be vectorized.
7256 If VEC_STMT is also passed, vectorize the induction PHI: create a vectorized
7257 phi to replace it, put it in VEC_STMT, and add it to the same basic block.
7258 Return true if STMT_INFO is vectorizable in this way. */
7260 bool
7261 vectorizable_induction (stmt_vec_info stmt_info,
7262 gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
7263 stmt_vec_info *vec_stmt, slp_tree slp_node,
7264 stmt_vector_for_cost *cost_vec)
7266 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
7267 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
7268 unsigned ncopies;
7269 bool nested_in_vect_loop = false;
7270 struct loop *iv_loop;
7271 tree vec_def;
7272 edge pe = loop_preheader_edge (loop);
7273 basic_block new_bb;
7274 tree new_vec, vec_init, vec_step, t;
7275 tree new_name;
7276 gimple *new_stmt;
7277 gphi *induction_phi;
7278 tree induc_def, vec_dest;
7279 tree init_expr, step_expr;
7280 poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
7281 unsigned i;
7282 tree expr;
7283 gimple_seq stmts;
7284 imm_use_iterator imm_iter;
7285 use_operand_p use_p;
7286 gimple *exit_phi;
7287 edge latch_e;
7288 tree loop_arg;
7289 gimple_stmt_iterator si;
7291 gphi *phi = dyn_cast <gphi *> (stmt_info->stmt);
7292 if (!phi)
7293 return false;
7295 if (!STMT_VINFO_RELEVANT_P (stmt_info))
7296 return false;
7298 /* Make sure it was recognized as induction computation. */
7299 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def)
7300 return false;
7302 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
7303 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
7305 if (slp_node)
7306 ncopies = 1;
7307 else
7308 ncopies = vect_get_num_copies (loop_vinfo, vectype);
7309 gcc_assert (ncopies >= 1);
7311 /* FORNOW. These restrictions should be relaxed. */
7312 if (nested_in_vect_loop_p (loop, stmt_info))
7314 imm_use_iterator imm_iter;
7315 use_operand_p use_p;
7316 gimple *exit_phi;
7317 edge latch_e;
7318 tree loop_arg;
7320 if (ncopies > 1)
7322 if (dump_enabled_p ())
7323 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7324 "multiple types in nested loop.\n");
7325 return false;
7328 /* FORNOW: outer loop induction with SLP not supported. */
7329 if (STMT_SLP_TYPE (stmt_info))
7330 return false;
7332 exit_phi = NULL;
7333 latch_e = loop_latch_edge (loop->inner);
7334 loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
7335 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
7337 gimple *use_stmt = USE_STMT (use_p);
7338 if (is_gimple_debug (use_stmt))
7339 continue;
7341 if (!flow_bb_inside_loop_p (loop->inner, gimple_bb (use_stmt)))
7343 exit_phi = use_stmt;
7344 break;
7347 if (exit_phi)
7349 stmt_vec_info exit_phi_vinfo = loop_vinfo->lookup_stmt (exit_phi);
7350 if (!(STMT_VINFO_RELEVANT_P (exit_phi_vinfo)
7351 && !STMT_VINFO_LIVE_P (exit_phi_vinfo)))
7353 if (dump_enabled_p ())
7354 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7355 "inner-loop induction only used outside "
7356 "of the outer vectorized loop.\n");
7357 return false;
7361 nested_in_vect_loop = true;
7362 iv_loop = loop->inner;
7364 else
7365 iv_loop = loop;
7366 gcc_assert (iv_loop == (gimple_bb (phi))->loop_father);
7368 if (slp_node && !nunits.is_constant ())
7370 /* The current SLP code creates the initial value element-by-element. */
7371 if (dump_enabled_p ())
7372 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7373 "SLP induction not supported for variable-length"
7374 " vectors.\n");
7375 return false;
7378 if (!vec_stmt) /* transformation not required. */
7380 STMT_VINFO_TYPE (stmt_info) = induc_vec_info_type;
7381 DUMP_VECT_SCOPE ("vectorizable_induction");
7382 vect_model_induction_cost (stmt_info, ncopies, cost_vec);
7383 return true;
7386 /* Transform. */
7388 /* Compute a vector variable, initialized with the first VF values of
7389 the induction variable. E.g., for an iv with IV_PHI='X' and
7390 evolution S, for a vector of 4 units, we want to compute:
7391 [X, X + S, X + 2*S, X + 3*S]. */
7393 if (dump_enabled_p ())
7394 dump_printf_loc (MSG_NOTE, vect_location, "transform induction phi.\n");
7396 latch_e = loop_latch_edge (iv_loop);
7397 loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
7399 step_expr = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_info);
7400 gcc_assert (step_expr != NULL_TREE);
7402 pe = loop_preheader_edge (iv_loop);
7403 init_expr = PHI_ARG_DEF_FROM_EDGE (phi,
7404 loop_preheader_edge (iv_loop));
7406 stmts = NULL;
7407 if (!nested_in_vect_loop)
7409 /* Convert the initial value to the desired type. */
7410 tree new_type = TREE_TYPE (vectype);
7411 init_expr = gimple_convert (&stmts, new_type, init_expr);
7413 /* If we are using the loop mask to "peel" for alignment then we need
7414 to adjust the start value here. */
7415 tree skip_niters = LOOP_VINFO_MASK_SKIP_NITERS (loop_vinfo);
7416 if (skip_niters != NULL_TREE)
7418 if (FLOAT_TYPE_P (vectype))
7419 skip_niters = gimple_build (&stmts, FLOAT_EXPR, new_type,
7420 skip_niters);
7421 else
7422 skip_niters = gimple_convert (&stmts, new_type, skip_niters);
7423 tree skip_step = gimple_build (&stmts, MULT_EXPR, new_type,
7424 skip_niters, step_expr);
7425 init_expr = gimple_build (&stmts, MINUS_EXPR, new_type,
7426 init_expr, skip_step);
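 /* Illustrative example (hypothetical values): if the mask peels
    skip_niters = 3 iterations and the step is S, the adjusted start
    value is X - 3*S, so the first active lane (lane 3) of the first
    vector iteration still computes the original X.  */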
7430 /* Convert the step to the desired type. */
7431 step_expr = gimple_convert (&stmts, TREE_TYPE (vectype), step_expr);
7433 if (stmts)
7435 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
7436 gcc_assert (!new_bb);
7439 /* Find the first insertion point in the BB. */
7440 basic_block bb = gimple_bb (phi);
7441 si = gsi_after_labels (bb);
7443 /* For SLP induction we have to generate several IVs. For example, with
7444 group size 3 we need [i, i, i, i + S], [i + S, i + S, i + 2*S, i + 2*S]
7445 and [i + 2*S, i + 3*S, i + 3*S, i + 3*S]. The step is the same uniform
7446 [VF*S, VF*S, VF*S, VF*S] for all. */
7447 if (slp_node)
7449 /* Enforced above. */
7450 unsigned int const_nunits = nunits.to_constant ();
7452 /* Generate [VF*S, VF*S, ... ]. */
7453 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
7455 expr = build_int_cst (integer_type_node, vf);
7456 expr = fold_convert (TREE_TYPE (step_expr), expr);
7458 else
7459 expr = build_int_cst (TREE_TYPE (step_expr), vf);
7460 new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
7461 expr, step_expr);
7462 if (! CONSTANT_CLASS_P (new_name))
7463 new_name = vect_init_vector (stmt_info, new_name,
7464 TREE_TYPE (step_expr), NULL);
7465 new_vec = build_vector_from_val (vectype, new_name);
7466 vec_step = vect_init_vector (stmt_info, new_vec, vectype, NULL);
7468 /* Now generate the IVs. */
7469 unsigned group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();
7470 unsigned nvects = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
7471 unsigned elts = const_nunits * nvects;
7472 unsigned nivs = least_common_multiple (group_size,
7473 const_nunits) / const_nunits;
7474 gcc_assert (elts % group_size == 0);
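 /* Illustrative numbers, matching the example above: group_size = 3
    and const_nunits = 4 give nivs = lcm (3, 4) / 4 = 3 initial IVs;
    with nvects = 3, elts = 12 is a multiple of the group size.  */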
7475 tree elt = init_expr;
7476 unsigned ivn;
7477 for (ivn = 0; ivn < nivs; ++ivn)
7479 tree_vector_builder elts (vectype, const_nunits, 1);
7480 stmts = NULL;
7481 for (unsigned eltn = 0; eltn < const_nunits; ++eltn)
7483 if (ivn*const_nunits + eltn >= group_size
7484 && (ivn * const_nunits + eltn) % group_size == 0)
7485 elt = gimple_build (&stmts, PLUS_EXPR, TREE_TYPE (elt),
7486 elt, step_expr);
7487 elts.quick_push (elt);
7489 vec_init = gimple_build_vector (&stmts, &elts);
7490 if (stmts)
7492 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
7493 gcc_assert (!new_bb);
7496 /* Create the induction-phi that defines the induction-operand. */
7497 vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_");
7498 induction_phi = create_phi_node (vec_dest, iv_loop->header);
7499 stmt_vec_info induction_phi_info
7500 = loop_vinfo->add_stmt (induction_phi);
7501 induc_def = PHI_RESULT (induction_phi);
7503 /* Create the iv update inside the loop */
7504 vec_def = make_ssa_name (vec_dest);
7505 new_stmt = gimple_build_assign (vec_def, PLUS_EXPR, induc_def, vec_step);
7506 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
7507 loop_vinfo->add_stmt (new_stmt);
7509 /* Set the arguments of the phi node: */
7510 add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION);
7511 add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
7512 UNKNOWN_LOCATION);
7514 SLP_TREE_VEC_STMTS (slp_node).quick_push (induction_phi_info);
7517 /* Re-use IVs when we can. */
7518 if (ivn < nvects)
7520 unsigned vfp
7521 = least_common_multiple (group_size, const_nunits) / group_size;
7522 /* Generate [VF'*S, VF'*S, ... ]. */
7523 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
7525 expr = build_int_cst (integer_type_node, vfp);
7526 expr = fold_convert (TREE_TYPE (step_expr), expr);
7528 else
7529 expr = build_int_cst (TREE_TYPE (step_expr), vfp);
7530 new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
7531 expr, step_expr);
7532 if (! CONSTANT_CLASS_P (new_name))
7533 new_name = vect_init_vector (stmt_info, new_name,
7534 TREE_TYPE (step_expr), NULL);
7535 new_vec = build_vector_from_val (vectype, new_name);
7536 vec_step = vect_init_vector (stmt_info, new_vec, vectype, NULL);
7537 for (; ivn < nvects; ++ivn)
7539 gimple *iv = SLP_TREE_VEC_STMTS (slp_node)[ivn - nivs]->stmt;
7540 tree def;
7541 if (gimple_code (iv) == GIMPLE_PHI)
7542 def = gimple_phi_result (iv);
7543 else
7544 def = gimple_assign_lhs (iv);
7545 new_stmt = gimple_build_assign (make_ssa_name (vectype),
7546 PLUS_EXPR,
7547 def, vec_step);
7548 if (gimple_code (iv) == GIMPLE_PHI)
7549 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
7550 else
7552 gimple_stmt_iterator tgsi = gsi_for_stmt (iv);
7553 gsi_insert_after (&tgsi, new_stmt, GSI_CONTINUE_LINKING);
7555 SLP_TREE_VEC_STMTS (slp_node).quick_push
7556 (loop_vinfo->add_stmt (new_stmt));
7560 return true;
7563 /* Create the vector that holds the initial_value of the induction. */
7564 if (nested_in_vect_loop)
7566 /* iv_loop is nested in the loop to be vectorized. init_expr has already
7567 been created during vectorization of previous stmts. We obtain it
7568 from the STMT_VINFO_VEC_STMT of the defining stmt. */
7569 vec_init = vect_get_vec_def_for_operand (init_expr, stmt_info);
7570 /* If the initial value is not of proper type, convert it. */
7571 if (!useless_type_conversion_p (vectype, TREE_TYPE (vec_init)))
7573 new_stmt
7574 = gimple_build_assign (vect_get_new_ssa_name (vectype,
7575 vect_simple_var,
7576 "vec_iv_"),
7577 VIEW_CONVERT_EXPR,
7578 build1 (VIEW_CONVERT_EXPR, vectype,
7579 vec_init));
7580 vec_init = gimple_assign_lhs (new_stmt);
7581 new_bb = gsi_insert_on_edge_immediate (loop_preheader_edge (iv_loop),
7582 new_stmt);
7583 gcc_assert (!new_bb);
7584 loop_vinfo->add_stmt (new_stmt);
7587 else
7589 /* iv_loop is the loop to be vectorized. Create:
7590 vec_init = [X, X+S, X+2*S, X+3*S] (S = step_expr, X = init_expr) */
7591 stmts = NULL;
7592 new_name = gimple_convert (&stmts, TREE_TYPE (vectype), init_expr);
7594 unsigned HOST_WIDE_INT const_nunits;
7595 if (nunits.is_constant (&const_nunits))
7597 tree_vector_builder elts (vectype, const_nunits, 1);
7598 elts.quick_push (new_name);
7599 for (i = 1; i < const_nunits; i++)
7601 /* Create: new_name_i = new_name + step_expr */
7602 new_name = gimple_build (&stmts, PLUS_EXPR, TREE_TYPE (new_name),
7603 new_name, step_expr);
7604 elts.quick_push (new_name);
7606 /* Create a vector from [new_name_0, new_name_1, ...,
7607 new_name_nunits-1] */
7608 vec_init = gimple_build_vector (&stmts, &elts);
7610 else if (INTEGRAL_TYPE_P (TREE_TYPE (step_expr)))
7611 /* Build the initial value directly from a VEC_SERIES_EXPR. */
7612 vec_init = gimple_build (&stmts, VEC_SERIES_EXPR, vectype,
7613 new_name, step_expr);
7614 else
7616 /* Build:
7617 [base, base, base, ...]
7618 + (vectype) [0, 1, 2, ...] * [step, step, step, ...]. */
7619 gcc_assert (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)));
7620 gcc_assert (flag_associative_math);
7621 tree index = build_index_vector (vectype, 0, 1);
7622 tree base_vec = gimple_build_vector_from_val (&stmts, vectype,
7623 new_name);
7624 tree step_vec = gimple_build_vector_from_val (&stmts, vectype,
7625 step_expr);
7626 vec_init = gimple_build (&stmts, FLOAT_EXPR, vectype, index);
7627 vec_init = gimple_build (&stmts, MULT_EXPR, vectype,
7628 vec_init, step_vec);
7629 vec_init = gimple_build (&stmts, PLUS_EXPR, vectype,
7630 vec_init, base_vec);
7633 if (stmts)
7635 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
7636 gcc_assert (!new_bb);
7641 /* Create the vector that holds the step of the induction. */
7642 if (nested_in_vect_loop)
7643 /* iv_loop is nested in the loop to be vectorized. Generate:
7644 vec_step = [S, S, S, S] */
7645 new_name = step_expr;
7646 else
7648 /* iv_loop is the loop to be vectorized. Generate:
7649 vec_step = [VF*S, VF*S, VF*S, VF*S] */
7650 gimple_seq seq = NULL;
7651 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
7653 expr = build_int_cst (integer_type_node, vf);
7654 expr = gimple_build (&seq, FLOAT_EXPR, TREE_TYPE (step_expr), expr);
7656 else
7657 expr = build_int_cst (TREE_TYPE (step_expr), vf);
7658 new_name = gimple_build (&seq, MULT_EXPR, TREE_TYPE (step_expr),
7659 expr, step_expr);
7660 if (seq)
7662 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
7663 gcc_assert (!new_bb);
7667 t = unshare_expr (new_name);
7668 gcc_assert (CONSTANT_CLASS_P (new_name)
7669 || TREE_CODE (new_name) == SSA_NAME);
7670 new_vec = build_vector_from_val (vectype, t);
7671 vec_step = vect_init_vector (stmt_info, new_vec, vectype, NULL);
7674 /* Create the following def-use cycle:
7675 loop prolog:
7676 vec_init = ...
7677 vec_step = ...
7678 loop:
7679 vec_iv = PHI <vec_init, vec_loop>
7681 STMT
7683 vec_loop = vec_iv + vec_step; */
7685 /* Create the induction-phi that defines the induction-operand. */
7686 vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_");
7687 induction_phi = create_phi_node (vec_dest, iv_loop->header);
7688 stmt_vec_info induction_phi_info = loop_vinfo->add_stmt (induction_phi);
7689 induc_def = PHI_RESULT (induction_phi);
7691 /* Create the iv update inside the loop */
7692 vec_def = make_ssa_name (vec_dest);
7693 new_stmt = gimple_build_assign (vec_def, PLUS_EXPR, induc_def, vec_step);
7694 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
7695 stmt_vec_info new_stmt_info = loop_vinfo->add_stmt (new_stmt);
7697 /* Set the arguments of the phi node: */
7698 add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION);
7699 add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
7700 UNKNOWN_LOCATION);
7702 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = induction_phi_info;
7704 /* In case the vectorization factor (VF) is bigger than the number
7705 of elements that we can fit in a vectype (nunits), we have to generate
7706 more than one vector stmt - i.e - we need to "unroll" the
7707 vector stmt by a factor VF/nunits. For more details see documentation
7708 in vectorizable_operation. */
7710 if (ncopies > 1)
7712 gimple_seq seq = NULL;
7713 stmt_vec_info prev_stmt_vinfo;
7714 /* FORNOW. This restriction should be relaxed. */
7715 gcc_assert (!nested_in_vect_loop);
7717 /* Create the vector that holds the step of the induction. */
7718 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
7720 expr = build_int_cst (integer_type_node, nunits);
7721 expr = gimple_build (&seq, FLOAT_EXPR, TREE_TYPE (step_expr), expr);
7723 else
7724 expr = build_int_cst (TREE_TYPE (step_expr), nunits);
7725 new_name = gimple_build (&seq, MULT_EXPR, TREE_TYPE (step_expr),
7726 expr, step_expr);
7727 if (seq)
7729 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
7730 gcc_assert (!new_bb);
7733 t = unshare_expr (new_name);
7734 gcc_assert (CONSTANT_CLASS_P (new_name)
7735 || TREE_CODE (new_name) == SSA_NAME);
7736 new_vec = build_vector_from_val (vectype, t);
7737 vec_step = vect_init_vector (stmt_info, new_vec, vectype, NULL);
7739 vec_def = induc_def;
7740 prev_stmt_vinfo = induction_phi_info;
7741 for (i = 1; i < ncopies; i++)
7743 /* vec_i = vec_prev + vec_step */
7744 new_stmt = gimple_build_assign (vec_dest, PLUS_EXPR,
7745 vec_def, vec_step);
7746 vec_def = make_ssa_name (vec_dest, new_stmt);
7747 gimple_assign_set_lhs (new_stmt, vec_def);
7749 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
7750 new_stmt_info = loop_vinfo->add_stmt (new_stmt);
7751 STMT_VINFO_RELATED_STMT (prev_stmt_vinfo) = new_stmt_info;
7752 prev_stmt_vinfo = new_stmt_info;
7756 if (nested_in_vect_loop)
7758 /* Find the loop-closed exit-phi of the induction, and record
7759 the final vector of induction results: */
7760 exit_phi = NULL;
7761 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
7763 gimple *use_stmt = USE_STMT (use_p);
7764 if (is_gimple_debug (use_stmt))
7765 continue;
7767 if (!flow_bb_inside_loop_p (iv_loop, gimple_bb (use_stmt)))
7769 exit_phi = use_stmt;
7770 break;
7773 if (exit_phi)
7775 stmt_vec_info stmt_vinfo = loop_vinfo->lookup_stmt (exit_phi);
7776 /* FORNOW. Currently not supporting the case that an inner-loop induction
7777 is not used in the outer-loop (i.e. only outside the outer-loop). */
7778 gcc_assert (STMT_VINFO_RELEVANT_P (stmt_vinfo)
7779 && !STMT_VINFO_LIVE_P (stmt_vinfo));
7781 STMT_VINFO_VEC_STMT (stmt_vinfo) = new_stmt_info;
7782 if (dump_enabled_p ())
7784 dump_printf_loc (MSG_NOTE, vect_location,
7785 "vector of inductions after inner-loop:");
7786 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
7792 if (dump_enabled_p ())
7794 dump_printf_loc (MSG_NOTE, vect_location,
7795 "transform induction: created def-use cycle: ");
7796 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, induction_phi, 0);
7797 dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
7798 SSA_NAME_DEF_STMT (vec_def), 0);
7801 return true;
7804 /* Function vectorizable_live_operation.
7806 STMT_INFO computes a value that is used outside the loop. Check if
7807 it can be supported. */
7809 bool
7810 vectorizable_live_operation (stmt_vec_info stmt_info,
7811 gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
7812 slp_tree slp_node, int slp_index,
7813 stmt_vec_info *vec_stmt,
7814 stmt_vector_for_cost *)
7816 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
7817 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
7818 imm_use_iterator imm_iter;
7819 tree lhs, lhs_type, bitsize, vec_bitsize;
7820 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
7821 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
7822 int ncopies;
7823 gimple *use_stmt;
7824 auto_vec<tree> vec_oprnds;
7825 int vec_entry = 0;
7826 poly_uint64 vec_index = 0;
7828 gcc_assert (STMT_VINFO_LIVE_P (stmt_info));
7830 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def)
7831 return false;
7833 /* FORNOW. CHECKME. */
7834 if (nested_in_vect_loop_p (loop, stmt_info))
7835 return false;
7837 /* If STMT is not relevant and it is a simple assignment and its inputs are
7838 invariant then it can remain in place, unvectorized. The original last
7839 scalar value that it computes will be used. */
7840 if (!STMT_VINFO_RELEVANT_P (stmt_info))
7842 gcc_assert (is_simple_and_all_uses_invariant (stmt_info, loop_vinfo));
7843 if (dump_enabled_p ())
7844 dump_printf_loc (MSG_NOTE, vect_location,
7845 "statement is simple and uses invariant. Leaving in "
7846 "place.\n");
7847 return true;
7850 if (slp_node)
7851 ncopies = 1;
7852 else
7853 ncopies = vect_get_num_copies (loop_vinfo, vectype);
7855 if (slp_node)
7857 gcc_assert (slp_index >= 0);
7859 int num_scalar = SLP_TREE_SCALAR_STMTS (slp_node).length ();
7860 int num_vec = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
7862 /* Get the last occurrence of the scalar index from the concatenation of
7863 all the slp vectors. Calculate which slp vector it is and the index
7864 within. */
7865 poly_uint64 pos = (num_vec * nunits) - num_scalar + slp_index;
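 /* Illustrative example (hypothetical values): with num_vec = 2,
    nunits = 4, num_scalar = 6 and slp_index = 5, pos = 2*4 - 6 + 5 = 7,
    so the division below gives vec_entry = 1 and vec_index = 3, i.e.
    the live-out value sits in the last lane of the second vector.  */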
7867 /* Calculate which vector contains the result, and which lane of
7868 that vector we need. */
7869 if (!can_div_trunc_p (pos, nunits, &vec_entry, &vec_index))
7871 if (dump_enabled_p ())
7872 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7873 "Cannot determine which vector holds the"
7874 " final result.\n");
7875 return false;
7879 if (!vec_stmt)
7881 /* No transformation required. */
7882 if (LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo))
7884 if (!direct_internal_fn_supported_p (IFN_EXTRACT_LAST, vectype,
7885 OPTIMIZE_FOR_SPEED))
7887 if (dump_enabled_p ())
7888 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7889 "can't use a fully-masked loop because "
7890 "the target doesn't support extract last "
7891 "reduction.\n");
7892 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
7894 else if (slp_node)
7896 if (dump_enabled_p ())
7897 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7898 "can't use a fully-masked loop because an "
7899 "SLP statement is live after the loop.\n");
7900 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
7902 else if (ncopies > 1)
7904 if (dump_enabled_p ())
7905 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7906 "can't use a fully-masked loop because"
7907 " ncopies is greater than 1.\n");
7908 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
7910 else
7912 gcc_assert (ncopies == 1 && !slp_node);
7913 vect_record_loop_mask (loop_vinfo,
7914 &LOOP_VINFO_MASKS (loop_vinfo),
7915 1, vectype);
7918 return true;
7921 /* Use the lhs of the original scalar statement. */
7922 gimple *stmt = vect_orig_stmt (stmt_info)->stmt;
7924 lhs = (is_a <gphi *> (stmt)) ? gimple_phi_result (stmt)
7925 : gimple_get_lhs (stmt);
7926 lhs_type = TREE_TYPE (lhs);
7928 bitsize = (VECTOR_BOOLEAN_TYPE_P (vectype)
7929 ? bitsize_int (TYPE_PRECISION (TREE_TYPE (vectype)))
7930 : TYPE_SIZE (TREE_TYPE (vectype)));
7931 vec_bitsize = TYPE_SIZE (vectype);
7933 /* Get the vectorized lhs of STMT and the lane to use (counted in bits). */
7934 tree vec_lhs, bitstart;
7935 if (slp_node)
7937 gcc_assert (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo));
7939 /* Get the correct slp vectorized stmt. */
7940 gimple *vec_stmt = SLP_TREE_VEC_STMTS (slp_node)[vec_entry]->stmt;
7941 if (gphi *phi = dyn_cast <gphi *> (vec_stmt))
7942 vec_lhs = gimple_phi_result (phi);
7943 else
7944 vec_lhs = gimple_get_lhs (vec_stmt);
7946 /* Get entry to use. */
7947 bitstart = bitsize_int (vec_index);
7948 bitstart = int_const_binop (MULT_EXPR, bitsize, bitstart);
7950 else
7952 enum vect_def_type dt = STMT_VINFO_DEF_TYPE (stmt_info);
7953 vec_lhs = vect_get_vec_def_for_operand_1 (stmt_info, dt);
7954 gcc_checking_assert (ncopies == 1
7955 || !LOOP_VINFO_FULLY_MASKED_P (loop_vinfo));
7957 /* For multiple copies, get the last copy. */
7958 for (int i = 1; i < ncopies; ++i)
7959 vec_lhs = vect_get_vec_def_for_stmt_copy (loop_vinfo, vec_lhs);
7961 /* Get the last lane in the vector. */
7962 bitstart = int_const_binop (MINUS_EXPR, vec_bitsize, bitsize);
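 /* Illustrative example (hypothetical type): for a 4 x 32-bit vector,
    vec_bitsize is 128 and bitsize is 32, so bitstart = 96 selects the
    last lane.  */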
7965 gimple_seq stmts = NULL;
7966 tree new_tree;
7967 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
7969 /* Emit:
7971 SCALAR_RES = EXTRACT_LAST <VEC_LHS, MASK>
7973 where VEC_LHS is the vectorized live-out result and MASK is
7974 the loop mask for the final iteration. */
7975 gcc_assert (ncopies == 1 && !slp_node);
7976 tree scalar_type = TREE_TYPE (STMT_VINFO_VECTYPE (stmt_info));
7977 tree mask = vect_get_loop_mask (gsi, &LOOP_VINFO_MASKS (loop_vinfo),
7978 1, vectype, 0);
7979 tree scalar_res = gimple_build (&stmts, CFN_EXTRACT_LAST,
7980 scalar_type, mask, vec_lhs);
7982 /* Convert the extracted vector element to the required scalar type. */
7983 new_tree = gimple_convert (&stmts, lhs_type, scalar_res);
7985 else
7987 tree bftype = TREE_TYPE (vectype);
7988 if (VECTOR_BOOLEAN_TYPE_P (vectype))
7989 bftype = build_nonstandard_integer_type (tree_to_uhwi (bitsize), 1);
7990 new_tree = build3 (BIT_FIELD_REF, bftype, vec_lhs, bitsize, bitstart);
7991 new_tree = force_gimple_operand (fold_convert (lhs_type, new_tree),
7992 &stmts, true, NULL_TREE);
7995 if (stmts)
7996 gsi_insert_seq_on_edge_immediate (single_exit (loop), stmts);
7998 /* Replace uses of lhs with the newly computed result. If the use stmt is
7999 a single-argument PHI, just replace all uses of the PHI result. This is
8000 necessary because the lcssa PHI defining lhs may be before the newly inserted stmt. */
8001 use_operand_p use_p;
8002 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, lhs)
8003 if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))
8004 && !is_gimple_debug (use_stmt))
8006 if (gimple_code (use_stmt) == GIMPLE_PHI
8007 && gimple_phi_num_args (use_stmt) == 1)
8009 replace_uses_by (gimple_phi_result (use_stmt), new_tree);
8011 else
8013 FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
8014 SET_USE (use_p, new_tree);
8016 update_stmt (use_stmt);
8019 return true;
8022 /* Kill any debug uses outside LOOP of SSA names defined in STMT_INFO. */
8024 static void
8025 vect_loop_kill_debug_uses (struct loop *loop, stmt_vec_info stmt_info)
8027 ssa_op_iter op_iter;
8028 imm_use_iterator imm_iter;
8029 def_operand_p def_p;
8030 gimple *ustmt;
8032 FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt_info->stmt, op_iter, SSA_OP_DEF)
8034 FOR_EACH_IMM_USE_STMT (ustmt, imm_iter, DEF_FROM_PTR (def_p))
8036 basic_block bb;
8038 if (!is_gimple_debug (ustmt))
8039 continue;
8041 bb = gimple_bb (ustmt);
8043 if (!flow_bb_inside_loop_p (loop, bb))
8045 if (gimple_debug_bind_p (ustmt))
8047 if (dump_enabled_p ())
8048 dump_printf_loc (MSG_NOTE, vect_location,
8049 "killing debug use\n");
8051 gimple_debug_bind_reset_value (ustmt);
8052 update_stmt (ustmt);
8054 else
8055 gcc_unreachable ();
8061 /* Given loop represented by LOOP_VINFO, return true if computation of
8062 LOOP_VINFO_NITERS (= LOOP_VINFO_NITERSM1 + 1) doesn't overflow, false
8063 otherwise. */
8065 static bool
8066 loop_niters_no_overflow (loop_vec_info loop_vinfo)
8068 /* Constant case. */
8069 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
8071 tree cst_niters = LOOP_VINFO_NITERS (loop_vinfo);
8072 tree cst_nitersm1 = LOOP_VINFO_NITERSM1 (loop_vinfo);
8074 gcc_assert (TREE_CODE (cst_niters) == INTEGER_CST);
8075 gcc_assert (TREE_CODE (cst_nitersm1) == INTEGER_CST);
8076 if (wi::to_widest (cst_nitersm1) < wi::to_widest (cst_niters))
8077 return true;
8080 widest_int max;
8081 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
8082 /* Check the upper bound of loop niters. */
8083 if (get_max_loop_iterations (loop, &max))
8085 tree type = TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo));
8086 signop sgn = TYPE_SIGN (type);
8087 widest_int type_max = widest_int::from (wi::max_value (type), sgn);
8088 if (max < type_max)
8089 return true;
8091 return false;
8094 /* Return a mask type with half the number of elements as TYPE. */
8096 tree
8097 vect_halve_mask_nunits (tree type)
8099 poly_uint64 nunits = exact_div (TYPE_VECTOR_SUBPARTS (type), 2);
8100 return build_truth_vector_type (nunits, current_vector_size);
8103 /* Return a mask type with twice as many elements as TYPE. */
8105 tree
8106 vect_double_mask_nunits (tree type)
8108 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (type) * 2;
8109 return build_truth_vector_type (nunits, current_vector_size);
8112 /* Record that a fully-masked version of LOOP_VINFO would need MASKS to
8113 contain a sequence of NVECTORS masks that each control a vector of type
8114 VECTYPE. */
8116 void
8117 vect_record_loop_mask (loop_vec_info loop_vinfo, vec_loop_masks *masks,
8118 unsigned int nvectors, tree vectype)
8120 gcc_assert (nvectors != 0);
8121 if (masks->length () < nvectors)
8122 masks->safe_grow_cleared (nvectors);
8123 rgroup_masks *rgm = &(*masks)[nvectors - 1];
8124 /* The number of scalars per iteration and the number of vectors are
8125 both compile-time constants. */
8126 unsigned int nscalars_per_iter
8127 = exact_div (nvectors * TYPE_VECTOR_SUBPARTS (vectype),
8128 LOOP_VINFO_VECT_FACTOR (loop_vinfo)).to_constant ();
8129 if (rgm->max_nscalars_per_iter < nscalars_per_iter)
8131 rgm->max_nscalars_per_iter = nscalars_per_iter;
8132 rgm->mask_type = build_same_sized_truth_vector_type (vectype);
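 /* Illustrative example (hypothetical values): recording nvectors = 2
    masks for an 8-element vector type in a loop with vectorization
    factor 8 gives nscalars_per_iter = 2 * 8 / 8 = 2, so the rgroup in
    masks[1] tracks two scalars per iteration with an 8-element mask
    type.  */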
8136 /* Given a complete set of masks MASKS, extract mask number INDEX
8137 for an rgroup that operates on NVECTORS vectors of type VECTYPE,
8138 where 0 <= INDEX < NVECTORS. Insert any set-up statements before GSI.
8140 See the comment above vec_loop_masks for more details about the mask
8141 arrangement. */
8143 tree
8144 vect_get_loop_mask (gimple_stmt_iterator *gsi, vec_loop_masks *masks,
8145 unsigned int nvectors, tree vectype, unsigned int index)
8147 rgroup_masks *rgm = &(*masks)[nvectors - 1];
8148 tree mask_type = rgm->mask_type;
8150 /* Populate the rgroup's mask array, if this is the first time we've
8151 used it. */
8152 if (rgm->masks.is_empty ())
8154 rgm->masks.safe_grow_cleared (nvectors);
8155 for (unsigned int i = 0; i < nvectors; ++i)
8157 tree mask = make_temp_ssa_name (mask_type, NULL, "loop_mask");
8158 /* Provide a dummy definition until the real one is available. */
8159 SSA_NAME_DEF_STMT (mask) = gimple_build_nop ();
8160 rgm->masks[i] = mask;
8164 tree mask = rgm->masks[index];
8165 if (maybe_ne (TYPE_VECTOR_SUBPARTS (mask_type),
8166 TYPE_VECTOR_SUBPARTS (vectype)))
8168 /* A loop mask for data type X can be reused for data type Y
8169 if X has N times more elements than Y and if Y's elements
8170 are N times bigger than X's. In this case each sequence
8171 of N elements in the loop mask will be all-zero or all-one.
8172 We can then view-convert the mask so that each sequence of
8173 N elements is replaced by a single element. */
8174 gcc_assert (multiple_p (TYPE_VECTOR_SUBPARTS (mask_type),
8175 TYPE_VECTOR_SUBPARTS (vectype)));
8176 gimple_seq seq = NULL;
8177 mask_type = build_same_sized_truth_vector_type (vectype);
8178 mask = gimple_build (&seq, VIEW_CONVERT_EXPR, mask_type, mask);
8179 if (seq)
8180 gsi_insert_seq_before (gsi, seq, GSI_SAME_STMT);
8182 return mask;
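 /* Illustrative example (hypothetical types): a 16-element mask recorded
    for a vector of 16 chars can control a vector of 8 shorts of the same
    size: each pair of mask elements is all-zero or all-one, so the
    VIEW_CONVERT_EXPR above collapses every pair into one wider
    element.  */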
8185 /* Scale profiling counters by estimation for LOOP which is vectorized
8186 by factor VF. */
8188 static void
8189 scale_profile_for_vect_loop (struct loop *loop, unsigned vf)
8191 edge preheader = loop_preheader_edge (loop);
8192 /* Reduce loop iterations by the vectorization factor. */
8193 gcov_type new_est_niter = niter_for_unrolled_loop (loop, vf);
8194 profile_count freq_h = loop->header->count, freq_e = preheader->count ();
8196 if (freq_h.nonzero_p ())
8198 profile_probability p;
8200 /* Avoid dropping loop body profile counter to 0 because of zero count
8201 in loop's preheader. */
8202 if (!(freq_e == profile_count::zero ()))
8203 freq_e = freq_e.force_nonzero ();
8204 p = freq_e.apply_scale (new_est_niter + 1, 1).probability_in (freq_h);
8205 scale_loop_frequencies (loop, p);
8208 edge exit_e = single_exit (loop);
8209 exit_e->probability = profile_probability::always ()
8210 .apply_scale (1, new_est_niter + 1);
8212 edge exit_l = single_pred_edge (loop->latch);
8213 profile_probability prob = exit_l->probability;
8214 exit_l->probability = exit_e->probability.invert ();
8215 if (prob.initialized_p () && exit_l->probability.initialized_p ())
8216 scale_bbs_frequencies (&loop->latch, 1, exit_l->probability / prob);
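 /* Illustrative example (hypothetical count): with new_est_niter = 3 the
    exit edge gets probability 1/4 and the latch edge its inverse, 3/4,
    i.e. the vector loop body is expected to run about four times per
    entry.  */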
8219 /* Vectorize STMT_INFO if relevant, inserting any new instructions before GSI.
8220 When vectorizing STMT_INFO as a store, set *SEEN_STORE to its
8221 stmt_vec_info. */
8223 static void
8224 vect_transform_loop_stmt (loop_vec_info loop_vinfo, stmt_vec_info stmt_info,
8225 gimple_stmt_iterator *gsi, stmt_vec_info *seen_store)
8227 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
8228 poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
8230 if (dump_enabled_p ())
8232 dump_printf_loc (MSG_NOTE, vect_location,
8233 "------>vectorizing statement: ");
8234 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0);
8237 if (MAY_HAVE_DEBUG_BIND_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
8238 vect_loop_kill_debug_uses (loop, stmt_info);
8240 if (!STMT_VINFO_RELEVANT_P (stmt_info)
8241 && !STMT_VINFO_LIVE_P (stmt_info))
8242 return;
8244 if (STMT_VINFO_VECTYPE (stmt_info))
8246 poly_uint64 nunits
8247 = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
8248 if (!STMT_SLP_TYPE (stmt_info)
8249 && maybe_ne (nunits, vf)
8250 && dump_enabled_p ())
8251 /* For SLP, VF is set according to the unrolling factor, and not
8252 to the vector size, hence for SLP this print is not valid. */
8253 dump_printf_loc (MSG_NOTE, vect_location, "multiple-types.\n");
8256 /* Pure SLP statements have already been vectorized. We still need
8257 to apply loop vectorization to hybrid SLP statements. */
8258 if (PURE_SLP_STMT (stmt_info))
8259 return;
8261 if (dump_enabled_p ())
8262 dump_printf_loc (MSG_NOTE, vect_location, "transform statement.\n");
8264 if (vect_transform_stmt (stmt_info, gsi, NULL, NULL))
8265 *seen_store = stmt_info;
8268 /* Function vect_transform_loop.
8270 The analysis phase has determined that the loop is vectorizable.
8271 Vectorize the loop - created vectorized stmts to replace the scalar
8272 stmts in the loop, and update the loop exit condition.
8273 Returns scalar epilogue loop if any. */
8275 struct loop *
8276 vect_transform_loop (loop_vec_info loop_vinfo)
8278 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
8279 struct loop *epilogue = NULL;
8280 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
8281 int nbbs = loop->num_nodes;
8282 int i;
8283 tree niters_vector = NULL_TREE;
8284 tree step_vector = NULL_TREE;
8285 tree niters_vector_mult_vf = NULL_TREE;
8286 poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
8287 unsigned int lowest_vf = constant_lower_bound (vf);
8288 gimple *stmt;
8289 bool check_profitability = false;
8290 unsigned int th;
8292 DUMP_VECT_SCOPE ("vec_transform_loop");
8294 loop_vinfo->shared->check_datarefs ();
8296 /* Use the more conservative vectorization threshold. If the number
8297 of iterations is constant assume the cost check has been performed
8298 by our caller. If the threshold makes all loops profitable that
8299 run at least the (estimated) vectorization factor number of times,
8300 checking is pointless, too. */
8301 th = LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo);
8302 if (th >= vect_vf_for_cost (loop_vinfo)
8303 && !LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
8305 if (dump_enabled_p ())
8306 dump_printf_loc (MSG_NOTE, vect_location,
8307 "Profitability threshold is %d loop iterations.\n",
8308 th);
8309 check_profitability = true;
8312 /* Make sure there exists a single-predecessor exit bb. Do this before
8313 versioning. */
8314 edge e = single_exit (loop);
8315 if (! single_pred_p (e->dest))
8317 split_loop_exit_edge (e);
8318 if (dump_enabled_p ())
8319 dump_printf (MSG_NOTE, "split exit edge\n");
8322 /* Version the loop first, if required, so the profitability check
8323 comes first. */
8325 if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
8327 poly_uint64 versioning_threshold
8328 = LOOP_VINFO_VERSIONING_THRESHOLD (loop_vinfo);
8329 if (check_profitability
8330 && ordered_p (poly_uint64 (th), versioning_threshold))
8332 versioning_threshold = ordered_max (poly_uint64 (th),
8333 versioning_threshold);
8334 check_profitability = false;
8336 vect_loop_versioning (loop_vinfo, th, check_profitability,
8337 versioning_threshold);
8338 check_profitability = false;
8341 /* Make sure there exists a single-predecessor exit bb also on the
8342 scalar loop copy. Do this after versioning but before peeling
8343 so CFG structure is fine for both scalar and if-converted loop
8344 to make slpeel_duplicate_current_defs_from_edges face matched
8345 loop closed PHI nodes on the exit. */
8346 if (LOOP_VINFO_SCALAR_LOOP (loop_vinfo))
8348 e = single_exit (LOOP_VINFO_SCALAR_LOOP (loop_vinfo));
8349 if (! single_pred_p (e->dest))
8351 split_loop_exit_edge (e);
8352 if (dump_enabled_p ())
8353 dump_printf (MSG_NOTE, "split exit edge of scalar loop\n");
8357 tree niters = vect_build_loop_niters (loop_vinfo);
8358 LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = niters;
8359 tree nitersm1 = unshare_expr (LOOP_VINFO_NITERSM1 (loop_vinfo));
8360 bool niters_no_overflow = loop_niters_no_overflow (loop_vinfo);
8361 epilogue = vect_do_peeling (loop_vinfo, niters, nitersm1, &niters_vector,
8362 &step_vector, &niters_vector_mult_vf, th,
8363 check_profitability, niters_no_overflow);
8365 if (niters_vector == NULL_TREE)
8367 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
8368 && !LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
8369 && known_eq (lowest_vf, vf))
8371 niters_vector
8372 = build_int_cst (TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo)),
8373 LOOP_VINFO_INT_NITERS (loop_vinfo) / lowest_vf);
8374 step_vector = build_one_cst (TREE_TYPE (niters));
8376 else
8377 vect_gen_vector_loop_niters (loop_vinfo, niters, &niters_vector,
8378 &step_vector, niters_no_overflow);
8381 /* 1) Make sure the loop header has exactly two entries
8382 2) Make sure we have a preheader basic block. */
8384 gcc_assert (EDGE_COUNT (loop->header->preds) == 2);
8386 split_edge (loop_preheader_edge (loop));
8388 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
8389 && vect_use_loop_mask_for_alignment_p (loop_vinfo))
8390 /* This will deal with any possible peeling. */
8391 vect_prepare_for_masked_peels (loop_vinfo);
8393 /* Schedule the SLP instances first, then handle loop vectorization
8394 below. */
8395 if (!loop_vinfo->slp_instances.is_empty ())
8397 DUMP_VECT_SCOPE ("scheduling SLP instances");
8398 vect_schedule_slp (loop_vinfo);
8401 /* FORNOW: the vectorizer supports only loops whose body consists
8402 of one basic block (header + empty latch). When the vectorizer
8403 supports more involved loop forms, the order in which the BBs are
8404 traversed needs to be reconsidered. */
8406 for (i = 0; i < nbbs; i++)
8408 basic_block bb = bbs[i];
8409 stmt_vec_info stmt_info;
8411 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
8412 gsi_next (&si))
8414 gphi *phi = si.phi ();
8415 if (dump_enabled_p ())
8417 dump_printf_loc (MSG_NOTE, vect_location,
8418 "------>vectorizing phi: ");
8419 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
8421 stmt_info = loop_vinfo->lookup_stmt (phi);
8422 if (!stmt_info)
8423 continue;
8425 if (MAY_HAVE_DEBUG_BIND_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
8426 vect_loop_kill_debug_uses (loop, stmt_info);
8428 if (!STMT_VINFO_RELEVANT_P (stmt_info)
8429 && !STMT_VINFO_LIVE_P (stmt_info))
8430 continue;
8432 if (STMT_VINFO_VECTYPE (stmt_info)
8433 && (maybe_ne
8434 (TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info)), vf))
8435 && dump_enabled_p ())
8436 dump_printf_loc (MSG_NOTE, vect_location, "multiple-types.\n");
8438 if ((STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def
8439 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def
8440 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle)
8441 && ! PURE_SLP_STMT (stmt_info))
8443 if (dump_enabled_p ())
8444 dump_printf_loc (MSG_NOTE, vect_location, "transform phi.\n");
8445 vect_transform_stmt (stmt_info, NULL, NULL, NULL);
8449 for (gimple_stmt_iterator si = gsi_start_bb (bb);
8450 !gsi_end_p (si);)
8452 stmt = gsi_stmt (si);
8453 /* During vectorization remove existing clobber stmts. */
8454 if (gimple_clobber_p (stmt))
8456 unlink_stmt_vdef (stmt);
8457 gsi_remove (&si, true);
8458 release_defs (stmt);
8460 else
8462 stmt_info = loop_vinfo->lookup_stmt (stmt);
8464 /* vector stmts created in the outer-loop during vectorization of
8465 stmts in an inner-loop may not have a stmt_info, and do not
8466 need to be vectorized. */
8467 stmt_vec_info seen_store = NULL;
8468 if (stmt_info)
8470 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
8472 gimple *def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
8473 for (gimple_stmt_iterator subsi = gsi_start (def_seq);
8474 !gsi_end_p (subsi); gsi_next (&subsi))
8476 stmt_vec_info pat_stmt_info
8477 = loop_vinfo->lookup_stmt (gsi_stmt (subsi));
8478 vect_transform_loop_stmt (loop_vinfo, pat_stmt_info,
8479 &si, &seen_store);
8481 stmt_vec_info pat_stmt_info
8482 = STMT_VINFO_RELATED_STMT (stmt_info);
8483 vect_transform_loop_stmt (loop_vinfo, pat_stmt_info, &si,
8484 &seen_store);
8486 vect_transform_loop_stmt (loop_vinfo, stmt_info, &si,
8487 &seen_store);
8489 gsi_next (&si);
8490 if (seen_store)
8492 if (STMT_VINFO_GROUPED_ACCESS (seen_store))
8493 /* Interleaving. The vectorization of the
8494 interleaving chain was completed - free
8495 all the stores in the chain. */
8496 vect_remove_stores (DR_GROUP_FIRST_ELEMENT (seen_store));
8497 else
8498 /* Free the attached stmt_vec_info and remove the stmt. */
8499 loop_vinfo->remove_stmt (stmt_info);
8504 /* Stub out scalar statements that must not survive vectorization.
8505 Doing this here helps with grouped statements, or statements that
8506 are involved in patterns. */
8507 for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
8508 !gsi_end_p (gsi); gsi_next (&gsi))
8510 gcall *call = dyn_cast <gcall *> (gsi_stmt (gsi));
8511 if (call && gimple_call_internal_p (call, IFN_MASK_LOAD))
8513 tree lhs = gimple_get_lhs (call);
8514 if (!VECTOR_TYPE_P (TREE_TYPE (lhs)))
8516 tree zero = build_zero_cst (TREE_TYPE (lhs));
8517 gimple *new_stmt = gimple_build_assign (lhs, zero);
8518 gsi_replace (&gsi, new_stmt, true);
8522 } /* BBs in loop */
8524 /* The vectorization factor is always > 1, so if we use an IV increment of 1,
8525 a zero NITERS becomes a nonzero NITERS_VECTOR. */
8526 if (integer_onep (step_vector))
8527 niters_no_overflow = true;
8528 vect_set_loop_condition (loop, loop_vinfo, niters_vector, step_vector,
8529 niters_vector_mult_vf, !niters_no_overflow);
8531 unsigned int assumed_vf = vect_vf_for_cost (loop_vinfo);
8532 scale_profile_for_vect_loop (loop, assumed_vf);
8534 /* True if the final iteration might not handle a full vector's
8535 worth of scalar iterations. */
8536 bool final_iter_may_be_partial = LOOP_VINFO_FULLY_MASKED_P (loop_vinfo);
8537 /* The minimum number of iterations performed by the epilogue. This
8538 is 1 when peeling for gaps because we always need a final scalar
8539 iteration. */
8540 int min_epilogue_iters = LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) ? 1 : 0;
8541 /* +1 to convert latch counts to loop iteration counts,
8542 -min_epilogue_iters to remove iterations that cannot be performed
8543 by the vector code. */
8544 int bias_for_lowest = 1 - min_epilogue_iters;
8545 int bias_for_assumed = bias_for_lowest;
8546 int alignment_npeels = LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
8547 if (alignment_npeels && LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
8549 /* When the amount of peeling is known at compile time, the first
8550 iteration will have exactly alignment_npeels active elements.
8551 In the worst case it will have at least one. */
8552 int min_first_active = (alignment_npeels > 0 ? alignment_npeels : 1);
8553 bias_for_lowest += lowest_vf - min_first_active;
8554 bias_for_assumed += assumed_vf - min_first_active;
8556 /* In these calculations the "- 1" converts loop iteration counts
8557 back to latch counts. */
8558 if (loop->any_upper_bound)
8559 loop->nb_iterations_upper_bound
8560 = (final_iter_may_be_partial
8561 ? wi::udiv_ceil (loop->nb_iterations_upper_bound + bias_for_lowest,
8562 lowest_vf) - 1
8563 : wi::udiv_floor (loop->nb_iterations_upper_bound + bias_for_lowest,
8564 lowest_vf) - 1);
8565 if (loop->any_likely_upper_bound)
8566 loop->nb_iterations_likely_upper_bound
8567 = (final_iter_may_be_partial
8568 ? wi::udiv_ceil (loop->nb_iterations_likely_upper_bound
8569 + bias_for_lowest, lowest_vf) - 1
8570 : wi::udiv_floor (loop->nb_iterations_likely_upper_bound
8571 + bias_for_lowest, lowest_vf) - 1);
8572 if (loop->any_estimate)
8573 loop->nb_iterations_estimate
8574 = (final_iter_may_be_partial
8575 ? wi::udiv_ceil (loop->nb_iterations_estimate + bias_for_assumed,
8576 assumed_vf) - 1
8577 : wi::udiv_floor (loop->nb_iterations_estimate + bias_for_assumed,
8578 assumed_vf) - 1);
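 /* Illustrative example (hypothetical bounds): with lowest_vf = 4, no
    peeling for gaps and no full masking, bias_for_lowest is 1, so a
    scalar latch bound of 11 (12 iterations) becomes
    udiv_floor (11 + 1, 4) - 1 = 2, i.e. at most three vector
    iterations.  */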
8580 if (dump_enabled_p ())
8582 if (!LOOP_VINFO_EPILOGUE_P (loop_vinfo))
8584 dump_printf_loc (MSG_NOTE, vect_location,
8585 "LOOP VECTORIZED\n");
8586 if (loop->inner)
8587 dump_printf_loc (MSG_NOTE, vect_location,
8588 "OUTER LOOP VECTORIZED\n");
8589 dump_printf (MSG_NOTE, "\n");
8591 else
8593 dump_printf_loc (MSG_NOTE, vect_location,
8594 "LOOP EPILOGUE VECTORIZED (VS=");
8595 dump_dec (MSG_NOTE, current_vector_size);
8596 dump_printf (MSG_NOTE, ")\n");
8600 /* Free SLP instances here because otherwise stmt reference counting
8601 won't work. */
8602 slp_instance instance;
8603 FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), i, instance)
8604 vect_free_slp_instance (instance, true);
8605 LOOP_VINFO_SLP_INSTANCES (loop_vinfo).release ();
8606 /* Clear up the safelen field since its value is invalid after vectorization,
8607 because the vectorized loop can have loop-carried dependencies. */
8608 loop->safelen = 0;
8610 /* Don't vectorize epilogue for epilogue. */
8611 if (LOOP_VINFO_EPILOGUE_P (loop_vinfo))
8612 epilogue = NULL;
8614 if (!PARAM_VALUE (PARAM_VECT_EPILOGUES_NOMASK))
8615 epilogue = NULL;
8617 if (epilogue)
8619 auto_vector_sizes vector_sizes;
8620 targetm.vectorize.autovectorize_vector_sizes (&vector_sizes);
8621 unsigned int next_size = 0;
8623 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
8624 && LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) >= 0
8625 && known_eq (vf, lowest_vf))
8627 unsigned int eiters
8628 = (LOOP_VINFO_INT_NITERS (loop_vinfo)
8629 - LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo));
8630 eiters = eiters % lowest_vf;
8631 epilogue->nb_iterations_upper_bound = eiters - 1;
8633 unsigned int ratio;
8634 while (next_size < vector_sizes.length ()
8635 && !(constant_multiple_p (current_vector_size,
8636 vector_sizes[next_size], &ratio)
8637 && eiters >= lowest_vf / ratio))
8638 next_size += 1;
8640 else
8641 while (next_size < vector_sizes.length ()
8642 && maybe_lt (current_vector_size, vector_sizes[next_size]))
8643 next_size += 1;
8645 if (next_size == vector_sizes.length ())
8646 epilogue = NULL;
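 /* Illustrative example (hypothetical target): with candidate vector
    sizes of 64, 32 and 16 bytes, current_vector_size = 64,
    lowest_vf = 16 and eiters = 5, the 64- and 32-byte candidates are
    skipped (they would need at least 16 and 8 remaining iterations);
    the check settles on the 16-byte candidate, which only needs 4, so
    the epilogue loop is kept for a further vectorization attempt.  */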
8649 if (epilogue)
8651 epilogue->force_vectorize = loop->force_vectorize;
8652 epilogue->safelen = loop->safelen;
8653 epilogue->dont_vectorize = false;
8655 /* We may need to if-convert epilogue to vectorize it. */
8656 if (LOOP_VINFO_SCALAR_LOOP (loop_vinfo))
8657 tree_if_conversion (epilogue);
8660 return epilogue;
8663 /* The code below tries to perform a simple optimization - revert
8664 if-conversion for masked stores, i.e. if the mask of a store is zero,
8665 do not perform the store and, if possible, skip the stored-value producers too.
8666 For example,
8667 for (i=0; i<n; i++)
8668 if (c[i])
8670 p1[i] += 1;
8671 p2[i] = p3[i] +2;
8673 this transformation will produce the following semi-hammock:
8675 if (!mask__ifc__42.18_165 == { 0, 0, 0, 0, 0, 0, 0, 0 })
8677 vect__11.19_170 = MASK_LOAD (vectp_p1.20_168, 0B, mask__ifc__42.18_165);
8678 vect__12.22_172 = vect__11.19_170 + vect_cst__171;
8679 MASK_STORE (vectp_p1.23_175, 0B, mask__ifc__42.18_165, vect__12.22_172);
8680 vect__18.25_182 = MASK_LOAD (vectp_p3.26_180, 0B, mask__ifc__42.18_165);
8681 vect__19.28_184 = vect__18.25_182 + vect_cst__183;
8682 MASK_STORE (vectp_p2.29_187, 0B, mask__ifc__42.18_165, vect__19.28_184);
8686 void
8687 optimize_mask_stores (struct loop *loop)
8689 basic_block *bbs = get_loop_body (loop);
8690 unsigned nbbs = loop->num_nodes;
8691 unsigned i;
8692 basic_block bb;
8693 struct loop *bb_loop;
8694 gimple_stmt_iterator gsi;
8695 gimple *stmt;
8696 auto_vec<gimple *> worklist;
8698 vect_location = find_loop_location (loop);
8699 /* Pick up all masked stores in loop if any. */
8700 for (i = 0; i < nbbs; i++)
8702 bb = bbs[i];
8703 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
8704 gsi_next (&gsi))
8706 stmt = gsi_stmt (gsi);
8707 if (gimple_call_internal_p (stmt, IFN_MASK_STORE))
8708 worklist.safe_push (stmt);
8712 free (bbs);
8713 if (worklist.is_empty ())
8714 return;
8716 /* Loop has masked stores. */
8717 while (!worklist.is_empty ())
8719 gimple *last, *last_store;
8720 edge e, efalse;
8721 tree mask;
8722 basic_block store_bb, join_bb;
8723 gimple_stmt_iterator gsi_to;
8724 tree vdef, new_vdef;
8725 gphi *phi;
8726 tree vectype;
8727 tree zero;
8729 last = worklist.pop ();
8730 mask = gimple_call_arg (last, 2);
8731 bb = gimple_bb (last);
8732 /* Create then_bb and the if-then structure in the CFG; then_bb belongs
8733 to the same loop as if_bb. It can be different from LOOP when a
8734 two-level loop nest is vectorized and the mask_store belongs to the
8735 inner one. */
8736 e = split_block (bb, last);
8737 bb_loop = bb->loop_father;
8738 gcc_assert (loop == bb_loop || flow_loop_nested_p (loop, bb_loop));
8739 join_bb = e->dest;
8740 store_bb = create_empty_bb (bb);
8741 add_bb_to_loop (store_bb, bb_loop);
8742 e->flags = EDGE_TRUE_VALUE;
8743 efalse = make_edge (bb, store_bb, EDGE_FALSE_VALUE);
8744 /* Put STORE_BB to likely part. */
8745 efalse->probability = profile_probability::unlikely ();
8746 store_bb->count = efalse->count ();
8747 make_single_succ_edge (store_bb, join_bb, EDGE_FALLTHRU);
8748 if (dom_info_available_p (CDI_DOMINATORS))
8749 set_immediate_dominator (CDI_DOMINATORS, store_bb, bb);
8750 if (dump_enabled_p ())
8751 dump_printf_loc (MSG_NOTE, vect_location,
8752 "Create new block %d to sink mask stores.",
8753 store_bb->index);
8754 /* Create vector comparison with boolean result. */
8755 vectype = TREE_TYPE (mask);
8756 zero = build_zero_cst (vectype);
8757 stmt = gimple_build_cond (EQ_EXPR, mask, zero, NULL_TREE, NULL_TREE);
8758 gsi = gsi_last_bb (bb);
8759 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
8760 /* Create new PHI node for vdef of the last masked store:
8761 .MEM_2 = VDEF <.MEM_1>
8762 will be converted to
8763 .MEM.3 = VDEF <.MEM_1>
8764 and new PHI node will be created in join bb
8765 .MEM_2 = PHI <.MEM_1, .MEM_3>
8767 vdef = gimple_vdef (last);
8768 new_vdef = make_ssa_name (gimple_vop (cfun), last);
8769 gimple_set_vdef (last, new_vdef);
8770 phi = create_phi_node (vdef, join_bb);
8771 add_phi_arg (phi, new_vdef, EDGE_SUCC (store_bb, 0), UNKNOWN_LOCATION);
8773 /* Put all masked stores with the same mask to STORE_BB if possible. */
8774 while (true)
8776 gimple_stmt_iterator gsi_from;
8777 gimple *stmt1 = NULL;
8779 /* Move masked store to STORE_BB. */
8780 last_store = last;
8781 gsi = gsi_for_stmt (last);
8782 gsi_from = gsi;
8783 /* Shift GSI to the previous stmt for further traversal. */
8784 gsi_prev (&gsi);
8785 gsi_to = gsi_start_bb (store_bb);
8786 gsi_move_before (&gsi_from, &gsi_to);
8787 /* Setup GSI_TO to the non-empty block start. */
8788 gsi_to = gsi_start_bb (store_bb);
8789 if (dump_enabled_p ())
8791 dump_printf_loc (MSG_NOTE, vect_location,
8792 "Move stmt to created bb\n");
8793 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, last, 0);
8795 /* Move all stored value producers if possible. */
8796 while (!gsi_end_p (gsi))
8798 tree lhs;
8799 imm_use_iterator imm_iter;
8800 use_operand_p use_p;
8801 bool res;
8803 /* Skip debug statements. */
8804 if (is_gimple_debug (gsi_stmt (gsi)))
8806 gsi_prev (&gsi);
8807 continue;
8809 stmt1 = gsi_stmt (gsi);
8810 /* Do not consider statements writing to memory or having
8811 volatile operand. */
8812 if (gimple_vdef (stmt1)
8813 || gimple_has_volatile_ops (stmt1))
8814 break;
8815 gsi_from = gsi;
8816 gsi_prev (&gsi);
8817 lhs = gimple_get_lhs (stmt1);
8818 if (!lhs)
8819 break;
8821 /* LHS of vectorized stmt must be SSA_NAME. */
8822 if (TREE_CODE (lhs) != SSA_NAME)
8823 break;
8825 if (!VECTOR_TYPE_P (TREE_TYPE (lhs)))
8827 /* Remove dead scalar statement. */
8828 if (has_zero_uses (lhs))
8830 gsi_remove (&gsi_from, true);
8831 continue;
8835 /* Check that LHS does not have uses outside of STORE_BB. */
8836 res = true;
8837 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
8839 gimple *use_stmt;
8840 use_stmt = USE_STMT (use_p);
8841 if (is_gimple_debug (use_stmt))
8842 continue;
8843 if (gimple_bb (use_stmt) != store_bb)
8845 res = false;
8846 break;
8849 if (!res)
8850 break;
8852 if (gimple_vuse (stmt1)
8853 && gimple_vuse (stmt1) != gimple_vuse (last_store))
8854 break;
8856 /* Can move STMT1 to STORE_BB. */
8857 if (dump_enabled_p ())
8859 dump_printf_loc (MSG_NOTE, vect_location,
8860 "Move stmt to created bb\n");
8861 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt1, 0);
8863 gsi_move_before (&gsi_from, &gsi_to);
8864 /* Shift GSI_TO for further insertion. */
8865 gsi_prev (&gsi_to);
8867 /* Put other masked stores with the same mask to STORE_BB. */
8868 if (worklist.is_empty ()
8869 || gimple_call_arg (worklist.last (), 2) != mask
8870 || worklist.last () != stmt1)
8871 break;
8872 last = worklist.pop ();
8874 add_phi_arg (phi, gimple_vuse (last_store), e, UNKNOWN_LOCATION);