[official-gcc.git] / gcc / tree-vect-loop.c
/* Loop Vectorization
   Copyright (C) 2003-2018 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com> and
   Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "optabs-tree.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "cfganal.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "tree-ssa-loop-ivopts.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "cfgloop.h"
#include "params.h"
#include "tree-scalar-evolution.h"
#include "tree-vectorizer.h"
#include "gimple-fold.h"
#include "cgraph.h"
#include "tree-cfg.h"
#include "tree-if-conv.h"
#include "internal-fn.h"
#include "tree-vector-builder.h"
#include "vec-perm-indices.h"
#include "tree-eh.h"

/* Loop Vectorization Pass.

   This pass tries to vectorize loops.

   For example, the vectorizer transforms the following simple loop:

        short a[N]; short b[N]; short c[N]; int i;

        for (i=0; i<N; i++){
          a[i] = b[i] + c[i];
        }

   as if it was manually vectorized by rewriting the source code into:

        typedef int __attribute__((mode(V8HI))) v8hi;
        short a[N]; short b[N]; short c[N]; int i;
        v8hi *pa = (v8hi*)a, *pb = (v8hi*)b, *pc = (v8hi*)c;
        v8hi va, vb, vc;

        for (i=0; i<N/8; i++){
          vb = pb[i];
          vc = pc[i];
          va = vb + vc;
          pa[i] = va;
        }

   The main entry to this pass is vectorize_loops(), in which
   the vectorizer applies a set of analyses on a given set of loops,
   followed by the actual vectorization transformation for the loops that
   had successfully passed the analysis phase.
   Throughout this pass we make a distinction between two types of
   data: scalars (which are represented by SSA_NAMES), and memory references
   ("data-refs").  These two types of data require different handling both
   during analysis and transformation.  The types of data-refs that the
   vectorizer currently supports are ARRAY_REFS whose base is an array DECL
   (not a pointer), and INDIRECT_REFS through pointers; both array and pointer
   accesses are required to have a simple (consecutive) access pattern.

   Analysis phase:
   ===============
   The driver for the analysis phase is vect_analyze_loop().
   It applies a set of analyses, some of which rely on the scalar evolution
   analyzer (scev) developed by Sebastian Pop.

   During the analysis phase the vectorizer records some information
   per stmt in a "stmt_vec_info" struct which is attached to each stmt in the
   loop, as well as general information about the loop as a whole, which is
   recorded in a "loop_vec_info" struct attached to each loop.

   Transformation phase:
   =====================
   The loop transformation phase scans all the stmts in the loop, and
   creates a vector stmt (or a sequence of stmts) for each scalar stmt S in
   the loop that needs to be vectorized.  It inserts the vector code sequence
   just before the scalar stmt S, and records a pointer to the vector code
   in STMT_VINFO_VEC_STMT (stmt_info) (stmt_info is the stmt_vec_info struct
   attached to S).  This pointer will be used for the vectorization of
   following stmts which use the def of stmt S.  Stmt S is removed if it
   writes to memory; otherwise, we rely on dead code elimination for
   removing it.

   For example, say stmt S1 was vectorized into stmt VS1:

   VS1: vb = px[i];
   S1:  b = x[i];    STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
   S2:  a = b;

   To vectorize stmt S2, the vectorizer first finds the stmt that defines
   the operand 'b' (S1), and gets the relevant vector def 'vb' from the
   vector stmt VS1 pointed to by STMT_VINFO_VEC_STMT (stmt_info (S1)).  The
   resulting sequence would be:

   VS1: vb = px[i];
   S1:  b = x[i];    STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
   VS2: va = vb;
   S2:  a = b;       STMT_VINFO_VEC_STMT (stmt_info (S2)) = VS2

   Operands that are not SSA_NAMEs, are data-refs that appear in
   load/store operations (like 'x[i]' in S1), and are handled differently.

   Target modeling:
   =================
   Currently the only target specific information that is used is the
   size of the vector (in bytes) - "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD".
   Targets that can support different sizes of vectors, for now will need
   to specify one value for "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD".  More
   flexibility will be added in the future.

   Since we only vectorize operations whose vector form can be
   expressed using existing tree codes, to verify that an operation is
   supported, the vectorizer checks the relevant optab at the relevant
   machine_mode (e.g., optab_handler (add_optab, V8HImode)).  If
   the value found is CODE_FOR_nothing, then there's no target support, and
   we can't vectorize the stmt.

   For additional information on this project see:
   http://gcc.gnu.org/projects/tree-ssa/vectorization.html
*/
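/* As a hedged illustration of the target-support check described above
   (a sketch only, not a helper that exists in this file): asking whether
   a target can add two V8HI vectors amounts to something like

        if (optab_handler (add_optab, V8HImode) == CODE_FOR_nothing)
          ... no target support, the stmt cannot be vectorized ...

   i.e. the optab query either yields the insn code implementing the
   operation in that mode or CODE_FOR_nothing.  */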
static void vect_estimate_min_profitable_iters (loop_vec_info, int *, int *);

/* Subroutine of vect_determine_vf_for_stmt that handles only one
   statement.  VECTYPE_MAYBE_SET_P is true if STMT_VINFO_VECTYPE
   may already be set for general statements (not just data refs).  */

static opt_result
vect_determine_vf_for_stmt_1 (stmt_vec_info stmt_info,
                              bool vectype_maybe_set_p,
                              poly_uint64 *vf,
                              vec<stmt_vec_info > *mask_producers)
{
  gimple *stmt = stmt_info->stmt;

  if ((!STMT_VINFO_RELEVANT_P (stmt_info)
       && !STMT_VINFO_LIVE_P (stmt_info))
      || gimple_clobber_p (stmt))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location, "skip.\n");
      return opt_result::success ();
    }

  tree stmt_vectype, nunits_vectype;
  opt_result res = vect_get_vector_types_for_stmt (stmt_info, &stmt_vectype,
                                                   &nunits_vectype);
  if (!res)
    return res;

  if (stmt_vectype)
    {
      if (STMT_VINFO_VECTYPE (stmt_info))
        /* The only case when a vectype had been already set is for stmts
           that contain a data ref, or for "pattern-stmts" (stmts generated
           by the vectorizer to represent/replace a certain idiom).  */
        gcc_assert ((STMT_VINFO_DATA_REF (stmt_info)
                     || vectype_maybe_set_p)
                    && STMT_VINFO_VECTYPE (stmt_info) == stmt_vectype);
      else if (stmt_vectype == boolean_type_node)
        mask_producers->safe_push (stmt_info);
      else
        STMT_VINFO_VECTYPE (stmt_info) = stmt_vectype;
    }

  if (nunits_vectype)
    vect_update_max_nunits (vf, nunits_vectype);

  return opt_result::success ();
}
/* Subroutine of vect_determine_vectorization_factor.  Set the vector
   types of STMT_INFO and all attached pattern statements and update
   the vectorization factor VF accordingly.  If some of the statements
   produce a mask result whose vector type can only be calculated later,
   add them to MASK_PRODUCERS.  Return true on success or false if
   something prevented vectorization.  */

static opt_result
vect_determine_vf_for_stmt (stmt_vec_info stmt_info, poly_uint64 *vf,
                            vec<stmt_vec_info > *mask_producers)
{
  vec_info *vinfo = stmt_info->vinfo;
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: %G",
                     stmt_info->stmt);
  opt_result res
    = vect_determine_vf_for_stmt_1 (stmt_info, false, vf, mask_producers);
  if (!res)
    return res;

  if (STMT_VINFO_IN_PATTERN_P (stmt_info)
      && STMT_VINFO_RELATED_STMT (stmt_info))
    {
      gimple *pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
      stmt_info = STMT_VINFO_RELATED_STMT (stmt_info);

      /* If a pattern statement has def stmts, analyze them too.  */
      for (gimple_stmt_iterator si = gsi_start (pattern_def_seq);
           !gsi_end_p (si); gsi_next (&si))
        {
          stmt_vec_info def_stmt_info = vinfo->lookup_stmt (gsi_stmt (si));
          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location,
                             "==> examining pattern def stmt: %G",
                             def_stmt_info->stmt);
          res = vect_determine_vf_for_stmt_1 (def_stmt_info, true,
                                              vf, mask_producers);
          if (!res)
            return res;
        }

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "==> examining pattern statement: %G",
                         stmt_info->stmt);
      res = vect_determine_vf_for_stmt_1 (stmt_info, true, vf, mask_producers);
      if (!res)
        return res;
    }

  return opt_result::success ();
}
/* Function vect_determine_vectorization_factor

   Determine the vectorization factor (VF).  VF is the number of data elements
   that are operated upon in parallel in a single iteration of the vectorized
   loop.  For example, when vectorizing a loop that operates on 4byte elements,
   on a target with vector size (VS) 16byte, the VF is set to 4, since 4
   elements can fit in a single vector register.

   We currently support vectorization of loops in which all types operated upon
   are of the same size.  Therefore this function currently sets VF according to
   the size of the types operated upon, and fails if there are multiple sizes
   in the loop.

   VF is also the factor by which the loop iterations are strip-mined, e.g.:
   original loop:
        for (i=0; i<N; i++){
          a[i] = b[i] + c[i];
        }

   vectorized loop:
        for (i=0; i<N; i+=VF){
          a[i:VF] = b[i:VF] + c[i:VF];
        }
*/
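/* A small worked illustration of the computation below (illustrative only,
   not additional semantics): on a target with 16-byte vectors, a loop whose
   statements all operate on 2-byte shorts gets a vector type with
   TYPE_VECTOR_SUBPARTS == 8, so vect_update_max_nunits raises the running
   factor to 8 and the loop is strip-mined by VF == 8:

        for (i=0; i<N; i+=8){
          a[i:8] = b[i:8] + c[i:8];
        }
*/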
286 static opt_result
287 vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
289 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
290 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
291 unsigned nbbs = loop->num_nodes;
292 poly_uint64 vectorization_factor = 1;
293 tree scalar_type = NULL_TREE;
294 gphi *phi;
295 tree vectype;
296 stmt_vec_info stmt_info;
297 unsigned i;
298 auto_vec<stmt_vec_info> mask_producers;
300 DUMP_VECT_SCOPE ("vect_determine_vectorization_factor");
302 for (i = 0; i < nbbs; i++)
304 basic_block bb = bbs[i];
306 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
307 gsi_next (&si))
309 phi = si.phi ();
310 stmt_info = loop_vinfo->lookup_stmt (phi);
311 if (dump_enabled_p ())
312 dump_printf_loc (MSG_NOTE, vect_location, "==> examining phi: %G",
313 phi);
315 gcc_assert (stmt_info);
317 if (STMT_VINFO_RELEVANT_P (stmt_info)
318 || STMT_VINFO_LIVE_P (stmt_info))
320 gcc_assert (!STMT_VINFO_VECTYPE (stmt_info));
321 scalar_type = TREE_TYPE (PHI_RESULT (phi));
323 if (dump_enabled_p ())
324 dump_printf_loc (MSG_NOTE, vect_location,
325 "get vectype for scalar type: %T\n",
326 scalar_type);
328 vectype = get_vectype_for_scalar_type (scalar_type);
329 if (!vectype)
330 return opt_result::failure_at (phi,
331 "not vectorized: unsupported "
332 "data-type %T\n",
333 scalar_type);
334 STMT_VINFO_VECTYPE (stmt_info) = vectype;
336 if (dump_enabled_p ())
337 dump_printf_loc (MSG_NOTE, vect_location, "vectype: %T\n",
338 vectype);
340 if (dump_enabled_p ())
342 dump_printf_loc (MSG_NOTE, vect_location, "nunits = ");
343 dump_dec (MSG_NOTE, TYPE_VECTOR_SUBPARTS (vectype));
344 dump_printf (MSG_NOTE, "\n");
347 vect_update_max_nunits (&vectorization_factor, vectype);
351 for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
352 gsi_next (&si))
354 stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
355 opt_result res
356 = vect_determine_vf_for_stmt (stmt_info, &vectorization_factor,
357 &mask_producers);
358 if (!res)
359 return res;
363 /* TODO: Analyze cost. Decide if worth while to vectorize. */
364 if (dump_enabled_p ())
366 dump_printf_loc (MSG_NOTE, vect_location, "vectorization factor = ");
367 dump_dec (MSG_NOTE, vectorization_factor);
368 dump_printf (MSG_NOTE, "\n");
371 if (known_le (vectorization_factor, 1U))
372 return opt_result::failure_at (vect_location,
373 "not vectorized: unsupported data-type\n");
374 LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;
376 for (i = 0; i < mask_producers.length (); i++)
378 stmt_info = mask_producers[i];
379 opt_tree mask_type = vect_get_mask_type_for_stmt (stmt_info);
380 if (!mask_type)
381 return opt_result::propagate_failure (mask_type);
382 STMT_VINFO_VECTYPE (stmt_info) = mask_type;
385 return opt_result::success ();
389 /* Function vect_is_simple_iv_evolution.
391 FORNOW: A simple evolution of an induction variable in the loop is
392 considered a polynomial evolution. */
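/* For illustration (a sketch, not an exhaustive definition): for the IV of

        for (i = 0; i < n; i++)

   scev reports the access function {0, +, 1}_1, i.e. init 0 and a step of 1
   that is itself invariant in the loop - a "simple" evolution.  An access
   function such as {0, +, {0, +, 1}_1}_1, whose step again evolves in the
   loop (a polynomial of degree 2), is rejected below because its evolution
   part is still a chrec.  */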
394 static bool
395 vect_is_simple_iv_evolution (unsigned loop_nb, tree access_fn, tree * init,
396 tree * step)
398 tree init_expr;
399 tree step_expr;
400 tree evolution_part = evolution_part_in_loop_num (access_fn, loop_nb);
401 basic_block bb;
403 /* When there is no evolution in this loop, the evolution function
404 is not "simple". */
405 if (evolution_part == NULL_TREE)
406 return false;
408 /* When the evolution is a polynomial of degree >= 2
409 the evolution function is not "simple". */
410 if (tree_is_chrec (evolution_part))
411 return false;
413 step_expr = evolution_part;
414 init_expr = unshare_expr (initial_condition_in_loop_num (access_fn, loop_nb));
416 if (dump_enabled_p ())
417 dump_printf_loc (MSG_NOTE, vect_location, "step: %T, init: %T\n",
418 step_expr, init_expr);
420 *init = init_expr;
421 *step = step_expr;
423 if (TREE_CODE (step_expr) != INTEGER_CST
424 && (TREE_CODE (step_expr) != SSA_NAME
425 || ((bb = gimple_bb (SSA_NAME_DEF_STMT (step_expr)))
426 && flow_bb_inside_loop_p (get_loop (cfun, loop_nb), bb))
427 || (!INTEGRAL_TYPE_P (TREE_TYPE (step_expr))
428 && (!SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr))
429 || !flag_associative_math)))
430 && (TREE_CODE (step_expr) != REAL_CST
431 || !flag_associative_math))
433 if (dump_enabled_p ())
434 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
435 "step unknown.\n");
436 return false;
439 return true;
442 /* Return true if PHI, described by STMT_INFO, is the inner PHI in
443 what we are assuming is a double reduction. For example, given
444 a structure like this:
446 outer1:
447 x_1 = PHI <x_4(outer2), ...>;
450 inner:
451 x_2 = PHI <x_1(outer1), ...>;
453 x_3 = ...;
456 outer2:
457 x_4 = PHI <x_3(inner)>;
460 outer loop analysis would treat x_1 as a double reduction phi and
461 this function would then return true for x_2. */
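/* At the source level the structure above typically comes from a nested
   summation like (an illustrative example only):

        int sum = 0;
        for (i = 0; i < n; i++)
          for (j = 0; j < m; j++)
            sum += a[i][j];

   where the outer-loop PHI for sum plays the role of the double reduction
   phi (x_1 above) and the inner-loop PHI feeding the additions is the one
   this function detects (x_2 above).  */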
463 static bool
464 vect_inner_phi_in_double_reduction_p (stmt_vec_info stmt_info, gphi *phi)
466 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
467 use_operand_p use_p;
468 ssa_op_iter op_iter;
469 FOR_EACH_PHI_ARG (use_p, phi, op_iter, SSA_OP_USE)
470 if (stmt_vec_info def_info = loop_vinfo->lookup_def (USE_FROM_PTR (use_p)))
471 if (STMT_VINFO_DEF_TYPE (def_info) == vect_double_reduction_def)
472 return true;
473 return false;
476 /* Function vect_analyze_scalar_cycles_1.
478 Examine the cross iteration def-use cycles of scalar variables
479 in LOOP. LOOP_VINFO represents the loop that is now being
480 considered for vectorization (can be LOOP, or an outer-loop
481 enclosing LOOP). */
483 static void
484 vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
486 basic_block bb = loop->header;
487 tree init, step;
488 auto_vec<stmt_vec_info, 64> worklist;
489 gphi_iterator gsi;
490 bool double_reduc;
492 DUMP_VECT_SCOPE ("vect_analyze_scalar_cycles");
494 /* First - identify all inductions. Reduction detection assumes that all the
495 inductions have been identified, therefore, this order must not be
496 changed. */
497 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
499 gphi *phi = gsi.phi ();
500 tree access_fn = NULL;
501 tree def = PHI_RESULT (phi);
502 stmt_vec_info stmt_vinfo = loop_vinfo->lookup_stmt (phi);
504 if (dump_enabled_p ())
505 dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: %G", phi);
507 /* Skip virtual phi's. The data dependences that are associated with
508 virtual defs/uses (i.e., memory accesses) are analyzed elsewhere. */
509 if (virtual_operand_p (def))
510 continue;
512 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_unknown_def_type;
514 /* Analyze the evolution function. */
515 access_fn = analyze_scalar_evolution (loop, def);
516 if (access_fn)
518 STRIP_NOPS (access_fn);
519 if (dump_enabled_p ())
520 dump_printf_loc (MSG_NOTE, vect_location,
521 "Access function of PHI: %T\n", access_fn);
522 STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo)
523 = initial_condition_in_loop_num (access_fn, loop->num);
524 STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo)
525 = evolution_part_in_loop_num (access_fn, loop->num);
528 if (!access_fn
529 || vect_inner_phi_in_double_reduction_p (stmt_vinfo, phi)
530 || !vect_is_simple_iv_evolution (loop->num, access_fn, &init, &step)
531 || (LOOP_VINFO_LOOP (loop_vinfo) != loop
532 && TREE_CODE (step) != INTEGER_CST))
534 worklist.safe_push (stmt_vinfo);
535 continue;
538 gcc_assert (STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo)
539 != NULL_TREE);
540 gcc_assert (STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo) != NULL_TREE);
542 if (dump_enabled_p ())
543 dump_printf_loc (MSG_NOTE, vect_location, "Detected induction.\n");
544 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_induction_def;
548 /* Second - identify all reductions and nested cycles. */
549 while (worklist.length () > 0)
551 stmt_vec_info stmt_vinfo = worklist.pop ();
552 gphi *phi = as_a <gphi *> (stmt_vinfo->stmt);
553 tree def = PHI_RESULT (phi);
555 if (dump_enabled_p ())
556 dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: %G", phi);
558 gcc_assert (!virtual_operand_p (def)
559 && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_unknown_def_type);
561 stmt_vec_info reduc_stmt_info
562 = vect_force_simple_reduction (loop_vinfo, stmt_vinfo,
563 &double_reduc, false);
564 if (reduc_stmt_info)
566 if (double_reduc)
568 if (dump_enabled_p ())
569 dump_printf_loc (MSG_NOTE, vect_location,
570 "Detected double reduction.\n");
572 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_double_reduction_def;
573 STMT_VINFO_DEF_TYPE (reduc_stmt_info)
574 = vect_double_reduction_def;
576 else
578 if (loop != LOOP_VINFO_LOOP (loop_vinfo))
580 if (dump_enabled_p ())
581 dump_printf_loc (MSG_NOTE, vect_location,
582 "Detected vectorizable nested cycle.\n");
584 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_nested_cycle;
585 STMT_VINFO_DEF_TYPE (reduc_stmt_info) = vect_nested_cycle;
587 else
589 if (dump_enabled_p ())
590 dump_printf_loc (MSG_NOTE, vect_location,
591 "Detected reduction.\n");
593 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_reduction_def;
594 STMT_VINFO_DEF_TYPE (reduc_stmt_info) = vect_reduction_def;
595 /* Store the reduction cycles for possible vectorization in
596 loop-aware SLP if it was not detected as reduction
597 chain. */
598 if (! REDUC_GROUP_FIRST_ELEMENT (reduc_stmt_info))
599 LOOP_VINFO_REDUCTIONS (loop_vinfo).safe_push
600 (reduc_stmt_info);
604 else
605 if (dump_enabled_p ())
606 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
607 "Unknown def-use cycle pattern.\n");
612 /* Function vect_analyze_scalar_cycles.
614 Examine the cross iteration def-use cycles of scalar variables, by
615 analyzing the loop-header PHIs of scalar variables. Classify each
616 cycle as one of the following: invariant, induction, reduction, unknown.
617 We do that for the loop represented by LOOP_VINFO, and also for its
618 inner-loop, if it exists.
619 Examples for scalar cycles:
621 Example1: reduction:
623 loop1:
624 for (i=0; i<N; i++)
625 sum += a[i];
627 Example2: induction:
629 loop2:
630 for (i=0; i<N; i++)
631 a[i] = i; */
633 static void
634 vect_analyze_scalar_cycles (loop_vec_info loop_vinfo)
636 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
638 vect_analyze_scalar_cycles_1 (loop_vinfo, loop);
640 /* When vectorizing an outer-loop, the inner-loop is executed sequentially.
641 Reductions in such inner-loop therefore have different properties than
642 the reductions in the nest that gets vectorized:
643 1. When vectorized, they are executed in the same order as in the original
644 scalar loop, so we can't change the order of computation when
645 vectorizing them.
646 2. FIXME: Inner-loop reductions can be used in the inner-loop, so the
647 current checks are too strict. */
649 if (loop->inner)
650 vect_analyze_scalar_cycles_1 (loop_vinfo, loop->inner);
653 /* Transfer group and reduction information from STMT_INFO to its
654 pattern stmt. */
656 static void
657 vect_fixup_reduc_chain (stmt_vec_info stmt_info)
659 stmt_vec_info firstp = STMT_VINFO_RELATED_STMT (stmt_info);
660 stmt_vec_info stmtp;
661 gcc_assert (!REDUC_GROUP_FIRST_ELEMENT (firstp)
662 && REDUC_GROUP_FIRST_ELEMENT (stmt_info));
663 REDUC_GROUP_SIZE (firstp) = REDUC_GROUP_SIZE (stmt_info);
666 stmtp = STMT_VINFO_RELATED_STMT (stmt_info);
667 REDUC_GROUP_FIRST_ELEMENT (stmtp) = firstp;
668 stmt_info = REDUC_GROUP_NEXT_ELEMENT (stmt_info);
669 if (stmt_info)
670 REDUC_GROUP_NEXT_ELEMENT (stmtp)
671 = STMT_VINFO_RELATED_STMT (stmt_info);
673 while (stmt_info);
674 STMT_VINFO_DEF_TYPE (stmtp) = vect_reduction_def;
677 /* Fixup scalar cycles that now have their stmts detected as patterns. */
679 static void
680 vect_fixup_scalar_cycles_with_patterns (loop_vec_info loop_vinfo)
682 stmt_vec_info first;
683 unsigned i;
685 FOR_EACH_VEC_ELT (LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo), i, first)
686 if (STMT_VINFO_IN_PATTERN_P (first))
688 stmt_vec_info next = REDUC_GROUP_NEXT_ELEMENT (first);
689 while (next)
691 if (! STMT_VINFO_IN_PATTERN_P (next))
692 break;
693 next = REDUC_GROUP_NEXT_ELEMENT (next);
695 /* If not all stmts in the chain are patterns, try to handle
696 the chain without patterns. */
697 if (! next)
699 vect_fixup_reduc_chain (first);
700 LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo)[i]
701 = STMT_VINFO_RELATED_STMT (first);
706 /* Function vect_get_loop_niters.
708 Determine how many iterations the loop is executed and place it
709 in NUMBER_OF_ITERATIONS. Place the number of latch iterations
710 in NUMBER_OF_ITERATIONSM1. Place the condition under which the
711 niter information holds in ASSUMPTIONS.
713 Return the loop exit condition. */
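/* For instance (an illustrative case, not extra semantics): for a simple
   counted loop

        for (i = 0; i < n; i++)
          ...

   with n > 0, the latch runs n - 1 times, so NUMBER_OF_ITERATIONSM1 is
   n - 1 and NUMBER_OF_ITERATIONS (the number of header executions) is n.
   ASSUMPTIONS carries any condition under which that niter information is
   valid (boolean_true_node when it holds unconditionally).  */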
716 static gcond *
717 vect_get_loop_niters (struct loop *loop, tree *assumptions,
718 tree *number_of_iterations, tree *number_of_iterationsm1)
720 edge exit = single_exit (loop);
721 struct tree_niter_desc niter_desc;
722 tree niter_assumptions, niter, may_be_zero;
723 gcond *cond = get_loop_exit_condition (loop);
725 *assumptions = boolean_true_node;
726 *number_of_iterationsm1 = chrec_dont_know;
727 *number_of_iterations = chrec_dont_know;
728 DUMP_VECT_SCOPE ("get_loop_niters");
730 if (!exit)
731 return cond;
733 niter = chrec_dont_know;
734 may_be_zero = NULL_TREE;
735 niter_assumptions = boolean_true_node;
736 if (!number_of_iterations_exit_assumptions (loop, exit, &niter_desc, NULL)
737 || chrec_contains_undetermined (niter_desc.niter))
738 return cond;
740 niter_assumptions = niter_desc.assumptions;
741 may_be_zero = niter_desc.may_be_zero;
742 niter = niter_desc.niter;
744 if (may_be_zero && integer_zerop (may_be_zero))
745 may_be_zero = NULL_TREE;
747 if (may_be_zero)
749 if (COMPARISON_CLASS_P (may_be_zero))
751 /* Try to combine may_be_zero with assumptions, this can simplify
752 computation of niter expression. */
753 if (niter_assumptions && !integer_nonzerop (niter_assumptions))
754 niter_assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
755 niter_assumptions,
756 fold_build1 (TRUTH_NOT_EXPR,
757 boolean_type_node,
758 may_be_zero));
759 else
760 niter = fold_build3 (COND_EXPR, TREE_TYPE (niter), may_be_zero,
761 build_int_cst (TREE_TYPE (niter), 0),
762 rewrite_to_non_trapping_overflow (niter));
764 may_be_zero = NULL_TREE;
766 else if (integer_nonzerop (may_be_zero))
768 *number_of_iterationsm1 = build_int_cst (TREE_TYPE (niter), 0);
769 *number_of_iterations = build_int_cst (TREE_TYPE (niter), 1);
770 return cond;
772 else
773 return cond;
776 *assumptions = niter_assumptions;
777 *number_of_iterationsm1 = niter;
779 /* We want the number of loop header executions which is the number
780 of latch executions plus one.
781 ??? For UINT_MAX latch executions this number overflows to zero
782 for loops like do { n++; } while (n != 0); */
783 if (niter && !chrec_contains_undetermined (niter))
784 niter = fold_build2 (PLUS_EXPR, TREE_TYPE (niter), unshare_expr (niter),
785 build_int_cst (TREE_TYPE (niter), 1));
786 *number_of_iterations = niter;
788 return cond;
791 /* Function bb_in_loop_p
793 Used as predicate for dfs order traversal of the loop bbs. */
795 static bool
796 bb_in_loop_p (const_basic_block bb, const void *data)
798 const struct loop *const loop = (const struct loop *)data;
799 if (flow_bb_inside_loop_p (loop, bb))
800 return true;
801 return false;
805 /* Create and initialize a new loop_vec_info struct for LOOP_IN, as well as
806 stmt_vec_info structs for all the stmts in LOOP_IN. */
808 _loop_vec_info::_loop_vec_info (struct loop *loop_in, vec_info_shared *shared)
809 : vec_info (vec_info::loop, init_cost (loop_in), shared),
810 loop (loop_in),
811 bbs (XCNEWVEC (basic_block, loop->num_nodes)),
812 num_itersm1 (NULL_TREE),
813 num_iters (NULL_TREE),
814 num_iters_unchanged (NULL_TREE),
815 num_iters_assumptions (NULL_TREE),
816 th (0),
817 versioning_threshold (0),
818 vectorization_factor (0),
819 max_vectorization_factor (0),
820 mask_skip_niters (NULL_TREE),
821 mask_compare_type (NULL_TREE),
822 unaligned_dr (NULL),
823 peeling_for_alignment (0),
824 ptr_mask (0),
825 ivexpr_map (NULL),
826 slp_unrolling_factor (1),
827 single_scalar_iteration_cost (0),
828 vectorizable (false),
829 can_fully_mask_p (true),
830 fully_masked_p (false),
831 peeling_for_gaps (false),
832 peeling_for_niter (false),
833 operands_swapped (false),
834 no_data_dependencies (false),
835 has_mask_store (false),
836 scalar_loop (NULL),
837 orig_loop_info (NULL)
839 /* CHECKME: We want to visit all BBs before their successors (except for
840 latch blocks, for which this assertion wouldn't hold). In the simple
841 case of the loop forms we allow, a dfs order of the BBs would be the same
842 as reversed postorder traversal, so we are safe. */
844 unsigned int nbbs = dfs_enumerate_from (loop->header, 0, bb_in_loop_p,
845 bbs, loop->num_nodes, loop);
846 gcc_assert (nbbs == loop->num_nodes);
848 for (unsigned int i = 0; i < nbbs; i++)
850 basic_block bb = bbs[i];
851 gimple_stmt_iterator si;
853 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
855 gimple *phi = gsi_stmt (si);
856 gimple_set_uid (phi, 0);
857 add_stmt (phi);
860 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
862 gimple *stmt = gsi_stmt (si);
863 gimple_set_uid (stmt, 0);
864 add_stmt (stmt);
869 /* Free all levels of MASKS. */
871 void
872 release_vec_loop_masks (vec_loop_masks *masks)
874 rgroup_masks *rgm;
875 unsigned int i;
876 FOR_EACH_VEC_ELT (*masks, i, rgm)
877 rgm->masks.release ();
878 masks->release ();
881 /* Free all memory used by the _loop_vec_info, as well as all the
882 stmt_vec_info structs of all the stmts in the loop. */
884 _loop_vec_info::~_loop_vec_info ()
886 int nbbs;
887 gimple_stmt_iterator si;
888 int j;
890 nbbs = loop->num_nodes;
891 for (j = 0; j < nbbs; j++)
893 basic_block bb = bbs[j];
894 for (si = gsi_start_bb (bb); !gsi_end_p (si); )
896 gimple *stmt = gsi_stmt (si);
898 /* We may have broken canonical form by moving a constant
899 into RHS1 of a commutative op. Fix such occurrences. */
900 if (operands_swapped && is_gimple_assign (stmt))
902 enum tree_code code = gimple_assign_rhs_code (stmt);
904 if ((code == PLUS_EXPR
905 || code == POINTER_PLUS_EXPR
906 || code == MULT_EXPR)
907 && CONSTANT_CLASS_P (gimple_assign_rhs1 (stmt)))
908 swap_ssa_operands (stmt,
909 gimple_assign_rhs1_ptr (stmt),
910 gimple_assign_rhs2_ptr (stmt));
911 else if (code == COND_EXPR
912 && CONSTANT_CLASS_P (gimple_assign_rhs2 (stmt)))
914 tree cond_expr = gimple_assign_rhs1 (stmt);
915 enum tree_code cond_code = TREE_CODE (cond_expr);
917 if (TREE_CODE_CLASS (cond_code) == tcc_comparison)
919 bool honor_nans = HONOR_NANS (TREE_OPERAND (cond_expr,
920 0));
921 cond_code = invert_tree_comparison (cond_code,
922 honor_nans);
923 if (cond_code != ERROR_MARK)
925 TREE_SET_CODE (cond_expr, cond_code);
926 swap_ssa_operands (stmt,
927 gimple_assign_rhs2_ptr (stmt),
928 gimple_assign_rhs3_ptr (stmt));
933 gsi_next (&si);
937 free (bbs);
939 release_vec_loop_masks (&masks);
940 delete ivexpr_map;
942 loop->aux = NULL;
945 /* Return an invariant or register for EXPR and emit necessary
946 computations in the LOOP_VINFO loop preheader. */
948 tree
949 cse_and_gimplify_to_preheader (loop_vec_info loop_vinfo, tree expr)
951 if (is_gimple_reg (expr)
952 || is_gimple_min_invariant (expr))
953 return expr;
955 if (! loop_vinfo->ivexpr_map)
956 loop_vinfo->ivexpr_map = new hash_map<tree_operand_hash, tree>;
957 tree &cached = loop_vinfo->ivexpr_map->get_or_insert (expr);
958 if (! cached)
960 gimple_seq stmts = NULL;
961 cached = force_gimple_operand (unshare_expr (expr),
962 &stmts, true, NULL_TREE);
963 if (stmts)
965 edge e = loop_preheader_edge (LOOP_VINFO_LOOP (loop_vinfo));
966 gsi_insert_seq_on_edge_immediate (e, stmts);
969 return cached;
972 /* Return true if we can use CMP_TYPE as the comparison type to produce
973 all masks required to mask LOOP_VINFO. */
975 static bool
976 can_produce_all_loop_masks_p (loop_vec_info loop_vinfo, tree cmp_type)
978 rgroup_masks *rgm;
979 unsigned int i;
980 FOR_EACH_VEC_ELT (LOOP_VINFO_MASKS (loop_vinfo), i, rgm)
981 if (rgm->mask_type != NULL_TREE
982 && !direct_internal_fn_supported_p (IFN_WHILE_ULT,
983 cmp_type, rgm->mask_type,
984 OPTIMIZE_FOR_SPEED))
985 return false;
986 return true;
989 /* Calculate the maximum number of scalars per iteration for every
990 rgroup in LOOP_VINFO. */
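/* For example (a hedged illustration): if one rgroup controls statements
   that handle a single scalar per loop iteration while another controls a
   group of two interleaved accesses (two scalars per iteration), the result
   is 2, and the caller scales its iteration-count bound by that factor.  */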
992 static unsigned int
993 vect_get_max_nscalars_per_iter (loop_vec_info loop_vinfo)
995 unsigned int res = 1;
996 unsigned int i;
997 rgroup_masks *rgm;
998 FOR_EACH_VEC_ELT (LOOP_VINFO_MASKS (loop_vinfo), i, rgm)
999 res = MAX (res, rgm->max_nscalars_per_iter);
1000 return res;
1003 /* Each statement in LOOP_VINFO can be masked where necessary. Check
1004 whether we can actually generate the masks required. Return true if so,
1005 storing the type of the scalar IV in LOOP_VINFO_MASK_COMPARE_TYPE. */
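/* Conceptually (a sketch of the idea, not code generated by this function),
   a fully-masked loop executes as:

        for (i = 0; i < n; i += VF)
          {
            mask = WHILE_ULT (i, n);    lane k is active iff i + k < n
            ... loads, stores and live operations predicated on mask ...
          }

   so the final, partial iteration needs no scalar epilogue.  The check
   below looks for an integer comparison type wide enough to hold the
   iteration counts involved and for which the target supports
   IFN_WHILE_ULT for every mask type the loop needs.  */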
1007 static bool
1008 vect_verify_full_masking (loop_vec_info loop_vinfo)
1010 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1011 unsigned int min_ni_width;
1013 /* Use a normal loop if there are no statements that need masking.
1014 This only happens in rare degenerate cases: it means that the loop
1015 has no loads, no stores, and no live-out values. */
1016 if (LOOP_VINFO_MASKS (loop_vinfo).is_empty ())
1017 return false;
1019 /* Get the maximum number of iterations that is representable
1020 in the counter type. */
1021 tree ni_type = TREE_TYPE (LOOP_VINFO_NITERSM1 (loop_vinfo));
1022 widest_int max_ni = wi::to_widest (TYPE_MAX_VALUE (ni_type)) + 1;
1024 /* Get a more refined estimate for the number of iterations. */
1025 widest_int max_back_edges;
1026 if (max_loop_iterations (loop, &max_back_edges))
1027 max_ni = wi::smin (max_ni, max_back_edges + 1);
1029 /* Account for rgroup masks, in which each bit is replicated N times. */
1030 max_ni *= vect_get_max_nscalars_per_iter (loop_vinfo);
1032 /* Work out how many bits we need to represent the limit. */
1033 min_ni_width = wi::min_precision (max_ni, UNSIGNED);
1035 /* Find a scalar mode for which WHILE_ULT is supported. */
1036 opt_scalar_int_mode cmp_mode_iter;
1037 tree cmp_type = NULL_TREE;
1038 FOR_EACH_MODE_IN_CLASS (cmp_mode_iter, MODE_INT)
1040 unsigned int cmp_bits = GET_MODE_BITSIZE (cmp_mode_iter.require ());
1041 if (cmp_bits >= min_ni_width
1042 && targetm.scalar_mode_supported_p (cmp_mode_iter.require ()))
1044 tree this_type = build_nonstandard_integer_type (cmp_bits, true);
1045 if (this_type
1046 && can_produce_all_loop_masks_p (loop_vinfo, this_type))
1048 /* Although we could stop as soon as we find a valid mode,
1049 it's often better to continue until we hit Pmode, since the
1050 operands to the WHILE are more likely to be reusable in
1051 address calculations. */
1052 cmp_type = this_type;
1053 if (cmp_bits >= GET_MODE_BITSIZE (Pmode))
1054 break;
1059 if (!cmp_type)
1060 return false;
1062 LOOP_VINFO_MASK_COMPARE_TYPE (loop_vinfo) = cmp_type;
1063 return true;
1066 /* Calculate the cost of one scalar iteration of the loop. */
1067 static void
1068 vect_compute_single_scalar_iteration_cost (loop_vec_info loop_vinfo)
1070 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1071 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
1072 int nbbs = loop->num_nodes, factor;
1073 int innerloop_iters, i;
1075 DUMP_VECT_SCOPE ("vect_compute_single_scalar_iteration_cost");
1077 /* Gather costs for statements in the scalar loop. */
1079 /* FORNOW. */
1080 innerloop_iters = 1;
1081 if (loop->inner)
1082 innerloop_iters = 50; /* FIXME */
1084 for (i = 0; i < nbbs; i++)
1086 gimple_stmt_iterator si;
1087 basic_block bb = bbs[i];
1089 if (bb->loop_father == loop->inner)
1090 factor = innerloop_iters;
1091 else
1092 factor = 1;
1094 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
1096 gimple *stmt = gsi_stmt (si);
1097 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (stmt);
1099 if (!is_gimple_assign (stmt) && !is_gimple_call (stmt))
1100 continue;
1102 /* Skip stmts that are not vectorized inside the loop. */
1103 if (stmt_info
1104 && !STMT_VINFO_RELEVANT_P (stmt_info)
1105 && (!STMT_VINFO_LIVE_P (stmt_info)
1106 || !VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
1107 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
1108 continue;
1110 vect_cost_for_stmt kind;
1111 if (STMT_VINFO_DATA_REF (stmt_info))
1113 if (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)))
1114 kind = scalar_load;
1115 else
1116 kind = scalar_store;
1118 else
1119 kind = scalar_stmt;
1121 record_stmt_cost (&LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
1122 factor, kind, stmt_info, 0, vect_prologue);
1126 /* Now accumulate cost. */
1127 void *target_cost_data = init_cost (loop);
1128 stmt_info_for_cost *si;
1129 int j;
1130 FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
1131 j, si)
1132 (void) add_stmt_cost (target_cost_data, si->count,
1133 si->kind, si->stmt_info, si->misalign,
1134 vect_body);
1135 unsigned dummy, body_cost = 0;
1136 finish_cost (target_cost_data, &dummy, &body_cost, &dummy);
1137 destroy_cost_data (target_cost_data);
1138 LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST (loop_vinfo) = body_cost;
1142 /* Function vect_analyze_loop_form_1.
1144 Verify that certain CFG restrictions hold, including:
1145 - the loop has a pre-header
1146 - the loop has a single entry and exit
1147 - the loop exit condition is simple enough
1148 - the number of iterations can be analyzed, i.e, a countable loop. The
1149 niter could be analyzed under some assumptions. */
1151 opt_result
1152 vect_analyze_loop_form_1 (struct loop *loop, gcond **loop_cond,
1153 tree *assumptions, tree *number_of_iterationsm1,
1154 tree *number_of_iterations, gcond **inner_loop_cond)
1156 DUMP_VECT_SCOPE ("vect_analyze_loop_form");
1158 /* Different restrictions apply when we are considering an inner-most loop,
1159 vs. an outer (nested) loop.
1160 (FORNOW. May want to relax some of these restrictions in the future). */
1162 if (!loop->inner)
1164 /* Inner-most loop. We currently require that the number of BBs is
1165 exactly 2 (the header and latch). Vectorizable inner-most loops
1166 look like this:
1168 (pre-header)
1170 header <--------+
1171 | | |
1172 | +--> latch --+
1174 (exit-bb) */
1176 if (loop->num_nodes != 2)
1177 return opt_result::failure_at (vect_location,
1178 "not vectorized:"
1179 " control flow in loop.\n");
1181 if (empty_block_p (loop->header))
1182 return opt_result::failure_at (vect_location,
1183 "not vectorized: empty loop.\n");
1185 else
1187 struct loop *innerloop = loop->inner;
1188 edge entryedge;
1190 /* Nested loop. We currently require that the loop is doubly-nested,
1191 contains a single inner loop, and the number of BBs is exactly 5.
1192 Vectorizable outer-loops look like this:
1194 (pre-header)
1196 header <---+
1198 inner-loop |
1200 tail ------+
1202 (exit-bb)
1204 The inner-loop has the properties expected of inner-most loops
1205 as described above. */
1207 if ((loop->inner)->inner || (loop->inner)->next)
1208 return opt_result::failure_at (vect_location,
1209 "not vectorized:"
1210 " multiple nested loops.\n");
1212 if (loop->num_nodes != 5)
1213 return opt_result::failure_at (vect_location,
1214 "not vectorized:"
1215 " control flow in loop.\n");
1217 entryedge = loop_preheader_edge (innerloop);
1218 if (entryedge->src != loop->header
1219 || !single_exit (innerloop)
1220 || single_exit (innerloop)->dest != EDGE_PRED (loop->latch, 0)->src)
1221 return opt_result::failure_at (vect_location,
1222 "not vectorized:"
1223 " unsupported outerloop form.\n");
1225 /* Analyze the inner-loop. */
1226 tree inner_niterm1, inner_niter, inner_assumptions;
1227 opt_result res
1228 = vect_analyze_loop_form_1 (loop->inner, inner_loop_cond,
1229 &inner_assumptions, &inner_niterm1,
1230 &inner_niter, NULL);
1231 if (!res)
1233 if (dump_enabled_p ())
1234 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1235 "not vectorized: Bad inner loop.\n");
1236 return res;
1239 /* Don't support analyzing niter under assumptions for inner
1240 loop. */
1241 if (!integer_onep (inner_assumptions))
1242 return opt_result::failure_at (vect_location,
1243 "not vectorized: Bad inner loop.\n");
1245 if (!expr_invariant_in_loop_p (loop, inner_niter))
1246 return opt_result::failure_at (vect_location,
1247 "not vectorized: inner-loop count not"
1248 " invariant.\n");
1250 if (dump_enabled_p ())
1251 dump_printf_loc (MSG_NOTE, vect_location,
1252 "Considering outer-loop vectorization.\n");
1255 if (!single_exit (loop))
1256 return opt_result::failure_at (vect_location,
1257 "not vectorized: multiple exits.\n");
1258 if (EDGE_COUNT (loop->header->preds) != 2)
1259 return opt_result::failure_at (vect_location,
1260 "not vectorized:"
1261 " too many incoming edges.\n");
1263 /* We assume that the loop exit condition is at the end of the loop. i.e,
1264 that the loop is represented as a do-while (with a proper if-guard
1265 before the loop if needed), where the loop header contains all the
1266 executable statements, and the latch is empty. */
1267 if (!empty_block_p (loop->latch)
1268 || !gimple_seq_empty_p (phi_nodes (loop->latch)))
1269 return opt_result::failure_at (vect_location,
1270 "not vectorized: latch block not empty.\n");
1272 /* Make sure the exit is not abnormal. */
1273 edge e = single_exit (loop);
1274 if (e->flags & EDGE_ABNORMAL)
1275 return opt_result::failure_at (vect_location,
1276 "not vectorized:"
1277 " abnormal loop exit edge.\n");
1279 *loop_cond = vect_get_loop_niters (loop, assumptions, number_of_iterations,
1280 number_of_iterationsm1);
1281 if (!*loop_cond)
1282 return opt_result::failure_at
1283 (vect_location,
1284 "not vectorized: complicated exit condition.\n");
1286 if (integer_zerop (*assumptions)
1287 || !*number_of_iterations
1288 || chrec_contains_undetermined (*number_of_iterations))
1289 return opt_result::failure_at
1290 (*loop_cond,
1291 "not vectorized: number of iterations cannot be computed.\n");
1293 if (integer_zerop (*number_of_iterations))
1294 return opt_result::failure_at
1295 (*loop_cond,
1296 "not vectorized: number of iterations = 0.\n");
1298 return opt_result::success ();
1301 /* Analyze LOOP form and return a loop_vec_info if it is of suitable form. */
1303 opt_loop_vec_info
1304 vect_analyze_loop_form (struct loop *loop, vec_info_shared *shared)
1306 tree assumptions, number_of_iterations, number_of_iterationsm1;
1307 gcond *loop_cond, *inner_loop_cond = NULL;
1309 opt_result res
1310 = vect_analyze_loop_form_1 (loop, &loop_cond,
1311 &assumptions, &number_of_iterationsm1,
1312 &number_of_iterations, &inner_loop_cond);
1313 if (!res)
1314 return opt_loop_vec_info::propagate_failure (res);
1316 loop_vec_info loop_vinfo = new _loop_vec_info (loop, shared);
1317 LOOP_VINFO_NITERSM1 (loop_vinfo) = number_of_iterationsm1;
1318 LOOP_VINFO_NITERS (loop_vinfo) = number_of_iterations;
1319 LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = number_of_iterations;
1320 if (!integer_onep (assumptions))
1322 /* We consider vectorizing this loop by versioning it under
1323 some assumptions. In order to do this, we need to clear
1324 existing information computed by scev and niter analyzer. */
1325 scev_reset_htab ();
1326 free_numbers_of_iterations_estimates (loop);
1327 /* Also set flag for this loop so that following scev and niter
1328 analysis are done under the assumptions. */
1329 loop_constraint_set (loop, LOOP_C_FINITE);
1330 /* Also record the assumptions for versioning. */
1331 LOOP_VINFO_NITERS_ASSUMPTIONS (loop_vinfo) = assumptions;
1334 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
1336 if (dump_enabled_p ())
1338 dump_printf_loc (MSG_NOTE, vect_location,
1339 "Symbolic number of iterations is ");
1340 dump_generic_expr (MSG_NOTE, TDF_DETAILS, number_of_iterations);
1341 dump_printf (MSG_NOTE, "\n");
1345 stmt_vec_info loop_cond_info = loop_vinfo->lookup_stmt (loop_cond);
1346 STMT_VINFO_TYPE (loop_cond_info) = loop_exit_ctrl_vec_info_type;
1347 if (inner_loop_cond)
1349 stmt_vec_info inner_loop_cond_info
1350 = loop_vinfo->lookup_stmt (inner_loop_cond);
1351 STMT_VINFO_TYPE (inner_loop_cond_info) = loop_exit_ctrl_vec_info_type;
1354 gcc_assert (!loop->aux);
1355 loop->aux = loop_vinfo;
1356 return opt_loop_vec_info::success (loop_vinfo);
1361 /* Scan the loop stmts and dependent on whether there are any (non-)SLP
1362 statements update the vectorization factor. */
1364 static void
1365 vect_update_vf_for_slp (loop_vec_info loop_vinfo)
1367 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1368 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
1369 int nbbs = loop->num_nodes;
1370 poly_uint64 vectorization_factor;
1371 int i;
1373 DUMP_VECT_SCOPE ("vect_update_vf_for_slp");
1375 vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1376 gcc_assert (known_ne (vectorization_factor, 0U));
1378 /* If all the stmts in the loop can be SLPed, we perform only SLP, and
1379 vectorization factor of the loop is the unrolling factor required by
1380 the SLP instances. If that unrolling factor is 1, we say, that we
1381 perform pure SLP on loop - cross iteration parallelism is not
1382 exploited. */
1383 bool only_slp_in_loop = true;
1384 for (i = 0; i < nbbs; i++)
1386 basic_block bb = bbs[i];
1387 for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
1388 gsi_next (&si))
1390 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
1391 stmt_info = vect_stmt_to_vectorize (stmt_info);
1392 if ((STMT_VINFO_RELEVANT_P (stmt_info)
1393 || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
1394 && !PURE_SLP_STMT (stmt_info))
1395 /* STMT needs both SLP and loop-based vectorization. */
1396 only_slp_in_loop = false;
1400 if (only_slp_in_loop)
1402 dump_printf_loc (MSG_NOTE, vect_location,
1403 "Loop contains only SLP stmts\n");
1404 vectorization_factor = LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo);
1406 else
1408 dump_printf_loc (MSG_NOTE, vect_location,
1409 "Loop contains SLP and non-SLP stmts\n");
1410 /* Both the vectorization factor and unroll factor have the form
1411 current_vector_size * X for some rational X, so they must have
1412 a common multiple. */
1413 vectorization_factor
1414 = force_common_multiple (vectorization_factor,
1415 LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo));
1418 LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;
1419 if (dump_enabled_p ())
1421 dump_printf_loc (MSG_NOTE, vect_location,
1422 "Updating vectorization factor to ");
1423 dump_dec (MSG_NOTE, vectorization_factor);
1424 dump_printf (MSG_NOTE, ".\n");
1428 /* Return true if STMT_INFO describes a double reduction phi and if
1429 the other phi in the reduction is also relevant for vectorization.
1430 This rejects cases such as:
1432 outer1:
1433 x_1 = PHI <x_3(outer2), ...>;
1436 inner:
1437 x_2 = ...;
1440 outer2:
1441 x_3 = PHI <x_2(inner)>;
1443 if nothing in x_2 or elsewhere makes x_1 relevant. */
1445 static bool
1446 vect_active_double_reduction_p (stmt_vec_info stmt_info)
1448 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_double_reduction_def)
1449 return false;
1451 return STMT_VINFO_RELEVANT_P (STMT_VINFO_REDUC_DEF (stmt_info));
1454 /* Function vect_analyze_loop_operations.
1456 Scan the loop stmts and make sure they are all vectorizable. */
1458 static opt_result
1459 vect_analyze_loop_operations (loop_vec_info loop_vinfo)
1461 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1462 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
1463 int nbbs = loop->num_nodes;
1464 int i;
1465 stmt_vec_info stmt_info;
1466 bool need_to_vectorize = false;
1467 bool ok;
1469 DUMP_VECT_SCOPE ("vect_analyze_loop_operations");
1471 stmt_vector_for_cost cost_vec;
1472 cost_vec.create (2);
1474 for (i = 0; i < nbbs; i++)
1476 basic_block bb = bbs[i];
1478 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
1479 gsi_next (&si))
1481 gphi *phi = si.phi ();
1482 ok = true;
1484 stmt_info = loop_vinfo->lookup_stmt (phi);
1485 if (dump_enabled_p ())
1486 dump_printf_loc (MSG_NOTE, vect_location, "examining phi: %G", phi);
1487 if (virtual_operand_p (gimple_phi_result (phi)))
1488 continue;
1490 /* Inner-loop loop-closed exit phi in outer-loop vectorization
1491 (i.e., a phi in the tail of the outer-loop). */
1492 if (! is_loop_header_bb_p (bb))
1494 /* FORNOW: we currently don't support the case that these phis
1495 are not used in the outerloop (unless it is double reduction,
1496 i.e., this phi is a vect_reduction_def), because this case
1497 requires us to actually do something here. */
1498 if (STMT_VINFO_LIVE_P (stmt_info)
1499 && !vect_active_double_reduction_p (stmt_info))
1500 return opt_result::failure_at (phi,
1501 "Unsupported loop-closed phi"
1502 " in outer-loop.\n");
1504 /* If PHI is used in the outer loop, we check that its operand
1505 is defined in the inner loop. */
1506 if (STMT_VINFO_RELEVANT_P (stmt_info))
1508 tree phi_op;
1510 if (gimple_phi_num_args (phi) != 1)
1511 return opt_result::failure_at (phi, "unsupported phi");
1513 phi_op = PHI_ARG_DEF (phi, 0);
1514 stmt_vec_info op_def_info = loop_vinfo->lookup_def (phi_op);
1515 if (!op_def_info)
1516 return opt_result::failure_at (phi, "unsupported phi");
1518 if (STMT_VINFO_RELEVANT (op_def_info) != vect_used_in_outer
1519 && (STMT_VINFO_RELEVANT (op_def_info)
1520 != vect_used_in_outer_by_reduction))
1521 return opt_result::failure_at (phi, "unsupported phi");
1524 continue;
1527 gcc_assert (stmt_info);
1529 if ((STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope
1530 || STMT_VINFO_LIVE_P (stmt_info))
1531 && STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def)
1532 /* A scalar-dependence cycle that we don't support. */
1533 return opt_result::failure_at (phi,
1534 "not vectorized:"
1535 " scalar dependence cycle.\n");
1537 if (STMT_VINFO_RELEVANT_P (stmt_info))
1539 need_to_vectorize = true;
1540 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def
1541 && ! PURE_SLP_STMT (stmt_info))
1542 ok = vectorizable_induction (stmt_info, NULL, NULL, NULL,
1543 &cost_vec);
1544 else if ((STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def
1545 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle)
1546 && ! PURE_SLP_STMT (stmt_info))
1547 ok = vectorizable_reduction (stmt_info, NULL, NULL, NULL, NULL,
1548 &cost_vec);
1551 /* SLP PHIs are tested by vect_slp_analyze_node_operations. */
1552 if (ok
1553 && STMT_VINFO_LIVE_P (stmt_info)
1554 && !PURE_SLP_STMT (stmt_info))
1555 ok = vectorizable_live_operation (stmt_info, NULL, NULL, -1, NULL,
1556 &cost_vec);
1558 if (!ok)
1559 return opt_result::failure_at (phi,
1560 "not vectorized: relevant phi not "
1561 "supported: %G",
1562 static_cast <gimple *> (phi));
1565 for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
1566 gsi_next (&si))
1568 gimple *stmt = gsi_stmt (si);
1569 if (!gimple_clobber_p (stmt))
1571 opt_result res
1572 = vect_analyze_stmt (loop_vinfo->lookup_stmt (stmt),
1573 &need_to_vectorize,
1574 NULL, NULL, &cost_vec);
1575 if (!res)
1576 return res;
1579 } /* bbs */
1581 add_stmt_costs (loop_vinfo->target_cost_data, &cost_vec);
1582 cost_vec.release ();
1584 /* All operations in the loop are either irrelevant (deal with loop
1585 control, or dead), or only used outside the loop and can be moved
1586 out of the loop (e.g. invariants, inductions). The loop can be
1587 optimized away by scalar optimizations. We're better off not
1588 touching this loop. */
1589 if (!need_to_vectorize)
1591 if (dump_enabled_p ())
1592 dump_printf_loc (MSG_NOTE, vect_location,
1593 "All the computation can be taken out of the loop.\n");
1594 return opt_result::failure_at
1595 (vect_location,
1596 "not vectorized: redundant loop. no profit to vectorize.\n");
1599 return opt_result::success ();
1602 /* Analyze the cost of the loop described by LOOP_VINFO. Decide if it
1603 is worthwhile to vectorize. Return 1 if definitely yes, 0 if
1604 definitely no, or -1 if it's worth retrying. */
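/* A hedged numerical illustration of the checks below: with an assumed
   VF of 4, a PARAM_MIN_VECT_LOOP_BOUND of 0 and a cost-model answer of
   min_profitable_iters == 12, the threshold becomes MAX (0 * 4, 12) == 12;
   a loop known to run only 10 iterations is then rejected (return 0),
   while an unknown trip count falls through to the estimated-iteration
   checks, which may return -1 so that the caller can retry the analysis.  */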
1606 static int
1607 vect_analyze_loop_costing (loop_vec_info loop_vinfo)
1609 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1610 unsigned int assumed_vf = vect_vf_for_cost (loop_vinfo);
1612 /* Only fully-masked loops can have iteration counts less than the
1613 vectorization factor. */
1614 if (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
1616 HOST_WIDE_INT max_niter;
1618 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
1619 max_niter = LOOP_VINFO_INT_NITERS (loop_vinfo);
1620 else
1621 max_niter = max_stmt_executions_int (loop);
1623 if (max_niter != -1
1624 && (unsigned HOST_WIDE_INT) max_niter < assumed_vf)
1626 if (dump_enabled_p ())
1627 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1628 "not vectorized: iteration count smaller than "
1629 "vectorization factor.\n");
1630 return 0;
1634 int min_profitable_iters, min_profitable_estimate;
1635 vect_estimate_min_profitable_iters (loop_vinfo, &min_profitable_iters,
1636 &min_profitable_estimate);
1638 if (min_profitable_iters < 0)
1640 if (dump_enabled_p ())
1641 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1642 "not vectorized: vectorization not profitable.\n");
1643 if (dump_enabled_p ())
1644 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1645 "not vectorized: vector version will never be "
1646 "profitable.\n");
1647 return -1;
1650 int min_scalar_loop_bound = (PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND)
1651 * assumed_vf);
1653 /* Use the cost model only if it is more conservative than user specified
1654 threshold. */
1655 unsigned int th = (unsigned) MAX (min_scalar_loop_bound,
1656 min_profitable_iters);
1658 LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo) = th;
1660 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
1661 && LOOP_VINFO_INT_NITERS (loop_vinfo) < th)
1663 if (dump_enabled_p ())
1664 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1665 "not vectorized: vectorization not profitable.\n");
1666 if (dump_enabled_p ())
1667 dump_printf_loc (MSG_NOTE, vect_location,
1668 "not vectorized: iteration count smaller than user "
1669 "specified loop bound parameter or minimum profitable "
1670 "iterations (whichever is more conservative).\n");
1671 return 0;
1674 HOST_WIDE_INT estimated_niter = estimated_stmt_executions_int (loop);
1675 if (estimated_niter == -1)
1676 estimated_niter = likely_max_stmt_executions_int (loop);
1677 if (estimated_niter != -1
1678 && ((unsigned HOST_WIDE_INT) estimated_niter
1679 < MAX (th, (unsigned) min_profitable_estimate)))
1681 if (dump_enabled_p ())
1682 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1683 "not vectorized: estimated iteration count too "
1684 "small.\n");
1685 if (dump_enabled_p ())
1686 dump_printf_loc (MSG_NOTE, vect_location,
1687 "not vectorized: estimated iteration count smaller "
1688 "than specified loop bound parameter or minimum "
1689 "profitable iterations (whichever is more "
1690 "conservative).\n");
1691 return -1;
1694 return 1;
1697 static opt_result
1698 vect_get_datarefs_in_loop (loop_p loop, basic_block *bbs,
1699 vec<data_reference_p> *datarefs,
1700 unsigned int *n_stmts)
1702 *n_stmts = 0;
1703 for (unsigned i = 0; i < loop->num_nodes; i++)
1704 for (gimple_stmt_iterator gsi = gsi_start_bb (bbs[i]);
1705 !gsi_end_p (gsi); gsi_next (&gsi))
1707 gimple *stmt = gsi_stmt (gsi);
1708 if (is_gimple_debug (stmt))
1709 continue;
1710 ++(*n_stmts);
1711 opt_result res = vect_find_stmt_data_reference (loop, stmt, datarefs);
1712 if (!res)
1714 if (is_gimple_call (stmt) && loop->safelen)
1716 tree fndecl = gimple_call_fndecl (stmt), op;
1717 if (fndecl != NULL_TREE)
1719 cgraph_node *node = cgraph_node::get (fndecl);
1720 if (node != NULL && node->simd_clones != NULL)
1722 unsigned int j, n = gimple_call_num_args (stmt);
1723 for (j = 0; j < n; j++)
1725 op = gimple_call_arg (stmt, j);
1726 if (DECL_P (op)
1727 || (REFERENCE_CLASS_P (op)
1728 && get_base_address (op)))
1729 break;
1731 op = gimple_call_lhs (stmt);
1732 /* Ignore #pragma omp declare simd functions
1733 if they don't have data references in the
1734 call stmt itself. */
1735 if (j == n
1736 && !(op
1737 && (DECL_P (op)
1738 || (REFERENCE_CLASS_P (op)
1739 && get_base_address (op)))))
1740 continue;
1744 return res;
1746 /* If dependence analysis will give up due to the limit on the
1747 number of datarefs stop here and fail fatally. */
1748 if (datarefs->length ()
1749 > (unsigned)PARAM_VALUE (PARAM_LOOP_MAX_DATAREFS_FOR_DATADEPS))
1750 return opt_result::failure_at (stmt, "exceeded param "
1751 "loop-max-datarefs-for-datadeps\n");
1753 return opt_result::success ();
1756 /* Function vect_analyze_loop_2.
1758 Apply a set of analyses on LOOP, and create a loop_vec_info struct
1759 for it. The different analyses will record information in the
1760 loop_vec_info struct. */
1761 static opt_result
1762 vect_analyze_loop_2 (loop_vec_info loop_vinfo, bool &fatal, unsigned *n_stmts)
1764 opt_result ok = opt_result::success ();
1765 int res;
1766 unsigned int max_vf = MAX_VECTORIZATION_FACTOR;
1767 poly_uint64 min_vf = 2;
1769 /* The first group of checks is independent of the vector size. */
1770 fatal = true;
1772 /* Find all data references in the loop (which correspond to vdefs/vuses)
1773 and analyze their evolution in the loop. */
1775 loop_p loop = LOOP_VINFO_LOOP (loop_vinfo);
1777 /* Gather the data references and count stmts in the loop. */
1778 if (!LOOP_VINFO_DATAREFS (loop_vinfo).exists ())
1780 opt_result res
1781 = vect_get_datarefs_in_loop (loop, LOOP_VINFO_BBS (loop_vinfo),
1782 &LOOP_VINFO_DATAREFS (loop_vinfo),
1783 n_stmts);
1784 if (!res)
1786 if (dump_enabled_p ())
1787 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1788 "not vectorized: loop contains function "
1789 "calls or data references that cannot "
1790 "be analyzed\n");
1791 return res;
1793 loop_vinfo->shared->save_datarefs ();
1795 else
1796 loop_vinfo->shared->check_datarefs ();
1798 /* Analyze the data references and also adjust the minimal
1799 vectorization factor according to the loads and stores. */
1801 ok = vect_analyze_data_refs (loop_vinfo, &min_vf);
1802 if (!ok)
1804 if (dump_enabled_p ())
1805 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1806 "bad data references.\n");
1807 return ok;
1810 /* Classify all cross-iteration scalar data-flow cycles.
1811 Cross-iteration cycles caused by virtual phis are analyzed separately. */
1812 vect_analyze_scalar_cycles (loop_vinfo);
1814 vect_pattern_recog (loop_vinfo);
1816 vect_fixup_scalar_cycles_with_patterns (loop_vinfo);
1818 /* Analyze the access patterns of the data-refs in the loop (consecutive,
1819 complex, etc.). FORNOW: Only handle consecutive access pattern. */
1821 ok = vect_analyze_data_ref_accesses (loop_vinfo);
1822 if (!ok)
1824 if (dump_enabled_p ())
1825 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1826 "bad data access.\n");
1827 return ok;
1830 /* Data-flow analysis to detect stmts that do not need to be vectorized. */
1832 ok = vect_mark_stmts_to_be_vectorized (loop_vinfo);
1833 if (!ok)
1835 if (dump_enabled_p ())
1836 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1837 "unexpected pattern.\n");
1838 return ok;
1841 /* Unlike the checks above, the rest of the analysis below depends on the vector size in some way, so failures from here on are no longer fatal. */
1842 fatal = false;
1844 /* Analyze data dependences between the data-refs in the loop
1845 and adjust the maximum vectorization factor according to
1846 the dependences.
1847 FORNOW: fail at the first data dependence that we encounter. */
1849 ok = vect_analyze_data_ref_dependences (loop_vinfo, &max_vf);
1850 if (!ok)
1852 if (dump_enabled_p ())
1853 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1854 "bad data dependence.\n");
1855 return ok;
1857 if (max_vf != MAX_VECTORIZATION_FACTOR
1858 && maybe_lt (max_vf, min_vf))
1859 return opt_result::failure_at (vect_location, "bad data dependence.\n");
1860 LOOP_VINFO_MAX_VECT_FACTOR (loop_vinfo) = max_vf;
1862 ok = vect_determine_vectorization_factor (loop_vinfo);
1863 if (!ok)
1865 if (dump_enabled_p ())
1866 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1867 "can't determine vectorization factor.\n");
1868 return ok;
1870 if (max_vf != MAX_VECTORIZATION_FACTOR
1871 && maybe_lt (max_vf, LOOP_VINFO_VECT_FACTOR (loop_vinfo)))
1872 return opt_result::failure_at (vect_location, "bad data dependence.\n");
1874 /* Compute the scalar iteration cost. */
1875 vect_compute_single_scalar_iteration_cost (loop_vinfo);
1877 poly_uint64 saved_vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1878 unsigned th;
1880 /* Check the SLP opportunities in the loop, analyze and build SLP trees. */
1881 ok = vect_analyze_slp (loop_vinfo, *n_stmts);
1882 if (!ok)
1883 return ok;
1885 /* If there are any SLP instances mark them as pure_slp. */
1886 bool slp = vect_make_slp_decision (loop_vinfo);
1887 if (slp)
1889 /* Find stmts that need to be both vectorized and SLPed. */
1890 vect_detect_hybrid_slp (loop_vinfo);
1892 /* Update the vectorization factor based on the SLP decision. */
1893 vect_update_vf_for_slp (loop_vinfo);
1896 bool saved_can_fully_mask_p = LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo);
1898 /* We don't expect to have to roll back to anything other than an empty
1899 set of rgroups. */
1900 gcc_assert (LOOP_VINFO_MASKS (loop_vinfo).is_empty ());
1902 /* This is the point where we can re-start analysis with SLP forced off. */
1903 start_over:
1905 /* Now the vectorization factor is final. */
1906 poly_uint64 vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1907 gcc_assert (known_ne (vectorization_factor, 0U));
1909 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) && dump_enabled_p ())
1911 dump_printf_loc (MSG_NOTE, vect_location,
1912 "vectorization_factor = ");
1913 dump_dec (MSG_NOTE, vectorization_factor);
1914 dump_printf (MSG_NOTE, ", niters = %wd\n",
1915 LOOP_VINFO_INT_NITERS (loop_vinfo));
1918 HOST_WIDE_INT max_niter
1919 = likely_max_stmt_executions_int (LOOP_VINFO_LOOP (loop_vinfo));
1921 /* Analyze the alignment of the data-refs in the loop.
1922 Fail if a data reference is found that cannot be vectorized. */
1924 ok = vect_analyze_data_refs_alignment (loop_vinfo);
1925 if (!ok)
1927 if (dump_enabled_p ())
1928 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1929 "bad data alignment.\n");
1930 return ok;
1933 /* Prune the list of ddrs to be tested at run-time by versioning for alias.
1934 It is important to call pruning after vect_analyze_data_ref_accesses,
1935 since we use grouping information gathered by interleaving analysis. */
1936 ok = vect_prune_runtime_alias_test_list (loop_vinfo);
1937 if (!ok)
1938 return ok;
1940 /* Do not invoke vect_enhance_data_refs_alignment for epilogue
1941 vectorization, since we do not want to add extra peeling or
1942 add versioning for alignment. */
1943 if (!LOOP_VINFO_EPILOGUE_P (loop_vinfo))
1944 /* This pass will decide on using loop versioning and/or loop peeling in
1945 order to enhance the alignment of data references in the loop. */
1946 ok = vect_enhance_data_refs_alignment (loop_vinfo);
1947 else
1948 ok = vect_verify_datarefs_alignment (loop_vinfo);
1949 if (!ok)
1950 return ok;
1952 if (slp)
1954 /* Analyze operations in the SLP instances. Note this may
1955 remove unsupported SLP instances which makes the above
1956 SLP kind detection invalid. */
1957 unsigned old_size = LOOP_VINFO_SLP_INSTANCES (loop_vinfo).length ();
1958 vect_slp_analyze_operations (loop_vinfo);
1959 if (LOOP_VINFO_SLP_INSTANCES (loop_vinfo).length () != old_size)
1961 ok = opt_result::failure_at (vect_location,
1962 "unsupported SLP instances\n");
1963 goto again;
1967 /* Scan all the remaining operations in the loop that are not subject
1968 to SLP and make sure they are vectorizable. */
1969 ok = vect_analyze_loop_operations (loop_vinfo);
1970 if (!ok)
1972 if (dump_enabled_p ())
1973 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1974 "bad operation or unsupported loop bound.\n");
1975 return ok;
1978 /* Decide whether to use a fully-masked loop for this vectorization
1979 factor. */
1980 LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
1981 = (LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo)
1982 && vect_verify_full_masking (loop_vinfo));
1983 if (dump_enabled_p ())
1985 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
1986 dump_printf_loc (MSG_NOTE, vect_location,
1987 "using a fully-masked loop.\n");
1988 else
1989 dump_printf_loc (MSG_NOTE, vect_location,
1990 "not using a fully-masked loop.\n");
1993 /* If epilog loop is required because of data accesses with gaps,
1994 one additional iteration needs to be peeled. Check if there are
1995 enough iterations for vectorization. */
1996 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
1997 && LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
1998 && !LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
2000 poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2001 tree scalar_niters = LOOP_VINFO_NITERSM1 (loop_vinfo);
2003 if (known_lt (wi::to_widest (scalar_niters), vf))
2004 return opt_result::failure_at (vect_location,
2005 "loop does not have enough iterations to"
2006 " support peeling for gaps.\n");
2009 /* Check that the costings of the loop make vectorizing worthwhile. */
2010 res = vect_analyze_loop_costing (loop_vinfo);
2011 if (res < 0)
2013 ok = opt_result::failure_at (vect_location,
2014 "Loop costings may not be worthwhile.\n");
2015 goto again;
2017 if (!res)
2018 return opt_result::failure_at (vect_location,
2019 "Loop costings not worthwhile.\n");
2021 /* Decide whether we need to create an epilogue loop to handle
2022 remaining scalar iterations. */
2023 th = LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo);
2025 unsigned HOST_WIDE_INT const_vf;
2026 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
2027 /* The main loop handles all iterations. */
2028 LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = false;
2029 else if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
2030 && LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) >= 0)
2032 /* Work out the (constant) number of iterations that need to be
2033 peeled for reasons other than niters. */
2034 unsigned int peel_niter = LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
2035 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo))
2036 peel_niter += 1;
2037 if (!multiple_p (LOOP_VINFO_INT_NITERS (loop_vinfo) - peel_niter,
2038 LOOP_VINFO_VECT_FACTOR (loop_vinfo)))
2039 LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = true;
2041 else if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo)
2042 /* ??? When peeling for gaps but not alignment, we could
2043 try to check whether the (variable) niters is known to be
2044 VF * N + 1. That's something of a niche case though. */
2045 || LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
2046 || !LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&const_vf)
2047 || ((tree_ctz (LOOP_VINFO_NITERS (loop_vinfo))
2048 < (unsigned) exact_log2 (const_vf))
2049 /* In case of versioning, check if the maximum number of
2050 iterations is greater than th. If they are identical,
2051 the epilogue is unnecessary. */
2052 && (!LOOP_REQUIRES_VERSIONING (loop_vinfo)
2053 || ((unsigned HOST_WIDE_INT) max_niter
2054 > (th / const_vf) * const_vf))))
2055 LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = true;
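/* Worked example (editorial, hypothetical numbers): with known niters = 100,
   VF = 8, 3 iterations peeled for alignment and one more for gaps,
   100 - 4 = 96 is a multiple of 8 and no epilogue is needed for niters;
   with niters = 102 the remainder 98 % 8 = 2 forces
   LOOP_VINFO_PEELING_FOR_NITER.  */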
2057 /* If an epilogue loop is required make sure we can create one. */
2058 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
2059 || LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo))
2061 if (dump_enabled_p ())
2062 dump_printf_loc (MSG_NOTE, vect_location, "epilog loop required\n");
2063 if (!vect_can_advance_ivs_p (loop_vinfo)
2064 || !slpeel_can_duplicate_loop_p (LOOP_VINFO_LOOP (loop_vinfo),
2065 single_exit (LOOP_VINFO_LOOP
2066 (loop_vinfo))))
2068 ok = opt_result::failure_at (vect_location,
2069 "not vectorized: can't create required "
2070 "epilog loop\n");
2071 goto again;
2075 /* During peeling, we need to check if the number of loop iterations is
2076 enough for both the peeled prolog loop and the vector loop. This check
2077 can be merged with the threshold check of loop versioning, so
2078 increase the threshold for this case if necessary. */
2079 if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
2081 poly_uint64 niters_th = 0;
2083 if (!vect_use_loop_mask_for_alignment_p (loop_vinfo))
2085 /* Niters for peeled prolog loop. */
2086 if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0)
2088 dr_vec_info *dr_info = LOOP_VINFO_UNALIGNED_DR (loop_vinfo);
2089 tree vectype = STMT_VINFO_VECTYPE (dr_info->stmt);
2090 niters_th += TYPE_VECTOR_SUBPARTS (vectype) - 1;
2092 else
2093 niters_th += LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
2096 /* Niters for at least one iteration of vectorized loop. */
2097 if (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
2098 niters_th += LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2099 /* One additional iteration because of peeling for gaps. */
2100 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo))
2101 niters_th += 1;
2102 LOOP_VINFO_VERSIONING_THRESHOLD (loop_vinfo) = niters_th;
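/* Worked example (editorial, hypothetical numbers): with unknown alignment
   peeling on a data reference whose vector type has 8 elements, niters_th
   starts at 8 - 1 = 7; a not fully-masked loop then adds VF = 8 for one
   vector iteration, and peeling for gaps adds 1 more, so the versioning
   threshold becomes 16.  */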
2105 gcc_assert (known_eq (vectorization_factor,
2106 LOOP_VINFO_VECT_FACTOR (loop_vinfo)));
2108 /* Ok to vectorize! */
2109 return opt_result::success ();
2111 again:
2112 /* Ensure that "ok" is false (with an opt_problem if dumping is enabled). */
2113 gcc_assert (!ok);
2115 /* Try again with SLP forced off but if we didn't do any SLP there is
2116 no point in re-trying. */
2117 if (!slp)
2118 return ok;
2120 /* If there are reduction chains re-trying will fail anyway. */
2121 if (! LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo).is_empty ())
2122 return ok;
2124 /* Likewise if the grouped loads or stores in the SLP cannot be handled
2125 via interleaving or lane instructions. */
2126 slp_instance instance;
2127 slp_tree node;
2128 unsigned i, j;
2129 FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), i, instance)
2131 stmt_vec_info vinfo;
2132 vinfo = SLP_TREE_SCALAR_STMTS (SLP_INSTANCE_TREE (instance))[0];
2133 if (! STMT_VINFO_GROUPED_ACCESS (vinfo))
2134 continue;
2135 vinfo = DR_GROUP_FIRST_ELEMENT (vinfo);
2136 unsigned int size = DR_GROUP_SIZE (vinfo);
2137 tree vectype = STMT_VINFO_VECTYPE (vinfo);
2138 if (! vect_store_lanes_supported (vectype, size, false)
2139 && ! known_eq (TYPE_VECTOR_SUBPARTS (vectype), 1U)
2140 && ! vect_grouped_store_supported (vectype, size))
2141 return opt_result::failure_at (vinfo->stmt,
2142 "unsupported grouped store\n");
2143 FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (instance), j, node)
2145 vinfo = SLP_TREE_SCALAR_STMTS (node)[0];
2146 vinfo = DR_GROUP_FIRST_ELEMENT (vinfo);
2147 bool single_element_p = !DR_GROUP_NEXT_ELEMENT (vinfo);
2148 size = DR_GROUP_SIZE (vinfo);
2149 vectype = STMT_VINFO_VECTYPE (vinfo);
2150 if (! vect_load_lanes_supported (vectype, size, false)
2151 && ! vect_grouped_load_supported (vectype, single_element_p,
2152 size))
2153 return opt_result::failure_at (vinfo->stmt,
2154 "unsupported grouped load\n");
2158 if (dump_enabled_p ())
2159 dump_printf_loc (MSG_NOTE, vect_location,
2160 "re-trying with SLP disabled\n");
2162 /* Roll back state appropriately. No SLP this time. */
2163 slp = false;
2164 /* Restore the vectorization factor to what it was without SLP. */
2165 LOOP_VINFO_VECT_FACTOR (loop_vinfo) = saved_vectorization_factor;
2166 /* Free the SLP instances. */
2167 FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), j, instance)
2168 vect_free_slp_instance (instance, false);
2169 LOOP_VINFO_SLP_INSTANCES (loop_vinfo).release ();
2170 /* Reset SLP type to loop_vect on all stmts. */
2171 for (i = 0; i < LOOP_VINFO_LOOP (loop_vinfo)->num_nodes; ++i)
2173 basic_block bb = LOOP_VINFO_BBS (loop_vinfo)[i];
2174 for (gimple_stmt_iterator si = gsi_start_phis (bb);
2175 !gsi_end_p (si); gsi_next (&si))
2177 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
2178 STMT_SLP_TYPE (stmt_info) = loop_vect;
2180 for (gimple_stmt_iterator si = gsi_start_bb (bb);
2181 !gsi_end_p (si); gsi_next (&si))
2183 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
2184 STMT_SLP_TYPE (stmt_info) = loop_vect;
2185 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
2187 gimple *pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
2188 stmt_info = STMT_VINFO_RELATED_STMT (stmt_info);
2189 STMT_SLP_TYPE (stmt_info) = loop_vect;
2190 for (gimple_stmt_iterator pi = gsi_start (pattern_def_seq);
2191 !gsi_end_p (pi); gsi_next (&pi))
2192 STMT_SLP_TYPE (loop_vinfo->lookup_stmt (gsi_stmt (pi)))
2193 = loop_vect;
2197 /* Free optimized alias test DDRS. */
2198 LOOP_VINFO_LOWER_BOUNDS (loop_vinfo).truncate (0);
2199 LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo).release ();
2200 LOOP_VINFO_CHECK_UNEQUAL_ADDRS (loop_vinfo).release ();
2201 /* Reset target cost data. */
2202 destroy_cost_data (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo));
2203 LOOP_VINFO_TARGET_COST_DATA (loop_vinfo)
2204 = init_cost (LOOP_VINFO_LOOP (loop_vinfo));
2205 /* Reset accumulated rgroup information. */
2206 release_vec_loop_masks (&LOOP_VINFO_MASKS (loop_vinfo));
2207 /* Reset assorted flags. */
2208 LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = false;
2209 LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = false;
2210 LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo) = 0;
2211 LOOP_VINFO_VERSIONING_THRESHOLD (loop_vinfo) = 0;
2212 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = saved_can_fully_mask_p;
2214 goto start_over;
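/* Editorial note with a concrete example of the retry above: if SLP picked a
   group of four stores that the target cannot interleave, the "again" path
   rolls the vectorization factor, SLP instances, per-stmt SLP types, alias
   DDRs, cost data and rgroup masks back to their pre-SLP state and restarts
   at start_over, so the second pass analyses the same loop with pure loop
   vectorization only.  */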
2217 /* Function vect_analyze_loop.
2219 Apply a set of analyses on LOOP, and create a loop_vec_info struct
2220 for it. The different analyses will record information in the
2221 loop_vec_info struct. If ORIG_LOOP_VINFO is not NULL, the epilogue must
2222 be vectorized. */
2223 opt_loop_vec_info
2224 vect_analyze_loop (struct loop *loop, loop_vec_info orig_loop_vinfo,
2225 vec_info_shared *shared)
2227 auto_vector_sizes vector_sizes;
2229 /* Autodetect first vector size we try. */
2230 current_vector_size = 0;
2231 targetm.vectorize.autovectorize_vector_sizes (&vector_sizes);
2232 unsigned int next_size = 0;
2234 DUMP_VECT_SCOPE ("analyze_loop_nest");
2236 if (loop_outer (loop)
2237 && loop_vec_info_for_loop (loop_outer (loop))
2238 && LOOP_VINFO_VECTORIZABLE_P (loop_vec_info_for_loop (loop_outer (loop))))
2239 return opt_loop_vec_info::failure_at (vect_location,
2240 "outer-loop already vectorized.\n");
2242 if (!find_loop_nest (loop, &shared->loop_nest))
2243 return opt_loop_vec_info::failure_at
2244 (vect_location,
2245 "not vectorized: loop nest containing two or more consecutive inner"
2246 " loops cannot be vectorized\n");
2248 unsigned n_stmts = 0;
2249 poly_uint64 autodetected_vector_size = 0;
2250 while (1)
2252 /* Check the CFG characteristics of the loop (nesting, entry/exit). */
2253 opt_loop_vec_info loop_vinfo
2254 = vect_analyze_loop_form (loop, shared);
2255 if (!loop_vinfo)
2257 if (dump_enabled_p ())
2258 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2259 "bad loop form.\n");
2260 return loop_vinfo;
2263 bool fatal = false;
2265 if (orig_loop_vinfo)
2266 LOOP_VINFO_ORIG_LOOP_INFO (loop_vinfo) = orig_loop_vinfo;
2268 opt_result res = vect_analyze_loop_2 (loop_vinfo, fatal, &n_stmts);
2269 if (res)
2271 LOOP_VINFO_VECTORIZABLE_P (loop_vinfo) = 1;
2273 return loop_vinfo;
2276 delete loop_vinfo;
2278 if (next_size == 0)
2279 autodetected_vector_size = current_vector_size;
2281 if (next_size < vector_sizes.length ()
2282 && known_eq (vector_sizes[next_size], autodetected_vector_size))
2283 next_size += 1;
2285 if (fatal
2286 || next_size == vector_sizes.length ()
2287 || known_eq (current_vector_size, 0U))
2288 return opt_loop_vec_info::propagate_failure (res);
2290 /* Try the next biggest vector size. */
2291 current_vector_size = vector_sizes[next_size++];
2292 if (dump_enabled_p ())
2294 dump_printf_loc (MSG_NOTE, vect_location,
2295 "***** Re-trying analysis with "
2296 "vector size ");
2297 dump_dec (MSG_NOTE, current_vector_size);
2298 dump_printf (MSG_NOTE, "\n");
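/* Illustration (editorial): on a hypothetical target whose
   autovectorize_vector_sizes hook advertises three vector sizes, a loop
   whose analysis fails only non-fatally with the autodetected size is
   re-analyzed with each remaining advertised size in turn, and the original
   failure is propagated only once the sizes are exhausted or a fatal
   failure is hit.  */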
2303 /* Return true if there is an in-order reduction function for CODE, storing
2304 it in *REDUC_FN if so. */
2306 static bool
2307 fold_left_reduction_fn (tree_code code, internal_fn *reduc_fn)
2309 switch (code)
2311 case PLUS_EXPR:
2312 *reduc_fn = IFN_FOLD_LEFT_PLUS;
2313 return true;
2315 default:
2316 return false;
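/* Illustration (editorial): the kind of loop that ends up using
   IFN_FOLD_LEFT_PLUS when the target provides it, assuming a hypothetical
   accumulation compiled without -ffast-math:

     double s = 0.0;
     for (int i = 0; i < n; i++)
       s += a[i];

   Strict FP semantics forbid reassociating the additions, so the vector
   loop must fold the vector elements into the accumulator in their
   original order.  */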
2320 /* Function reduction_fn_for_scalar_code
2322 Input:
2323 CODE - tree_code of a reduction operation.
2325 Output:
2326 REDUC_FN - the corresponding internal function to be used to reduce the
2327 vector of partial results into a single scalar result, or IFN_LAST
2328 if the operation is a supported reduction operation, but does not have
2329 such an internal function.
2331 Return FALSE if CODE currently cannot be vectorized as a reduction. */
2333 static bool
2334 reduction_fn_for_scalar_code (enum tree_code code, internal_fn *reduc_fn)
2336 switch (code)
2338 case MAX_EXPR:
2339 *reduc_fn = IFN_REDUC_MAX;
2340 return true;
2342 case MIN_EXPR:
2343 *reduc_fn = IFN_REDUC_MIN;
2344 return true;
2346 case PLUS_EXPR:
2347 *reduc_fn = IFN_REDUC_PLUS;
2348 return true;
2350 case BIT_AND_EXPR:
2351 *reduc_fn = IFN_REDUC_AND;
2352 return true;
2354 case BIT_IOR_EXPR:
2355 *reduc_fn = IFN_REDUC_IOR;
2356 return true;
2358 case BIT_XOR_EXPR:
2359 *reduc_fn = IFN_REDUC_XOR;
2360 return true;
2362 case MULT_EXPR:
2363 case MINUS_EXPR:
2364 *reduc_fn = IFN_LAST;
2365 return true;
2367 default:
2368 return false;
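/* Illustration (editorial): for a hypothetical maximum reduction

     int m = INT_MIN;
     for (int i = 0; i < n; i++)
       m = m > a[i] ? m : a[i];

   the scalar operation is MAX_EXPR, so the partial results are combined
   with IFN_REDUC_MAX.  MULT_EXPR and MINUS_EXPR return IFN_LAST above:
   they are still vectorizable reductions, but the final reduction has to
   be open-coded (e.g. with shifts and element extracts) instead of using
   a single internal function.  */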
2372 /* If there is a neutral value X such that SLP reduction NODE would not
2373 be affected by the introduction of additional X elements, return that X,
2374 otherwise return null. CODE is the code of the reduction. REDUC_CHAIN
2375 is true if the SLP statements perform a single reduction, false if each
2376 statement performs an independent reduction. */
2378 static tree
2379 neutral_op_for_slp_reduction (slp_tree slp_node, tree_code code,
2380 bool reduc_chain)
2382 vec<stmt_vec_info> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
2383 stmt_vec_info stmt_vinfo = stmts[0];
2384 tree vector_type = STMT_VINFO_VECTYPE (stmt_vinfo);
2385 tree scalar_type = TREE_TYPE (vector_type);
2386 struct loop *loop = gimple_bb (stmt_vinfo->stmt)->loop_father;
2387 gcc_assert (loop);
2389 switch (code)
2391 case WIDEN_SUM_EXPR:
2392 case DOT_PROD_EXPR:
2393 case SAD_EXPR:
2394 case PLUS_EXPR:
2395 case MINUS_EXPR:
2396 case BIT_IOR_EXPR:
2397 case BIT_XOR_EXPR:
2398 return build_zero_cst (scalar_type);
2400 case MULT_EXPR:
2401 return build_one_cst (scalar_type);
2403 case BIT_AND_EXPR:
2404 return build_all_ones_cst (scalar_type);
2406 case MAX_EXPR:
2407 case MIN_EXPR:
2408 /* For MIN/MAX the initial values are neutral. A reduction chain
2409 has only a single initial value, so that value is neutral for
2410 all statements. */
2411 if (reduc_chain)
2412 return PHI_ARG_DEF_FROM_EDGE (stmt_vinfo->stmt,
2413 loop_preheader_edge (loop));
2414 return NULL_TREE;
2416 default:
2417 return NULL_TREE;
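/* Illustration (editorial): the neutral values above are the constants that
   can pad a vector without changing the reduction result: extra 0 elements
   for PLUS_EXPR, extra 1 elements for MULT_EXPR, and extra all-ones elements
   for BIT_AND_EXPR.  MIN and MAX have no such constant, which is why a
   reduction chain reuses its single initial value and other SLP reductions
   get NULL_TREE.  */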
2421 /* Error reporting helper for vect_is_simple_reduction below. GIMPLE statement
2422 STMT is printed with a message MSG. */
2424 static void
2425 report_vect_op (dump_flags_t msg_type, gimple *stmt, const char *msg)
2427 dump_printf_loc (msg_type, vect_location, "%s%G", msg, stmt);
2430 /* DEF_STMT_INFO occurs in a loop that contains a potential reduction
2431 operation. Return true if the results of DEF_STMT_INFO are something
2432 that can be accumulated by such a reduction. */
2434 static bool
2435 vect_valid_reduction_input_p (stmt_vec_info def_stmt_info)
2437 return (is_gimple_assign (def_stmt_info->stmt)
2438 || is_gimple_call (def_stmt_info->stmt)
2439 || STMT_VINFO_DEF_TYPE (def_stmt_info) == vect_induction_def
2440 || (gimple_code (def_stmt_info->stmt) == GIMPLE_PHI
2441 && STMT_VINFO_DEF_TYPE (def_stmt_info) == vect_internal_def
2442 && !is_loop_header_bb_p (gimple_bb (def_stmt_info->stmt))));
2445 /* Detect SLP reduction of the form:
2447 #a1 = phi <a5, a0>
2448 a2 = operation (a1)
2449 a3 = operation (a2)
2450 a4 = operation (a3)
2451 a5 = operation (a4)
2453 #a = phi <a5>
2455 PHI is the reduction phi node (#a1 = phi <a5, a0> above)
2456 FIRST_STMT is the first reduction stmt in the chain
2457 (a2 = operation (a1)).
2459 Return TRUE if a reduction chain was detected. */
2461 static bool
2462 vect_is_slp_reduction (loop_vec_info loop_info, gimple *phi,
2463 gimple *first_stmt)
2465 struct loop *loop = (gimple_bb (phi))->loop_father;
2466 struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
2467 enum tree_code code;
2468 gimple *loop_use_stmt = NULL;
2469 stmt_vec_info use_stmt_info, current_stmt_info = NULL;
2470 tree lhs;
2471 imm_use_iterator imm_iter;
2472 use_operand_p use_p;
2473 int nloop_uses, size = 0, n_out_of_loop_uses;
2474 bool found = false;
2476 if (loop != vect_loop)
2477 return false;
2479 lhs = PHI_RESULT (phi);
2480 code = gimple_assign_rhs_code (first_stmt);
2481 while (1)
2483 nloop_uses = 0;
2484 n_out_of_loop_uses = 0;
2485 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
2487 gimple *use_stmt = USE_STMT (use_p);
2488 if (is_gimple_debug (use_stmt))
2489 continue;
2491 /* Check if we got back to the reduction phi. */
2492 if (use_stmt == phi)
2494 loop_use_stmt = use_stmt;
2495 found = true;
2496 break;
2499 if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
2501 loop_use_stmt = use_stmt;
2502 nloop_uses++;
2504 else
2505 n_out_of_loop_uses++;
2507 /* There can be either a single use in the loop or two uses in
2508 phi nodes. */
2509 if (nloop_uses > 1 || (n_out_of_loop_uses && nloop_uses))
2510 return false;
2513 if (found)
2514 break;
2516 /* We reached a statement with no loop uses. */
2517 if (nloop_uses == 0)
2518 return false;
2520 /* This is a loop exit phi, and we haven't reached the reduction phi. */
2521 if (gimple_code (loop_use_stmt) == GIMPLE_PHI)
2522 return false;
2524 if (!is_gimple_assign (loop_use_stmt)
2525 || code != gimple_assign_rhs_code (loop_use_stmt)
2526 || !flow_bb_inside_loop_p (loop, gimple_bb (loop_use_stmt)))
2527 return false;
2529 /* Insert USE_STMT into reduction chain. */
2530 use_stmt_info = loop_info->lookup_stmt (loop_use_stmt);
2531 if (current_stmt_info)
2533 REDUC_GROUP_NEXT_ELEMENT (current_stmt_info) = use_stmt_info;
2534 REDUC_GROUP_FIRST_ELEMENT (use_stmt_info)
2535 = REDUC_GROUP_FIRST_ELEMENT (current_stmt_info);
2537 else
2538 REDUC_GROUP_FIRST_ELEMENT (use_stmt_info) = use_stmt_info;
2540 lhs = gimple_assign_lhs (loop_use_stmt);
2541 current_stmt_info = use_stmt_info;
2542 size++;
2545 if (!found || loop_use_stmt != phi || size < 2)
2546 return false;
2548 /* Swap the operands, if needed, to make the reduction operand be the second
2549 operand. */
2550 lhs = PHI_RESULT (phi);
2551 stmt_vec_info next_stmt_info = REDUC_GROUP_FIRST_ELEMENT (current_stmt_info);
2552 while (next_stmt_info)
2554 gassign *next_stmt = as_a <gassign *> (next_stmt_info->stmt);
2555 if (gimple_assign_rhs2 (next_stmt) == lhs)
2557 tree op = gimple_assign_rhs1 (next_stmt);
2558 stmt_vec_info def_stmt_info = loop_info->lookup_def (op);
2560 /* Check that the other def is either defined in the loop
2561 ("vect_internal_def"), or it's an induction (defined by a
2562 loop-header phi-node). */
2563 if (def_stmt_info
2564 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt_info->stmt))
2565 && vect_valid_reduction_input_p (def_stmt_info))
2567 lhs = gimple_assign_lhs (next_stmt);
2568 next_stmt_info = REDUC_GROUP_NEXT_ELEMENT (next_stmt_info);
2569 continue;
2572 return false;
2574 else
2576 tree op = gimple_assign_rhs2 (next_stmt);
2577 stmt_vec_info def_stmt_info = loop_info->lookup_def (op);
2579 /* Check that the other def is either defined in the loop
2580 ("vect_internal_def"), or it's an induction (defined by a
2581 loop-header phi-node). */
2582 if (def_stmt_info
2583 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt_info->stmt))
2584 && vect_valid_reduction_input_p (def_stmt_info))
2586 if (dump_enabled_p ())
2587 dump_printf_loc (MSG_NOTE, vect_location, "swapping oprnds: %G",
2588 next_stmt);
2590 swap_ssa_operands (next_stmt,
2591 gimple_assign_rhs1_ptr (next_stmt),
2592 gimple_assign_rhs2_ptr (next_stmt));
2593 update_stmt (next_stmt);
2595 if (CONSTANT_CLASS_P (gimple_assign_rhs1 (next_stmt)))
2596 LOOP_VINFO_OPERANDS_SWAPPED (loop_info) = true;
2598 else
2599 return false;
2602 lhs = gimple_assign_lhs (next_stmt);
2603 next_stmt_info = REDUC_GROUP_NEXT_ELEMENT (next_stmt_info);
2606 /* Save the chain for further analysis in SLP detection. */
2607 stmt_vec_info first_stmt_info
2608 = REDUC_GROUP_FIRST_ELEMENT (current_stmt_info);
2609 LOOP_VINFO_REDUCTION_CHAINS (loop_info).safe_push (first_stmt_info);
2610 REDUC_GROUP_SIZE (first_stmt_info) = size;
2612 return true;
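/* Illustration (editorial): a source-level shape that typically produces the
   reduction chain matched above, assuming a hypothetical array a and
   accumulator sum:

     for (int i = 0; i < n; i += 4)
       sum = sum + a[i] + a[i + 1] + a[i + 2] + a[i + 3];

   Each addition becomes one statement of the chain (a2 .. a5 in the comment
   before vect_is_slp_reduction), each feeding the next, with the loop PHI
   closing the cycle.  */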
2615 /* Return true if we need an in-order reduction for operation CODE
2616 on type TYPE. NEED_WRAPPING_INTEGRAL_OVERFLOW is true if integer
2617 overflow must wrap. */
2619 static bool
2620 needs_fold_left_reduction_p (tree type, tree_code code,
2621 bool need_wrapping_integral_overflow)
2623 /* CHECKME: check for !flag_finite_math_only too? */
2624 if (SCALAR_FLOAT_TYPE_P (type))
2625 switch (code)
2627 case MIN_EXPR:
2628 case MAX_EXPR:
2629 return false;
2631 default:
2632 return !flag_associative_math;
2635 if (INTEGRAL_TYPE_P (type))
2637 if (!operation_no_trapping_overflow (type, code))
2638 return true;
2639 if (need_wrapping_integral_overflow
2640 && !TYPE_OVERFLOW_WRAPS (type)
2641 && operation_can_overflow (code))
2642 return true;
2643 return false;
2646 if (SAT_FIXED_POINT_TYPE_P (type))
2647 return true;
2649 return false;
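/* Illustration (editorial): with -ftrapv a signed "sum += a[i]" reduction
   may trap on overflow, and reassociating the additions could move or hide
   that trap, so the function above requests an in-order (fold-left)
   reduction; with wrapping unsigned arithmetic, or floating point under
   -fassociative-math, it does not.  */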
2652 /* Return true if the reduction PHI in LOOP with latch arg LOOP_ARG and
2653 reduction operation CODE has a handled computation expression. */
2655 bool
2656 check_reduction_path (dump_user_location_t loc, loop_p loop, gphi *phi,
2657 tree loop_arg, enum tree_code code)
2659 auto_vec<std::pair<ssa_op_iter, use_operand_p> > path;
2660 auto_bitmap visited;
2661 tree lookfor = PHI_RESULT (phi);
2662 ssa_op_iter curri;
2663 use_operand_p curr = op_iter_init_phiuse (&curri, phi, SSA_OP_USE);
2664 while (USE_FROM_PTR (curr) != loop_arg)
2665 curr = op_iter_next_use (&curri);
2666 curri.i = curri.numops;
2669 path.safe_push (std::make_pair (curri, curr));
2670 tree use = USE_FROM_PTR (curr);
2671 if (use == lookfor)
2672 break;
2673 gimple *def = SSA_NAME_DEF_STMT (use);
2674 if (gimple_nop_p (def)
2675 || ! flow_bb_inside_loop_p (loop, gimple_bb (def)))
2677 pop:
2680 std::pair<ssa_op_iter, use_operand_p> x = path.pop ();
2681 curri = x.first;
2682 curr = x.second;
2684 curr = op_iter_next_use (&curri);
2685 /* Skip already visited or non-SSA operands (from iterating
2686 over PHI args). */
2687 while (curr != NULL_USE_OPERAND_P
2688 && (TREE_CODE (USE_FROM_PTR (curr)) != SSA_NAME
2689 || ! bitmap_set_bit (visited,
2690 SSA_NAME_VERSION
2691 (USE_FROM_PTR (curr)))));
2693 while (curr == NULL_USE_OPERAND_P && ! path.is_empty ());
2694 if (curr == NULL_USE_OPERAND_P)
2695 break;
2697 else
2699 if (gimple_code (def) == GIMPLE_PHI)
2700 curr = op_iter_init_phiuse (&curri, as_a <gphi *>(def), SSA_OP_USE);
2701 else
2702 curr = op_iter_init_use (&curri, def, SSA_OP_USE);
2703 while (curr != NULL_USE_OPERAND_P
2704 && (TREE_CODE (USE_FROM_PTR (curr)) != SSA_NAME
2705 || ! bitmap_set_bit (visited,
2706 SSA_NAME_VERSION
2707 (USE_FROM_PTR (curr)))))
2708 curr = op_iter_next_use (&curri);
2709 if (curr == NULL_USE_OPERAND_P)
2710 goto pop;
2713 while (1);
2714 if (dump_file && (dump_flags & TDF_DETAILS))
2716 dump_printf_loc (MSG_NOTE, loc, "reduction path: ");
2717 unsigned i;
2718 std::pair<ssa_op_iter, use_operand_p> *x;
2719 FOR_EACH_VEC_ELT (path, i, x)
2720 dump_printf (MSG_NOTE, "%T ", USE_FROM_PTR (x->second));
2721 dump_printf (MSG_NOTE, "\n");
2724 /* Check whether the reduction path detected is valid. */
2725 bool fail = path.length () == 0;
2726 bool neg = false;
2727 for (unsigned i = 1; i < path.length (); ++i)
2729 gimple *use_stmt = USE_STMT (path[i].second);
2730 tree op = USE_FROM_PTR (path[i].second);
2731 if (! has_single_use (op)
2732 || ! is_gimple_assign (use_stmt))
2734 fail = true;
2735 break;
2737 if (gimple_assign_rhs_code (use_stmt) != code)
2739 if (code == PLUS_EXPR
2740 && gimple_assign_rhs_code (use_stmt) == MINUS_EXPR)
2742 /* Track whether we negate the reduction value each iteration. */
2743 if (gimple_assign_rhs2 (use_stmt) == op)
2744 neg = ! neg;
2746 else
2748 fail = true;
2749 break;
2753 return ! fail && ! neg;
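/* Illustration (editorial): for a hypothetical PLUS_EXPR cycle
   "x_1 = PHI <x_0, x_3>; x_2 = x_1 + a_5; x_3 = x_2 + b_6" the walk above
   records the path x_3 -> x_2 -> x_1; every intermediate value has a single
   use in a PLUS_EXPR statement, so the path is accepted.  An update such as
   "res = c - res" toggles NEG on every iteration and is rejected, since the
   running value would change sign each time.  */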
2757 /* Function vect_is_simple_reduction
2759 (1) Detect a cross-iteration def-use cycle that represents a simple
2760 reduction computation. We look for the following pattern:
2762 loop_header:
2763 a1 = phi < a0, a2 >
2764 a3 = ...
2765 a2 = operation (a3, a1)
2769 a3 = ...
2770 loop_header:
2771 a1 = phi < a0, a2 >
2772 a2 = operation (a3, a1)
2774 such that:
2775 1. operation is commutative and associative and it is safe to
2776 change the order of the computation
2777 2. no uses for a2 in the loop (a2 is used out of the loop)
2778 3. no uses of a1 in the loop besides the reduction operation
2779 4. no uses of a1 outside the loop.
2781 Conditions 1,4 are tested here.
2782 Conditions 2,3 are tested in vect_mark_stmts_to_be_vectorized.
2784 (2) Detect a cross-iteration def-use cycle in nested loops, i.e.,
2785 nested cycles.
2787 (3) Detect cycles of phi nodes in outer-loop vectorization, i.e., double
2788 reductions:
2790 a1 = phi < a0, a2 >
2791 inner loop (def of a3)
2792 a2 = phi < a3 >
2794 (4) Detect condition expressions, i.e.:
2795 for (int i = 0; i < N; i++)
2796 if (a[i] < val)
2797 ret_val = a[i];
2801 static stmt_vec_info
2802 vect_is_simple_reduction (loop_vec_info loop_info, stmt_vec_info phi_info,
2803 bool *double_reduc,
2804 bool need_wrapping_integral_overflow,
2805 enum vect_reduction_type *v_reduc_type)
2807 gphi *phi = as_a <gphi *> (phi_info->stmt);
2808 struct loop *loop = (gimple_bb (phi))->loop_father;
2809 struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
2810 gimple *phi_use_stmt = NULL;
2811 enum tree_code orig_code, code;
2812 tree op1, op2, op3 = NULL_TREE, op4 = NULL_TREE;
2813 tree type;
2814 int nloop_uses;
2815 tree name;
2816 imm_use_iterator imm_iter;
2817 use_operand_p use_p;
2818 bool phi_def;
2820 *double_reduc = false;
2821 *v_reduc_type = TREE_CODE_REDUCTION;
2823 tree phi_name = PHI_RESULT (phi);
2824 /* ??? If there are no uses of the PHI result the inner loop reduction
2825 won't be detected as possibly double-reduction by vectorizable_reduction
2826 because that tries to walk the PHI arg from the preheader edge which
2827 can be constant. See PR60382. */
2828 if (has_zero_uses (phi_name))
2829 return NULL;
2830 nloop_uses = 0;
2831 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, phi_name)
2833 gimple *use_stmt = USE_STMT (use_p);
2834 if (is_gimple_debug (use_stmt))
2835 continue;
2837 if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
2839 if (dump_enabled_p ())
2840 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2841 "intermediate value used outside loop.\n");
2843 return NULL;
2846 nloop_uses++;
2847 if (nloop_uses > 1)
2849 if (dump_enabled_p ())
2850 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2851 "reduction value used in loop.\n");
2852 return NULL;
2855 phi_use_stmt = use_stmt;
2858 edge latch_e = loop_latch_edge (loop);
2859 tree loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
2860 if (TREE_CODE (loop_arg) != SSA_NAME)
2862 if (dump_enabled_p ())
2863 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2864 "reduction: not ssa_name: %T\n", loop_arg);
2865 return NULL;
2868 stmt_vec_info def_stmt_info = loop_info->lookup_def (loop_arg);
2869 if (!def_stmt_info
2870 || !flow_bb_inside_loop_p (loop, gimple_bb (def_stmt_info->stmt)))
2871 return NULL;
2873 if (gassign *def_stmt = dyn_cast <gassign *> (def_stmt_info->stmt))
2875 name = gimple_assign_lhs (def_stmt);
2876 phi_def = false;
2878 else if (gphi *def_stmt = dyn_cast <gphi *> (def_stmt_info->stmt))
2880 name = PHI_RESULT (def_stmt);
2881 phi_def = true;
2883 else
2885 if (dump_enabled_p ())
2886 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2887 "reduction: unhandled reduction operation: %G",
2888 def_stmt_info->stmt);
2889 return NULL;
2892 nloop_uses = 0;
2893 auto_vec<gphi *, 3> lcphis;
2894 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
2896 gimple *use_stmt = USE_STMT (use_p);
2897 if (is_gimple_debug (use_stmt))
2898 continue;
2899 if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
2900 nloop_uses++;
2901 else
2902 /* We can have more than one loop-closed PHI. */
2903 lcphis.safe_push (as_a <gphi *> (use_stmt));
2904 if (nloop_uses > 1)
2906 if (dump_enabled_p ())
2907 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2908 "reduction used in loop.\n");
2909 return NULL;
2913 /* If DEF_STMT is a phi node itself, we expect it to have a single argument
2914 defined in the inner loop. */
2915 if (phi_def)
2917 gphi *def_stmt = as_a <gphi *> (def_stmt_info->stmt);
2918 op1 = PHI_ARG_DEF (def_stmt, 0);
2920 if (gimple_phi_num_args (def_stmt) != 1
2921 || TREE_CODE (op1) != SSA_NAME)
2923 if (dump_enabled_p ())
2924 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2925 "unsupported phi node definition.\n");
2927 return NULL;
2930 gimple *def1 = SSA_NAME_DEF_STMT (op1);
2931 if (gimple_bb (def1)
2932 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
2933 && loop->inner
2934 && flow_bb_inside_loop_p (loop->inner, gimple_bb (def1))
2935 && is_gimple_assign (def1)
2936 && is_a <gphi *> (phi_use_stmt)
2937 && flow_bb_inside_loop_p (loop->inner, gimple_bb (phi_use_stmt)))
2939 if (dump_enabled_p ())
2940 report_vect_op (MSG_NOTE, def_stmt,
2941 "detected double reduction: ");
2943 *double_reduc = true;
2944 return def_stmt_info;
2947 return NULL;
2950 /* If we are vectorizing an inner reduction, we execute it in the
2951 original order only when we are not dealing with a
2952 double reduction. */
2953 bool check_reduction = true;
2954 if (flow_loop_nested_p (vect_loop, loop))
2956 gphi *lcphi;
2957 unsigned i;
2958 check_reduction = false;
2959 FOR_EACH_VEC_ELT (lcphis, i, lcphi)
2960 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, gimple_phi_result (lcphi))
2962 gimple *use_stmt = USE_STMT (use_p);
2963 if (is_gimple_debug (use_stmt))
2964 continue;
2965 if (! flow_bb_inside_loop_p (vect_loop, gimple_bb (use_stmt)))
2966 check_reduction = true;
2970 gassign *def_stmt = as_a <gassign *> (def_stmt_info->stmt);
2971 bool nested_in_vect_loop = flow_loop_nested_p (vect_loop, loop);
2972 code = orig_code = gimple_assign_rhs_code (def_stmt);
2974 /* We can handle "res -= x[i]", which is non-associative, by
2975 simply rewriting it into "res += -x[i]". Avoid changing the
2976 gimple instruction for the first simple tests and only do this
2977 if we're allowed to change code at all. */
2978 if (code == MINUS_EXPR && gimple_assign_rhs2 (def_stmt) != phi_name)
2979 code = PLUS_EXPR;
2981 if (code == COND_EXPR)
2983 if (! nested_in_vect_loop)
2984 *v_reduc_type = COND_REDUCTION;
2986 op3 = gimple_assign_rhs1 (def_stmt);
2987 if (COMPARISON_CLASS_P (op3))
2989 op4 = TREE_OPERAND (op3, 1);
2990 op3 = TREE_OPERAND (op3, 0);
2992 if (op3 == phi_name || op4 == phi_name)
2994 if (dump_enabled_p ())
2995 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
2996 "reduction: condition depends on previous"
2997 " iteration: ");
2998 return NULL;
3001 op1 = gimple_assign_rhs2 (def_stmt);
3002 op2 = gimple_assign_rhs3 (def_stmt);
3004 else if (!commutative_tree_code (code) || !associative_tree_code (code))
3006 if (dump_enabled_p ())
3007 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
3008 "reduction: not commutative/associative: ");
3009 return NULL;
3011 else if (get_gimple_rhs_class (code) == GIMPLE_BINARY_RHS)
3013 op1 = gimple_assign_rhs1 (def_stmt);
3014 op2 = gimple_assign_rhs2 (def_stmt);
3016 else
3018 if (dump_enabled_p ())
3019 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
3020 "reduction: not handled operation: ");
3021 return NULL;
3024 if (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op2) != SSA_NAME)
3026 if (dump_enabled_p ())
3027 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
3028 "reduction: both uses not ssa_names: ");
3030 return NULL;
3033 type = TREE_TYPE (gimple_assign_lhs (def_stmt));
3034 if ((TREE_CODE (op1) == SSA_NAME
3035 && !types_compatible_p (type,TREE_TYPE (op1)))
3036 || (TREE_CODE (op2) == SSA_NAME
3037 && !types_compatible_p (type, TREE_TYPE (op2)))
3038 || (op3 && TREE_CODE (op3) == SSA_NAME
3039 && !types_compatible_p (type, TREE_TYPE (op3)))
3040 || (op4 && TREE_CODE (op4) == SSA_NAME
3041 && !types_compatible_p (type, TREE_TYPE (op4))))
3043 if (dump_enabled_p ())
3045 dump_printf_loc (MSG_NOTE, vect_location,
3046 "reduction: multiple types: operation type: "
3047 "%T, operand types: %T,%T",
3048 type, TREE_TYPE (op1), TREE_TYPE (op2));
3049 if (op3)
3050 dump_printf (MSG_NOTE, ",%T", TREE_TYPE (op3));
3052 if (op4)
3053 dump_printf (MSG_NOTE, ",%T", TREE_TYPE (op4));
3054 dump_printf (MSG_NOTE, "\n");
3057 return NULL;
3060 /* Check whether it's ok to change the order of the computation.
3061 Generally, when vectorizing a reduction we change the order of the
3062 computation. This may change the behavior of the program in some
3063 cases, so we need to check that this is ok. One exception is when
3064 vectorizing an outer-loop: the inner-loop is executed sequentially,
3065 and therefore vectorizing reductions in the inner-loop during
3066 outer-loop vectorization is safe. */
3067 if (check_reduction
3068 && *v_reduc_type == TREE_CODE_REDUCTION
3069 && needs_fold_left_reduction_p (type, code,
3070 need_wrapping_integral_overflow))
3071 *v_reduc_type = FOLD_LEFT_REDUCTION;
3073 /* Reduction is safe. We're dealing with one of the following:
3074 1) integer arithmetic and no trapv
3075 2) floating point arithmetic, and special flags permit this optimization
3076 3) nested cycle (i.e., outer loop vectorization). */
3077 stmt_vec_info def1_info = loop_info->lookup_def (op1);
3078 stmt_vec_info def2_info = loop_info->lookup_def (op2);
3079 if (code != COND_EXPR && !def1_info && !def2_info)
3081 if (dump_enabled_p ())
3082 report_vect_op (MSG_NOTE, def_stmt, "reduction: no defs for operands: ");
3083 return NULL;
3086 /* Check that one def is the reduction def, defined by PHI,
3087 the other def is either defined in the loop ("vect_internal_def"),
3088 or it's an induction (defined by a loop-header phi-node). */
3090 if (def2_info
3091 && def2_info->stmt == phi
3092 && (code == COND_EXPR
3093 || !def1_info
3094 || !flow_bb_inside_loop_p (loop, gimple_bb (def1_info->stmt))
3095 || vect_valid_reduction_input_p (def1_info)))
3097 if (dump_enabled_p ())
3098 report_vect_op (MSG_NOTE, def_stmt, "detected reduction: ");
3099 return def_stmt_info;
3102 if (def1_info
3103 && def1_info->stmt == phi
3104 && (code == COND_EXPR
3105 || !def2_info
3106 || !flow_bb_inside_loop_p (loop, gimple_bb (def2_info->stmt))
3107 || vect_valid_reduction_input_p (def2_info)))
3109 if (! nested_in_vect_loop && orig_code != MINUS_EXPR)
3111 /* Check if we can swap operands (just for simplicity - so that
3112 the rest of the code can assume that the reduction variable
3113 is always the last (second) argument). */
3114 if (code == COND_EXPR)
3116 /* Swap cond_expr by inverting the condition. */
3117 tree cond_expr = gimple_assign_rhs1 (def_stmt);
3118 enum tree_code invert_code = ERROR_MARK;
3119 enum tree_code cond_code = TREE_CODE (cond_expr);
3121 if (TREE_CODE_CLASS (cond_code) == tcc_comparison)
3123 bool honor_nans = HONOR_NANS (TREE_OPERAND (cond_expr, 0));
3124 invert_code = invert_tree_comparison (cond_code, honor_nans);
3126 if (invert_code != ERROR_MARK)
3128 TREE_SET_CODE (cond_expr, invert_code);
3129 swap_ssa_operands (def_stmt,
3130 gimple_assign_rhs2_ptr (def_stmt),
3131 gimple_assign_rhs3_ptr (def_stmt));
3133 else
3135 if (dump_enabled_p ())
3136 report_vect_op (MSG_NOTE, def_stmt,
3137 "detected reduction: cannot swap operands "
3138 "for cond_expr");
3139 return NULL;
3142 else
3143 swap_ssa_operands (def_stmt, gimple_assign_rhs1_ptr (def_stmt),
3144 gimple_assign_rhs2_ptr (def_stmt));
3146 if (dump_enabled_p ())
3147 report_vect_op (MSG_NOTE, def_stmt,
3148 "detected reduction: need to swap operands: ");
3150 if (CONSTANT_CLASS_P (gimple_assign_rhs1 (def_stmt)))
3151 LOOP_VINFO_OPERANDS_SWAPPED (loop_info) = true;
3153 else
3155 if (dump_enabled_p ())
3156 report_vect_op (MSG_NOTE, def_stmt, "detected reduction: ");
3159 return def_stmt_info;
3162 /* Try to find SLP reduction chain. */
3163 if (! nested_in_vect_loop
3164 && code != COND_EXPR
3165 && orig_code != MINUS_EXPR
3166 && vect_is_slp_reduction (loop_info, phi, def_stmt))
3168 if (dump_enabled_p ())
3169 report_vect_op (MSG_NOTE, def_stmt,
3170 "reduction: detected reduction chain: ");
3172 return def_stmt_info;
3175 /* Dissolve a group possibly half-built by vect_is_slp_reduction. */
3176 stmt_vec_info first = REDUC_GROUP_FIRST_ELEMENT (def_stmt_info);
3177 while (first)
3179 stmt_vec_info next = REDUC_GROUP_NEXT_ELEMENT (first);
3180 REDUC_GROUP_FIRST_ELEMENT (first) = NULL;
3181 REDUC_GROUP_NEXT_ELEMENT (first) = NULL;
3182 first = next;
3185 /* Look for the expression computing loop_arg from loop PHI result. */
3186 if (check_reduction_path (vect_location, loop, phi, loop_arg, code))
3187 return def_stmt_info;
3189 if (dump_enabled_p ())
3191 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
3192 "reduction: unknown pattern: ");
3195 return NULL;
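/* Illustration (editorial): the COND_EXPR case (4) documented above
   corresponds to a hypothetical loop such as

     int last = -1;
     for (int i = 0; i < n; i++)
       if (a[i] < val)
         last = a[i];

   which is classified as a COND_REDUCTION, whereas a plain "sum += a[i]"
   cycle stays a TREE_CODE_REDUCTION (possibly downgraded to
   FOLD_LEFT_REDUCTION by needs_fold_left_reduction_p).  */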
3198 /* Wrapper around vect_is_simple_reduction, which will modify code
3199 in-place if it enables detection of more reductions. Arguments
3200 as there. */
3202 stmt_vec_info
3203 vect_force_simple_reduction (loop_vec_info loop_info, stmt_vec_info phi_info,
3204 bool *double_reduc,
3205 bool need_wrapping_integral_overflow)
3207 enum vect_reduction_type v_reduc_type;
3208 stmt_vec_info def_info
3209 = vect_is_simple_reduction (loop_info, phi_info, double_reduc,
3210 need_wrapping_integral_overflow,
3211 &v_reduc_type);
3212 if (def_info)
3214 STMT_VINFO_REDUC_TYPE (phi_info) = v_reduc_type;
3215 STMT_VINFO_REDUC_DEF (phi_info) = def_info;
3216 STMT_VINFO_REDUC_TYPE (def_info) = v_reduc_type;
3217 STMT_VINFO_REDUC_DEF (def_info) = phi_info;
3219 return def_info;
3222 /* Calculate cost of peeling the loop PEEL_ITERS_PROLOGUE times. */
3223 int
3224 vect_get_known_peeling_cost (loop_vec_info loop_vinfo, int peel_iters_prologue,
3225 int *peel_iters_epilogue,
3226 stmt_vector_for_cost *scalar_cost_vec,
3227 stmt_vector_for_cost *prologue_cost_vec,
3228 stmt_vector_for_cost *epilogue_cost_vec)
3230 int retval = 0;
3231 int assumed_vf = vect_vf_for_cost (loop_vinfo);
3233 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
3235 *peel_iters_epilogue = assumed_vf / 2;
3236 if (dump_enabled_p ())
3237 dump_printf_loc (MSG_NOTE, vect_location,
3238 "cost model: epilogue peel iters set to vf/2 "
3239 "because loop iterations are unknown.\n");
3241 /* If peeled iterations are known but the number of scalar loop
3242 iterations is unknown, count a taken branch per peeled loop. */
3243 retval = record_stmt_cost (prologue_cost_vec, 1, cond_branch_taken,
3244 NULL, 0, vect_prologue);
3245 retval = record_stmt_cost (prologue_cost_vec, 1, cond_branch_taken,
3246 NULL, 0, vect_epilogue);
3248 else
3250 int niters = LOOP_VINFO_INT_NITERS (loop_vinfo);
3251 peel_iters_prologue = niters < peel_iters_prologue ?
3252 niters : peel_iters_prologue;
3253 *peel_iters_epilogue = (niters - peel_iters_prologue) % assumed_vf;
3254 /* If we need to peel for gaps but the epilogue would otherwise get no
3255 iterations, we still have to peel VF iterations. */
3256 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) && !*peel_iters_epilogue)
3257 *peel_iters_epilogue = assumed_vf;
3260 stmt_info_for_cost *si;
3261 int j;
3262 if (peel_iters_prologue)
3263 FOR_EACH_VEC_ELT (*scalar_cost_vec, j, si)
3264 retval += record_stmt_cost (prologue_cost_vec,
3265 si->count * peel_iters_prologue,
3266 si->kind, si->stmt_info, si->misalign,
3267 vect_prologue);
3268 if (*peel_iters_epilogue)
3269 FOR_EACH_VEC_ELT (*scalar_cost_vec, j, si)
3270 retval += record_stmt_cost (epilogue_cost_vec,
3271 si->count * *peel_iters_epilogue,
3272 si->kind, si->stmt_info, si->misalign,
3273 vect_epilogue);
3275 return retval;
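/* Worked example (editorial, hypothetical numbers): with known niters = 100,
   assumed_vf = 8 and peel_iters_prologue = 3, the epilogue peels
   (100 - 3) % 8 = 1 iteration, and the prologue and epilogue cost vectors
   receive 3 and 1 copies of each scalar statement cost respectively.  */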
3278 /* Function vect_estimate_min_profitable_iters
3280 Return the number of iterations required for the vector version of the
3281 loop to be profitable relative to the cost of the scalar version of the
3282 loop.
3284 *RET_MIN_PROFITABLE_NITERS is a cost model profitability threshold
3285 of iterations for vectorization. A value of -1 means loop vectorization
3286 is not profitable. This returned value may be used for a dynamic
3287 profitability check.
3289 *RET_MIN_PROFITABLE_ESTIMATE is a profitability threshold to be used
3290 for static check against estimated number of iterations. */
3292 static void
3293 vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo,
3294 int *ret_min_profitable_niters,
3295 int *ret_min_profitable_estimate)
3297 int min_profitable_iters;
3298 int min_profitable_estimate;
3299 int peel_iters_prologue;
3300 int peel_iters_epilogue;
3301 unsigned vec_inside_cost = 0;
3302 int vec_outside_cost = 0;
3303 unsigned vec_prologue_cost = 0;
3304 unsigned vec_epilogue_cost = 0;
3305 int scalar_single_iter_cost = 0;
3306 int scalar_outside_cost = 0;
3307 int assumed_vf = vect_vf_for_cost (loop_vinfo);
3308 int npeel = LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
3309 void *target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
3311 /* Cost model disabled. */
3312 if (unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
3314 dump_printf_loc (MSG_NOTE, vect_location, "cost model disabled.\n");
3315 *ret_min_profitable_niters = 0;
3316 *ret_min_profitable_estimate = 0;
3317 return;
3320 /* Requires loop versioning tests to handle misalignment. */
3321 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
3323 /* FIXME: Make cost depend on complexity of individual check. */
3324 unsigned len = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).length ();
3325 (void) add_stmt_cost (target_cost_data, len, vector_stmt, NULL, 0,
3326 vect_prologue);
3327 dump_printf (MSG_NOTE,
3328 "cost model: Adding cost of checks for loop "
3329 "versioning to treat misalignment.\n");
3332 /* Requires loop versioning with alias checks. */
3333 if (LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
3335 /* FIXME: Make cost depend on complexity of individual check. */
3336 unsigned len = LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo).length ();
3337 (void) add_stmt_cost (target_cost_data, len, vector_stmt, NULL, 0,
3338 vect_prologue);
3339 len = LOOP_VINFO_CHECK_UNEQUAL_ADDRS (loop_vinfo).length ();
3340 if (len)
3341 /* Count LEN - 1 ANDs and LEN comparisons. */
3342 (void) add_stmt_cost (target_cost_data, len * 2 - 1, scalar_stmt,
3343 NULL, 0, vect_prologue);
3344 len = LOOP_VINFO_LOWER_BOUNDS (loop_vinfo).length ();
3345 if (len)
3347 /* Count LEN - 1 ANDs and LEN comparisons. */
3348 unsigned int nstmts = len * 2 - 1;
3349 /* +1 for each bias that needs adding. */
3350 for (unsigned int i = 0; i < len; ++i)
3351 if (!LOOP_VINFO_LOWER_BOUNDS (loop_vinfo)[i].unsigned_p)
3352 nstmts += 1;
3353 (void) add_stmt_cost (target_cost_data, nstmts, scalar_stmt,
3354 NULL, 0, vect_prologue);
3356 dump_printf (MSG_NOTE,
3357 "cost model: Adding cost of checks for loop "
3358 "versioning aliasing.\n");
3361 /* Requires loop versioning with niter checks. */
3362 if (LOOP_REQUIRES_VERSIONING_FOR_NITERS (loop_vinfo))
3364 /* FIXME: Make cost depend on complexity of individual check. */
3365 (void) add_stmt_cost (target_cost_data, 1, vector_stmt, NULL, 0,
3366 vect_prologue);
3367 dump_printf (MSG_NOTE,
3368 "cost model: Adding cost of checks for loop "
3369 "versioning niters.\n");
3372 if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
3373 (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken, NULL, 0,
3374 vect_prologue);
3376 /* Count statements in scalar loop. Using this as scalar cost for a single
3377 iteration for now.
3379 TODO: Add outer loop support.
3381 TODO: Consider assigning different costs to different scalar
3382 statements. */
3384 scalar_single_iter_cost
3385 = LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST (loop_vinfo);
3387 /* Add additional cost for the peeled instructions in prologue and epilogue
3388 loop. (For fully-masked loops there will be no peeling.)
3390 FORNOW: If we don't know the value of peel_iters for prologue or epilogue
3391 at compile time, we assume it's vf/2 (the worst would be vf-1).
3393 TODO: Build an expression that represents peel_iters for prologue and
3394 epilogue to be used in a run-time test. */
3396 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
3398 peel_iters_prologue = 0;
3399 peel_iters_epilogue = 0;
3401 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo))
3403 /* We need to peel exactly one iteration. */
3404 peel_iters_epilogue += 1;
3405 stmt_info_for_cost *si;
3406 int j;
3407 FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
3408 j, si)
3409 (void) add_stmt_cost (target_cost_data, si->count,
3410 si->kind, si->stmt_info, si->misalign,
3411 vect_epilogue);
3414 else if (npeel < 0)
3416 peel_iters_prologue = assumed_vf / 2;
3417 dump_printf (MSG_NOTE, "cost model: "
3418 "prologue peel iters set to vf/2.\n");
3420 /* If peeling for alignment is unknown, loop bound of main loop becomes
3421 unknown. */
3422 peel_iters_epilogue = assumed_vf / 2;
3423 dump_printf (MSG_NOTE, "cost model: "
3424 "epilogue peel iters set to vf/2 because "
3425 "peeling for alignment is unknown.\n");
3427 /* If peeled iterations are unknown, count a taken branch and a not taken
3428 branch per peeled loop. Even if scalar loop iterations are known,
3429 vector iterations are not known since peeled prologue iterations are
3430 not known. Hence guards remain the same. */
3431 (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken,
3432 NULL, 0, vect_prologue);
3433 (void) add_stmt_cost (target_cost_data, 1, cond_branch_not_taken,
3434 NULL, 0, vect_prologue);
3435 (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken,
3436 NULL, 0, vect_epilogue);
3437 (void) add_stmt_cost (target_cost_data, 1, cond_branch_not_taken,
3438 NULL, 0, vect_epilogue);
3439 stmt_info_for_cost *si;
3440 int j;
3441 FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo), j, si)
3443 (void) add_stmt_cost (target_cost_data,
3444 si->count * peel_iters_prologue,
3445 si->kind, si->stmt_info, si->misalign,
3446 vect_prologue);
3447 (void) add_stmt_cost (target_cost_data,
3448 si->count * peel_iters_epilogue,
3449 si->kind, si->stmt_info, si->misalign,
3450 vect_epilogue);
3453 else
3455 stmt_vector_for_cost prologue_cost_vec, epilogue_cost_vec;
3456 stmt_info_for_cost *si;
3457 int j;
3458 void *data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
3460 prologue_cost_vec.create (2);
3461 epilogue_cost_vec.create (2);
3462 peel_iters_prologue = npeel;
3464 (void) vect_get_known_peeling_cost (loop_vinfo, peel_iters_prologue,
3465 &peel_iters_epilogue,
3466 &LOOP_VINFO_SCALAR_ITERATION_COST
3467 (loop_vinfo),
3468 &prologue_cost_vec,
3469 &epilogue_cost_vec);
3471 FOR_EACH_VEC_ELT (prologue_cost_vec, j, si)
3472 (void) add_stmt_cost (data, si->count, si->kind, si->stmt_info,
3473 si->misalign, vect_prologue);
3475 FOR_EACH_VEC_ELT (epilogue_cost_vec, j, si)
3476 (void) add_stmt_cost (data, si->count, si->kind, si->stmt_info,
3477 si->misalign, vect_epilogue);
3479 prologue_cost_vec.release ();
3480 epilogue_cost_vec.release ();
3483 /* FORNOW: The scalar outside cost is incremented in one of the
3484 following ways:
3486 1. The vectorizer checks for alignment and aliasing and generates
3487 a condition that allows dynamic vectorization. A cost model
3488 check is ANDED with the versioning condition. Hence scalar code
3489 path now has the added cost of the versioning check.
3491 if (cost > th & versioning_check)
3492 jmp to vector code
3494 Hence run-time scalar is incremented by not-taken branch cost.
3496 2. The vectorizer then checks if a prologue is required. If the
3497 cost model check was not done before during versioning, it has to
3498 be done before the prologue check.
3500 if (cost <= th)
3501 prologue = scalar_iters
3502 if (prologue == 0)
3503 jmp to vector code
3504 else
3505 execute prologue
3506 if (prologue == num_iters)
3507 go to exit
3509 Hence the run-time scalar cost is incremented by a taken branch,
3510 plus a not-taken branch, plus a taken branch cost.
3512 3. The vectorizer then checks if an epilogue is required. If the
3513 cost model check was not done before during prologue check, it
3514 has to be done with the epilogue check.
3516 if (prologue == 0)
3517 jmp to vector code
3518 else
3519 execute prologue
3520 if (prologue == num_iters)
3521 go to exit
3522 vector code:
3523 if ((cost <= th) | (scalar_iters-prologue-epilogue == 0))
3524 jmp to epilogue
3526 Hence the run-time scalar cost should be incremented by 2 taken
3527 branches.
3529 TODO: The back end may reorder the BBs differently and reverse
3530 conditions/branch directions. Change the estimates below to
3531 something more reasonable. */
3533 /* If the number of iterations is known and we do not do versioning, we can
3534 decide whether to vectorize at compile time. Hence the scalar version
3535 does not carry cost model guard costs. */
3536 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
3537 || LOOP_REQUIRES_VERSIONING (loop_vinfo))
3539 /* Cost model check occurs at versioning. */
3540 if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
3541 scalar_outside_cost += vect_get_stmt_cost (cond_branch_not_taken);
3542 else
3544 /* Cost model check occurs at prologue generation. */
3545 if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0)
3546 scalar_outside_cost += 2 * vect_get_stmt_cost (cond_branch_taken)
3547 + vect_get_stmt_cost (cond_branch_not_taken);
3548 /* Cost model check occurs at epilogue generation. */
3549 else
3550 scalar_outside_cost += 2 * vect_get_stmt_cost (cond_branch_taken);
3554 /* Complete the target-specific cost calculations. */
3555 finish_cost (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo), &vec_prologue_cost,
3556 &vec_inside_cost, &vec_epilogue_cost);
3558 vec_outside_cost = (int)(vec_prologue_cost + vec_epilogue_cost);
3560 if (dump_enabled_p ())
3562 dump_printf_loc (MSG_NOTE, vect_location, "Cost model analysis: \n");
3563 dump_printf (MSG_NOTE, " Vector inside of loop cost: %d\n",
3564 vec_inside_cost);
3565 dump_printf (MSG_NOTE, " Vector prologue cost: %d\n",
3566 vec_prologue_cost);
3567 dump_printf (MSG_NOTE, " Vector epilogue cost: %d\n",
3568 vec_epilogue_cost);
3569 dump_printf (MSG_NOTE, " Scalar iteration cost: %d\n",
3570 scalar_single_iter_cost);
3571 dump_printf (MSG_NOTE, " Scalar outside cost: %d\n",
3572 scalar_outside_cost);
3573 dump_printf (MSG_NOTE, " Vector outside cost: %d\n",
3574 vec_outside_cost);
3575 dump_printf (MSG_NOTE, " prologue iterations: %d\n",
3576 peel_iters_prologue);
3577 dump_printf (MSG_NOTE, " epilogue iterations: %d\n",
3578 peel_iters_epilogue);
3581 /* Calculate number of iterations required to make the vector version
3582 profitable, relative to the loop bodies only. The following condition
3583 must hold true:
3584 SIC * niters + SOC > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC
3585 where
3586 SIC = scalar iteration cost, VIC = vector iteration cost,
3587 VOC = vector outside cost, VF = vectorization factor,
3588 PL_ITERS = prologue iterations, EP_ITERS= epilogue iterations
3589 SOC = scalar outside cost for run time cost model check. */
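/* Worked example (editorial, hypothetical numbers): with SIC = 4, VIC = 12,
   VF = 8, VOC = 40, SOC = 0 and no peeling, the condition
   4 * niters > 12 * (niters / 8) + 40 holds for niters > 16, so the code
   below computes (40 * 8) / (4 * 8 - 12) = 16 and then bumps the result to
   17 because bare equality is not profitable.  */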
3591 if ((scalar_single_iter_cost * assumed_vf) > (int) vec_inside_cost)
3593 min_profitable_iters = ((vec_outside_cost - scalar_outside_cost)
3594 * assumed_vf
3595 - vec_inside_cost * peel_iters_prologue
3596 - vec_inside_cost * peel_iters_epilogue);
3597 if (min_profitable_iters <= 0)
3598 min_profitable_iters = 0;
3599 else
3601 min_profitable_iters /= ((scalar_single_iter_cost * assumed_vf)
3602 - vec_inside_cost);
3604 if ((scalar_single_iter_cost * assumed_vf * min_profitable_iters)
3605 <= (((int) vec_inside_cost * min_profitable_iters)
3606 + (((int) vec_outside_cost - scalar_outside_cost)
3607 * assumed_vf)))
3608 min_profitable_iters++;
3611 /* vector version will never be profitable. */
3612 else
3614 if (LOOP_VINFO_LOOP (loop_vinfo)->force_vectorize)
3615 warning_at (vect_location.get_location_t (), OPT_Wopenmp_simd,
3616 "vectorization did not happen for a simd loop");
3618 if (dump_enabled_p ())
3619 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3620 "cost model: the vector iteration cost = %d "
3621 "divided by the scalar iteration cost = %d "
3622                          "is greater than or equal to the vectorization factor = %d"
3623 ".\n",
3624 vec_inside_cost, scalar_single_iter_cost, assumed_vf);
3625 *ret_min_profitable_niters = -1;
3626 *ret_min_profitable_estimate = -1;
3627 return;
3630 dump_printf (MSG_NOTE,
3631 " Calculated minimum iters for profitability: %d\n",
3632 min_profitable_iters);
3634 if (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
3635 && min_profitable_iters < (assumed_vf + peel_iters_prologue))
3636 /* We want the vectorized loop to execute at least once. */
3637 min_profitable_iters = assumed_vf + peel_iters_prologue;
3639 if (dump_enabled_p ())
3640 dump_printf_loc (MSG_NOTE, vect_location,
3641 " Runtime profitability threshold = %d\n",
3642 min_profitable_iters);
3644 *ret_min_profitable_niters = min_profitable_iters;
3646 /* Calculate number of iterations required to make the vector version
3647 profitable, relative to the loop bodies only.
3649      The cost of the non-vectorized variant is SIC * niters, and it must win
3650      over the vectorized variant at the expected loop trip count.  The following condition must hold true:
3651 SIC * niters > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC + SOC */
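      /* Editor's note (not part of the original source): continuing the
         worked example above (SIC = 4, VIC = 12, VOC = 28, SOC = 0, VF = 4,
         no peeling) this gives (28 + 0) * 4 / (4 * 4 - 12) = 28, which the
         MAX below then raises to the runtime threshold of 29.  */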
3653 if (vec_outside_cost <= 0)
3654 min_profitable_estimate = 0;
3655 else
3657 min_profitable_estimate = ((vec_outside_cost + scalar_outside_cost)
3658 * assumed_vf
3659 - vec_inside_cost * peel_iters_prologue
3660 - vec_inside_cost * peel_iters_epilogue)
3661 / ((scalar_single_iter_cost * assumed_vf)
3662 - vec_inside_cost);
3664 min_profitable_estimate = MAX (min_profitable_estimate, min_profitable_iters);
3665 if (dump_enabled_p ())
3666 dump_printf_loc (MSG_NOTE, vect_location,
3667 " Static estimate profitability threshold = %d\n",
3668 min_profitable_estimate);
3670 *ret_min_profitable_estimate = min_profitable_estimate;
3673 /* Writes into SEL a mask for a vec_perm, equivalent to a vec_shr by OFFSET
3674 vector elements (not bits) for a vector with NELT elements. */
3675 static void
3676 calc_vec_perm_mask_for_shift (unsigned int offset, unsigned int nelt,
3677 vec_perm_builder *sel)
3679 /* The encoding is a single stepped pattern. Any wrap-around is handled
3680 by vec_perm_indices. */
3681 sel->new_vector (nelt, 1, 3);
3682 for (unsigned int i = 0; i < 3; i++)
3683 sel->quick_push (i + offset);
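   /* Editor's illustration (not part of the original source): for OFFSET = 2
      and NELT = 8 the single stepped pattern {2, 3, 4} built above expands to
      the selector {2, 3, 4, 5, 6, 7, 8, 9}; indices 8 and 9 select elements
      of the second VEC_PERM_EXPR operand, which is a zero vector in the
      shift-reduction code later in this file, so the permute acts as a
      whole-vector shift by two elements with zero fill.  */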
3686 /* Checks whether the target supports whole-vector shifts for vectors of mode
3687 MODE. This is the case if _either_ the platform handles vec_shr_optab, _or_
3688 it supports vec_perm_const with masks for all necessary shift amounts. */
3689 static bool
3690 have_whole_vector_shift (machine_mode mode)
3692 if (optab_handler (vec_shr_optab, mode) != CODE_FOR_nothing)
3693 return true;
3695 /* Variable-length vectors should be handled via the optab. */
3696 unsigned int nelt;
3697 if (!GET_MODE_NUNITS (mode).is_constant (&nelt))
3698 return false;
3700 vec_perm_builder sel;
3701 vec_perm_indices indices;
3702 for (unsigned int i = nelt / 2; i >= 1; i /= 2)
3704 calc_vec_perm_mask_for_shift (i, nelt, &sel);
3705 indices.new_vector (sel, 2, nelt);
3706 if (!can_vec_perm_const_p (mode, indices, false))
3707 return false;
3709 return true;
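   /* Editor's note (not part of the original source): for a constant-length
      vector of, say, 8 elements the loop above checks the shift selectors for
      offsets 4, 2 and 1, exactly the shift amounts that the log2-style shift
      reduction in the reduction epilogue ("Reduce using vector shifts" below)
      will need.  */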
3712 /* TODO: Close dependency between vect_model_*_cost and vectorizable_*
3713 functions. Design better to avoid maintenance issues. */
3715 /* Function vect_model_reduction_cost.
3717 Models cost for a reduction operation, including the vector ops
3718 generated within the strip-mine loop, the initial definition before
3719 the loop, and the epilogue code that must be generated. */
3721 static void
3722 vect_model_reduction_cost (stmt_vec_info stmt_info, internal_fn reduc_fn,
3723 int ncopies, stmt_vector_for_cost *cost_vec)
3725 int prologue_cost = 0, epilogue_cost = 0, inside_cost;
3726 enum tree_code code;
3727 optab optab;
3728 tree vectype;
3729 machine_mode mode;
3730 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3731 struct loop *loop = NULL;
3733 if (loop_vinfo)
3734 loop = LOOP_VINFO_LOOP (loop_vinfo);
3736 /* Condition reductions generate two reductions in the loop. */
3737 vect_reduction_type reduction_type
3738 = STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info);
3739 if (reduction_type == COND_REDUCTION)
3740 ncopies *= 2;
3742 vectype = STMT_VINFO_VECTYPE (stmt_info);
3743 mode = TYPE_MODE (vectype);
3744 stmt_vec_info orig_stmt_info = vect_orig_stmt (stmt_info);
3746 code = gimple_assign_rhs_code (orig_stmt_info->stmt);
3748 if (reduction_type == EXTRACT_LAST_REDUCTION
3749 || reduction_type == FOLD_LEFT_REDUCTION)
3751 /* No extra instructions needed in the prologue. */
3752 prologue_cost = 0;
3754 if (reduction_type == EXTRACT_LAST_REDUCTION || reduc_fn != IFN_LAST)
3755 /* Count one reduction-like operation per vector. */
3756 inside_cost = record_stmt_cost (cost_vec, ncopies, vec_to_scalar,
3757 stmt_info, 0, vect_body);
3758 else
3760 /* Use NELEMENTS extracts and NELEMENTS scalar ops. */
3761 unsigned int nelements = ncopies * vect_nunits_for_cost (vectype);
3762 inside_cost = record_stmt_cost (cost_vec, nelements,
3763 vec_to_scalar, stmt_info, 0,
3764 vect_body);
3765 inside_cost += record_stmt_cost (cost_vec, nelements,
3766 scalar_stmt, stmt_info, 0,
3767 vect_body);
3770 else
3772 /* Add in cost for initial definition.
3773 For cond reduction we have four vectors: initial index, step,
3774 initial result of the data reduction, initial value of the index
3775 reduction. */
3776 int prologue_stmts = reduction_type == COND_REDUCTION ? 4 : 1;
3777 prologue_cost += record_stmt_cost (cost_vec, prologue_stmts,
3778 scalar_to_vec, stmt_info, 0,
3779 vect_prologue);
3781 /* Cost of reduction op inside loop. */
3782 inside_cost = record_stmt_cost (cost_vec, ncopies, vector_stmt,
3783 stmt_info, 0, vect_body);
3786 /* Determine cost of epilogue code.
3788 We have a reduction operator that will reduce the vector in one statement.
3789 Also requires scalar extract. */
3791 if (!loop || !nested_in_vect_loop_p (loop, orig_stmt_info))
3793 if (reduc_fn != IFN_LAST)
3795 if (reduction_type == COND_REDUCTION)
3797              /* An EQ stmt and a COND_EXPR stmt.  */
3798 epilogue_cost += record_stmt_cost (cost_vec, 2,
3799 vector_stmt, stmt_info, 0,
3800 vect_epilogue);
3801 /* Reduction of the max index and a reduction of the found
3802 values. */
3803 epilogue_cost += record_stmt_cost (cost_vec, 2,
3804 vec_to_scalar, stmt_info, 0,
3805 vect_epilogue);
3806 /* A broadcast of the max value. */
3807 epilogue_cost += record_stmt_cost (cost_vec, 1,
3808 scalar_to_vec, stmt_info, 0,
3809 vect_epilogue);
3811 else
3813 epilogue_cost += record_stmt_cost (cost_vec, 1, vector_stmt,
3814 stmt_info, 0, vect_epilogue);
3815 epilogue_cost += record_stmt_cost (cost_vec, 1,
3816 vec_to_scalar, stmt_info, 0,
3817 vect_epilogue);
3820 else if (reduction_type == COND_REDUCTION)
3822 unsigned estimated_nunits = vect_nunits_for_cost (vectype);
3823 /* Extraction of scalar elements. */
3824 epilogue_cost += record_stmt_cost (cost_vec,
3825 2 * estimated_nunits,
3826 vec_to_scalar, stmt_info, 0,
3827 vect_epilogue);
3828 /* Scalar max reductions via COND_EXPR / MAX_EXPR. */
3829 epilogue_cost += record_stmt_cost (cost_vec,
3830 2 * estimated_nunits - 3,
3831 scalar_stmt, stmt_info, 0,
3832 vect_epilogue);
3834 else if (reduction_type == EXTRACT_LAST_REDUCTION
3835 || reduction_type == FOLD_LEFT_REDUCTION)
3836        /* No extra instructions needed in the epilogue.  */
3838 else
3840 int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype));
3841 tree bitsize =
3842 TYPE_SIZE (TREE_TYPE (gimple_assign_lhs (orig_stmt_info->stmt)));
3843 int element_bitsize = tree_to_uhwi (bitsize);
3844 int nelements = vec_size_in_bits / element_bitsize;
3846 if (code == COND_EXPR)
3847 code = MAX_EXPR;
3849 optab = optab_for_tree_code (code, vectype, optab_default);
3851 /* We have a whole vector shift available. */
3852 if (optab != unknown_optab
3853 && VECTOR_MODE_P (mode)
3854 && optab_handler (optab, mode) != CODE_FOR_nothing
3855 && have_whole_vector_shift (mode))
3857 /* Final reduction via vector shifts and the reduction operator.
3858 Also requires scalar extract. */
3859 epilogue_cost += record_stmt_cost (cost_vec,
3860 exact_log2 (nelements) * 2,
3861 vector_stmt, stmt_info, 0,
3862 vect_epilogue);
3863 epilogue_cost += record_stmt_cost (cost_vec, 1,
3864 vec_to_scalar, stmt_info, 0,
3865 vect_epilogue);
3867 else
3868 /* Use extracts and reduction op for final reduction. For N
3869 elements, we have N extracts and N-1 reduction ops. */
3870 epilogue_cost += record_stmt_cost (cost_vec,
3871 nelements + nelements - 1,
3872 vector_stmt, stmt_info, 0,
3873 vect_epilogue);
3877 if (dump_enabled_p ())
3878 dump_printf (MSG_NOTE,
3879 "vect_model_reduction_cost: inside_cost = %d, "
3880 "prologue_cost = %d, epilogue_cost = %d .\n", inside_cost,
3881 prologue_cost, epilogue_cost);
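   /* Editor's illustration (not part of the original source): for a plain sum
      reduction with ncopies = 1, a supported REDUC_FN and no nesting, the
      branches above charge one scalar_to_vec in the prologue (the initial
      vector definition), one vector_stmt per copy in the body, and one
      vector_stmt plus one vec_to_scalar in the epilogue (the reduction
      operation and the extraction of the scalar result).  */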
3885 /* Function vect_model_induction_cost.
3887 Models cost for induction operations. */
3889 static void
3890 vect_model_induction_cost (stmt_vec_info stmt_info, int ncopies,
3891 stmt_vector_for_cost *cost_vec)
3893 unsigned inside_cost, prologue_cost;
3895 if (PURE_SLP_STMT (stmt_info))
3896 return;
3898 /* loop cost for vec_loop. */
3899 inside_cost = record_stmt_cost (cost_vec, ncopies, vector_stmt,
3900 stmt_info, 0, vect_body);
3902 /* prologue cost for vec_init and vec_step. */
3903 prologue_cost = record_stmt_cost (cost_vec, 2, scalar_to_vec,
3904 stmt_info, 0, vect_prologue);
3906 if (dump_enabled_p ())
3907 dump_printf_loc (MSG_NOTE, vect_location,
3908 "vect_model_induction_cost: inside_cost = %d, "
3909 "prologue_cost = %d .\n", inside_cost, prologue_cost);
3914 /* Function get_initial_def_for_reduction
3916 Input:
3917 STMT_VINFO - a stmt that performs a reduction operation in the loop.
3918 INIT_VAL - the initial value of the reduction variable
3920 Output:
3921 ADJUSTMENT_DEF - a tree that holds a value to be added to the final result
3922 of the reduction (used for adjusting the epilog - see below).
3923 Return a vector variable, initialized according to the operation that
3924 STMT_VINFO performs. This vector will be used as the initial value
3925 of the vector of partial results.
3927 Option1 (adjust in epilog): Initialize the vector as follows:
3928 add/bit or/xor: [0,0,...,0,0]
3929 mult/bit and: [1,1,...,1,1]
3930 min/max/cond_expr: [init_val,init_val,..,init_val,init_val]
3931 and when necessary (e.g. add/mult case) let the caller know
3932 that it needs to adjust the result by init_val.
3934 Option2: Initialize the vector as follows:
3935 add/bit or/xor: [init_val,0,0,...,0]
3936 mult/bit and: [init_val,1,1,...,1]
3937 min/max/cond_expr: [init_val,init_val,...,init_val]
3938 and no adjustments are needed.
3940 For example, for the following code:
3942 s = init_val;
3943 for (i=0;i<n;i++)
3944 s = s + a[i];
3946 STMT_VINFO is 's = s + a[i]', and the reduction variable is 's'.
3947 For a vector of 4 units, we want to return either [0,0,0,init_val],
3948 or [0,0,0,0] and let the caller know that it needs to adjust
3949 the result at the end by 'init_val'.
3951    FORNOW, we use the 'adjust in epilog' scheme (Option1) when
3952    ADJUSTMENT_DEF is not NULL, because this way the initialization
3953    vector is simpler (same element in all entries); otherwise we use Option2.
3955 A cost model should help decide between these two schemes. */
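   /* Editor's illustration (not part of the original source): for
      'init_val' = 5, a PLUS_EXPR reduction and a 4-element vector,
      Option1 returns {0, 0, 0, 0} and sets ADJUSTMENT_DEF to 5, while
      Option2 returns {5, 0, 0, 0}; for MIN_EXPR/MAX_EXPR both options
      use {5, 5, 5, 5}.  */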
3957 tree
3958 get_initial_def_for_reduction (stmt_vec_info stmt_vinfo, tree init_val,
3959 tree *adjustment_def)
3961 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
3962 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
3963 tree scalar_type = TREE_TYPE (init_val);
3964 tree vectype = get_vectype_for_scalar_type (scalar_type);
3965 enum tree_code code = gimple_assign_rhs_code (stmt_vinfo->stmt);
3966 tree def_for_init;
3967 tree init_def;
3968 REAL_VALUE_TYPE real_init_val = dconst0;
3969 int int_init_val = 0;
3970 gimple_seq stmts = NULL;
3972 gcc_assert (vectype);
3974 gcc_assert (POINTER_TYPE_P (scalar_type) || INTEGRAL_TYPE_P (scalar_type)
3975 || SCALAR_FLOAT_TYPE_P (scalar_type));
3977 gcc_assert (nested_in_vect_loop_p (loop, stmt_vinfo)
3978 || loop == (gimple_bb (stmt_vinfo->stmt))->loop_father);
3980 vect_reduction_type reduction_type
3981 = STMT_VINFO_VEC_REDUCTION_TYPE (stmt_vinfo);
3983 switch (code)
3985 case WIDEN_SUM_EXPR:
3986 case DOT_PROD_EXPR:
3987 case SAD_EXPR:
3988 case PLUS_EXPR:
3989 case MINUS_EXPR:
3990 case BIT_IOR_EXPR:
3991 case BIT_XOR_EXPR:
3992 case MULT_EXPR:
3993 case BIT_AND_EXPR:
3995 /* ADJUSTMENT_DEF is NULL when called from
3996 vect_create_epilog_for_reduction to vectorize double reduction. */
3997 if (adjustment_def)
3998 *adjustment_def = init_val;
4000 if (code == MULT_EXPR)
4002 real_init_val = dconst1;
4003 int_init_val = 1;
4006 if (code == BIT_AND_EXPR)
4007 int_init_val = -1;
4009 if (SCALAR_FLOAT_TYPE_P (scalar_type))
4010 def_for_init = build_real (scalar_type, real_init_val);
4011 else
4012 def_for_init = build_int_cst (scalar_type, int_init_val);
4014 if (adjustment_def)
4015 /* Option1: the first element is '0' or '1' as well. */
4016 init_def = gimple_build_vector_from_val (&stmts, vectype,
4017 def_for_init);
4018 else if (!TYPE_VECTOR_SUBPARTS (vectype).is_constant ())
4020 /* Option2 (variable length): the first element is INIT_VAL. */
4021 init_def = gimple_build_vector_from_val (&stmts, vectype,
4022 def_for_init);
4023 init_def = gimple_build (&stmts, CFN_VEC_SHL_INSERT,
4024 vectype, init_def, init_val);
4026 else
4028 /* Option2: the first element is INIT_VAL. */
4029 tree_vector_builder elts (vectype, 1, 2);
4030 elts.quick_push (init_val);
4031 elts.quick_push (def_for_init);
4032 init_def = gimple_build_vector (&stmts, &elts);
4035 break;
4037 case MIN_EXPR:
4038 case MAX_EXPR:
4039 case COND_EXPR:
4041 if (adjustment_def)
4043 *adjustment_def = NULL_TREE;
4044 if (reduction_type != COND_REDUCTION
4045 && reduction_type != EXTRACT_LAST_REDUCTION)
4047 init_def = vect_get_vec_def_for_operand (init_val, stmt_vinfo);
4048 break;
4051 init_val = gimple_convert (&stmts, TREE_TYPE (vectype), init_val);
4052 init_def = gimple_build_vector_from_val (&stmts, vectype, init_val);
4054 break;
4056 default:
4057 gcc_unreachable ();
4060 if (stmts)
4061 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
4062 return init_def;
4065 /* Get at the initial defs for the reduction PHIs in SLP_NODE.
4066 NUMBER_OF_VECTORS is the number of vector defs to create.
4067 If NEUTRAL_OP is nonnull, introducing extra elements of that
4068 value will not change the result. */
4070 static void
4071 get_initial_defs_for_reduction (slp_tree slp_node,
4072 vec<tree> *vec_oprnds,
4073 unsigned int number_of_vectors,
4074 bool reduc_chain, tree neutral_op)
4076 vec<stmt_vec_info> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
4077 stmt_vec_info stmt_vinfo = stmts[0];
4078 unsigned HOST_WIDE_INT nunits;
4079 unsigned j, number_of_places_left_in_vector;
4080 tree vector_type;
4081 tree vop;
4082 int group_size = stmts.length ();
4083 unsigned int vec_num, i;
4084 unsigned number_of_copies = 1;
4085 vec<tree> voprnds;
4086 voprnds.create (number_of_vectors);
4087 struct loop *loop;
4088 auto_vec<tree, 16> permute_results;
4090 vector_type = STMT_VINFO_VECTYPE (stmt_vinfo);
4092 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def);
4094 loop = (gimple_bb (stmt_vinfo->stmt))->loop_father;
4095 gcc_assert (loop);
4096 edge pe = loop_preheader_edge (loop);
4098 gcc_assert (!reduc_chain || neutral_op);
4100 /* NUMBER_OF_COPIES is the number of times we need to use the same values in
4101 created vectors. It is greater than 1 if unrolling is performed.
4103 For example, we have two scalar operands, s1 and s2 (e.g., group of
4104 strided accesses of size two), while NUNITS is four (i.e., four scalars
4105 of this type can be packed in a vector). The output vector will contain
4106 two copies of each scalar operand: {s1, s2, s1, s2}. (NUMBER_OF_COPIES
4107 will be 2).
4109 If REDUC_GROUP_SIZE > NUNITS, the scalars will be split into several
4110 vectors containing the operands.
4112 For example, NUNITS is four as before, and the group size is 8
4113 (s1, s2, ..., s8). We will create two vectors {s1, s2, s3, s4} and
4114 {s5, s6, s7, s8}. */
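   /* Editor's illustration (not part of the original source): for an SLP
      reduction of two sums a and b (GROUP_SIZE = 2) with NUNITS = 4,
      NEUTRAL_OP = 0 and a single vector to create, NUMBER_OF_COPIES is 2 and
      the loop below builds {init_a, init_b, 0, 0}: only the last copy uses
      the real initial values, the remaining lanes are padded with the
      neutral value.  */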
4116 if (!TYPE_VECTOR_SUBPARTS (vector_type).is_constant (&nunits))
4117 nunits = group_size;
4119 number_of_copies = nunits * number_of_vectors / group_size;
4121 number_of_places_left_in_vector = nunits;
4122 bool constant_p = true;
4123 tree_vector_builder elts (vector_type, nunits, 1);
4124 elts.quick_grow (nunits);
4125 for (j = 0; j < number_of_copies; j++)
4127 for (i = group_size - 1; stmts.iterate (i, &stmt_vinfo); i--)
4129 tree op;
4130 /* Get the def before the loop. In reduction chain we have only
4131 one initial value. */
4132 if ((j != (number_of_copies - 1)
4133 || (reduc_chain && i != 0))
4134 && neutral_op)
4135 op = neutral_op;
4136 else
4137 op = PHI_ARG_DEF_FROM_EDGE (stmt_vinfo->stmt, pe);
4139 /* Create 'vect_ = {op0,op1,...,opn}'. */
4140 number_of_places_left_in_vector--;
4141 elts[number_of_places_left_in_vector] = op;
4142 if (!CONSTANT_CLASS_P (op))
4143 constant_p = false;
4145 if (number_of_places_left_in_vector == 0)
4147 gimple_seq ctor_seq = NULL;
4148 tree init;
4149 if (constant_p && !neutral_op
4150 ? multiple_p (TYPE_VECTOR_SUBPARTS (vector_type), nunits)
4151 : known_eq (TYPE_VECTOR_SUBPARTS (vector_type), nunits))
4152 /* Build the vector directly from ELTS. */
4153 init = gimple_build_vector (&ctor_seq, &elts);
4154 else if (neutral_op)
4156 /* Build a vector of the neutral value and shift the
4157 other elements into place. */
4158 init = gimple_build_vector_from_val (&ctor_seq, vector_type,
4159 neutral_op);
4160 int k = nunits;
4161 while (k > 0 && elts[k - 1] == neutral_op)
4162 k -= 1;
4163 while (k > 0)
4165 k -= 1;
4166 init = gimple_build (&ctor_seq, CFN_VEC_SHL_INSERT,
4167 vector_type, init, elts[k]);
4170 else
4172 /* First time round, duplicate ELTS to fill the
4173 required number of vectors, then cherry pick the
4174 appropriate result for each iteration. */
4175 if (vec_oprnds->is_empty ())
4176 duplicate_and_interleave (&ctor_seq, vector_type, elts,
4177 number_of_vectors,
4178 permute_results);
4179 init = permute_results[number_of_vectors - j - 1];
4181 if (ctor_seq != NULL)
4182 gsi_insert_seq_on_edge_immediate (pe, ctor_seq);
4183 voprnds.quick_push (init);
4185 number_of_places_left_in_vector = nunits;
4186 elts.new_vector (vector_type, nunits, 1);
4187 elts.quick_grow (nunits);
4188 constant_p = true;
4193 /* Since the vectors are created in the reverse order, we should invert
4194 them. */
4195 vec_num = voprnds.length ();
4196 for (j = vec_num; j != 0; j--)
4198 vop = voprnds[j - 1];
4199 vec_oprnds->quick_push (vop);
4202 voprnds.release ();
4204 /* In case that VF is greater than the unrolling factor needed for the SLP
4205 group of stmts, NUMBER_OF_VECTORS to be created is greater than
4206 NUMBER_OF_SCALARS/NUNITS or NUNITS/NUMBER_OF_SCALARS, and hence we have
4207 to replicate the vectors. */
4208 tree neutral_vec = NULL;
4209 while (number_of_vectors > vec_oprnds->length ())
4211 if (neutral_op)
4213 if (!neutral_vec)
4215 gimple_seq ctor_seq = NULL;
4216 neutral_vec = gimple_build_vector_from_val
4217 (&ctor_seq, vector_type, neutral_op);
4218 if (ctor_seq != NULL)
4219 gsi_insert_seq_on_edge_immediate (pe, ctor_seq);
4221 vec_oprnds->quick_push (neutral_vec);
4223 else
4225 for (i = 0; vec_oprnds->iterate (i, &vop) && i < vec_num; i++)
4226 vec_oprnds->quick_push (vop);
4232 /* Function vect_create_epilog_for_reduction
4234 Create code at the loop-epilog to finalize the result of a reduction
4235 computation.
4237 VECT_DEFS is list of vector of partial results, i.e., the lhs's of vector
4238 reduction statements.
4239 STMT_INFO is the scalar reduction stmt that is being vectorized.
4240 NCOPIES is > 1 in case the vectorization factor (VF) is bigger than the
4241 number of elements that we can fit in a vectype (nunits). In this case
4242 we have to generate more than one vector stmt - i.e - we need to "unroll"
4243 the vector stmt by a factor VF/nunits. For more details see documentation
4244 in vectorizable_operation.
4245 REDUC_FN is the internal function for the epilog reduction.
4246 REDUCTION_PHIS is a list of the phi-nodes that carry the reduction
4247 computation.
4248 REDUC_INDEX is the index of the operand in the right hand side of the
4249 statement that is defined by REDUCTION_PHI.
4250 DOUBLE_REDUC is TRUE if double reduction phi nodes should be handled.
4251 SLP_NODE is an SLP node containing a group of reduction statements. The
4252 first one in this group is STMT_INFO.
4253 INDUC_VAL is for INTEGER_INDUC_COND_REDUCTION the value to use for the case
4254 when the COND_EXPR is never true in the loop. For MAX_EXPR, it needs to
4255 be smaller than any value of the IV in the loop, for MIN_EXPR larger than
4256 any value of the IV in the loop.
4257 INDUC_CODE is the code for epilog reduction if INTEGER_INDUC_COND_REDUCTION.
4258 NEUTRAL_OP is the value given by neutral_op_for_slp_reduction; it is
4259 null if this is not an SLP reduction
4261 This function:
4262 1. Creates the reduction def-use cycles: sets the arguments for
4263 REDUCTION_PHIS:
4264 The loop-entry argument is the vectorized initial-value of the reduction.
4265 The loop-latch argument is taken from VECT_DEFS - the vector of partial
4266 sums.
4267 2. "Reduces" each vector of partial results VECT_DEFS into a single result,
4268 by calling the function specified by REDUC_FN if available, or by
4269 other means (whole-vector shifts or a scalar loop).
4270 The function also creates a new phi node at the loop exit to preserve
4271 loop-closed form, as illustrated below.
4273 The flow at the entry to this function:
4275 loop:
4276 vec_def = phi <null, null> # REDUCTION_PHI
4277 VECT_DEF = vector_stmt # vectorized form of STMT_INFO
4278 s_loop = scalar_stmt # (scalar) STMT_INFO
4279 loop_exit:
4280 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
4281 use <s_out0>
4282 use <s_out0>
4284 The above is transformed by this function into:
4286 loop:
4287 vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI
4288 VECT_DEF = vector_stmt # vectorized form of STMT_INFO
4289 s_loop = scalar_stmt # (scalar) STMT_INFO
4290 loop_exit:
4291 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
4292 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
4293 v_out2 = reduce <v_out1>
4294 s_out3 = extract_field <v_out2, 0>
4295 s_out4 = adjust_result <s_out3>
4296 use <s_out4>
4297 use <s_out4>
4300 static void
4301 vect_create_epilog_for_reduction (vec<tree> vect_defs,
4302 stmt_vec_info stmt_info,
4303 gimple *reduc_def_stmt,
4304 int ncopies, internal_fn reduc_fn,
4305 vec<stmt_vec_info> reduction_phis,
4306 bool double_reduc,
4307 slp_tree slp_node,
4308 slp_instance slp_node_instance,
4309 tree induc_val, enum tree_code induc_code,
4310 tree neutral_op)
4312 stmt_vec_info prev_phi_info;
4313 tree vectype;
4314 machine_mode mode;
4315 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4316 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo), *outer_loop = NULL;
4317 basic_block exit_bb;
4318 tree scalar_dest;
4319 tree scalar_type;
4320 gimple *new_phi = NULL, *phi;
4321 stmt_vec_info phi_info;
4322 gimple_stmt_iterator exit_gsi;
4323 tree vec_dest;
4324 tree new_temp = NULL_TREE, new_dest, new_name, new_scalar_dest;
4325 gimple *epilog_stmt = NULL;
4326 enum tree_code code = gimple_assign_rhs_code (stmt_info->stmt);
4327 gimple *exit_phi;
4328 tree bitsize;
4329 tree adjustment_def = NULL;
4330 tree vec_initial_def = NULL;
4331 tree expr, def, initial_def = NULL;
4332 tree orig_name, scalar_result;
4333 imm_use_iterator imm_iter, phi_imm_iter;
4334 use_operand_p use_p, phi_use_p;
4335 gimple *use_stmt;
4336 stmt_vec_info reduction_phi_info = NULL;
4337 bool nested_in_vect_loop = false;
4338 auto_vec<gimple *> new_phis;
4339 auto_vec<stmt_vec_info> inner_phis;
4340 int j, i;
4341 auto_vec<tree> scalar_results;
4342 unsigned int group_size = 1, k, ratio;
4343 auto_vec<tree> vec_initial_defs;
4344 auto_vec<gimple *> phis;
4345 bool slp_reduc = false;
4346 bool direct_slp_reduc;
4347 tree new_phi_result;
4348 stmt_vec_info inner_phi = NULL;
4349 tree induction_index = NULL_TREE;
4351 if (slp_node)
4352 group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();
4354 if (nested_in_vect_loop_p (loop, stmt_info))
4356 outer_loop = loop;
4357 loop = loop->inner;
4358 nested_in_vect_loop = true;
4359 gcc_assert (!slp_node);
4362 vectype = STMT_VINFO_VECTYPE (stmt_info);
4363 gcc_assert (vectype);
4364 mode = TYPE_MODE (vectype);
4366 /* 1. Create the reduction def-use cycle:
4367 Set the arguments of REDUCTION_PHIS, i.e., transform
4369 loop:
4370 vec_def = phi <null, null> # REDUCTION_PHI
4371 VECT_DEF = vector_stmt # vectorized form of STMT
4374 into:
4376 loop:
4377 vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI
4378 VECT_DEF = vector_stmt # vectorized form of STMT
4381 (in case of SLP, do it for all the phis). */
4383 /* Get the loop-entry arguments. */
4384 enum vect_def_type initial_def_dt = vect_unknown_def_type;
4385 if (slp_node)
4387 unsigned vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
4388 vec_initial_defs.reserve (vec_num);
4389 get_initial_defs_for_reduction (slp_node_instance->reduc_phis,
4390 &vec_initial_defs, vec_num,
4391 REDUC_GROUP_FIRST_ELEMENT (stmt_info),
4392 neutral_op);
4394 else
4396 /* Get at the scalar def before the loop, that defines the initial value
4397 of the reduction variable. */
4398 initial_def = PHI_ARG_DEF_FROM_EDGE (reduc_def_stmt,
4399 loop_preheader_edge (loop));
4400 /* Optimize: if initial_def is for REDUC_MAX smaller than the base
4401 and we can't use zero for induc_val, use initial_def. Similarly
4402 for REDUC_MIN and initial_def larger than the base. */
4403 if (TREE_CODE (initial_def) == INTEGER_CST
4404 && (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
4405 == INTEGER_INDUC_COND_REDUCTION)
4406 && !integer_zerop (induc_val)
4407 && ((induc_code == MAX_EXPR
4408 && tree_int_cst_lt (initial_def, induc_val))
4409 || (induc_code == MIN_EXPR
4410 && tree_int_cst_lt (induc_val, initial_def))))
4411 induc_val = initial_def;
4413 if (double_reduc)
4414 /* In case of double reduction we only create a vector variable
4415 to be put in the reduction phi node. The actual statement
4416 creation is done later in this function. */
4417 vec_initial_def = vect_create_destination_var (initial_def, vectype);
4418 else if (nested_in_vect_loop)
4420 /* Do not use an adjustment def as that case is not supported
4421 correctly if ncopies is not one. */
4422 vect_is_simple_use (initial_def, loop_vinfo, &initial_def_dt);
4423 vec_initial_def = vect_get_vec_def_for_operand (initial_def,
4424 stmt_info);
4426 else
4427 vec_initial_def
4428 = get_initial_def_for_reduction (stmt_info, initial_def,
4429 &adjustment_def);
4430 vec_initial_defs.create (1);
4431 vec_initial_defs.quick_push (vec_initial_def);
4434 /* Set phi nodes arguments. */
4435 FOR_EACH_VEC_ELT (reduction_phis, i, phi_info)
4437 tree vec_init_def = vec_initial_defs[i];
4438 tree def = vect_defs[i];
4439 for (j = 0; j < ncopies; j++)
4441 if (j != 0)
4443 phi_info = STMT_VINFO_RELATED_STMT (phi_info);
4444 if (nested_in_vect_loop)
4445 vec_init_def
4446 = vect_get_vec_def_for_stmt_copy (loop_vinfo, vec_init_def);
4449 /* Set the loop-entry arg of the reduction-phi. */
4451 gphi *phi = as_a <gphi *> (phi_info->stmt);
4452 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
4453 == INTEGER_INDUC_COND_REDUCTION)
4455              /* Initialise the reduction phi to zero.  This prevents non-zero
4456                 initial values from interfering with the reduction op.  */
4457 gcc_assert (ncopies == 1);
4458 gcc_assert (i == 0);
4460 tree vec_init_def_type = TREE_TYPE (vec_init_def);
4461 tree induc_val_vec
4462 = build_vector_from_val (vec_init_def_type, induc_val);
4464 add_phi_arg (phi, induc_val_vec, loop_preheader_edge (loop),
4465 UNKNOWN_LOCATION);
4467 else
4468 add_phi_arg (phi, vec_init_def, loop_preheader_edge (loop),
4469 UNKNOWN_LOCATION);
4471 /* Set the loop-latch arg for the reduction-phi. */
4472 if (j > 0)
4473 def = vect_get_vec_def_for_stmt_copy (loop_vinfo, def);
4475 add_phi_arg (phi, def, loop_latch_edge (loop), UNKNOWN_LOCATION);
4477 if (dump_enabled_p ())
4478 dump_printf_loc (MSG_NOTE, vect_location,
4479 "transform reduction: created def-use cycle: %G%G",
4480 phi, SSA_NAME_DEF_STMT (def));
4484 /* For cond reductions we want to create a new vector (INDEX_COND_EXPR)
4485 which is updated with the current index of the loop for every match of
4486 the original loop's cond_expr (VEC_STMT). This results in a vector
4487 containing the last time the condition passed for that vector lane.
4488 The first match will be a 1 to allow 0 to be used for non-matching
4489 indexes. If there are no matches at all then the vector will be all
4490 zeroes. */
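  /* Editor's illustration (not part of the original source): with VF = 4 and
     two vector iterations, the induction index vectors are {1, 2, 3, 4} and
     {5, 6, 7, 8}.  If the condition only holds in scalar iterations 2 and 7,
     the index vector computed below ends up as {0, 2, 7, 0}: each lane
     records the last (1-based) iteration index at which the condition held,
     and the epilogue later extracts the data value from the lane with the
     maximum index.  */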
4491 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION)
4493 tree indx_before_incr, indx_after_incr;
4494 poly_uint64 nunits_out = TYPE_VECTOR_SUBPARTS (vectype);
4496 gimple *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info)->stmt;
4497 gcc_assert (gimple_assign_rhs_code (vec_stmt) == VEC_COND_EXPR);
4499 int scalar_precision
4500 = GET_MODE_PRECISION (SCALAR_TYPE_MODE (TREE_TYPE (vectype)));
4501 tree cr_index_scalar_type = make_unsigned_type (scalar_precision);
4502 tree cr_index_vector_type = build_vector_type
4503 (cr_index_scalar_type, TYPE_VECTOR_SUBPARTS (vectype));
4505 /* First we create a simple vector induction variable which starts
4506 with the values {1,2,3,...} (SERIES_VECT) and increments by the
4507 vector size (STEP). */
4509 /* Create a {1,2,3,...} vector. */
4510 tree series_vect = build_index_vector (cr_index_vector_type, 1, 1);
4512 /* Create a vector of the step value. */
4513 tree step = build_int_cst (cr_index_scalar_type, nunits_out);
4514 tree vec_step = build_vector_from_val (cr_index_vector_type, step);
4516 /* Create an induction variable. */
4517 gimple_stmt_iterator incr_gsi;
4518 bool insert_after;
4519 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
4520 create_iv (series_vect, vec_step, NULL_TREE, loop, &incr_gsi,
4521 insert_after, &indx_before_incr, &indx_after_incr);
4523 /* Next create a new phi node vector (NEW_PHI_TREE) which starts
4524 filled with zeros (VEC_ZERO). */
4526 /* Create a vector of 0s. */
4527 tree zero = build_zero_cst (cr_index_scalar_type);
4528 tree vec_zero = build_vector_from_val (cr_index_vector_type, zero);
4530 /* Create a vector phi node. */
4531 tree new_phi_tree = make_ssa_name (cr_index_vector_type);
4532 new_phi = create_phi_node (new_phi_tree, loop->header);
4533 loop_vinfo->add_stmt (new_phi);
4534 add_phi_arg (as_a <gphi *> (new_phi), vec_zero,
4535 loop_preheader_edge (loop), UNKNOWN_LOCATION);
4537      /* Now take the condition from the loop's original cond_expr
4538 (VEC_STMT) and produce a new cond_expr (INDEX_COND_EXPR) which for
4539 every match uses values from the induction variable
4540 (INDEX_BEFORE_INCR) otherwise uses values from the phi node
4541 (NEW_PHI_TREE).
4542 Finally, we update the phi (NEW_PHI_TREE) to take the value of
4543 the new cond_expr (INDEX_COND_EXPR). */
4545 /* Duplicate the condition from vec_stmt. */
4546 tree ccompare = unshare_expr (gimple_assign_rhs1 (vec_stmt));
4548      /* Create a conditional, where the condition is taken from vec_stmt
4549         (CCOMPARE), the 'then' value is the induction index
4550         (INDEX_BEFORE_INCR) and the 'else' value is the phi (NEW_PHI_TREE).  */
4551 tree index_cond_expr = build3 (VEC_COND_EXPR, cr_index_vector_type,
4552 ccompare, indx_before_incr,
4553 new_phi_tree);
4554 induction_index = make_ssa_name (cr_index_vector_type);
4555 gimple *index_condition = gimple_build_assign (induction_index,
4556 index_cond_expr);
4557 gsi_insert_before (&incr_gsi, index_condition, GSI_SAME_STMT);
4558 stmt_vec_info index_vec_info = loop_vinfo->add_stmt (index_condition);
4559 STMT_VINFO_VECTYPE (index_vec_info) = cr_index_vector_type;
4561 /* Update the phi with the vec cond. */
4562 add_phi_arg (as_a <gphi *> (new_phi), induction_index,
4563 loop_latch_edge (loop), UNKNOWN_LOCATION);
4566 /* 2. Create epilog code.
4567 The reduction epilog code operates across the elements of the vector
4568 of partial results computed by the vectorized loop.
4569 The reduction epilog code consists of:
4571 step 1: compute the scalar result in a vector (v_out2)
4572 step 2: extract the scalar result (s_out3) from the vector (v_out2)
4573 step 3: adjust the scalar result (s_out3) if needed.
4575          Step 1 can be accomplished using one of the following three schemes:
4576 (scheme 1) using reduc_fn, if available.
4577 (scheme 2) using whole-vector shifts, if available.
4578 (scheme 3) using a scalar loop. In this case steps 1+2 above are
4579 combined.
4581 The overall epilog code looks like this:
4583 s_out0 = phi <s_loop> # original EXIT_PHI
4584 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
4585 v_out2 = reduce <v_out1> # step 1
4586 s_out3 = extract_field <v_out2, 0> # step 2
4587 s_out4 = adjust_result <s_out3> # step 3
4589 (step 3 is optional, and steps 1 and 2 may be combined).
4590 Lastly, the uses of s_out0 are replaced by s_out4. */
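  /* Editor's illustration (not part of the original source): for a sum
     reduction with VF = 4 and partial results v_out1 = {a, b, c, d}, step 1
     computes a + b + c + d (via REDUC_FN, vector shifts, or a scalar loop),
     step 2 extracts that scalar, and step 3 adds back the initial value when
     the "adjust in epilog" scheme of get_initial_def_for_reduction was
     used.  */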
4593 /* 2.1 Create new loop-exit-phis to preserve loop-closed form:
4594 v_out1 = phi <VECT_DEF>
4595 Store them in NEW_PHIS. */
4597 exit_bb = single_exit (loop)->dest;
4598 prev_phi_info = NULL;
4599 new_phis.create (vect_defs.length ());
4600 FOR_EACH_VEC_ELT (vect_defs, i, def)
4602 for (j = 0; j < ncopies; j++)
4604 tree new_def = copy_ssa_name (def);
4605 phi = create_phi_node (new_def, exit_bb);
4606 stmt_vec_info phi_info = loop_vinfo->add_stmt (phi);
4607 if (j == 0)
4608 new_phis.quick_push (phi);
4609 else
4611 def = vect_get_vec_def_for_stmt_copy (loop_vinfo, def);
4612 STMT_VINFO_RELATED_STMT (prev_phi_info) = phi_info;
4615 SET_PHI_ARG_DEF (phi, single_exit (loop)->dest_idx, def);
4616 prev_phi_info = phi_info;
4620 /* The epilogue is created for the outer-loop, i.e., for the loop being
4621 vectorized. Create exit phis for the outer loop. */
4622 if (double_reduc)
4624 loop = outer_loop;
4625 exit_bb = single_exit (loop)->dest;
4626 inner_phis.create (vect_defs.length ());
4627 FOR_EACH_VEC_ELT (new_phis, i, phi)
4629 stmt_vec_info phi_info = loop_vinfo->lookup_stmt (phi);
4630 tree new_result = copy_ssa_name (PHI_RESULT (phi));
4631 gphi *outer_phi = create_phi_node (new_result, exit_bb);
4632 SET_PHI_ARG_DEF (outer_phi, single_exit (loop)->dest_idx,
4633 PHI_RESULT (phi));
4634 prev_phi_info = loop_vinfo->add_stmt (outer_phi);
4635 inner_phis.quick_push (phi_info);
4636 new_phis[i] = outer_phi;
4637 while (STMT_VINFO_RELATED_STMT (phi_info))
4639 phi_info = STMT_VINFO_RELATED_STMT (phi_info);
4640 new_result = copy_ssa_name (PHI_RESULT (phi_info->stmt));
4641 outer_phi = create_phi_node (new_result, exit_bb);
4642 SET_PHI_ARG_DEF (outer_phi, single_exit (loop)->dest_idx,
4643 PHI_RESULT (phi_info->stmt));
4644 stmt_vec_info outer_phi_info = loop_vinfo->add_stmt (outer_phi);
4645 STMT_VINFO_RELATED_STMT (prev_phi_info) = outer_phi_info;
4646 prev_phi_info = outer_phi_info;
4651 exit_gsi = gsi_after_labels (exit_bb);
4653 /* 2.2 Get the relevant tree-code to use in the epilog for schemes 2,3
4654 (i.e. when reduc_fn is not available) and in the final adjustment
4655 code (if needed). Also get the original scalar reduction variable as
4656 defined in the loop. In case STMT is a "pattern-stmt" (i.e. - it
4657 represents a reduction pattern), the tree-code and scalar-def are
4658 taken from the original stmt that the pattern-stmt (STMT) replaces.
4659 Otherwise (it is a regular reduction) - the tree-code and scalar-def
4660 are taken from STMT. */
4662 stmt_vec_info orig_stmt_info = vect_orig_stmt (stmt_info);
4663 if (orig_stmt_info != stmt_info)
4665 /* Reduction pattern */
4666 gcc_assert (STMT_VINFO_IN_PATTERN_P (orig_stmt_info));
4667 gcc_assert (STMT_VINFO_RELATED_STMT (orig_stmt_info) == stmt_info);
4670 code = gimple_assign_rhs_code (orig_stmt_info->stmt);
4671 /* For MINUS_EXPR the initial vector is [init_val,0,...,0], therefore,
4672 partial results are added and not subtracted. */
4673 if (code == MINUS_EXPR)
4674 code = PLUS_EXPR;
4676 scalar_dest = gimple_assign_lhs (orig_stmt_info->stmt);
4677 scalar_type = TREE_TYPE (scalar_dest);
4678 scalar_results.create (group_size);
4679 new_scalar_dest = vect_create_destination_var (scalar_dest, NULL);
4680 bitsize = TYPE_SIZE (scalar_type);
4682 /* In case this is a reduction in an inner-loop while vectorizing an outer
4683 loop - we don't need to extract a single scalar result at the end of the
4684 inner-loop (unless it is double reduction, i.e., the use of reduction is
4685 outside the outer-loop). The final vector of partial results will be used
4686 in the vectorized outer-loop, or reduced to a scalar result at the end of
4687 the outer-loop. */
4688 if (nested_in_vect_loop && !double_reduc)
4689 goto vect_finalize_reduction;
4691 /* SLP reduction without reduction chain, e.g.,
4692 # a1 = phi <a2, a0>
4693 # b1 = phi <b2, b0>
4694 a2 = operation (a1)
4695 b2 = operation (b1) */
4696 slp_reduc = (slp_node && !REDUC_GROUP_FIRST_ELEMENT (stmt_info));
4698 /* True if we should implement SLP_REDUC using native reduction operations
4699 instead of scalar operations. */
4700 direct_slp_reduc = (reduc_fn != IFN_LAST
4701 && slp_reduc
4702 && !TYPE_VECTOR_SUBPARTS (vectype).is_constant ());
4704 /* In case of reduction chain, e.g.,
4705 # a1 = phi <a3, a0>
4706 a2 = operation (a1)
4707 a3 = operation (a2),
4709 we may end up with more than one vector result. Here we reduce them to
4710 one vector. */
4711 if (REDUC_GROUP_FIRST_ELEMENT (stmt_info) || direct_slp_reduc)
4713 tree first_vect = PHI_RESULT (new_phis[0]);
4714 gassign *new_vec_stmt = NULL;
4715 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4716 for (k = 1; k < new_phis.length (); k++)
4718 gimple *next_phi = new_phis[k];
4719 tree second_vect = PHI_RESULT (next_phi);
4720 tree tem = make_ssa_name (vec_dest, new_vec_stmt);
4721 new_vec_stmt = gimple_build_assign (tem, code,
4722 first_vect, second_vect);
4723 gsi_insert_before (&exit_gsi, new_vec_stmt, GSI_SAME_STMT);
4724 first_vect = tem;
4727 new_phi_result = first_vect;
4728 if (new_vec_stmt)
4730 new_phis.truncate (0);
4731 new_phis.safe_push (new_vec_stmt);
4734 /* Likewise if we couldn't use a single defuse cycle. */
4735 else if (ncopies > 1)
4737 gcc_assert (new_phis.length () == 1);
4738 tree first_vect = PHI_RESULT (new_phis[0]);
4739 gassign *new_vec_stmt = NULL;
4740 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4741 stmt_vec_info next_phi_info = loop_vinfo->lookup_stmt (new_phis[0]);
4742 for (int k = 1; k < ncopies; ++k)
4744 next_phi_info = STMT_VINFO_RELATED_STMT (next_phi_info);
4745 tree second_vect = PHI_RESULT (next_phi_info->stmt);
4746 tree tem = make_ssa_name (vec_dest, new_vec_stmt);
4747 new_vec_stmt = gimple_build_assign (tem, code,
4748 first_vect, second_vect);
4749 gsi_insert_before (&exit_gsi, new_vec_stmt, GSI_SAME_STMT);
4750 first_vect = tem;
4752 new_phi_result = first_vect;
4753 new_phis.truncate (0);
4754 new_phis.safe_push (new_vec_stmt);
4756 else
4757 new_phi_result = PHI_RESULT (new_phis[0]);
4759 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION
4760 && reduc_fn != IFN_LAST)
4762 /* For condition reductions, we have a vector (NEW_PHI_RESULT) containing
4763 various data values where the condition matched and another vector
4764 (INDUCTION_INDEX) containing all the indexes of those matches. We
4765 need to extract the last matching index (which will be the index with
4766 highest value) and use this to index into the data vector.
4767 For the case where there were no matches, the data vector will contain
4768 all default values and the index vector will be all zeros. */
4770 /* Get various versions of the type of the vector of indexes. */
4771 tree index_vec_type = TREE_TYPE (induction_index);
4772 gcc_checking_assert (TYPE_UNSIGNED (index_vec_type));
4773 tree index_scalar_type = TREE_TYPE (index_vec_type);
4774 tree index_vec_cmp_type = build_same_sized_truth_vector_type
4775 (index_vec_type);
4777 /* Get an unsigned integer version of the type of the data vector. */
4778 int scalar_precision
4779 = GET_MODE_PRECISION (SCALAR_TYPE_MODE (scalar_type));
4780 tree scalar_type_unsigned = make_unsigned_type (scalar_precision);
4781 tree vectype_unsigned = build_vector_type
4782 (scalar_type_unsigned, TYPE_VECTOR_SUBPARTS (vectype));
4784 /* First we need to create a vector (ZERO_VEC) of zeros and another
4785 vector (MAX_INDEX_VEC) filled with the last matching index, which we
4786 can create using a MAX reduction and then expanding.
4787 In the case where the loop never made any matches, the max index will
4788 be zero. */
4790 /* Vector of {0, 0, 0,...}. */
4791 tree zero_vec = make_ssa_name (vectype);
4792 tree zero_vec_rhs = build_zero_cst (vectype);
4793 gimple *zero_vec_stmt = gimple_build_assign (zero_vec, zero_vec_rhs);
4794 gsi_insert_before (&exit_gsi, zero_vec_stmt, GSI_SAME_STMT);
4796 /* Find maximum value from the vector of found indexes. */
4797 tree max_index = make_ssa_name (index_scalar_type);
4798 gcall *max_index_stmt = gimple_build_call_internal (IFN_REDUC_MAX,
4799 1, induction_index);
4800 gimple_call_set_lhs (max_index_stmt, max_index);
4801 gsi_insert_before (&exit_gsi, max_index_stmt, GSI_SAME_STMT);
4803 /* Vector of {max_index, max_index, max_index,...}. */
4804 tree max_index_vec = make_ssa_name (index_vec_type);
4805 tree max_index_vec_rhs = build_vector_from_val (index_vec_type,
4806 max_index);
4807 gimple *max_index_vec_stmt = gimple_build_assign (max_index_vec,
4808 max_index_vec_rhs);
4809 gsi_insert_before (&exit_gsi, max_index_vec_stmt, GSI_SAME_STMT);
4811 /* Next we compare the new vector (MAX_INDEX_VEC) full of max indexes
4812 with the vector (INDUCTION_INDEX) of found indexes, choosing values
4813 from the data vector (NEW_PHI_RESULT) for matches, 0 (ZERO_VEC)
4814 otherwise. Only one value should match, resulting in a vector
4815 (VEC_COND) with one data value and the rest zeros.
4816 In the case where the loop never made any matches, every index will
4817 match, resulting in a vector with all data values (which will all be
4818 the default value). */
4820 /* Compare the max index vector to the vector of found indexes to find
4821 the position of the max value. */
4822 tree vec_compare = make_ssa_name (index_vec_cmp_type);
4823 gimple *vec_compare_stmt = gimple_build_assign (vec_compare, EQ_EXPR,
4824 induction_index,
4825 max_index_vec);
4826 gsi_insert_before (&exit_gsi, vec_compare_stmt, GSI_SAME_STMT);
4828 /* Use the compare to choose either values from the data vector or
4829 zero. */
4830 tree vec_cond = make_ssa_name (vectype);
4831 gimple *vec_cond_stmt = gimple_build_assign (vec_cond, VEC_COND_EXPR,
4832 vec_compare, new_phi_result,
4833 zero_vec);
4834 gsi_insert_before (&exit_gsi, vec_cond_stmt, GSI_SAME_STMT);
4836 /* Finally we need to extract the data value from the vector (VEC_COND)
4837         into a scalar (MATCHED_DATA_REDUC).  Logically we want to do an OR
4838 reduction, but because this doesn't exist, we can use a MAX reduction
4839 instead. The data value might be signed or a float so we need to cast
4840 it first.
4841 In the case where the loop never made any matches, the data values are
4842 all identical, and so will reduce down correctly. */
4844 /* Make the matched data values unsigned. */
4845 tree vec_cond_cast = make_ssa_name (vectype_unsigned);
4846 tree vec_cond_cast_rhs = build1 (VIEW_CONVERT_EXPR, vectype_unsigned,
4847 vec_cond);
4848 gimple *vec_cond_cast_stmt = gimple_build_assign (vec_cond_cast,
4849 VIEW_CONVERT_EXPR,
4850 vec_cond_cast_rhs);
4851 gsi_insert_before (&exit_gsi, vec_cond_cast_stmt, GSI_SAME_STMT);
4853 /* Reduce down to a scalar value. */
4854 tree data_reduc = make_ssa_name (scalar_type_unsigned);
4855 gcall *data_reduc_stmt = gimple_build_call_internal (IFN_REDUC_MAX,
4856 1, vec_cond_cast);
4857 gimple_call_set_lhs (data_reduc_stmt, data_reduc);
4858 gsi_insert_before (&exit_gsi, data_reduc_stmt, GSI_SAME_STMT);
4860 /* Convert the reduced value back to the result type and set as the
4861 result. */
4862 gimple_seq stmts = NULL;
4863 new_temp = gimple_build (&stmts, VIEW_CONVERT_EXPR, scalar_type,
4864 data_reduc);
4865 gsi_insert_seq_before (&exit_gsi, stmts, GSI_SAME_STMT);
4866 scalar_results.safe_push (new_temp);
4868 else if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION
4869 && reduc_fn == IFN_LAST)
4871 /* Condition reduction without supported IFN_REDUC_MAX. Generate
4872 idx = 0;
4873 idx_val = induction_index[0];
4874 val = data_reduc[0];
4875 for (idx = 0, val = init, i = 0; i < nelts; ++i)
4876 if (induction_index[i] > idx_val)
4877 val = data_reduc[i], idx_val = induction_index[i];
4878 return val; */
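      /* Editor's note (not part of the original source): for a 4-element
         vector the loop below emits four BIT_FIELD_REF extracts of the index
         vector and four of the data vector, a MAX_EXPR on the running index
         for each element except the first and the last, and, for every
         element after the first, a COND_EXPR that keeps the data value
         associated with the larger index.  */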
4880 tree data_eltype = TREE_TYPE (TREE_TYPE (new_phi_result));
4881 tree idx_eltype = TREE_TYPE (TREE_TYPE (induction_index));
4882 unsigned HOST_WIDE_INT el_size = tree_to_uhwi (TYPE_SIZE (idx_eltype));
4883 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (TREE_TYPE (induction_index));
4884 /* Enforced by vectorizable_reduction, which ensures we have target
4885 support before allowing a conditional reduction on variable-length
4886 vectors. */
4887 unsigned HOST_WIDE_INT v_size = el_size * nunits.to_constant ();
4888 tree idx_val = NULL_TREE, val = NULL_TREE;
4889 for (unsigned HOST_WIDE_INT off = 0; off < v_size; off += el_size)
4891 tree old_idx_val = idx_val;
4892 tree old_val = val;
4893 idx_val = make_ssa_name (idx_eltype);
4894 epilog_stmt = gimple_build_assign (idx_val, BIT_FIELD_REF,
4895 build3 (BIT_FIELD_REF, idx_eltype,
4896 induction_index,
4897 bitsize_int (el_size),
4898 bitsize_int (off)));
4899 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4900 val = make_ssa_name (data_eltype);
4901 epilog_stmt = gimple_build_assign (val, BIT_FIELD_REF,
4902 build3 (BIT_FIELD_REF,
4903 data_eltype,
4904 new_phi_result,
4905 bitsize_int (el_size),
4906 bitsize_int (off)));
4907 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4908 if (off != 0)
4910 tree new_idx_val = idx_val;
4911 tree new_val = val;
4912 if (off != v_size - el_size)
4914 new_idx_val = make_ssa_name (idx_eltype);
4915 epilog_stmt = gimple_build_assign (new_idx_val,
4916 MAX_EXPR, idx_val,
4917 old_idx_val);
4918 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4920 new_val = make_ssa_name (data_eltype);
4921 epilog_stmt = gimple_build_assign (new_val,
4922 COND_EXPR,
4923 build2 (GT_EXPR,
4924 boolean_type_node,
4925 idx_val,
4926 old_idx_val),
4927 val, old_val);
4928 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4929 idx_val = new_idx_val;
4930 val = new_val;
4933 /* Convert the reduced value back to the result type and set as the
4934 result. */
4935 gimple_seq stmts = NULL;
4936 val = gimple_convert (&stmts, scalar_type, val);
4937 gsi_insert_seq_before (&exit_gsi, stmts, GSI_SAME_STMT);
4938 scalar_results.safe_push (val);
4941 /* 2.3 Create the reduction code, using one of the three schemes described
4942 above. In SLP we simply need to extract all the elements from the
4943 vector (without reducing them), so we use scalar shifts. */
4944 else if (reduc_fn != IFN_LAST && !slp_reduc)
4946 tree tmp;
4947 tree vec_elem_type;
4949 /* Case 1: Create:
4950 v_out2 = reduc_expr <v_out1> */
4952 if (dump_enabled_p ())
4953 dump_printf_loc (MSG_NOTE, vect_location,
4954 "Reduce using direct vector reduction.\n");
4956 vec_elem_type = TREE_TYPE (TREE_TYPE (new_phi_result));
4957 if (!useless_type_conversion_p (scalar_type, vec_elem_type))
4959 tree tmp_dest
4960 = vect_create_destination_var (scalar_dest, vec_elem_type);
4961 epilog_stmt = gimple_build_call_internal (reduc_fn, 1,
4962 new_phi_result);
4963 gimple_set_lhs (epilog_stmt, tmp_dest);
4964 new_temp = make_ssa_name (tmp_dest, epilog_stmt);
4965 gimple_set_lhs (epilog_stmt, new_temp);
4966 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4968 epilog_stmt = gimple_build_assign (new_scalar_dest, NOP_EXPR,
4969 new_temp);
4971 else
4973 epilog_stmt = gimple_build_call_internal (reduc_fn, 1,
4974 new_phi_result);
4975 gimple_set_lhs (epilog_stmt, new_scalar_dest);
4978 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
4979 gimple_set_lhs (epilog_stmt, new_temp);
4980 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4982 if ((STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
4983 == INTEGER_INDUC_COND_REDUCTION)
4984 && !operand_equal_p (initial_def, induc_val, 0))
4986          /* Earlier we set the initial value to be a vector of induc_val
4987             values.  Check the result and if it is induc_val then replace it
4988             with the original initial value, unless induc_val is already
4989             the same as initial_def.  */
4990 tree zcompare = build2 (EQ_EXPR, boolean_type_node, new_temp,
4991 induc_val);
4993 tmp = make_ssa_name (new_scalar_dest);
4994 epilog_stmt = gimple_build_assign (tmp, COND_EXPR, zcompare,
4995 initial_def, new_temp);
4996 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4997 new_temp = tmp;
5000 scalar_results.safe_push (new_temp);
5002 else if (direct_slp_reduc)
5004 /* Here we create one vector for each of the REDUC_GROUP_SIZE results,
5005 with the elements for other SLP statements replaced with the
5006 neutral value. We can then do a normal reduction on each vector. */
5008 /* Enforced by vectorizable_reduction. */
5009 gcc_assert (new_phis.length () == 1);
5010 gcc_assert (pow2p_hwi (group_size));
5012 slp_tree orig_phis_slp_node = slp_node_instance->reduc_phis;
5013 vec<stmt_vec_info> orig_phis
5014 = SLP_TREE_SCALAR_STMTS (orig_phis_slp_node);
5015 gimple_seq seq = NULL;
5017 /* Build a vector {0, 1, 2, ...}, with the same number of elements
5018 and the same element size as VECTYPE. */
5019 tree index = build_index_vector (vectype, 0, 1);
5020 tree index_type = TREE_TYPE (index);
5021 tree index_elt_type = TREE_TYPE (index_type);
5022 tree mask_type = build_same_sized_truth_vector_type (index_type);
5024 /* Create a vector that, for each element, identifies which of
5025 the REDUC_GROUP_SIZE results should use it. */
5026 tree index_mask = build_int_cst (index_elt_type, group_size - 1);
5027 index = gimple_build (&seq, BIT_AND_EXPR, index_type, index,
5028 build_vector_from_val (index_type, index_mask));
5030 /* Get a neutral vector value. This is simply a splat of the neutral
5031 scalar value if we have one, otherwise the initial scalar value
5032 is itself a neutral value. */
5033 tree vector_identity = NULL_TREE;
5034 if (neutral_op)
5035 vector_identity = gimple_build_vector_from_val (&seq, vectype,
5036 neutral_op);
5037 for (unsigned int i = 0; i < group_size; ++i)
5039              /* If there's no universal neutral value, we can use the
5040 initial scalar value from the original PHI. This is used
5041 for MIN and MAX reduction, for example. */
5042 if (!neutral_op)
5044 tree scalar_value
5045 = PHI_ARG_DEF_FROM_EDGE (orig_phis[i]->stmt,
5046 loop_preheader_edge (loop));
5047 vector_identity = gimple_build_vector_from_val (&seq, vectype,
5048 scalar_value);
5051 /* Calculate the equivalent of:
5053 sel[j] = (index[j] == i);
5055 which selects the elements of NEW_PHI_RESULT that should
5056 be included in the result. */
5057 tree compare_val = build_int_cst (index_elt_type, i);
5058 compare_val = build_vector_from_val (index_type, compare_val);
5059 tree sel = gimple_build (&seq, EQ_EXPR, mask_type,
5060 index, compare_val);
5062 /* Calculate the equivalent of:
5064                 vec = sel ? new_phi_result : vector_identity;
5066 VEC is now suitable for a full vector reduction. */
5067 tree vec = gimple_build (&seq, VEC_COND_EXPR, vectype,
5068 sel, new_phi_result, vector_identity);
5070 /* Do the reduction and convert it to the appropriate type. */
5071 tree scalar = gimple_build (&seq, as_combined_fn (reduc_fn),
5072 TREE_TYPE (vectype), vec);
5073 scalar = gimple_convert (&seq, scalar_type, scalar);
5074 scalar_results.safe_push (scalar);
5076 gsi_insert_seq_before (&exit_gsi, seq, GSI_SAME_STMT);
5078 else
5080 bool reduce_with_shift;
5081 tree vec_temp;
5083 /* COND reductions all do the final reduction with MAX_EXPR
5084 or MIN_EXPR. */
5085 if (code == COND_EXPR)
5087 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
5088 == INTEGER_INDUC_COND_REDUCTION)
5089 code = induc_code;
5090 else if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
5091 == CONST_COND_REDUCTION)
5092 code = STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info);
5093 else
5094 code = MAX_EXPR;
5097 /* See if the target wants to do the final (shift) reduction
5098 in a vector mode of smaller size and first reduce upper/lower
5099 halves against each other. */
5100 enum machine_mode mode1 = mode;
5101 tree vectype1 = vectype;
5102 unsigned sz = tree_to_uhwi (TYPE_SIZE_UNIT (vectype));
5103 unsigned sz1 = sz;
5104 if (!slp_reduc
5105 && (mode1 = targetm.vectorize.split_reduction (mode)) != mode)
5106 sz1 = GET_MODE_SIZE (mode1).to_constant ();
5108 vectype1 = get_vectype_for_scalar_type_and_size (scalar_type, sz1);
5109 reduce_with_shift = have_whole_vector_shift (mode1);
5110 if (!VECTOR_MODE_P (mode1))
5111 reduce_with_shift = false;
5112 else
5114 optab optab = optab_for_tree_code (code, vectype1, optab_default);
5115 if (optab_handler (optab, mode1) == CODE_FOR_nothing)
5116 reduce_with_shift = false;
5119        /* First reduce the vector to the vector size on which the shift
5120           reduction should be done, by combining upper and lower halves.  */
5121 new_temp = new_phi_result;
5122 while (sz > sz1)
5124 gcc_assert (!slp_reduc);
5125 sz /= 2;
5126 vectype1 = get_vectype_for_scalar_type_and_size (scalar_type, sz);
5128 /* The target has to make sure we support lowpart/highpart
5129 extraction, either via direct vector extract or through
5130             an integer mode punning.  */
5131 tree dst1, dst2;
5132 if (convert_optab_handler (vec_extract_optab,
5133 TYPE_MODE (TREE_TYPE (new_temp)),
5134 TYPE_MODE (vectype1))
5135 != CODE_FOR_nothing)
5137 /* Extract sub-vectors directly once vec_extract becomes
5138 a conversion optab. */
5139 dst1 = make_ssa_name (vectype1);
5140 epilog_stmt
5141 = gimple_build_assign (dst1, BIT_FIELD_REF,
5142 build3 (BIT_FIELD_REF, vectype1,
5143 new_temp, TYPE_SIZE (vectype1),
5144 bitsize_int (0)));
5145 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5146 dst2 = make_ssa_name (vectype1);
5147 epilog_stmt
5148 = gimple_build_assign (dst2, BIT_FIELD_REF,
5149 build3 (BIT_FIELD_REF, vectype1,
5150 new_temp, TYPE_SIZE (vectype1),
5151 bitsize_int (sz * BITS_PER_UNIT)));
5152 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5154 else
5156 /* Extract via punning to appropriately sized integer mode
5157 vector. */
5158              tree eltype = build_nonstandard_integer_type (sz * BITS_PER_UNIT, 1);
5160 tree etype = build_vector_type (eltype, 2);
5161 gcc_assert (convert_optab_handler (vec_extract_optab,
5162 TYPE_MODE (etype),
5163 TYPE_MODE (eltype))
5164 != CODE_FOR_nothing);
5165 tree tem = make_ssa_name (etype);
5166 epilog_stmt = gimple_build_assign (tem, VIEW_CONVERT_EXPR,
5167 build1 (VIEW_CONVERT_EXPR,
5168 etype, new_temp));
5169 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5170 new_temp = tem;
5171 tem = make_ssa_name (eltype);
5172 epilog_stmt
5173 = gimple_build_assign (tem, BIT_FIELD_REF,
5174 build3 (BIT_FIELD_REF, eltype,
5175 new_temp, TYPE_SIZE (eltype),
5176 bitsize_int (0)));
5177 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5178 dst1 = make_ssa_name (vectype1);
5179 epilog_stmt = gimple_build_assign (dst1, VIEW_CONVERT_EXPR,
5180 build1 (VIEW_CONVERT_EXPR,
5181 vectype1, tem));
5182 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5183 tem = make_ssa_name (eltype);
5184 epilog_stmt
5185 = gimple_build_assign (tem, BIT_FIELD_REF,
5186 build3 (BIT_FIELD_REF, eltype,
5187 new_temp, TYPE_SIZE (eltype),
5188 bitsize_int (sz * BITS_PER_UNIT)));
5189 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5190 dst2 = make_ssa_name (vectype1);
5191 epilog_stmt = gimple_build_assign (dst2, VIEW_CONVERT_EXPR,
5192 build1 (VIEW_CONVERT_EXPR,
5193 vectype1, tem));
5194 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5197 new_temp = make_ssa_name (vectype1);
5198 epilog_stmt = gimple_build_assign (new_temp, code, dst1, dst2);
5199 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5202 if (reduce_with_shift && !slp_reduc)
5204 int element_bitsize = tree_to_uhwi (bitsize);
5205 /* Enforced by vectorizable_reduction, which disallows SLP reductions
5206 for variable-length vectors and also requires direct target support
5207 for loop reductions. */
5208 int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype1));
5209 int nelements = vec_size_in_bits / element_bitsize;
5210 vec_perm_builder sel;
5211 vec_perm_indices indices;
5213 int elt_offset;
5215 tree zero_vec = build_zero_cst (vectype1);
5216 /* Case 2: Create:
5217 for (offset = nelements/2; offset >= 1; offset/=2)
5219 Create: va' = vec_shift <va, offset>
5220 Create: va = vop <va, va'>
5221 } */
5223 tree rhs;
5225 if (dump_enabled_p ())
5226 dump_printf_loc (MSG_NOTE, vect_location,
5227 "Reduce using vector shifts\n");
5229 mode1 = TYPE_MODE (vectype1);
5230 vec_dest = vect_create_destination_var (scalar_dest, vectype1);
5231 for (elt_offset = nelements / 2;
5232 elt_offset >= 1;
5233 elt_offset /= 2)
5235 calc_vec_perm_mask_for_shift (elt_offset, nelements, &sel);
5236 indices.new_vector (sel, 2, nelements);
5237 tree mask = vect_gen_perm_mask_any (vectype1, indices);
5238 epilog_stmt = gimple_build_assign (vec_dest, VEC_PERM_EXPR,
5239 new_temp, zero_vec, mask);
5240 new_name = make_ssa_name (vec_dest, epilog_stmt);
5241 gimple_assign_set_lhs (epilog_stmt, new_name);
5242 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5244 epilog_stmt = gimple_build_assign (vec_dest, code, new_name,
5245 new_temp);
5246 new_temp = make_ssa_name (vec_dest, epilog_stmt);
5247 gimple_assign_set_lhs (epilog_stmt, new_temp);
5248 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
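/* After the last shift-and-combine iteration the reduction value has
   accumulated in the first element of NEW_TEMP (bit offset zero), which
   is extracted below.  */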
5251 /* 2.4 Extract the final scalar result. Create:
5252 s_out3 = extract_field <v_out2, bitpos> */
5254 if (dump_enabled_p ())
5255 dump_printf_loc (MSG_NOTE, vect_location,
5256 "extract scalar result\n");
5258 rhs = build3 (BIT_FIELD_REF, scalar_type, new_temp,
5259 bitsize, bitsize_zero_node);
5260 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
5261 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
5262 gimple_assign_set_lhs (epilog_stmt, new_temp);
5263 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5264 scalar_results.safe_push (new_temp);
5266 else
5268 /* Case 3: Create:
5269 s = extract_field <v_out2, 0>
5270 for (offset = element_size;
5271 offset < vector_size;
5272 offset += element_size;)
5274 Create: s' = extract_field <v_out2, offset>
5275 Create: s = op <s, s'> // For non SLP cases
5276 } */
5278 if (dump_enabled_p ())
5279 dump_printf_loc (MSG_NOTE, vect_location,
5280 "Reduce using scalar code.\n");
5282 int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype1));
5283 int element_bitsize = tree_to_uhwi (bitsize);
5284 FOR_EACH_VEC_ELT (new_phis, i, new_phi)
5286 int bit_offset;
5287 if (gimple_code (new_phi) == GIMPLE_PHI)
5288 vec_temp = PHI_RESULT (new_phi);
5289 else
5290 vec_temp = gimple_assign_lhs (new_phi);
5291 tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp, bitsize,
5292 bitsize_zero_node);
5293 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
5294 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
5295 gimple_assign_set_lhs (epilog_stmt, new_temp);
5296 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5298 /* In SLP we don't need to apply the reduction operation, so we just
5299 collect the s' values in SCALAR_RESULTS. */
5300 if (slp_reduc)
5301 scalar_results.safe_push (new_temp);
5303 for (bit_offset = element_bitsize;
5304 bit_offset < vec_size_in_bits;
5305 bit_offset += element_bitsize)
5307 tree bitpos = bitsize_int (bit_offset);
5308 tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp,
5309 bitsize, bitpos);
5311 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
5312 new_name = make_ssa_name (new_scalar_dest, epilog_stmt);
5313 gimple_assign_set_lhs (epilog_stmt, new_name);
5314 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5316 if (slp_reduc)
5318 /* In SLP we don't need to apply the reduction operation, so
5319 we just collect the s' values in SCALAR_RESULTS. */
5320 new_temp = new_name;
5321 scalar_results.safe_push (new_name);
5323 else
5325 epilog_stmt = gimple_build_assign (new_scalar_dest, code,
5326 new_name, new_temp);
5327 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
5328 gimple_assign_set_lhs (epilog_stmt, new_temp);
5329 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5334 /* The only case where we need to reduce scalar results in SLP is
5335 unrolling. If the size of SCALAR_RESULTS is greater than
5336 REDUC_GROUP_SIZE, we reduce them by combining elements modulo
5337 REDUC_GROUP_SIZE. */
5338 if (slp_reduc)
5340 tree res, first_res, new_res;
5341 gimple *new_stmt;
5343 /* Reduce multiple scalar results in case of SLP unrolling. */
5344 for (j = group_size; scalar_results.iterate (j, &res);
5345 j++)
5347 first_res = scalar_results[j % group_size];
5348 new_stmt = gimple_build_assign (new_scalar_dest, code,
5349 first_res, res);
5350 new_res = make_ssa_name (new_scalar_dest, new_stmt);
5351 gimple_assign_set_lhs (new_stmt, new_res);
5352 gsi_insert_before (&exit_gsi, new_stmt, GSI_SAME_STMT);
5353 scalar_results[j % group_size] = new_res;
5356 else
5357 /* Not SLP - we have one scalar to keep in SCALAR_RESULTS. */
5358 scalar_results.safe_push (new_temp);
5361 if ((STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
5362 == INTEGER_INDUC_COND_REDUCTION)
5363 && !operand_equal_p (initial_def, induc_val, 0))
5365 /* Earlier we set the initial value to be a vector of induc_val
5366 values. Check the result and if it is induc_val then replace it
5367 with the original initial value, unless induc_val is
5368 the same as initial_def already. */
5369 tree zcompare = build2 (EQ_EXPR, boolean_type_node, new_temp,
5370 induc_val);
5372 tree tmp = make_ssa_name (new_scalar_dest);
5373 epilog_stmt = gimple_build_assign (tmp, COND_EXPR, zcompare,
5374 initial_def, new_temp);
5375 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5376 scalar_results[0] = tmp;
5380 vect_finalize_reduction:
5382 if (double_reduc)
5383 loop = loop->inner;
5385 /* 2.5 Adjust the final result by the initial value of the reduction
5386 variable. (When such adjustment is not needed, then
5387 'adjustment_def' is zero). For example, if code is PLUS we create:
5388 new_temp = loop_exit_def + adjustment_def */
5390 if (adjustment_def)
5392 gcc_assert (!slp_reduc);
5393 if (nested_in_vect_loop)
5395 new_phi = new_phis[0];
5396 gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) == VECTOR_TYPE);
5397 expr = build2 (code, vectype, PHI_RESULT (new_phi), adjustment_def);
5398 new_dest = vect_create_destination_var (scalar_dest, vectype);
5400 else
5402 new_temp = scalar_results[0];
5403 gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) != VECTOR_TYPE);
5404 expr = build2 (code, scalar_type, new_temp, adjustment_def);
5405 new_dest = vect_create_destination_var (scalar_dest, scalar_type);
5408 epilog_stmt = gimple_build_assign (new_dest, expr);
5409 new_temp = make_ssa_name (new_dest, epilog_stmt);
5410 gimple_assign_set_lhs (epilog_stmt, new_temp);
5411 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5412 if (nested_in_vect_loop)
5414 stmt_vec_info epilog_stmt_info = loop_vinfo->add_stmt (epilog_stmt);
5415 STMT_VINFO_RELATED_STMT (epilog_stmt_info)
5416 = STMT_VINFO_RELATED_STMT (loop_vinfo->lookup_stmt (new_phi));
5418 if (!double_reduc)
5419 scalar_results.quick_push (new_temp);
5420 else
5421 scalar_results[0] = new_temp;
5423 else
5424 scalar_results[0] = new_temp;
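/* Make the exit-phi handling below look up the adjusted statement
   instead of the original vector exit phi.  */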
5426 new_phis[0] = epilog_stmt;
5429 /* 2.6 Handle the loop-exit phis. Replace the uses of scalar loop-exit
5430 phis with new adjusted scalar results, i.e., replace use <s_out0>
5431 with use <s_out4>.
5433 Transform:
5434 loop_exit:
5435 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
5436 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
5437 v_out2 = reduce <v_out1>
5438 s_out3 = extract_field <v_out2, 0>
5439 s_out4 = adjust_result <s_out3>
5440 use <s_out0>
5441 use <s_out0>
5443 into:
5445 loop_exit:
5446 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
5447 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
5448 v_out2 = reduce <v_out1>
5449 s_out3 = extract_field <v_out2, 0>
5450 s_out4 = adjust_result <s_out3>
5451 use <s_out4>
5452 use <s_out4> */
5455 /* In an SLP reduction chain we reduce the vector results into one vector
5456 if necessary, hence we set REDUC_GROUP_SIZE to 1 here. SCALAR_DEST is the
5457 LHS of the last stmt in the reduction chain, since we are looking for
5458 the loop exit phi node. */
5459 if (REDUC_GROUP_FIRST_ELEMENT (stmt_info))
5461 stmt_vec_info dest_stmt_info
5462 = vect_orig_stmt (SLP_TREE_SCALAR_STMTS (slp_node)[group_size - 1]);
5463 scalar_dest = gimple_assign_lhs (dest_stmt_info->stmt);
5464 group_size = 1;
5467 /* In SLP we may have several statements in NEW_PHIS and REDUCTION_PHIS (in
5468 case that REDUC_GROUP_SIZE is greater than vectorization factor).
5469 Therefore, we need to match SCALAR_RESULTS with corresponding statements.
5470 The first (REDUC_GROUP_SIZE / number of new vector stmts) scalar results
5471 correspond to the first vector stmt, etc.
5472 (RATIO is equal to (REDUC_GROUP_SIZE / number of new vector stmts)). */
5473 if (group_size > new_phis.length ())
5475 ratio = group_size / new_phis.length ();
5476 gcc_assert (!(group_size % new_phis.length ()));
5478 else
5479 ratio = 1;
5481 stmt_vec_info epilog_stmt_info = NULL;
5482 for (k = 0; k < group_size; k++)
5484 if (k % ratio == 0)
5486 epilog_stmt_info = loop_vinfo->lookup_stmt (new_phis[k / ratio]);
5487 reduction_phi_info = reduction_phis[k / ratio];
5488 if (double_reduc)
5489 inner_phi = inner_phis[k / ratio];
5492 if (slp_reduc)
5494 stmt_vec_info scalar_stmt_info = SLP_TREE_SCALAR_STMTS (slp_node)[k];
5496 orig_stmt_info = STMT_VINFO_RELATED_STMT (scalar_stmt_info);
5497 /* SLP statements can't participate in patterns. */
5498 gcc_assert (!orig_stmt_info);
5499 scalar_dest = gimple_assign_lhs (scalar_stmt_info->stmt);
5502 phis.create (3);
5503 /* Find the loop-closed-use at the loop exit of the original scalar
5504 result. (The reduction result is expected to have two immediate uses -
5505 one at the latch block, and one at the loop exit). */
5506 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
5507 if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p)))
5508 && !is_gimple_debug (USE_STMT (use_p)))
5509 phis.safe_push (USE_STMT (use_p));
5511 /* While we expect to have found an exit_phi because of loop-closed-ssa
5512 form we can end up without one if the scalar cycle is dead. */
5514 FOR_EACH_VEC_ELT (phis, i, exit_phi)
5516 if (outer_loop)
5518 stmt_vec_info exit_phi_vinfo
5519 = loop_vinfo->lookup_stmt (exit_phi);
5520 gphi *vect_phi;
5522 /* FORNOW. Currently not supporting the case that an inner-loop
5523 reduction is not used in the outer-loop (but only outside the
5524 outer-loop), unless it is a double reduction. */
5525 gcc_assert ((STMT_VINFO_RELEVANT_P (exit_phi_vinfo)
5526 && !STMT_VINFO_LIVE_P (exit_phi_vinfo))
5527 || double_reduc);
5529 if (double_reduc)
5530 STMT_VINFO_VEC_STMT (exit_phi_vinfo) = inner_phi;
5531 else
5532 STMT_VINFO_VEC_STMT (exit_phi_vinfo) = epilog_stmt_info;
5533 if (!double_reduc
5534 || STMT_VINFO_DEF_TYPE (exit_phi_vinfo)
5535 != vect_double_reduction_def)
5536 continue;
5538 /* Handle double reduction:
5540 stmt1: s1 = phi <s0, s2> - double reduction phi (outer loop)
5541 stmt2: s3 = phi <s1, s4> - (regular) reduc phi (inner loop)
5542 stmt3: s4 = use (s3) - (regular) reduc stmt (inner loop)
5543 stmt4: s2 = phi <s4> - double reduction stmt (outer loop)
5545 At that point the regular reduction (stmt2 and stmt3) is
5546 already vectorized, as well as the exit phi node, stmt4.
5547 Here we vectorize the phi node of double reduction, stmt1, and
5548 update all relevant statements. */
5550 /* Go through all the uses of s2 to find double reduction phi
5551 node, i.e., stmt1 above. */
5552 orig_name = PHI_RESULT (exit_phi);
5553 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
5555 stmt_vec_info use_stmt_vinfo;
5556 tree vect_phi_init, preheader_arg, vect_phi_res;
5557 basic_block bb = gimple_bb (use_stmt);
5559 /* Check that USE_STMT is really a double reduction phi
5560 node. */
5561 if (gimple_code (use_stmt) != GIMPLE_PHI
5562 || gimple_phi_num_args (use_stmt) != 2
5563 || bb->loop_father != outer_loop)
5564 continue;
5565 use_stmt_vinfo = loop_vinfo->lookup_stmt (use_stmt);
5566 if (!use_stmt_vinfo
5567 || STMT_VINFO_DEF_TYPE (use_stmt_vinfo)
5568 != vect_double_reduction_def)
5569 continue;
5571 /* Create vector phi node for double reduction:
5572 vs1 = phi <vs0, vs2>
5573 vs1 was created previously in this function by a call to
5574 vect_get_vec_def_for_operand and is stored in
5575 vec_initial_def;
5576 vs2 is defined by INNER_PHI, the vectorized EXIT_PHI;
5577 vs0 is created here. */
5579 /* Create vector phi node. */
5580 vect_phi = create_phi_node (vec_initial_def, bb);
5581 loop_vec_info_for_loop (outer_loop)->add_stmt (vect_phi);
5583 /* Create vs0 - initial def of the double reduction phi. */
5584 preheader_arg = PHI_ARG_DEF_FROM_EDGE (use_stmt,
5585 loop_preheader_edge (outer_loop));
5586 vect_phi_init = get_initial_def_for_reduction
5587 (stmt_info, preheader_arg, NULL);
5589 /* Update phi node arguments with vs0 and vs2. */
5590 add_phi_arg (vect_phi, vect_phi_init,
5591 loop_preheader_edge (outer_loop),
5592 UNKNOWN_LOCATION);
5593 add_phi_arg (vect_phi, PHI_RESULT (inner_phi->stmt),
5594 loop_latch_edge (outer_loop), UNKNOWN_LOCATION);
5595 if (dump_enabled_p ())
5596 dump_printf_loc (MSG_NOTE, vect_location,
5597 "created double reduction phi node: %G",
5598 vect_phi);
5600 vect_phi_res = PHI_RESULT (vect_phi);
5602 /* Replace the use, i.e., set the correct vs1 in the regular
5603 reduction phi node. FORNOW, NCOPIES is always 1, so the
5604 loop is redundant. */
5605 stmt_vec_info use_info = reduction_phi_info;
5606 for (j = 0; j < ncopies; j++)
5608 edge pr_edge = loop_preheader_edge (loop);
5609 SET_PHI_ARG_DEF (as_a <gphi *> (use_info->stmt),
5610 pr_edge->dest_idx, vect_phi_res);
5611 use_info = STMT_VINFO_RELATED_STMT (use_info);
5617 phis.release ();
5618 if (nested_in_vect_loop)
5620 if (double_reduc)
5621 loop = outer_loop;
5622 else
5623 continue;
5626 phis.create (3);
5627 /* Find the loop-closed-use at the loop exit of the original scalar
5628 result. (The reduction result is expected to have two immediate uses,
5629 one at the latch block, and one at the loop exit). For double
5630 reductions we are looking for exit phis of the outer loop. */
5631 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
5633 if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p))))
5635 if (!is_gimple_debug (USE_STMT (use_p)))
5636 phis.safe_push (USE_STMT (use_p));
5638 else
5640 if (double_reduc && gimple_code (USE_STMT (use_p)) == GIMPLE_PHI)
5642 tree phi_res = PHI_RESULT (USE_STMT (use_p));
5644 FOR_EACH_IMM_USE_FAST (phi_use_p, phi_imm_iter, phi_res)
5646 if (!flow_bb_inside_loop_p (loop,
5647 gimple_bb (USE_STMT (phi_use_p)))
5648 && !is_gimple_debug (USE_STMT (phi_use_p)))
5649 phis.safe_push (USE_STMT (phi_use_p));
5655 FOR_EACH_VEC_ELT (phis, i, exit_phi)
5657 /* Replace the uses: */
5658 orig_name = PHI_RESULT (exit_phi);
5659 scalar_result = scalar_results[k];
5660 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
5661 FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
5662 SET_USE (use_p, scalar_result);
5665 phis.release ();
5669 /* Return a vector of type VECTYPE that is equal to the vector select
5670 operation "MASK ? VEC : IDENTITY". Insert the select statements
5671 before GSI. */
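/* This is used in fully-masked loops so that inactive lanes take the
   identity value and therefore do not change the reduction result.  */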
5673 static tree
5674 merge_with_identity (gimple_stmt_iterator *gsi, tree mask, tree vectype,
5675 tree vec, tree identity)
5677 tree cond = make_temp_ssa_name (vectype, NULL, "cond");
5678 gimple *new_stmt = gimple_build_assign (cond, VEC_COND_EXPR,
5679 mask, vec, identity);
5680 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
5681 return cond;
5684 /* Successively apply CODE to each element of VECTOR_RHS, in left-to-right
5685 order, starting with LHS. Insert the extraction statements before GSI and
5686 associate the new scalar SSA names with variable SCALAR_DEST.
5687 Return the SSA name for the result. */
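/* For example, for a four-element vector V this emits
     lhs = (((LHS CODE V[0]) CODE V[1]) CODE V[2]) CODE V[3]
   using one BIT_FIELD_REF extraction per element.  */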
5689 static tree
5690 vect_expand_fold_left (gimple_stmt_iterator *gsi, tree scalar_dest,
5691 tree_code code, tree lhs, tree vector_rhs)
5693 tree vectype = TREE_TYPE (vector_rhs);
5694 tree scalar_type = TREE_TYPE (vectype);
5695 tree bitsize = TYPE_SIZE (scalar_type);
5696 unsigned HOST_WIDE_INT vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype));
5697 unsigned HOST_WIDE_INT element_bitsize = tree_to_uhwi (bitsize);
5699 for (unsigned HOST_WIDE_INT bit_offset = 0;
5700 bit_offset < vec_size_in_bits;
5701 bit_offset += element_bitsize)
5703 tree bitpos = bitsize_int (bit_offset);
5704 tree rhs = build3 (BIT_FIELD_REF, scalar_type, vector_rhs,
5705 bitsize, bitpos);
5707 gassign *stmt = gimple_build_assign (scalar_dest, rhs);
5708 rhs = make_ssa_name (scalar_dest, stmt);
5709 gimple_assign_set_lhs (stmt, rhs);
5710 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
5712 stmt = gimple_build_assign (scalar_dest, code, lhs, rhs);
5713 tree new_name = make_ssa_name (scalar_dest, stmt);
5714 gimple_assign_set_lhs (stmt, new_name);
5715 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
5716 lhs = new_name;
5718 return lhs;
5721 /* Perform an in-order reduction (FOLD_LEFT_REDUCTION). STMT_INFO is the
5722 statement that sets the live-out value. REDUC_DEF_STMT is the phi
5723 statement. CODE is the operation performed by STMT_INFO and OPS are
5724 its scalar operands. REDUC_INDEX is the index of the operand in
5725 OPS that is set by REDUC_DEF_STMT. REDUC_FN is the function that
5726 implements in-order reduction, or IFN_LAST if we should open-code it.
5727 VECTYPE_IN is the type of the vector input. MASKS specifies the masks
5728 that should be used to control the operation in a fully-masked loop. */
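/* A fold-left reduction preserves the original left-to-right association:
   the scalar accumulator coming from REDUC_DEF_STMT is updated once per
   vector operand, either through REDUC_FN or by open-coding the fold with
   vect_expand_fold_left.  */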
5730 static bool
5731 vectorize_fold_left_reduction (stmt_vec_info stmt_info,
5732 gimple_stmt_iterator *gsi,
5733 stmt_vec_info *vec_stmt, slp_tree slp_node,
5734 gimple *reduc_def_stmt,
5735 tree_code code, internal_fn reduc_fn,
5736 tree ops[3], tree vectype_in,
5737 int reduc_index, vec_loop_masks *masks)
5739 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5740 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
5741 tree vectype_out = STMT_VINFO_VECTYPE (stmt_info);
5742 stmt_vec_info new_stmt_info = NULL;
5744 int ncopies;
5745 if (slp_node)
5746 ncopies = 1;
5747 else
5748 ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
5750 gcc_assert (!nested_in_vect_loop_p (loop, stmt_info));
5751 gcc_assert (ncopies == 1);
5752 gcc_assert (TREE_CODE_LENGTH (code) == binary_op);
5753 gcc_assert (reduc_index == (code == MINUS_EXPR ? 0 : 1));
5754 gcc_assert (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
5755 == FOLD_LEFT_REDUCTION);
5757 if (slp_node)
5758 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (vectype_out),
5759 TYPE_VECTOR_SUBPARTS (vectype_in)));
5761 tree op0 = ops[1 - reduc_index];
5763 int group_size = 1;
5764 stmt_vec_info scalar_dest_def_info;
5765 auto_vec<tree> vec_oprnds0;
5766 if (slp_node)
5768 vect_get_vec_defs (op0, NULL_TREE, stmt_info, &vec_oprnds0, NULL,
5769 slp_node);
5770 group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();
5771 scalar_dest_def_info = SLP_TREE_SCALAR_STMTS (slp_node)[group_size - 1];
5773 else
5775 tree loop_vec_def0 = vect_get_vec_def_for_operand (op0, stmt_info);
5776 vec_oprnds0.create (1);
5777 vec_oprnds0.quick_push (loop_vec_def0);
5778 scalar_dest_def_info = stmt_info;
5781 tree scalar_dest = gimple_assign_lhs (scalar_dest_def_info->stmt);
5782 tree scalar_type = TREE_TYPE (scalar_dest);
5783 tree reduc_var = gimple_phi_result (reduc_def_stmt);
5785 int vec_num = vec_oprnds0.length ();
5786 gcc_assert (vec_num == 1 || slp_node);
5787 tree vec_elem_type = TREE_TYPE (vectype_out);
5788 gcc_checking_assert (useless_type_conversion_p (scalar_type, vec_elem_type));
5790 tree vector_identity = NULL_TREE;
5791 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
5792 vector_identity = build_zero_cst (vectype_out);
5794 tree scalar_dest_var = vect_create_destination_var (scalar_dest, NULL);
5795 int i;
5796 tree def0;
5797 FOR_EACH_VEC_ELT (vec_oprnds0, i, def0)
5799 gimple *new_stmt;
5800 tree mask = NULL_TREE;
5801 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
5802 mask = vect_get_loop_mask (gsi, masks, vec_num, vectype_in, i);
5804 /* Handle MINUS by adding the negative. */
5805 if (reduc_fn != IFN_LAST && code == MINUS_EXPR)
5807 tree negated = make_ssa_name (vectype_out);
5808 new_stmt = gimple_build_assign (negated, NEGATE_EXPR, def0);
5809 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
5810 def0 = negated;
5813 if (mask)
5814 def0 = merge_with_identity (gsi, mask, vectype_out, def0,
5815 vector_identity);
5817 /* On the first iteration the input is simply the scalar phi
5818 result, and for subsequent iterations it is the output of
5819 the preceding operation. */
5820 if (reduc_fn != IFN_LAST)
5822 new_stmt = gimple_build_call_internal (reduc_fn, 2, reduc_var, def0);
5823 /* For chained SLP reductions the output of the previous reduction
5824 operation serves as the input of the next. For the final statement
5825 the output cannot be a temporary - we reuse the original
5826 scalar destination of the last statement. */
5827 if (i != vec_num - 1)
5829 gimple_set_lhs (new_stmt, scalar_dest_var);
5830 reduc_var = make_ssa_name (scalar_dest_var, new_stmt);
5831 gimple_set_lhs (new_stmt, reduc_var);
5834 else
5836 reduc_var = vect_expand_fold_left (gsi, scalar_dest_var, code,
5837 reduc_var, def0);
5838 new_stmt = SSA_NAME_DEF_STMT (reduc_var);
5839 /* Remove the statement, so that we can use the same code paths
5840 as for statements that we've just created. */
5841 gimple_stmt_iterator tmp_gsi = gsi_for_stmt (new_stmt);
5842 gsi_remove (&tmp_gsi, false);
5845 if (i == vec_num - 1)
5847 gimple_set_lhs (new_stmt, scalar_dest);
5848 new_stmt_info = vect_finish_replace_stmt (scalar_dest_def_info,
5849 new_stmt);
5851 else
5852 new_stmt_info = vect_finish_stmt_generation (scalar_dest_def_info,
5853 new_stmt, gsi);
5855 if (slp_node)
5856 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
5859 if (!slp_node)
5860 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
5862 return true;
5865 /* Function is_nonwrapping_integer_induction.
5867 Check if STMT_VINFO (which is part of loop LOOP) is an induction that
5868 both increments and does not cause overflow. */
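/* Concretely, the code below checks that BASE + STEP * MAX_NITERS fits
   in the precision of the induction's type, unless overflow on that type
   is undefined anyway.  */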
5870 static bool
5871 is_nonwrapping_integer_induction (stmt_vec_info stmt_vinfo, struct loop *loop)
5873 gphi *phi = as_a <gphi *> (stmt_vinfo->stmt);
5874 tree base = STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo);
5875 tree step = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo);
5876 tree lhs_type = TREE_TYPE (gimple_phi_result (phi));
5877 widest_int ni, max_loop_value, lhs_max;
5878 wi::overflow_type overflow = wi::OVF_NONE;
5880 /* Make sure the loop is integer based. */
5881 if (TREE_CODE (base) != INTEGER_CST
5882 || TREE_CODE (step) != INTEGER_CST)
5883 return false;
5885 /* Check that the max size of the loop will not wrap. */
5887 if (TYPE_OVERFLOW_UNDEFINED (lhs_type))
5888 return true;
5890 if (! max_stmt_executions (loop, &ni))
5891 return false;
5893 max_loop_value = wi::mul (wi::to_widest (step), ni, TYPE_SIGN (lhs_type),
5894 &overflow);
5895 if (overflow)
5896 return false;
5898 max_loop_value = wi::add (wi::to_widest (base), max_loop_value,
5899 TYPE_SIGN (lhs_type), &overflow);
5900 if (overflow)
5901 return false;
5903 return (wi::min_precision (max_loop_value, TYPE_SIGN (lhs_type))
5904 <= TYPE_PRECISION (lhs_type));
5907 /* Function vectorizable_reduction.
5909 Check if STMT_INFO performs a reduction operation that can be vectorized.
5910 If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
5911 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
5912 Return true if STMT_INFO is vectorizable in this way.
5914 This function also handles reduction idioms (patterns) that have been
5915 recognized in advance during vect_pattern_recog. In this case, STMT_INFO
5916 may be of this form:
5917 X = pattern_expr (arg0, arg1, ..., X)
5918 and its STMT_VINFO_RELATED_STMT points to the last stmt in the original
5919 sequence that had been detected and replaced by the pattern-stmt
5920 (STMT_INFO).
5922 This function also handles reduction of condition expressions, for example:
5923 for (int i = 0; i < N; i++)
5924 if (a[i] < value)
5925 last = a[i];
5926 This is handled by vectorising the loop and creating an additional vector
5927 containing the loop indexes for which "a[i] < value" was true. In the
5928 function epilogue this is reduced to a single max value and then used to
5929 index into the vector of results.
5931 In some cases of reduction patterns, the type of the reduction variable X is
5932 different than the type of the other arguments of STMT_INFO.
5933 In such cases, the vectype that is used when transforming STMT_INFO into
5934 a vector stmt is different than the vectype that is used to determine the
5935 vectorization factor, because it consists of a different number of elements
5936 than the actual number of elements that are being operated upon in parallel.
5938 For example, consider an accumulation of shorts into an int accumulator.
5939 On some targets it's possible to vectorize this pattern operating on 8
5940 shorts at a time (hence, the vectype for purposes of determining the
5941 vectorization factor should be V8HI); on the other hand, the vectype that
5942 is used to create the vector form is actually V4SI (the type of the result).
5944 Upon entry to this function, STMT_VINFO_VECTYPE records the vectype that
5945 indicates what is the actual level of parallelism (V8HI in the example), so
5946 that the right vectorization factor would be derived. This vectype
5947 corresponds to the type of arguments to the reduction stmt, and should *NOT*
5948 be used to create the vectorized stmt. The right vectype for the vectorized
5949 stmt is obtained from the type of the result X:
5950 get_vectype_for_scalar_type (TREE_TYPE (X))
5952 This means that, contrary to "regular" reductions (or "regular" stmts in
5953 general), the following equation:
5954 STMT_VINFO_VECTYPE == get_vectype_for_scalar_type (TREE_TYPE (X))
5955 does *NOT* necessarily hold for reduction patterns. */
5957 bool
5958 vectorizable_reduction (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
5959 stmt_vec_info *vec_stmt, slp_tree slp_node,
5960 slp_instance slp_node_instance,
5961 stmt_vector_for_cost *cost_vec)
5963 tree vec_dest;
5964 tree scalar_dest;
5965 tree vectype_out = STMT_VINFO_VECTYPE (stmt_info);
5966 tree vectype_in = NULL_TREE;
5967 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5968 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
5969 enum tree_code code, orig_code;
5970 internal_fn reduc_fn;
5971 machine_mode vec_mode;
5972 int op_type;
5973 optab optab;
5974 tree new_temp = NULL_TREE;
5975 enum vect_def_type dt, cond_reduc_dt = vect_unknown_def_type;
5976 stmt_vec_info cond_stmt_vinfo = NULL;
5977 enum tree_code cond_reduc_op_code = ERROR_MARK;
5978 tree scalar_type;
5979 bool is_simple_use;
5980 int i;
5981 int ncopies;
5982 int epilog_copies;
5983 stmt_vec_info prev_stmt_info, prev_phi_info;
5984 bool single_defuse_cycle = false;
5985 stmt_vec_info new_stmt_info = NULL;
5986 int j;
5987 tree ops[3];
5988 enum vect_def_type dts[3];
5989 bool nested_cycle = false, found_nested_cycle_def = false;
5990 bool double_reduc = false;
5991 basic_block def_bb;
5992 struct loop * def_stmt_loop;
5993 tree def_arg;
5994 auto_vec<tree> vec_oprnds0;
5995 auto_vec<tree> vec_oprnds1;
5996 auto_vec<tree> vec_oprnds2;
5997 auto_vec<tree> vect_defs;
5998 auto_vec<stmt_vec_info> phis;
5999 int vec_num;
6000 tree def0, tem;
6001 tree cr_index_scalar_type = NULL_TREE, cr_index_vector_type = NULL_TREE;
6002 tree cond_reduc_val = NULL_TREE;
6004 /* Make sure it was already recognized as a reduction computation. */
6005 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_reduction_def
6006 && STMT_VINFO_DEF_TYPE (stmt_info) != vect_nested_cycle)
6007 return false;
6009 if (nested_in_vect_loop_p (loop, stmt_info))
6011 loop = loop->inner;
6012 nested_cycle = true;
6015 if (REDUC_GROUP_FIRST_ELEMENT (stmt_info))
6016 gcc_assert (slp_node
6017 && REDUC_GROUP_FIRST_ELEMENT (stmt_info) == stmt_info);
6019 if (gphi *phi = dyn_cast <gphi *> (stmt_info->stmt))
6021 tree phi_result = gimple_phi_result (phi);
6022 /* Analysis is fully done on the reduction stmt invocation. */
6023 if (! vec_stmt)
6025 if (slp_node)
6026 slp_node_instance->reduc_phis = slp_node;
6028 STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
6029 return true;
6032 if (STMT_VINFO_REDUC_TYPE (stmt_info) == FOLD_LEFT_REDUCTION)
6033 /* Leave the scalar phi in place. Note that checking
6034 STMT_VINFO_VEC_REDUCTION_TYPE (as below) only works
6035 for reductions involving a single statement. */
6036 return true;
6038 stmt_vec_info reduc_stmt_info = STMT_VINFO_REDUC_DEF (stmt_info);
6039 reduc_stmt_info = vect_stmt_to_vectorize (reduc_stmt_info);
6041 if (STMT_VINFO_VEC_REDUCTION_TYPE (reduc_stmt_info)
6042 == EXTRACT_LAST_REDUCTION)
6043 /* Leave the scalar phi in place. */
6044 return true;
6046 gassign *reduc_stmt = as_a <gassign *> (reduc_stmt_info->stmt);
6047 for (unsigned k = 1; k < gimple_num_ops (reduc_stmt); ++k)
6049 tree op = gimple_op (reduc_stmt, k);
6050 if (op == phi_result)
6051 continue;
6052 if (k == 1
6053 && gimple_assign_rhs_code (reduc_stmt) == COND_EXPR)
6054 continue;
6055 if (!vectype_in
6056 || (GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (vectype_in)))
6057 < GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (op)))))
6058 vectype_in = get_vectype_for_scalar_type (TREE_TYPE (op));
6059 break;
6061 gcc_assert (vectype_in);
6063 if (slp_node)
6064 ncopies = 1;
6065 else
6066 ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
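/* Detect a single def-use cycle: the PHI result is used only by the
   reduction stmt itself, so with ncopies > 1 the copies can be chained
   (see the detailed comment before the equivalent check further down).  */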
6068 stmt_vec_info use_stmt_info;
6069 if (ncopies > 1
6070 && STMT_VINFO_RELEVANT (reduc_stmt_info) <= vect_used_only_live
6071 && (use_stmt_info = loop_vinfo->lookup_single_use (phi_result))
6072 && vect_stmt_to_vectorize (use_stmt_info) == reduc_stmt_info)
6073 single_defuse_cycle = true;
6075 /* Create the destination vector */
6076 scalar_dest = gimple_assign_lhs (reduc_stmt);
6077 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
6079 if (slp_node)
6080 /* The size vect_schedule_slp_instance computes is off for us. */
6081 vec_num = vect_get_num_vectors
6082 (LOOP_VINFO_VECT_FACTOR (loop_vinfo)
6083 * SLP_TREE_SCALAR_STMTS (slp_node).length (),
6084 vectype_in);
6085 else
6086 vec_num = 1;
6088 /* Generate the reduction PHIs upfront. */
6089 prev_phi_info = NULL;
6090 for (j = 0; j < ncopies; j++)
6092 if (j == 0 || !single_defuse_cycle)
6094 for (i = 0; i < vec_num; i++)
6096 /* Create the reduction-phi that defines the reduction
6097 operand. */
6098 gimple *new_phi = create_phi_node (vec_dest, loop->header);
6099 stmt_vec_info new_phi_info = loop_vinfo->add_stmt (new_phi);
6101 if (slp_node)
6102 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_phi_info);
6103 else
6105 if (j == 0)
6106 STMT_VINFO_VEC_STMT (stmt_info)
6107 = *vec_stmt = new_phi_info;
6108 else
6109 STMT_VINFO_RELATED_STMT (prev_phi_info) = new_phi_info;
6110 prev_phi_info = new_phi_info;
6116 return true;
6119 /* 1. Is vectorizable reduction? */
6120 /* Not supportable if the reduction variable is used in the loop, unless
6121 it's a reduction chain. */
6122 if (STMT_VINFO_RELEVANT (stmt_info) > vect_used_in_outer
6123 && !REDUC_GROUP_FIRST_ELEMENT (stmt_info))
6124 return false;
6126 /* Reductions that are not used even in an enclosing outer-loop
6127 are expected to be "live" (used out of the loop). */
6128 if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_scope
6129 && !STMT_VINFO_LIVE_P (stmt_info))
6130 return false;
6132 /* 2. Has this been recognized as a reduction pattern?
6134 Check if STMT represents a pattern that has been recognized
6135 in earlier analysis stages. For stmts that represent a pattern,
6136 the STMT_VINFO_RELATED_STMT field records the last stmt in
6137 the original sequence that constitutes the pattern. */
6139 stmt_vec_info orig_stmt_info = STMT_VINFO_RELATED_STMT (stmt_info);
6140 if (orig_stmt_info)
6142 gcc_assert (STMT_VINFO_IN_PATTERN_P (orig_stmt_info));
6143 gcc_assert (!STMT_VINFO_IN_PATTERN_P (stmt_info));
6146 /* 3. Check the operands of the operation. The first operands are defined
6147 inside the loop body. The last operand is the reduction variable,
6148 which is defined by the loop-header-phi. */
6150 gassign *stmt = as_a <gassign *> (stmt_info->stmt);
6152 /* Flatten RHS. */
6153 switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
6155 case GIMPLE_BINARY_RHS:
6156 code = gimple_assign_rhs_code (stmt);
6157 op_type = TREE_CODE_LENGTH (code);
6158 gcc_assert (op_type == binary_op);
6159 ops[0] = gimple_assign_rhs1 (stmt);
6160 ops[1] = gimple_assign_rhs2 (stmt);
6161 break;
6163 case GIMPLE_TERNARY_RHS:
6164 code = gimple_assign_rhs_code (stmt);
6165 op_type = TREE_CODE_LENGTH (code);
6166 gcc_assert (op_type == ternary_op);
6167 ops[0] = gimple_assign_rhs1 (stmt);
6168 ops[1] = gimple_assign_rhs2 (stmt);
6169 ops[2] = gimple_assign_rhs3 (stmt);
6170 break;
6172 case GIMPLE_UNARY_RHS:
6173 return false;
6175 default:
6176 gcc_unreachable ();
6179 if (code == COND_EXPR && slp_node)
6180 return false;
6182 scalar_dest = gimple_assign_lhs (stmt);
6183 scalar_type = TREE_TYPE (scalar_dest);
6184 if (!POINTER_TYPE_P (scalar_type) && !INTEGRAL_TYPE_P (scalar_type)
6185 && !SCALAR_FLOAT_TYPE_P (scalar_type))
6186 return false;
6188 /* Do not try to vectorize bit-precision reductions. */
6189 if (!type_has_mode_precision_p (scalar_type))
6190 return false;
6192 /* All uses but the last are expected to be defined in the loop.
6193 The last use is the reduction variable. In case of a nested cycle this
6194 assumption does not hold: we use reduc_index to record the index of the
6195 reduction variable. */
6196 stmt_vec_info reduc_def_info = NULL;
6197 int reduc_index = -1;
6198 for (i = 0; i < op_type; i++)
6200 /* The condition of COND_EXPR is checked in vectorizable_condition(). */
6201 if (i == 0 && code == COND_EXPR)
6202 continue;
6204 stmt_vec_info def_stmt_info;
6205 is_simple_use = vect_is_simple_use (ops[i], loop_vinfo, &dts[i], &tem,
6206 &def_stmt_info);
6207 dt = dts[i];
6208 gcc_assert (is_simple_use);
6209 if (dt == vect_reduction_def)
6211 reduc_def_info = def_stmt_info;
6212 reduc_index = i;
6213 continue;
6215 else if (tem)
6217 /* To properly compute ncopies we are interested in the widest
6218 input type in case we're looking at a widening accumulation. */
6219 if (!vectype_in
6220 || (GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (vectype_in)))
6221 < GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (tem)))))
6222 vectype_in = tem;
6225 if (dt != vect_internal_def
6226 && dt != vect_external_def
6227 && dt != vect_constant_def
6228 && dt != vect_induction_def
6229 && !(dt == vect_nested_cycle && nested_cycle))
6230 return false;
6232 if (dt == vect_nested_cycle)
6234 found_nested_cycle_def = true;
6235 reduc_def_info = def_stmt_info;
6236 reduc_index = i;
6239 if (i == 1 && code == COND_EXPR)
6241 /* Record how value of COND_EXPR is defined. */
6242 if (dt == vect_constant_def)
6244 cond_reduc_dt = dt;
6245 cond_reduc_val = ops[i];
6247 if (dt == vect_induction_def
6248 && def_stmt_info
6249 && is_nonwrapping_integer_induction (def_stmt_info, loop))
6251 cond_reduc_dt = dt;
6252 cond_stmt_vinfo = def_stmt_info;
6257 if (!vectype_in)
6258 vectype_in = vectype_out;
6260 /* When vectorizing a reduction chain w/o SLP the reduction PHI is not
6261 directly used in the stmt. */
6262 if (reduc_index == -1)
6264 if (STMT_VINFO_REDUC_TYPE (stmt_info) == FOLD_LEFT_REDUCTION)
6266 if (dump_enabled_p ())
6267 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6268 "in-order reduction chain without SLP.\n");
6269 return false;
6272 if (orig_stmt_info)
6273 reduc_def_info = STMT_VINFO_REDUC_DEF (orig_stmt_info);
6274 else
6275 reduc_def_info = STMT_VINFO_REDUC_DEF (stmt_info);
6278 if (! reduc_def_info)
6279 return false;
6281 gphi *reduc_def_phi = dyn_cast <gphi *> (reduc_def_info->stmt);
6282 if (!reduc_def_phi)
6283 return false;
6285 if (!(reduc_index == -1
6286 || dts[reduc_index] == vect_reduction_def
6287 || dts[reduc_index] == vect_nested_cycle
6288 || ((dts[reduc_index] == vect_internal_def
6289 || dts[reduc_index] == vect_external_def
6290 || dts[reduc_index] == vect_constant_def
6291 || dts[reduc_index] == vect_induction_def)
6292 && nested_cycle && found_nested_cycle_def)))
6294 /* For pattern recognized stmts, orig_stmt might be a reduction,
6295 but some helper statements for the pattern might not, or
6296 might be COND_EXPRs with reduction uses in the condition. */
6297 gcc_assert (orig_stmt_info);
6298 return false;
6301 /* PHIs should not participate in patterns. */
6302 gcc_assert (!STMT_VINFO_RELATED_STMT (reduc_def_info));
6303 enum vect_reduction_type v_reduc_type
6304 = STMT_VINFO_REDUC_TYPE (reduc_def_info);
6305 stmt_vec_info tmp = STMT_VINFO_REDUC_DEF (reduc_def_info);
6307 STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) = v_reduc_type;
6308 /* If we have a condition reduction, see if we can simplify it further. */
6309 if (v_reduc_type == COND_REDUCTION)
6311 /* TODO: We can't yet handle reduction chains, since we need to treat
6312 each COND_EXPR in the chain specially, not just the last one.
6313 E.g. for:
6315 x_1 = PHI <x_3, ...>
6316 x_2 = a_2 ? ... : x_1;
6317 x_3 = a_3 ? ... : x_2;
6319 we're interested in the last element in x_3 for which a_2 || a_3
6320 is true, whereas the current reduction chain handling would
6321 vectorize x_2 as a normal VEC_COND_EXPR and only treat x_3
6322 as a reduction operation. */
6323 if (reduc_index == -1)
6325 if (dump_enabled_p ())
6326 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6327 "conditional reduction chains not supported\n");
6328 return false;
6331 /* vect_is_simple_reduction ensured that operand 2 is the
6332 loop-carried operand. */
6333 gcc_assert (reduc_index == 2);
6335 /* Loop peeling modifies the initial value of the reduction PHI, which
6336 makes the reduction stmt to be transformed different from the
6337 original stmt analyzed. We need to record the reduction code for
6338 CONST_COND_REDUCTION type reductions at the analysis stage so that
6339 it can be used directly at the transform stage. */
6340 if (STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info) == MAX_EXPR
6341 || STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info) == MIN_EXPR)
6343 /* Also set the reduction type to CONST_COND_REDUCTION. */
6344 gcc_assert (cond_reduc_dt == vect_constant_def);
6345 STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) = CONST_COND_REDUCTION;
6347 else if (direct_internal_fn_supported_p (IFN_FOLD_EXTRACT_LAST,
6348 vectype_in, OPTIMIZE_FOR_SPEED))
6350 if (dump_enabled_p ())
6351 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6352 "optimizing condition reduction with"
6353 " FOLD_EXTRACT_LAST.\n");
6354 STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) = EXTRACT_LAST_REDUCTION;
6356 else if (cond_reduc_dt == vect_induction_def)
6358 tree base
6359 = STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (cond_stmt_vinfo);
6360 tree step = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (cond_stmt_vinfo);
6362 gcc_assert (TREE_CODE (base) == INTEGER_CST
6363 && TREE_CODE (step) == INTEGER_CST);
6364 cond_reduc_val = NULL_TREE;
6365 /* Find a suitable value, for MAX_EXPR below base, for MIN_EXPR
6366 above base; punt if base is the minimum value of the type for
6367 MAX_EXPR or maximum value of the type for MIN_EXPR for now. */
6368 if (tree_int_cst_sgn (step) == -1)
6370 cond_reduc_op_code = MIN_EXPR;
6371 if (tree_int_cst_sgn (base) == -1)
6372 cond_reduc_val = build_int_cst (TREE_TYPE (base), 0);
6373 else if (tree_int_cst_lt (base,
6374 TYPE_MAX_VALUE (TREE_TYPE (base))))
6375 cond_reduc_val
6376 = int_const_binop (PLUS_EXPR, base, integer_one_node);
6378 else
6380 cond_reduc_op_code = MAX_EXPR;
6381 if (tree_int_cst_sgn (base) == 1)
6382 cond_reduc_val = build_int_cst (TREE_TYPE (base), 0);
6383 else if (tree_int_cst_lt (TYPE_MIN_VALUE (TREE_TYPE (base)),
6384 base))
6385 cond_reduc_val
6386 = int_const_binop (MINUS_EXPR, base, integer_one_node);
6388 if (cond_reduc_val)
6390 if (dump_enabled_p ())
6391 dump_printf_loc (MSG_NOTE, vect_location,
6392 "condition expression based on "
6393 "integer induction.\n");
6394 STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
6395 = INTEGER_INDUC_COND_REDUCTION;
6398 else if (cond_reduc_dt == vect_constant_def)
6400 enum vect_def_type cond_initial_dt;
6401 gimple *def_stmt = SSA_NAME_DEF_STMT (ops[reduc_index]);
6402 tree cond_initial_val
6403 = PHI_ARG_DEF_FROM_EDGE (def_stmt, loop_preheader_edge (loop));
6405 gcc_assert (cond_reduc_val != NULL_TREE);
6406 vect_is_simple_use (cond_initial_val, loop_vinfo, &cond_initial_dt);
6407 if (cond_initial_dt == vect_constant_def
6408 && types_compatible_p (TREE_TYPE (cond_initial_val),
6409 TREE_TYPE (cond_reduc_val)))
6411 tree e = fold_binary (LE_EXPR, boolean_type_node,
6412 cond_initial_val, cond_reduc_val);
6413 if (e && (integer_onep (e) || integer_zerop (e)))
6415 if (dump_enabled_p ())
6416 dump_printf_loc (MSG_NOTE, vect_location,
6417 "condition expression based on "
6418 "compile time constant.\n");
6419 /* Record reduction code at analysis stage. */
6420 STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info)
6421 = integer_onep (e) ? MAX_EXPR : MIN_EXPR;
6422 STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
6423 = CONST_COND_REDUCTION;
6429 if (orig_stmt_info)
6430 gcc_assert (tmp == orig_stmt_info
6431 || REDUC_GROUP_FIRST_ELEMENT (tmp) == orig_stmt_info);
6432 else
6433 /* We changed STMT to be the first stmt in reduction chain, hence we
6434 check that in this case the first element in the chain is STMT. */
6435 gcc_assert (tmp == stmt_info
6436 || REDUC_GROUP_FIRST_ELEMENT (tmp) == stmt_info);
6438 if (STMT_VINFO_LIVE_P (reduc_def_info))
6439 return false;
6441 if (slp_node)
6442 ncopies = 1;
6443 else
6444 ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
6446 gcc_assert (ncopies >= 1);
6448 vec_mode = TYPE_MODE (vectype_in);
6449 poly_uint64 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
6451 if (code == COND_EXPR)
6453 /* Only call during the analysis stage, otherwise we'll lose
6454 STMT_VINFO_TYPE. */
6455 if (!vec_stmt && !vectorizable_condition (stmt_info, gsi, NULL,
6456 ops[reduc_index], 0, NULL,
6457 cost_vec))
6459 if (dump_enabled_p ())
6460 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6461 "unsupported condition in reduction\n");
6462 return false;
6465 else
6467 /* 4. Supportable by target? */
6469 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR
6470 || code == LROTATE_EXPR || code == RROTATE_EXPR)
6472 /* Shifts and rotates are only supported by vectorizable_shift,
6473 not vectorizable_reduction. */
6474 if (dump_enabled_p ())
6475 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6476 "unsupported shift or rotation.\n");
6477 return false;
6480 /* 4.1. check support for the operation in the loop */
6481 optab = optab_for_tree_code (code, vectype_in, optab_default);
6482 if (!optab)
6484 if (dump_enabled_p ())
6485 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6486 "no optab.\n");
6488 return false;
6491 if (optab_handler (optab, vec_mode) == CODE_FOR_nothing)
6493 if (dump_enabled_p ())
6494 dump_printf (MSG_NOTE, "op not supported by target.\n");
6496 if (maybe_ne (GET_MODE_SIZE (vec_mode), UNITS_PER_WORD)
6497 || !vect_worthwhile_without_simd_p (loop_vinfo, code))
6498 return false;
6500 if (dump_enabled_p ())
6501 dump_printf (MSG_NOTE, "proceeding using word mode.\n");
6504 /* Worthwhile without SIMD support? */
6505 if (!VECTOR_MODE_P (TYPE_MODE (vectype_in))
6506 && !vect_worthwhile_without_simd_p (loop_vinfo, code))
6508 if (dump_enabled_p ())
6509 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6510 "not worthwhile without SIMD support.\n");
6512 return false;
6516 /* 4.2. Check support for the epilog operation.
6518 If STMT represents a reduction pattern, then the type of the
6519 reduction variable may be different than the type of the rest
6520 of the arguments. For example, consider the case of accumulation
6521 of shorts into an int accumulator; The original code:
6522 S1: int_a = (int) short_a;
6523 orig_stmt-> S2: int_acc = plus <int_a ,int_acc>;
6525 was replaced with:
6526 STMT: int_acc = widen_sum <short_a, int_acc>
6528 This means that:
6529 1. The tree-code that is used to create the vector operation in the
6530 epilog code (that reduces the partial results) is not the
6531 tree-code of STMT, but is rather the tree-code of the original
6532 stmt from the pattern that STMT is replacing. I.e, in the example
6533 above we want to use 'widen_sum' in the loop, but 'plus' in the
6534 epilog.
6535 2. The type (mode) we use to check available target support
6536 for the vector operation to be created in the *epilog*, is
6537 determined by the type of the reduction variable (in the example
6538 above we'd check this: optab_handler (plus_optab, vect_int_mode])).
6539 However the type (mode) we use to check available target support
6540 for the vector operation to be created *inside the loop*, is
6541 determined by the type of the other arguments to STMT (in the
6542 example we'd check this: optab_handler (widen_sum_optab,
6543 vect_short_mode)).
6545 This is contrary to "regular" reductions, in which the types of all
6546 the arguments are the same as the type of the reduction variable.
6547 For "regular" reductions we can therefore use the same vector type
6548 (and also the same tree-code) when generating the epilog code and
6549 when generating the code inside the loop. */
6551 vect_reduction_type reduction_type
6552 = STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info);
6553 if (orig_stmt_info
6554 && (reduction_type == TREE_CODE_REDUCTION
6555 || reduction_type == FOLD_LEFT_REDUCTION))
6557 /* This is a reduction pattern: get the vectype from the type of the
6558 reduction variable, and get the tree-code from orig_stmt. */
6559 orig_code = gimple_assign_rhs_code (orig_stmt_info->stmt);
6560 gcc_assert (vectype_out);
6561 vec_mode = TYPE_MODE (vectype_out);
6563 else
6565 /* Regular reduction: the same vectype and tree-code as used for
6566 the vector code inside the loop can also be used for the epilog code. */
6567 orig_code = code;
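/* A MINUS reduction combines its per-lane partial results with PLUS in
   the epilogue; the subtraction itself happens only inside the loop.  */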
6569 if (code == MINUS_EXPR)
6570 orig_code = PLUS_EXPR;
6572 /* For simple condition reductions, replace with the actual expression
6573 we want to base our reduction around. */
6574 if (reduction_type == CONST_COND_REDUCTION)
6576 orig_code = STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info);
6577 gcc_assert (orig_code == MAX_EXPR || orig_code == MIN_EXPR);
6579 else if (reduction_type == INTEGER_INDUC_COND_REDUCTION)
6580 orig_code = cond_reduc_op_code;
6583 if (nested_cycle)
6585 def_bb = gimple_bb (reduc_def_phi);
6586 def_stmt_loop = def_bb->loop_father;
6587 def_arg = PHI_ARG_DEF_FROM_EDGE (reduc_def_phi,
6588 loop_preheader_edge (def_stmt_loop));
6589 stmt_vec_info def_arg_stmt_info = loop_vinfo->lookup_def (def_arg);
6590 if (def_arg_stmt_info
6591 && (STMT_VINFO_DEF_TYPE (def_arg_stmt_info)
6592 == vect_double_reduction_def))
6593 double_reduc = true;
6596 reduc_fn = IFN_LAST;
6598 if (reduction_type == TREE_CODE_REDUCTION
6599 || reduction_type == FOLD_LEFT_REDUCTION
6600 || reduction_type == INTEGER_INDUC_COND_REDUCTION
6601 || reduction_type == CONST_COND_REDUCTION)
6603 if (reduction_type == FOLD_LEFT_REDUCTION
6604 ? fold_left_reduction_fn (orig_code, &reduc_fn)
6605 : reduction_fn_for_scalar_code (orig_code, &reduc_fn))
6607 if (reduc_fn != IFN_LAST
6608 && !direct_internal_fn_supported_p (reduc_fn, vectype_out,
6609 OPTIMIZE_FOR_SPEED))
6611 if (dump_enabled_p ())
6612 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6613 "reduc op not supported by target.\n");
6615 reduc_fn = IFN_LAST;
6618 else
6620 if (!nested_cycle || double_reduc)
6622 if (dump_enabled_p ())
6623 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6624 "no reduc code for scalar code.\n");
6626 return false;
6630 else if (reduction_type == COND_REDUCTION)
6632 int scalar_precision
6633 = GET_MODE_PRECISION (SCALAR_TYPE_MODE (scalar_type));
6634 cr_index_scalar_type = make_unsigned_type (scalar_precision);
6635 cr_index_vector_type = build_vector_type (cr_index_scalar_type,
6636 nunits_out);
6638 if (direct_internal_fn_supported_p (IFN_REDUC_MAX, cr_index_vector_type,
6639 OPTIMIZE_FOR_SPEED))
6640 reduc_fn = IFN_REDUC_MAX;
6643 if (reduction_type != EXTRACT_LAST_REDUCTION
6644 && (!nested_cycle || double_reduc)
6645 && reduc_fn == IFN_LAST
6646 && !nunits_out.is_constant ())
6648 if (dump_enabled_p ())
6649 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6650 "missing target support for reduction on"
6651 " variable-length vectors.\n");
6652 return false;
6655 if ((double_reduc || reduction_type != TREE_CODE_REDUCTION)
6656 && ncopies > 1)
6658 if (dump_enabled_p ())
6659 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6660 "multiple types in double reduction or condition "
6661 "reduction.\n");
6662 return false;
6665 /* For SLP reductions, see if there is a neutral value we can use. */
6666 tree neutral_op = NULL_TREE;
6667 if (slp_node)
6668 neutral_op = neutral_op_for_slp_reduction
6669 (slp_node_instance->reduc_phis, code,
6670 REDUC_GROUP_FIRST_ELEMENT (stmt_info) != NULL);
6672 if (double_reduc && reduction_type == FOLD_LEFT_REDUCTION)
6674 /* We can't support in-order reductions of code such as this:
6676 for (int i = 0; i < n1; ++i)
6677 for (int j = 0; j < n2; ++j)
6678 l += a[j];
6680 since GCC effectively transforms the loop when vectorizing:
6682 for (int i = 0; i < n1 / VF; ++i)
6683 for (int j = 0; j < n2; ++j)
6684 for (int k = 0; k < VF; ++k)
6685 l += a[j];
6687 which is a reassociation of the original operation. */
6688 if (dump_enabled_p ())
6689 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6690 "in-order double reduction not supported.\n");
6692 return false;
6695 if (reduction_type == FOLD_LEFT_REDUCTION
6696 && slp_node
6697 && !REDUC_GROUP_FIRST_ELEMENT (stmt_info))
6699 /* We cannot use in-order reductions in this case because there is
6700 an implicit reassociation of the operations involved. */
6701 if (dump_enabled_p ())
6702 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6703 "in-order unchained SLP reductions not supported.\n");
6704 return false;
6707 /* For double reductions, and for SLP reductions with a neutral value,
6708 we construct a variable-length initial vector by loading a vector
6709 full of the neutral value and then shift-and-inserting the start
6710 values into the low-numbered elements. */
6711 if ((double_reduc || neutral_op)
6712 && !nunits_out.is_constant ()
6713 && !direct_internal_fn_supported_p (IFN_VEC_SHL_INSERT,
6714 vectype_out, OPTIMIZE_FOR_SPEED))
6716 if (dump_enabled_p ())
6717 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6718 "reduction on variable-length vectors requires"
6719 " target support for a vector-shift-and-insert"
6720 " operation.\n");
6721 return false;
6724 /* Check extra constraints for variable-length unchained SLP reductions. */
6725 if (STMT_SLP_TYPE (stmt_info)
6726 && !REDUC_GROUP_FIRST_ELEMENT (stmt_info)
6727 && !nunits_out.is_constant ())
6729 /* We checked above that we could build the initial vector when
6730 there's a neutral element value. Check here for the case in
6731 which each SLP statement has its own initial value and in which
6732 that value needs to be repeated for every instance of the
6733 statement within the initial vector. */
6734 unsigned int group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();
6735 scalar_mode elt_mode = SCALAR_TYPE_MODE (TREE_TYPE (vectype_out));
6736 if (!neutral_op
6737 && !can_duplicate_and_interleave_p (group_size, elt_mode))
6739 if (dump_enabled_p ())
6740 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6741 "unsupported form of SLP reduction for"
6742 " variable-length vectors: cannot build"
6743 " initial vector.\n");
6744 return false;
6746 /* The epilogue code relies on the number of elements being a multiple
6747 of the group size. The duplicate-and-interleave approach to setting
6748 up the initial vector does too. */
6749 if (!multiple_p (nunits_out, group_size))
6751 if (dump_enabled_p ())
6752 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6753 "unsupported form of SLP reduction for"
6754 " variable-length vectors: the vector size"
6755 " is not a multiple of the number of results.\n");
6756 return false;
6760 /* In case of widening multiplication by a constant, we update the type
6761 of the constant to be the type of the other operand. We check that the
6762 constant fits the type in the pattern recognition pass. */
6763 if (code == DOT_PROD_EXPR
6764 && !types_compatible_p (TREE_TYPE (ops[0]), TREE_TYPE (ops[1])))
6766 if (TREE_CODE (ops[0]) == INTEGER_CST)
6767 ops[0] = fold_convert (TREE_TYPE (ops[1]), ops[0]);
6768 else if (TREE_CODE (ops[1]) == INTEGER_CST)
6769 ops[1] = fold_convert (TREE_TYPE (ops[0]), ops[1]);
6770 else
6772 if (dump_enabled_p ())
6773 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6774 "invalid types in dot-prod\n");
6776 return false;
6780 if (reduction_type == COND_REDUCTION)
6782 widest_int ni;
6784 if (! max_loop_iterations (loop, &ni))
6786 if (dump_enabled_p ())
6787 dump_printf_loc (MSG_NOTE, vect_location,
6788 "loop count not known, cannot create cond "
6789 "reduction.\n");
6790 return false;
6792 /* Convert backedges to iterations. */
6793 ni += 1;
6795 /* The additional index will be the same type as the condition. Check
6796 that the loop count can fit into this type less one (because we'll use
6797 up the zero slot for when there are no matches). */
6798 tree max_index = TYPE_MAX_VALUE (cr_index_scalar_type);
6799 if (wi::geu_p (ni, wi::to_widest (max_index)))
6801 if (dump_enabled_p ())
6802 dump_printf_loc (MSG_NOTE, vect_location,
6803 "loop size is greater than data size.\n");
6804 return false;
6808 /* In case the vectorization factor (VF) is bigger than the number
6809 of elements that we can fit in a vectype (nunits), we have to generate
6810 more than one vector stmt, i.e. we need to "unroll" the
6811 vector stmt by a factor of VF/nunits. For more details see documentation
6812 in vectorizable_operation. */
6814 /* If the reduction is used in an outer loop we need to generate
6815 VF intermediate results, like so (e.g. for ncopies=2):
6816 r0 = phi (init, r0)
6817 r1 = phi (init, r1)
6818 r0 = x0 + r0;
6819 r1 = x1 + r1;
6820 (i.e. we generate VF results in 2 registers).
6821 In this case we have a separate def-use cycle for each copy, and therefore
6822 for each copy we get the vector def for the reduction variable from the
6823 respective phi node created for this copy.
6825 Otherwise (the reduction is unused in the loop nest), we can combine
6826 together intermediate results, like so (e.g. for ncopies=2):
6827 r = phi (init, r)
6828 r = x0 + r;
6829 r = x1 + r;
6830 (i.e. we generate VF/2 results in a single register).
6831 In this case for each copy we get the vector def for the reduction variable
6832 from the vectorized reduction operation generated in the previous iteration.
6834 This only works when we see both the reduction PHI and its only consumer
6835 in vectorizable_reduction and there are no intermediate stmts
6836 participating. */
6837 stmt_vec_info use_stmt_info;
6838 tree reduc_phi_result = gimple_phi_result (reduc_def_phi);
6839 if (ncopies > 1
6840 && (STMT_VINFO_RELEVANT (stmt_info) <= vect_used_only_live)
6841 && (use_stmt_info = loop_vinfo->lookup_single_use (reduc_phi_result))
6842 && vect_stmt_to_vectorize (use_stmt_info) == stmt_info)
6844 single_defuse_cycle = true;
6845 epilog_copies = 1;
6847 else
6848 epilog_copies = ncopies;
6850 /* If the reduction stmt is one of the patterns that have lane
6851 reduction embedded we cannot handle the case of ! single_defuse_cycle. */
6852 if ((ncopies > 1
6853 && ! single_defuse_cycle)
6854 && (code == DOT_PROD_EXPR
6855 || code == WIDEN_SUM_EXPR
6856 || code == SAD_EXPR))
6858 if (dump_enabled_p ())
6859 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6860 "multi def-use cycle not possible for lane-reducing "
6861 "reduction operation\n");
6862 return false;
6865 if (slp_node)
6866 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
6867 else
6868 vec_num = 1;
6870 internal_fn cond_fn = get_conditional_internal_fn (code);
6871 vec_loop_masks *masks = &LOOP_VINFO_MASKS (loop_vinfo);
6873 if (!vec_stmt) /* transformation not required. */
6875 vect_model_reduction_cost (stmt_info, reduc_fn, ncopies, cost_vec);
6876 if (loop_vinfo && LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo))
6878 if (reduction_type != FOLD_LEFT_REDUCTION
6879 && (cond_fn == IFN_LAST
6880 || !direct_internal_fn_supported_p (cond_fn, vectype_in,
6881 OPTIMIZE_FOR_SPEED)))
6883 if (dump_enabled_p ())
6884 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6885 "can't use a fully-masked loop because no"
6886 " conditional operation is available.\n");
6887 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
6889 else if (reduc_index == -1)
6891 if (dump_enabled_p ())
6892 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6893 "can't use a fully-masked loop for chained"
6894 " reductions.\n");
6895 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
6897 else
6898 vect_record_loop_mask (loop_vinfo, masks, ncopies * vec_num,
6899 vectype_in);
6901 if (dump_enabled_p ()
6902 && reduction_type == FOLD_LEFT_REDUCTION)
6903 dump_printf_loc (MSG_NOTE, vect_location,
6904 "using an in-order (fold-left) reduction.\n");
6905 STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
6906 return true;
6909 /* Transform. */
6911 if (dump_enabled_p ())
6912 dump_printf_loc (MSG_NOTE, vect_location, "transform reduction.\n");
6914 /* FORNOW: Multiple types are not supported for condition. */
6915 if (code == COND_EXPR)
6916 gcc_assert (ncopies == 1);
6918 bool masked_loop_p = LOOP_VINFO_FULLY_MASKED_P (loop_vinfo);
6920 if (reduction_type == FOLD_LEFT_REDUCTION)
6921 return vectorize_fold_left_reduction
6922 (stmt_info, gsi, vec_stmt, slp_node, reduc_def_phi, code,
6923 reduc_fn, ops, vectype_in, reduc_index, masks);
6925 if (reduction_type == EXTRACT_LAST_REDUCTION)
6927 gcc_assert (!slp_node);
6928 return vectorizable_condition (stmt_info, gsi, vec_stmt,
6929 NULL, reduc_index, NULL, NULL);
6932 /* Create the destination vector */
6933 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
6935 prev_stmt_info = NULL;
6936 prev_phi_info = NULL;
6937 if (!slp_node)
6939 vec_oprnds0.create (1);
6940 vec_oprnds1.create (1);
6941 if (op_type == ternary_op)
6942 vec_oprnds2.create (1);
6945 phis.create (vec_num);
6946 vect_defs.create (vec_num);
6947 if (!slp_node)
6948 vect_defs.quick_push (NULL_TREE);
6950 if (slp_node)
6951 phis.splice (SLP_TREE_VEC_STMTS (slp_node_instance->reduc_phis));
6952 else
6953 phis.quick_push (STMT_VINFO_VEC_STMT (reduc_def_info));
6955 for (j = 0; j < ncopies; j++)
6957 if (code == COND_EXPR)
6959 gcc_assert (!slp_node);
6960 vectorizable_condition (stmt_info, gsi, vec_stmt,
6961 PHI_RESULT (phis[0]->stmt),
6962 reduc_index, NULL, NULL);
6963 /* Multiple types are not supported for condition. */
6964 break;
6967 /* Handle uses. */
6968 if (j == 0)
6970 if (slp_node)
6972 /* Get vec defs for all the operands except the reduction index,
6973 ensuring the ordering of the ops in the vector is kept. */
6974 auto_vec<tree, 3> slp_ops;
6975 auto_vec<vec<tree>, 3> vec_defs;
6977 slp_ops.quick_push (ops[0]);
6978 slp_ops.quick_push (ops[1]);
6979 if (op_type == ternary_op)
6980 slp_ops.quick_push (ops[2]);
6982 vect_get_slp_defs (slp_ops, slp_node, &vec_defs);
6984 vec_oprnds0.safe_splice (vec_defs[0]);
6985 vec_defs[0].release ();
6986 vec_oprnds1.safe_splice (vec_defs[1]);
6987 vec_defs[1].release ();
6988 if (op_type == ternary_op)
6990 vec_oprnds2.safe_splice (vec_defs[2]);
6991 vec_defs[2].release ();
6994 else
6996 vec_oprnds0.quick_push
6997 (vect_get_vec_def_for_operand (ops[0], stmt_info));
6998 vec_oprnds1.quick_push
6999 (vect_get_vec_def_for_operand (ops[1], stmt_info));
7000 if (op_type == ternary_op)
7001 vec_oprnds2.quick_push
7002 (vect_get_vec_def_for_operand (ops[2], stmt_info));
7005 else
7007 if (!slp_node)
7009 gcc_assert (reduc_index != -1 || ! single_defuse_cycle);
7011 if (single_defuse_cycle && reduc_index == 0)
7012 vec_oprnds0[0] = gimple_get_lhs (new_stmt_info->stmt);
7013 else
7014 vec_oprnds0[0]
7015 = vect_get_vec_def_for_stmt_copy (loop_vinfo,
7016 vec_oprnds0[0]);
7017 if (single_defuse_cycle && reduc_index == 1)
7018 vec_oprnds1[0] = gimple_get_lhs (new_stmt_info->stmt);
7019 else
7020 vec_oprnds1[0]
7021 = vect_get_vec_def_for_stmt_copy (loop_vinfo,
7022 vec_oprnds1[0]);
7023 if (op_type == ternary_op)
7025 if (single_defuse_cycle && reduc_index == 2)
7026 vec_oprnds2[0] = gimple_get_lhs (new_stmt_info->stmt);
7027 else
7028 vec_oprnds2[0]
7029 = vect_get_vec_def_for_stmt_copy (loop_vinfo,
7030 vec_oprnds2[0]);
7035 FOR_EACH_VEC_ELT (vec_oprnds0, i, def0)
7037 tree vop[3] = { def0, vec_oprnds1[i], NULL_TREE };
7038 if (masked_loop_p)
7040 /* Make sure that the reduction accumulator is vop[0]. */
7041 if (reduc_index == 1)
7043 gcc_assert (commutative_tree_code (code));
7044 std::swap (vop[0], vop[1]);
7046 tree mask = vect_get_loop_mask (gsi, masks, vec_num * ncopies,
7047 vectype_in, i * ncopies + j);
7048 gcall *call = gimple_build_call_internal (cond_fn, 4, mask,
7049 vop[0], vop[1],
7050 vop[0]);
7051 new_temp = make_ssa_name (vec_dest, call);
7052 gimple_call_set_lhs (call, new_temp);
7053 gimple_call_set_nothrow (call, true);
7054 new_stmt_info
7055 = vect_finish_stmt_generation (stmt_info, call, gsi);
7057 else
7059 if (op_type == ternary_op)
7060 vop[2] = vec_oprnds2[i];
7062 gassign *new_stmt = gimple_build_assign (vec_dest, code,
7063 vop[0], vop[1], vop[2]);
7064 new_temp = make_ssa_name (vec_dest, new_stmt);
7065 gimple_assign_set_lhs (new_stmt, new_temp);
7066 new_stmt_info
7067 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
7070 if (slp_node)
7072 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
7073 vect_defs.quick_push (new_temp);
7075 else
7076 vect_defs[0] = new_temp;
7079 if (slp_node)
7080 continue;
7082 if (j == 0)
7083 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
7084 else
7085 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
7087 prev_stmt_info = new_stmt_info;
7090 /* Finalize the reduction-phi (set its arguments) and create the
7091 epilog reduction code. */
7092 if ((!single_defuse_cycle || code == COND_EXPR) && !slp_node)
7093 vect_defs[0] = gimple_get_lhs ((*vec_stmt)->stmt);
7095 vect_create_epilog_for_reduction (vect_defs, stmt_info, reduc_def_phi,
7096 epilog_copies, reduc_fn, phis,
7097 double_reduc, slp_node, slp_node_instance,
7098 cond_reduc_val, cond_reduc_op_code,
7099 neutral_op);
7101 return true;
7104 /* Function vect_min_worthwhile_factor.
7106 For a loop where we could vectorize the operation indicated by CODE,
7107 return the minimum vectorization factor that makes it worthwhile
7108 to use generic vectors. */
7109 static unsigned int
7110 vect_min_worthwhile_factor (enum tree_code code)
7112 switch (code)
7114 case PLUS_EXPR:
7115 case MINUS_EXPR:
7116 case NEGATE_EXPR:
7117 return 4;
7119 case BIT_AND_EXPR:
7120 case BIT_IOR_EXPR:
7121 case BIT_XOR_EXPR:
7122 case BIT_NOT_EXPR:
7123 return 2;
7125 default:
7126 return INT_MAX;
7130 /* Return true if VINFO indicates we are doing loop vectorization and if
7131 it is worth decomposing CODE operations into scalar operations for
7132 that loop's vectorization factor. */
7134 bool
7135 vect_worthwhile_without_simd_p (vec_info *vinfo, tree_code code)
7137 loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
7138 unsigned HOST_WIDE_INT value;
7139 return (loop_vinfo
7140 && LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&value)
7141 && value >= vect_min_worthwhile_factor (code));
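/* A hedged example of how the two functions above combine (the factors are
   the ones hard-coded in vect_min_worthwhile_factor): with a constant
   vectorization factor of 4, PLUS_EXPR is considered worthwhile (4 >= 4)
   and so is BIT_AND_EXPR (4 >= 2), while with a factor of 2 only the
   bitwise codes pass the test.  */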
7144 /* Function vectorizable_induction
7146 Check if STMT_INFO performs an induction computation that can be vectorized.
7147 If VEC_STMT is also passed, vectorize the induction PHI: create a vectorized
7148 phi to replace it, put it in VEC_STMT, and add it to the same basic block.
7149 Return true if STMT_INFO is vectorizable in this way. */
7151 bool
7152 vectorizable_induction (stmt_vec_info stmt_info,
7153 gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
7154 stmt_vec_info *vec_stmt, slp_tree slp_node,
7155 stmt_vector_for_cost *cost_vec)
7157 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
7158 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
7159 unsigned ncopies;
7160 bool nested_in_vect_loop = false;
7161 struct loop *iv_loop;
7162 tree vec_def;
7163 edge pe = loop_preheader_edge (loop);
7164 basic_block new_bb;
7165 tree new_vec, vec_init, vec_step, t;
7166 tree new_name;
7167 gimple *new_stmt;
7168 gphi *induction_phi;
7169 tree induc_def, vec_dest;
7170 tree init_expr, step_expr;
7171 poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
7172 unsigned i;
7173 tree expr;
7174 gimple_seq stmts;
7175 imm_use_iterator imm_iter;
7176 use_operand_p use_p;
7177 gimple *exit_phi;
7178 edge latch_e;
7179 tree loop_arg;
7180 gimple_stmt_iterator si;
7182 gphi *phi = dyn_cast <gphi *> (stmt_info->stmt);
7183 if (!phi)
7184 return false;
7186 if (!STMT_VINFO_RELEVANT_P (stmt_info))
7187 return false;
7189 /* Make sure it was recognized as induction computation. */
7190 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def)
7191 return false;
7193 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
7194 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
7196 if (slp_node)
7197 ncopies = 1;
7198 else
7199 ncopies = vect_get_num_copies (loop_vinfo, vectype);
7200 gcc_assert (ncopies >= 1);
7202 /* FORNOW. These restrictions should be relaxed. */
7203 if (nested_in_vect_loop_p (loop, stmt_info))
7205 imm_use_iterator imm_iter;
7206 use_operand_p use_p;
7207 gimple *exit_phi;
7208 edge latch_e;
7209 tree loop_arg;
7211 if (ncopies > 1)
7213 if (dump_enabled_p ())
7214 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7215 "multiple types in nested loop.\n");
7216 return false;
7219 /* FORNOW: outer loop induction with SLP not supported. */
7220 if (STMT_SLP_TYPE (stmt_info))
7221 return false;
7223 exit_phi = NULL;
7224 latch_e = loop_latch_edge (loop->inner);
7225 loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
7226 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
7228 gimple *use_stmt = USE_STMT (use_p);
7229 if (is_gimple_debug (use_stmt))
7230 continue;
7232 if (!flow_bb_inside_loop_p (loop->inner, gimple_bb (use_stmt)))
7234 exit_phi = use_stmt;
7235 break;
7238 if (exit_phi)
7240 stmt_vec_info exit_phi_vinfo = loop_vinfo->lookup_stmt (exit_phi);
7241 if (!(STMT_VINFO_RELEVANT_P (exit_phi_vinfo)
7242 && !STMT_VINFO_LIVE_P (exit_phi_vinfo)))
7244 if (dump_enabled_p ())
7245 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7246 "inner-loop induction only used outside "
7247 "of the outer vectorized loop.\n");
7248 return false;
7252 nested_in_vect_loop = true;
7253 iv_loop = loop->inner;
7255 else
7256 iv_loop = loop;
7257 gcc_assert (iv_loop == (gimple_bb (phi))->loop_father);
7259 if (slp_node && !nunits.is_constant ())
7261 /* The current SLP code creates the initial value element-by-element. */
7262 if (dump_enabled_p ())
7263 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7264 "SLP induction not supported for variable-length"
7265 " vectors.\n");
7266 return false;
7269 if (!vec_stmt) /* transformation not required. */
7271 STMT_VINFO_TYPE (stmt_info) = induc_vec_info_type;
7272 DUMP_VECT_SCOPE ("vectorizable_induction");
7273 vect_model_induction_cost (stmt_info, ncopies, cost_vec);
7274 return true;
7277 /* Transform. */
7279 /* Compute a vector variable, initialized with the first VF values of
7280 the induction variable. E.g., for an iv with IV_PHI='X' and
7281 evolution S, for a vector of 4 units, we want to compute:
7282 [X, X + S, X + 2*S, X + 3*S]. */
7284 if (dump_enabled_p ())
7285 dump_printf_loc (MSG_NOTE, vect_location, "transform induction phi.\n");
7287 latch_e = loop_latch_edge (iv_loop);
7288 loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
7290 step_expr = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_info);
7291 gcc_assert (step_expr != NULL_TREE);
7293 pe = loop_preheader_edge (iv_loop);
7294 init_expr = PHI_ARG_DEF_FROM_EDGE (phi,
7295 loop_preheader_edge (iv_loop));
7297 stmts = NULL;
7298 if (!nested_in_vect_loop)
7300 /* Convert the initial value to the desired type. */
7301 tree new_type = TREE_TYPE (vectype);
7302 init_expr = gimple_convert (&stmts, new_type, init_expr);
7304 /* If we are using the loop mask to "peel" for alignment then we need
7305 to adjust the start value here. */
7306 tree skip_niters = LOOP_VINFO_MASK_SKIP_NITERS (loop_vinfo);
7307 if (skip_niters != NULL_TREE)
7309 if (FLOAT_TYPE_P (vectype))
7310 skip_niters = gimple_build (&stmts, FLOAT_EXPR, new_type,
7311 skip_niters);
7312 else
7313 skip_niters = gimple_convert (&stmts, new_type, skip_niters);
7314 tree skip_step = gimple_build (&stmts, MULT_EXPR, new_type,
7315 skip_niters, step_expr);
7316 init_expr = gimple_build (&stmts, MINUS_EXPR, new_type,
7317 init_expr, skip_step);
7321 /* Convert the step to the desired type. */
7322 step_expr = gimple_convert (&stmts, TREE_TYPE (vectype), step_expr);
7324 if (stmts)
7326 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
7327 gcc_assert (!new_bb);
7330 /* Find the first insertion point in the BB. */
7331 basic_block bb = gimple_bb (phi);
7332 si = gsi_after_labels (bb);
7334 /* For SLP induction we have to generate several IVs; for example,
7335 with group size 3 we need [i, i, i, i + S] [i + S, i + S, i + 2*S, i + 2*S]
7336 [i + 2*S, i + 3*S, i + 3*S, i + 3*S]. The step is the same uniform
7337 [VF*S, VF*S, VF*S, VF*S] for all of them. */
7338 if (slp_node)
7340 /* Enforced above. */
7341 unsigned int const_nunits = nunits.to_constant ();
7343 /* Generate [VF*S, VF*S, ... ]. */
7344 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
7346 expr = build_int_cst (integer_type_node, vf);
7347 expr = fold_convert (TREE_TYPE (step_expr), expr);
7349 else
7350 expr = build_int_cst (TREE_TYPE (step_expr), vf);
7351 new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
7352 expr, step_expr);
7353 if (! CONSTANT_CLASS_P (new_name))
7354 new_name = vect_init_vector (stmt_info, new_name,
7355 TREE_TYPE (step_expr), NULL);
7356 new_vec = build_vector_from_val (vectype, new_name);
7357 vec_step = vect_init_vector (stmt_info, new_vec, vectype, NULL);
7359 /* Now generate the IVs. */
7360 unsigned group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();
7361 unsigned nvects = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
7362 unsigned elts = const_nunits * nvects;
7363 unsigned nivs = least_common_multiple (group_size,
7364 const_nunits) / const_nunits;
7365 gcc_assert (elts % group_size == 0);
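/* A worked example with hypothetical numbers matching the comment above:
   group_size == 3 and const_nunits == 4 give
   nivs == least_common_multiple (3, 4) / 4 == 3, i.e. three initial IV
   vectors covering 12 lanes - four complete group instances.  */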
7366 tree elt = init_expr;
7367 unsigned ivn;
7368 for (ivn = 0; ivn < nivs; ++ivn)
7370 tree_vector_builder elts (vectype, const_nunits, 1);
7371 stmts = NULL;
7372 for (unsigned eltn = 0; eltn < const_nunits; ++eltn)
7374 if (ivn*const_nunits + eltn >= group_size
7375 && (ivn * const_nunits + eltn) % group_size == 0)
7376 elt = gimple_build (&stmts, PLUS_EXPR, TREE_TYPE (elt),
7377 elt, step_expr);
7378 elts.quick_push (elt);
7380 vec_init = gimple_build_vector (&stmts, &elts);
7381 if (stmts)
7383 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
7384 gcc_assert (!new_bb);
7387 /* Create the induction-phi that defines the induction-operand. */
7388 vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_");
7389 induction_phi = create_phi_node (vec_dest, iv_loop->header);
7390 stmt_vec_info induction_phi_info
7391 = loop_vinfo->add_stmt (induction_phi);
7392 induc_def = PHI_RESULT (induction_phi);
7394 /* Create the iv update inside the loop */
7395 vec_def = make_ssa_name (vec_dest);
7396 new_stmt = gimple_build_assign (vec_def, PLUS_EXPR, induc_def, vec_step);
7397 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
7398 loop_vinfo->add_stmt (new_stmt);
7400 /* Set the arguments of the phi node: */
7401 add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION);
7402 add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
7403 UNKNOWN_LOCATION);
7405 SLP_TREE_VEC_STMTS (slp_node).quick_push (induction_phi_info);
7408 /* Re-use IVs when we can. */
7409 if (ivn < nvects)
7411 unsigned vfp
7412 = least_common_multiple (group_size, const_nunits) / group_size;
7413 /* Generate [VF'*S, VF'*S, ... ]. */
7414 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
7416 expr = build_int_cst (integer_type_node, vfp);
7417 expr = fold_convert (TREE_TYPE (step_expr), expr);
7419 else
7420 expr = build_int_cst (TREE_TYPE (step_expr), vfp);
7421 new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
7422 expr, step_expr);
7423 if (! CONSTANT_CLASS_P (new_name))
7424 new_name = vect_init_vector (stmt_info, new_name,
7425 TREE_TYPE (step_expr), NULL);
7426 new_vec = build_vector_from_val (vectype, new_name);
7427 vec_step = vect_init_vector (stmt_info, new_vec, vectype, NULL);
7428 for (; ivn < nvects; ++ivn)
7430 gimple *iv = SLP_TREE_VEC_STMTS (slp_node)[ivn - nivs]->stmt;
7431 tree def;
7432 if (gimple_code (iv) == GIMPLE_PHI)
7433 def = gimple_phi_result (iv);
7434 else
7435 def = gimple_assign_lhs (iv);
7436 new_stmt = gimple_build_assign (make_ssa_name (vectype),
7437 PLUS_EXPR,
7438 def, vec_step);
7439 if (gimple_code (iv) == GIMPLE_PHI)
7440 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
7441 else
7443 gimple_stmt_iterator tgsi = gsi_for_stmt (iv);
7444 gsi_insert_after (&tgsi, new_stmt, GSI_CONTINUE_LINKING);
7446 SLP_TREE_VEC_STMTS (slp_node).quick_push
7447 (loop_vinfo->add_stmt (new_stmt));
7451 return true;
7454 /* Create the vector that holds the initial_value of the induction. */
7455 if (nested_in_vect_loop)
7457 /* iv_loop is nested in the loop to be vectorized. init_expr had already
7458 been created during vectorization of previous stmts. We obtain it
7459 from the STMT_VINFO_VEC_STMT of the defining stmt. */
7460 vec_init = vect_get_vec_def_for_operand (init_expr, stmt_info);
7461 /* If the initial value is not of proper type, convert it. */
7462 if (!useless_type_conversion_p (vectype, TREE_TYPE (vec_init)))
7464 new_stmt
7465 = gimple_build_assign (vect_get_new_ssa_name (vectype,
7466 vect_simple_var,
7467 "vec_iv_"),
7468 VIEW_CONVERT_EXPR,
7469 build1 (VIEW_CONVERT_EXPR, vectype,
7470 vec_init));
7471 vec_init = gimple_assign_lhs (new_stmt);
7472 new_bb = gsi_insert_on_edge_immediate (loop_preheader_edge (iv_loop),
7473 new_stmt);
7474 gcc_assert (!new_bb);
7475 loop_vinfo->add_stmt (new_stmt);
7478 else
7480 /* iv_loop is the loop to be vectorized. Create:
7481 vec_init = [X, X+S, X+2*S, X+3*S] (S = step_expr, X = init_expr) */
7482 stmts = NULL;
7483 new_name = gimple_convert (&stmts, TREE_TYPE (vectype), init_expr);
7485 unsigned HOST_WIDE_INT const_nunits;
7486 if (nunits.is_constant (&const_nunits))
7488 tree_vector_builder elts (vectype, const_nunits, 1);
7489 elts.quick_push (new_name);
7490 for (i = 1; i < const_nunits; i++)
7492 /* Create: new_name_i = new_name + step_expr */
7493 new_name = gimple_build (&stmts, PLUS_EXPR, TREE_TYPE (new_name),
7494 new_name, step_expr);
7495 elts.quick_push (new_name);
7497 /* Create a vector from [new_name_0, new_name_1, ...,
7498 new_name_nunits-1] */
7499 vec_init = gimple_build_vector (&stmts, &elts);
7501 else if (INTEGRAL_TYPE_P (TREE_TYPE (step_expr)))
7502 /* Build the initial value directly from a VEC_SERIES_EXPR. */
7503 vec_init = gimple_build (&stmts, VEC_SERIES_EXPR, vectype,
7504 new_name, step_expr);
7505 else
7507 /* Build:
7508 [base, base, base, ...]
7509 + (vectype) [0, 1, 2, ...] * [step, step, step, ...]. */
7510 gcc_assert (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)));
7511 gcc_assert (flag_associative_math);
7512 tree index = build_index_vector (vectype, 0, 1);
7513 tree base_vec = gimple_build_vector_from_val (&stmts, vectype,
7514 new_name);
7515 tree step_vec = gimple_build_vector_from_val (&stmts, vectype,
7516 step_expr);
7517 vec_init = gimple_build (&stmts, FLOAT_EXPR, vectype, index);
7518 vec_init = gimple_build (&stmts, MULT_EXPR, vectype,
7519 vec_init, step_vec);
7520 vec_init = gimple_build (&stmts, PLUS_EXPR, vectype,
7521 vec_init, base_vec);
7524 if (stmts)
7526 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
7527 gcc_assert (!new_bb);
7532 /* Create the vector that holds the step of the induction. */
7533 if (nested_in_vect_loop)
7534 /* iv_loop is nested in the loop to be vectorized. Generate:
7535 vec_step = [S, S, S, S] */
7536 new_name = step_expr;
7537 else
7539 /* iv_loop is the loop to be vectorized. Generate:
7540 vec_step = [VF*S, VF*S, VF*S, VF*S] */
7541 gimple_seq seq = NULL;
7542 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
7544 expr = build_int_cst (integer_type_node, vf);
7545 expr = gimple_build (&seq, FLOAT_EXPR, TREE_TYPE (step_expr), expr);
7547 else
7548 expr = build_int_cst (TREE_TYPE (step_expr), vf);
7549 new_name = gimple_build (&seq, MULT_EXPR, TREE_TYPE (step_expr),
7550 expr, step_expr);
7551 if (seq)
7553 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
7554 gcc_assert (!new_bb);
7558 t = unshare_expr (new_name);
7559 gcc_assert (CONSTANT_CLASS_P (new_name)
7560 || TREE_CODE (new_name) == SSA_NAME);
7561 new_vec = build_vector_from_val (vectype, t);
7562 vec_step = vect_init_vector (stmt_info, new_vec, vectype, NULL);
7565 /* Create the following def-use cycle:
7566 loop prolog:
7567 vec_init = ...
7568 vec_step = ...
7569 loop:
7570 vec_iv = PHI <vec_init, vec_loop>
7572 STMT
7574 vec_loop = vec_iv + vec_step; */
7576 /* Create the induction-phi that defines the induction-operand. */
7577 vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_");
7578 induction_phi = create_phi_node (vec_dest, iv_loop->header);
7579 stmt_vec_info induction_phi_info = loop_vinfo->add_stmt (induction_phi);
7580 induc_def = PHI_RESULT (induction_phi);
7582 /* Create the iv update inside the loop */
7583 vec_def = make_ssa_name (vec_dest);
7584 new_stmt = gimple_build_assign (vec_def, PLUS_EXPR, induc_def, vec_step);
7585 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
7586 stmt_vec_info new_stmt_info = loop_vinfo->add_stmt (new_stmt);
7588 /* Set the arguments of the phi node: */
7589 add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION);
7590 add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
7591 UNKNOWN_LOCATION);
7593 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = induction_phi_info;
7595 /* In case the vectorization factor (VF) is bigger than the number
7596 of elements that we can fit in a vectype (nunits), we have to generate
7597 more than one vector stmt, i.e. we need to "unroll" the
7598 vector stmt by a factor VF/nunits. For more details see the
7599 documentation in vectorizable_operation. */
7601 if (ncopies > 1)
7603 gimple_seq seq = NULL;
7604 stmt_vec_info prev_stmt_vinfo;
7605 /* FORNOW. This restriction should be relaxed. */
7606 gcc_assert (!nested_in_vect_loop);
7608 /* Create the vector that holds the step of the induction. */
7609 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
7611 expr = build_int_cst (integer_type_node, nunits);
7612 expr = gimple_build (&seq, FLOAT_EXPR, TREE_TYPE (step_expr), expr);
7614 else
7615 expr = build_int_cst (TREE_TYPE (step_expr), nunits);
7616 new_name = gimple_build (&seq, MULT_EXPR, TREE_TYPE (step_expr),
7617 expr, step_expr);
7618 if (seq)
7620 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
7621 gcc_assert (!new_bb);
7624 t = unshare_expr (new_name);
7625 gcc_assert (CONSTANT_CLASS_P (new_name)
7626 || TREE_CODE (new_name) == SSA_NAME);
7627 new_vec = build_vector_from_val (vectype, t);
7628 vec_step = vect_init_vector (stmt_info, new_vec, vectype, NULL);
7630 vec_def = induc_def;
7631 prev_stmt_vinfo = induction_phi_info;
7632 for (i = 1; i < ncopies; i++)
7634 /* vec_i = vec_prev + vec_step */
7635 new_stmt = gimple_build_assign (vec_dest, PLUS_EXPR,
7636 vec_def, vec_step);
7637 vec_def = make_ssa_name (vec_dest, new_stmt);
7638 gimple_assign_set_lhs (new_stmt, vec_def);
7640 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
7641 new_stmt_info = loop_vinfo->add_stmt (new_stmt);
7642 STMT_VINFO_RELATED_STMT (prev_stmt_vinfo) = new_stmt_info;
7643 prev_stmt_vinfo = new_stmt_info;
7647 if (nested_in_vect_loop)
7649 /* Find the loop-closed exit-phi of the induction, and record
7650 the final vector of induction results: */
7651 exit_phi = NULL;
7652 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
7654 gimple *use_stmt = USE_STMT (use_p);
7655 if (is_gimple_debug (use_stmt))
7656 continue;
7658 if (!flow_bb_inside_loop_p (iv_loop, gimple_bb (use_stmt)))
7660 exit_phi = use_stmt;
7661 break;
7664 if (exit_phi)
7666 stmt_vec_info stmt_vinfo = loop_vinfo->lookup_stmt (exit_phi);
7667 /* FORNOW. We do not yet support the case in which an inner-loop induction
7668 is used only outside the outer loop (i.e. not in the outer loop itself). */
7669 gcc_assert (STMT_VINFO_RELEVANT_P (stmt_vinfo)
7670 && !STMT_VINFO_LIVE_P (stmt_vinfo));
7672 STMT_VINFO_VEC_STMT (stmt_vinfo) = new_stmt_info;
7673 if (dump_enabled_p ())
7674 dump_printf_loc (MSG_NOTE, vect_location,
7675 "vector of inductions after inner-loop:%G",
7676 new_stmt);
7681 if (dump_enabled_p ())
7682 dump_printf_loc (MSG_NOTE, vect_location,
7683 "transform induction: created def-use cycle: %G%G",
7684 induction_phi, SSA_NAME_DEF_STMT (vec_def));
7686 return true;
7689 /* Function vectorizable_live_operation.
7691 STMT_INFO computes a value that is used outside the loop. Check if
7692 it can be supported. */
7694 bool
7695 vectorizable_live_operation (stmt_vec_info stmt_info,
7696 gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
7697 slp_tree slp_node, int slp_index,
7698 stmt_vec_info *vec_stmt,
7699 stmt_vector_for_cost *)
7701 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
7702 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
7703 imm_use_iterator imm_iter;
7704 tree lhs, lhs_type, bitsize, vec_bitsize;
7705 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
7706 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
7707 int ncopies;
7708 gimple *use_stmt;
7709 auto_vec<tree> vec_oprnds;
7710 int vec_entry = 0;
7711 poly_uint64 vec_index = 0;
7713 gcc_assert (STMT_VINFO_LIVE_P (stmt_info));
7715 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def)
7716 return false;
7718 /* FORNOW. CHECKME. */
7719 if (nested_in_vect_loop_p (loop, stmt_info))
7720 return false;
7722 /* If STMT is not relevant and it is a simple assignment and its inputs are
7723 invariant then it can remain in place, unvectorized. The original last
7724 scalar value that it computes will be used. */
7725 if (!STMT_VINFO_RELEVANT_P (stmt_info))
7727 gcc_assert (is_simple_and_all_uses_invariant (stmt_info, loop_vinfo));
7728 if (dump_enabled_p ())
7729 dump_printf_loc (MSG_NOTE, vect_location,
7730 "statement is simple and uses invariant. Leaving in "
7731 "place.\n");
7732 return true;
7735 if (slp_node)
7736 ncopies = 1;
7737 else
7738 ncopies = vect_get_num_copies (loop_vinfo, vectype);
7740 if (slp_node)
7742 gcc_assert (slp_index >= 0);
7744 int num_scalar = SLP_TREE_SCALAR_STMTS (slp_node).length ();
7745 int num_vec = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
7747 /* Get the last occurrence of the scalar index from the concatenation of
7748 all the slp vectors. Calculate which slp vector it is and the index
7749 within. */
7750 poly_uint64 pos = (num_vec * nunits) - num_scalar + slp_index;
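/* A hedged numeric example (all values hypothetical): with num_vec == 2,
   nunits == 4, num_scalar == 6 and slp_index == 5 we get
   pos == 2*4 - 6 + 5 == 7, and the division below then yields
   vec_entry == 1 and vec_index == 3, i.e. lane 3 of the second vector.  */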
7752 /* Calculate which vector contains the result, and which lane of
7753 that vector we need. */
7754 if (!can_div_trunc_p (pos, nunits, &vec_entry, &vec_index))
7756 if (dump_enabled_p ())
7757 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7758 "Cannot determine which vector holds the"
7759 " final result.\n");
7760 return false;
7764 if (!vec_stmt)
7766 /* No transformation required. */
7767 if (LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo))
7769 if (!direct_internal_fn_supported_p (IFN_EXTRACT_LAST, vectype,
7770 OPTIMIZE_FOR_SPEED))
7772 if (dump_enabled_p ())
7773 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7774 "can't use a fully-masked loop because "
7775 "the target doesn't support extract last "
7776 "reduction.\n");
7777 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
7779 else if (slp_node)
7781 if (dump_enabled_p ())
7782 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7783 "can't use a fully-masked loop because an "
7784 "SLP statement is live after the loop.\n");
7785 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
7787 else if (ncopies > 1)
7789 if (dump_enabled_p ())
7790 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7791 "can't use a fully-masked loop because"
7792 " ncopies is greater than 1.\n");
7793 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
7795 else
7797 gcc_assert (ncopies == 1 && !slp_node);
7798 vect_record_loop_mask (loop_vinfo,
7799 &LOOP_VINFO_MASKS (loop_vinfo),
7800 1, vectype);
7803 return true;
7806 /* Use the lhs of the original scalar statement. */
7807 gimple *stmt = vect_orig_stmt (stmt_info)->stmt;
7809 lhs = (is_a <gphi *> (stmt)) ? gimple_phi_result (stmt)
7810 : gimple_get_lhs (stmt);
7811 lhs_type = TREE_TYPE (lhs);
7813 bitsize = (VECTOR_BOOLEAN_TYPE_P (vectype)
7814 ? bitsize_int (TYPE_PRECISION (TREE_TYPE (vectype)))
7815 : TYPE_SIZE (TREE_TYPE (vectype)));
7816 vec_bitsize = TYPE_SIZE (vectype);
7818 /* Get the vectorized lhs of STMT and the lane to use (counted in bits). */
7819 tree vec_lhs, bitstart;
7820 if (slp_node)
7822 gcc_assert (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo));
7824 /* Get the correct slp vectorized stmt. */
7825 gimple *vec_stmt = SLP_TREE_VEC_STMTS (slp_node)[vec_entry]->stmt;
7826 if (gphi *phi = dyn_cast <gphi *> (vec_stmt))
7827 vec_lhs = gimple_phi_result (phi);
7828 else
7829 vec_lhs = gimple_get_lhs (vec_stmt);
7831 /* Get entry to use. */
7832 bitstart = bitsize_int (vec_index);
7833 bitstart = int_const_binop (MULT_EXPR, bitsize, bitstart);
7835 else
7837 enum vect_def_type dt = STMT_VINFO_DEF_TYPE (stmt_info);
7838 vec_lhs = vect_get_vec_def_for_operand_1 (stmt_info, dt);
7839 gcc_checking_assert (ncopies == 1
7840 || !LOOP_VINFO_FULLY_MASKED_P (loop_vinfo));
7842 /* For multiple copies, get the last copy. */
7843 for (int i = 1; i < ncopies; ++i)
7844 vec_lhs = vect_get_vec_def_for_stmt_copy (loop_vinfo, vec_lhs);
7846 /* Get the last lane in the vector. */
7847 bitstart = int_const_binop (MINUS_EXPR, vec_bitsize, bitsize);
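/* E.g. (hypothetical vector type) for a 128-bit vector of 32-bit elements
   vec_bitsize is 128 and bitsize is 32, so bitstart == 96 selects the last
   lane for the BIT_FIELD_REF built further below.  */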
7850 gimple_seq stmts = NULL;
7851 tree new_tree;
7852 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
7854 /* Emit:
7856 SCALAR_RES = EXTRACT_LAST <VEC_LHS, MASK>
7858 where VEC_LHS is the vectorized live-out result and MASK is
7859 the loop mask for the final iteration. */
7860 gcc_assert (ncopies == 1 && !slp_node);
7861 tree scalar_type = TREE_TYPE (STMT_VINFO_VECTYPE (stmt_info));
7862 tree mask = vect_get_loop_mask (gsi, &LOOP_VINFO_MASKS (loop_vinfo),
7863 1, vectype, 0);
7864 tree scalar_res = gimple_build (&stmts, CFN_EXTRACT_LAST,
7865 scalar_type, mask, vec_lhs);
7867 /* Convert the extracted vector element to the required scalar type. */
7868 new_tree = gimple_convert (&stmts, lhs_type, scalar_res);
7870 else
7872 tree bftype = TREE_TYPE (vectype);
7873 if (VECTOR_BOOLEAN_TYPE_P (vectype))
7874 bftype = build_nonstandard_integer_type (tree_to_uhwi (bitsize), 1);
7875 new_tree = build3 (BIT_FIELD_REF, bftype, vec_lhs, bitsize, bitstart);
7876 new_tree = force_gimple_operand (fold_convert (lhs_type, new_tree),
7877 &stmts, true, NULL_TREE);
7880 if (stmts)
7881 gsi_insert_seq_on_edge_immediate (single_exit (loop), stmts);
7883 /* Replace use of lhs with newly computed result. If the use stmt is a
7884 single arg PHI, just replace all uses of PHI result. It's necessary
7885 because lcssa PHI defining lhs may be before newly inserted stmt. */
7886 use_operand_p use_p;
7887 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, lhs)
7888 if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))
7889 && !is_gimple_debug (use_stmt))
7891 if (gimple_code (use_stmt) == GIMPLE_PHI
7892 && gimple_phi_num_args (use_stmt) == 1)
7894 replace_uses_by (gimple_phi_result (use_stmt), new_tree);
7896 else
7898 FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
7899 SET_USE (use_p, new_tree);
7901 update_stmt (use_stmt);
7904 return true;
7907 /* Kill any debug uses outside LOOP of SSA names defined in STMT_INFO. */
7909 static void
7910 vect_loop_kill_debug_uses (struct loop *loop, stmt_vec_info stmt_info)
7912 ssa_op_iter op_iter;
7913 imm_use_iterator imm_iter;
7914 def_operand_p def_p;
7915 gimple *ustmt;
7917 FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt_info->stmt, op_iter, SSA_OP_DEF)
7919 FOR_EACH_IMM_USE_STMT (ustmt, imm_iter, DEF_FROM_PTR (def_p))
7921 basic_block bb;
7923 if (!is_gimple_debug (ustmt))
7924 continue;
7926 bb = gimple_bb (ustmt);
7928 if (!flow_bb_inside_loop_p (loop, bb))
7930 if (gimple_debug_bind_p (ustmt))
7932 if (dump_enabled_p ())
7933 dump_printf_loc (MSG_NOTE, vect_location,
7934 "killing debug use\n");
7936 gimple_debug_bind_reset_value (ustmt);
7937 update_stmt (ustmt);
7939 else
7940 gcc_unreachable ();
7946 /* Given loop represented by LOOP_VINFO, return true if computation of
7947 LOOP_VINFO_NITERS (= LOOP_VINFO_NITERSM1 + 1) doesn't overflow, false
7948 otherwise. */
7950 static bool
7951 loop_niters_no_overflow (loop_vec_info loop_vinfo)
7953 /* Constant case. */
7954 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
7956 tree cst_niters = LOOP_VINFO_NITERS (loop_vinfo);
7957 tree cst_nitersm1 = LOOP_VINFO_NITERSM1 (loop_vinfo);
7959 gcc_assert (TREE_CODE (cst_niters) == INTEGER_CST);
7960 gcc_assert (TREE_CODE (cst_nitersm1) == INTEGER_CST);
7961 if (wi::to_widest (cst_nitersm1) < wi::to_widest (cst_niters))
7962 return true;
7965 widest_int max;
7966 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
7967 /* Check the upper bound of loop niters. */
7968 if (get_max_loop_iterations (loop, &max))
7970 tree type = TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo));
7971 signop sgn = TYPE_SIGN (type);
7972 widest_int type_max = widest_int::from (wi::max_value (type), sgn);
7973 if (max < type_max)
7974 return true;
7976 return false;
7979 /* Return a mask type with half the number of elements as TYPE. */
7981 tree
7982 vect_halve_mask_nunits (tree type)
7984 poly_uint64 nunits = exact_div (TYPE_VECTOR_SUBPARTS (type), 2);
7985 return build_truth_vector_type (nunits, current_vector_size);
7988 /* Return a mask type with twice as many elements as TYPE. */
7990 tree
7991 vect_double_mask_nunits (tree type)
7993 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (type) * 2;
7994 return build_truth_vector_type (nunits, current_vector_size);
7997 /* Record that a fully-masked version of LOOP_VINFO would need MASKS to
7998 contain a sequence of NVECTORS masks that each control a vector of type
7999 VECTYPE. */
8001 void
8002 vect_record_loop_mask (loop_vec_info loop_vinfo, vec_loop_masks *masks,
8003 unsigned int nvectors, tree vectype)
8005 gcc_assert (nvectors != 0);
8006 if (masks->length () < nvectors)
8007 masks->safe_grow_cleared (nvectors);
8008 rgroup_masks *rgm = &(*masks)[nvectors - 1];
8009 /* The number of scalars per iteration and the number of vectors are
8010 both compile-time constants. */
8011 unsigned int nscalars_per_iter
8012 = exact_div (nvectors * TYPE_VECTOR_SUBPARTS (vectype),
8013 LOOP_VINFO_VECT_FACTOR (loop_vinfo)).to_constant ();
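/* A hedged example (numbers hypothetical): nvectors == 2 masks for a
   vectype with 8 elements and a vectorization factor of 8 give
   nscalars_per_iter == 2 * 8 / 8 == 2, i.e. this rgroup controls two
   scalar values per original loop iteration.  */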
8014 if (rgm->max_nscalars_per_iter < nscalars_per_iter)
8016 rgm->max_nscalars_per_iter = nscalars_per_iter;
8017 rgm->mask_type = build_same_sized_truth_vector_type (vectype);
8021 /* Given a complete set of masks MASKS, extract mask number INDEX
8022 for an rgroup that operates on NVECTORS vectors of type VECTYPE,
8023 where 0 <= INDEX < NVECTORS. Insert any set-up statements before GSI.
8025 See the comment above vec_loop_masks for more details about the mask
8026 arrangement. */
8028 tree
8029 vect_get_loop_mask (gimple_stmt_iterator *gsi, vec_loop_masks *masks,
8030 unsigned int nvectors, tree vectype, unsigned int index)
8032 rgroup_masks *rgm = &(*masks)[nvectors - 1];
8033 tree mask_type = rgm->mask_type;
8035 /* Populate the rgroup's mask array, if this is the first time we've
8036 used it. */
8037 if (rgm->masks.is_empty ())
8039 rgm->masks.safe_grow_cleared (nvectors);
8040 for (unsigned int i = 0; i < nvectors; ++i)
8042 tree mask = make_temp_ssa_name (mask_type, NULL, "loop_mask");
8043 /* Provide a dummy definition until the real one is available. */
8044 SSA_NAME_DEF_STMT (mask) = gimple_build_nop ();
8045 rgm->masks[i] = mask;
8049 tree mask = rgm->masks[index];
8050 if (maybe_ne (TYPE_VECTOR_SUBPARTS (mask_type),
8051 TYPE_VECTOR_SUBPARTS (vectype)))
8053 /* A loop mask for data type X can be reused for data type Y
8054 if X has N times more elements than Y and if Y's elements
8055 are N times bigger than X's. In this case each sequence
8056 of N elements in the loop mask will be all-zero or all-one.
8057 We can then view-convert the mask so that each sequence of
8058 N elements is replaced by a single element. */
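/* A hedged illustration with hypothetical element counts: a mask computed
   for 8-element vectors whose value is { 1,1, 1,1, 0,0, 0,0 } can be reused
   for 4-element vectors of twice-as-wide data by view-converting it to
   { 1, 1, 0, 0 }; every pair of lanes is known to be all-zero or all-one,
   so no information is lost.  */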
8059 gcc_assert (multiple_p (TYPE_VECTOR_SUBPARTS (mask_type),
8060 TYPE_VECTOR_SUBPARTS (vectype)));
8061 gimple_seq seq = NULL;
8062 mask_type = build_same_sized_truth_vector_type (vectype);
8063 mask = gimple_build (&seq, VIEW_CONVERT_EXPR, mask_type, mask);
8064 if (seq)
8065 gsi_insert_seq_before (gsi, seq, GSI_SAME_STMT);
8067 return mask;
8070 /* Scale profiling counters by estimation for LOOP which is vectorized
8071 by factor VF. */
8073 static void
8074 scale_profile_for_vect_loop (struct loop *loop, unsigned vf)
8076 edge preheader = loop_preheader_edge (loop);
8077 /* Reduce loop iterations by the vectorization factor. */
8078 gcov_type new_est_niter = niter_for_unrolled_loop (loop, vf);
8079 profile_count freq_h = loop->header->count, freq_e = preheader->count ();
8081 if (freq_h.nonzero_p ())
8083 profile_probability p;
8085 /* Avoid dropping loop body profile counter to 0 because of zero count
8086 in loop's preheader. */
8087 if (!(freq_e == profile_count::zero ()))
8088 freq_e = freq_e.force_nonzero ();
8089 p = freq_e.apply_scale (new_est_niter + 1, 1).probability_in (freq_h);
8090 scale_loop_frequencies (loop, p);
8093 edge exit_e = single_exit (loop);
8094 exit_e->probability = profile_probability::always ()
8095 .apply_scale (1, new_est_niter + 1);
8097 edge exit_l = single_pred_edge (loop->latch);
8098 profile_probability prob = exit_l->probability;
8099 exit_l->probability = exit_e->probability.invert ();
8100 if (prob.initialized_p () && exit_l->probability.initialized_p ())
8101 scale_bbs_frequencies (&loop->latch, 1, exit_l->probability / prob);
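/* A hedged sketch of the effect (numbers hypothetical): if the loop was
   estimated to execute its latch about 100 times and vf == 4,
   niter_for_unrolled_loop returns roughly 25, the exit edge probability
   becomes 1/(25 + 1), and the body counts are scaled so the header count
   corresponds to about 26 executions per loop entry.  */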
8104 /* Vectorize STMT_INFO if relevant, inserting any new instructions before GSI.
8105 When vectorizing STMT_INFO as a store, set *SEEN_STORE to its
8106 stmt_vec_info. */
8108 static void
8109 vect_transform_loop_stmt (loop_vec_info loop_vinfo, stmt_vec_info stmt_info,
8110 gimple_stmt_iterator *gsi, stmt_vec_info *seen_store)
8112 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
8113 poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
8115 if (dump_enabled_p ())
8116 dump_printf_loc (MSG_NOTE, vect_location,
8117 "------>vectorizing statement: %G", stmt_info->stmt);
8119 if (MAY_HAVE_DEBUG_BIND_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
8120 vect_loop_kill_debug_uses (loop, stmt_info);
8122 if (!STMT_VINFO_RELEVANT_P (stmt_info)
8123 && !STMT_VINFO_LIVE_P (stmt_info))
8124 return;
8126 if (STMT_VINFO_VECTYPE (stmt_info))
8128 poly_uint64 nunits
8129 = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
8130 if (!STMT_SLP_TYPE (stmt_info)
8131 && maybe_ne (nunits, vf)
8132 && dump_enabled_p ())
8133 /* For SLP VF is set according to unrolling factor, and not
8134 to vector size, hence for SLP this print is not valid. */
8135 dump_printf_loc (MSG_NOTE, vect_location, "multiple-types.\n");
8138 /* Pure SLP statements have already been vectorized. We still need
8139 to apply loop vectorization to hybrid SLP statements. */
8140 if (PURE_SLP_STMT (stmt_info))
8141 return;
8143 if (dump_enabled_p ())
8144 dump_printf_loc (MSG_NOTE, vect_location, "transform statement.\n");
8146 if (vect_transform_stmt (stmt_info, gsi, NULL, NULL))
8147 *seen_store = stmt_info;
8150 /* Function vect_transform_loop.
8152 The analysis phase has determined that the loop is vectorizable.
8153 Vectorize the loop - create vectorized stmts to replace the scalar
8154 stmts in the loop, and update the loop exit condition.
8155 Returns the scalar epilogue loop, if any. */
8157 struct loop *
8158 vect_transform_loop (loop_vec_info loop_vinfo)
8160 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
8161 struct loop *epilogue = NULL;
8162 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
8163 int nbbs = loop->num_nodes;
8164 int i;
8165 tree niters_vector = NULL_TREE;
8166 tree step_vector = NULL_TREE;
8167 tree niters_vector_mult_vf = NULL_TREE;
8168 poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
8169 unsigned int lowest_vf = constant_lower_bound (vf);
8170 gimple *stmt;
8171 bool check_profitability = false;
8172 unsigned int th;
8174 DUMP_VECT_SCOPE ("vec_transform_loop");
8176 loop_vinfo->shared->check_datarefs ();
8178 /* Use the more conservative vectorization threshold. If the number
8179 of iterations is constant assume the cost check has been performed
8180 by our caller. If the threshold makes all loops profitable that
8181 run at least the (estimated) vectorization factor number of times
8182 checking is pointless, too. */
8183 th = LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo);
8184 if (th >= vect_vf_for_cost (loop_vinfo)
8185 && !LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
8187 if (dump_enabled_p ())
8188 dump_printf_loc (MSG_NOTE, vect_location,
8189 "Profitability threshold is %d loop iterations.\n",
8190 th);
8191 check_profitability = true;
8194 /* Make sure there exists a single-predecessor exit bb. Do this before
8195 versioning. */
8196 edge e = single_exit (loop);
8197 if (! single_pred_p (e->dest))
8199 split_loop_exit_edge (e);
8200 if (dump_enabled_p ())
8201 dump_printf (MSG_NOTE, "split exit edge\n");
8204 /* Version the loop first, if required, so the profitability check
8205 comes first. */
8207 if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
8209 poly_uint64 versioning_threshold
8210 = LOOP_VINFO_VERSIONING_THRESHOLD (loop_vinfo);
8211 if (check_profitability
8212 && ordered_p (poly_uint64 (th), versioning_threshold))
8214 versioning_threshold = ordered_max (poly_uint64 (th),
8215 versioning_threshold);
8216 check_profitability = false;
8218 vect_loop_versioning (loop_vinfo, th, check_profitability,
8219 versioning_threshold);
8220 check_profitability = false;
8223 /* Make sure there exists a single-predecessor exit bb also on the
8224 scalar loop copy. Do this after versioning but before peeling
8225 so that the CFG structure is fine for both the scalar and the
8226 if-converted loop, and slpeel_duplicate_current_defs_from_edges
8227 sees matched loop-closed PHI nodes on the exit. */
8228 if (LOOP_VINFO_SCALAR_LOOP (loop_vinfo))
8230 e = single_exit (LOOP_VINFO_SCALAR_LOOP (loop_vinfo));
8231 if (! single_pred_p (e->dest))
8233 split_loop_exit_edge (e);
8234 if (dump_enabled_p ())
8235 dump_printf (MSG_NOTE, "split exit edge of scalar loop\n");
8239 tree niters = vect_build_loop_niters (loop_vinfo);
8240 LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = niters;
8241 tree nitersm1 = unshare_expr (LOOP_VINFO_NITERSM1 (loop_vinfo));
8242 bool niters_no_overflow = loop_niters_no_overflow (loop_vinfo);
8243 epilogue = vect_do_peeling (loop_vinfo, niters, nitersm1, &niters_vector,
8244 &step_vector, &niters_vector_mult_vf, th,
8245 check_profitability, niters_no_overflow);
8247 if (niters_vector == NULL_TREE)
8249 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
8250 && !LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
8251 && known_eq (lowest_vf, vf))
8253 niters_vector
8254 = build_int_cst (TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo)),
8255 LOOP_VINFO_INT_NITERS (loop_vinfo) / lowest_vf);
8256 step_vector = build_one_cst (TREE_TYPE (niters));
8258 else
8259 vect_gen_vector_loop_niters (loop_vinfo, niters, &niters_vector,
8260 &step_vector, niters_no_overflow);
8263 /* 1) Make sure the loop header has exactly two entries
8264 2) Make sure we have a preheader basic block. */
8266 gcc_assert (EDGE_COUNT (loop->header->preds) == 2);
8268 split_edge (loop_preheader_edge (loop));
8270 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
8271 && vect_use_loop_mask_for_alignment_p (loop_vinfo))
8272 /* This will deal with any possible peeling. */
8273 vect_prepare_for_masked_peels (loop_vinfo);
8275 /* Schedule the SLP instances first, then handle loop vectorization
8276 below. */
8277 if (!loop_vinfo->slp_instances.is_empty ())
8279 DUMP_VECT_SCOPE ("scheduling SLP instances");
8280 vect_schedule_slp (loop_vinfo);
8283 /* FORNOW: the vectorizer supports only loops whose body consists
8284 of one basic block (header + empty latch). When the vectorizer
8285 supports more involved loop forms, the order in which the BBs are
8286 traversed needs to be reconsidered. */
8288 for (i = 0; i < nbbs; i++)
8290 basic_block bb = bbs[i];
8291 stmt_vec_info stmt_info;
8293 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
8294 gsi_next (&si))
8296 gphi *phi = si.phi ();
8297 if (dump_enabled_p ())
8298 dump_printf_loc (MSG_NOTE, vect_location,
8299 "------>vectorizing phi: %G", phi);
8300 stmt_info = loop_vinfo->lookup_stmt (phi);
8301 if (!stmt_info)
8302 continue;
8304 if (MAY_HAVE_DEBUG_BIND_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
8305 vect_loop_kill_debug_uses (loop, stmt_info);
8307 if (!STMT_VINFO_RELEVANT_P (stmt_info)
8308 && !STMT_VINFO_LIVE_P (stmt_info))
8309 continue;
8311 if (STMT_VINFO_VECTYPE (stmt_info)
8312 && (maybe_ne
8313 (TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info)), vf))
8314 && dump_enabled_p ())
8315 dump_printf_loc (MSG_NOTE, vect_location, "multiple-types.\n");
8317 if ((STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def
8318 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def
8319 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle)
8320 && ! PURE_SLP_STMT (stmt_info))
8322 if (dump_enabled_p ())
8323 dump_printf_loc (MSG_NOTE, vect_location, "transform phi.\n");
8324 vect_transform_stmt (stmt_info, NULL, NULL, NULL);
8328 for (gimple_stmt_iterator si = gsi_start_bb (bb);
8329 !gsi_end_p (si);)
8331 stmt = gsi_stmt (si);
8332 /* During vectorization remove existing clobber stmts. */
8333 if (gimple_clobber_p (stmt))
8335 unlink_stmt_vdef (stmt);
8336 gsi_remove (&si, true);
8337 release_defs (stmt);
8339 else
8341 stmt_info = loop_vinfo->lookup_stmt (stmt);
8343 /* vector stmts created in the outer-loop during vectorization of
8344 stmts in an inner-loop may not have a stmt_info, and do not
8345 need to be vectorized. */
8346 stmt_vec_info seen_store = NULL;
8347 if (stmt_info)
8349 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
8351 gimple *def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
8352 for (gimple_stmt_iterator subsi = gsi_start (def_seq);
8353 !gsi_end_p (subsi); gsi_next (&subsi))
8355 stmt_vec_info pat_stmt_info
8356 = loop_vinfo->lookup_stmt (gsi_stmt (subsi));
8357 vect_transform_loop_stmt (loop_vinfo, pat_stmt_info,
8358 &si, &seen_store);
8360 stmt_vec_info pat_stmt_info
8361 = STMT_VINFO_RELATED_STMT (stmt_info);
8362 vect_transform_loop_stmt (loop_vinfo, pat_stmt_info, &si,
8363 &seen_store);
8365 vect_transform_loop_stmt (loop_vinfo, stmt_info, &si,
8366 &seen_store);
8368 gsi_next (&si);
8369 if (seen_store)
8371 if (STMT_VINFO_GROUPED_ACCESS (seen_store))
8372 /* Interleaving. The vectorization of the
8373 interleaving chain was completed - free
8374 all the stores in the chain. */
8375 vect_remove_stores (DR_GROUP_FIRST_ELEMENT (seen_store));
8376 else
8377 /* Free the attached stmt_vec_info and remove the stmt. */
8378 loop_vinfo->remove_stmt (stmt_info);
8383 /* Stub out scalar statements that must not survive vectorization.
8384 Doing this here helps with grouped statements, or statements that
8385 are involved in patterns. */
8386 for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
8387 !gsi_end_p (gsi); gsi_next (&gsi))
8389 gcall *call = dyn_cast <gcall *> (gsi_stmt (gsi));
8390 if (call && gimple_call_internal_p (call, IFN_MASK_LOAD))
8392 tree lhs = gimple_get_lhs (call);
8393 if (!VECTOR_TYPE_P (TREE_TYPE (lhs)))
8395 tree zero = build_zero_cst (TREE_TYPE (lhs));
8396 gimple *new_stmt = gimple_build_assign (lhs, zero);
8397 gsi_replace (&gsi, new_stmt, true);
8401 } /* BBs in loop */
8403 /* The vectorization factor is always > 1, so if we use an IV increment of 1,
8404 a zero NITERS becomes a nonzero NITERS_VECTOR. */
8405 if (integer_onep (step_vector))
8406 niters_no_overflow = true;
8407 vect_set_loop_condition (loop, loop_vinfo, niters_vector, step_vector,
8408 niters_vector_mult_vf, !niters_no_overflow);
8410 unsigned int assumed_vf = vect_vf_for_cost (loop_vinfo);
8411 scale_profile_for_vect_loop (loop, assumed_vf);
8413 /* True if the final iteration might not handle a full vector's
8414 worth of scalar iterations. */
8415 bool final_iter_may_be_partial = LOOP_VINFO_FULLY_MASKED_P (loop_vinfo);
8416 /* The minimum number of iterations performed by the epilogue. This
8417 is 1 when peeling for gaps because we always need a final scalar
8418 iteration. */
8419 int min_epilogue_iters = LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) ? 1 : 0;
8420 /* +1 to convert latch counts to loop iteration counts,
8421 -min_epilogue_iters to remove iterations that cannot be performed
8422 by the vector code. */
8423 int bias_for_lowest = 1 - min_epilogue_iters;
8424 int bias_for_assumed = bias_for_lowest;
8425 int alignment_npeels = LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
8426 if (alignment_npeels && LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
8428 /* When the amount of peeling is known at compile time, the first
8429 iteration will have exactly alignment_npeels active elements.
8430 In the worst case it will have at least one. */
8431 int min_first_active = (alignment_npeels > 0 ? alignment_npeels : 1);
8432 bias_for_lowest += lowest_vf - min_first_active;
8433 bias_for_assumed += assumed_vf - min_first_active;
8435 /* In these calculations the "- 1" converts loop iteration counts
8436 back to latch counts. */
8437 if (loop->any_upper_bound)
8438 loop->nb_iterations_upper_bound
8439 = (final_iter_may_be_partial
8440 ? wi::udiv_ceil (loop->nb_iterations_upper_bound + bias_for_lowest,
8441 lowest_vf) - 1
8442 : wi::udiv_floor (loop->nb_iterations_upper_bound + bias_for_lowest,
8443 lowest_vf) - 1);
8444 if (loop->any_likely_upper_bound)
8445 loop->nb_iterations_likely_upper_bound
8446 = (final_iter_may_be_partial
8447 ? wi::udiv_ceil (loop->nb_iterations_likely_upper_bound
8448 + bias_for_lowest, lowest_vf) - 1
8449 : wi::udiv_floor (loop->nb_iterations_likely_upper_bound
8450 + bias_for_lowest, lowest_vf) - 1);
8451 if (loop->any_estimate)
8452 loop->nb_iterations_estimate
8453 = (final_iter_may_be_partial
8454 ? wi::udiv_ceil (loop->nb_iterations_estimate + bias_for_assumed,
8455 assumed_vf) - 1
8456 : wi::udiv_floor (loop->nb_iterations_estimate + bias_for_assumed,
8457 assumed_vf) - 1);
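/* A worked example with hypothetical numbers: an old latch-count upper
   bound of 99 (at most 100 iterations), lowest_vf == 4, no peeling for
   gaps and no full masking give bias_for_lowest == 1 and a new bound of
   udiv_floor (99 + 1, 4) - 1 == 24, i.e. at most 25 vector iterations.  */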
8459 if (dump_enabled_p ())
8461 if (!LOOP_VINFO_EPILOGUE_P (loop_vinfo))
8463 dump_printf_loc (MSG_NOTE, vect_location,
8464 "LOOP VECTORIZED\n");
8465 if (loop->inner)
8466 dump_printf_loc (MSG_NOTE, vect_location,
8467 "OUTER LOOP VECTORIZED\n");
8468 dump_printf (MSG_NOTE, "\n");
8470 else
8472 dump_printf_loc (MSG_NOTE, vect_location,
8473 "LOOP EPILOGUE VECTORIZED (VS=");
8474 dump_dec (MSG_NOTE, current_vector_size);
8475 dump_printf (MSG_NOTE, ")\n");
8479 /* Free SLP instances here because otherwise stmt reference counting
8480 won't work. */
8481 slp_instance instance;
8482 FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), i, instance)
8483 vect_free_slp_instance (instance, true);
8484 LOOP_VINFO_SLP_INSTANCES (loop_vinfo).release ();
8485 /* Clear the safelen field since its value is invalid after vectorization:
8486 the vectorized loop can have loop-carried dependencies. */
8487 loop->safelen = 0;
8489 /* Don't vectorize the epilogue of an epilogue loop. */
8490 if (LOOP_VINFO_EPILOGUE_P (loop_vinfo))
8491 epilogue = NULL;
8493 if (!PARAM_VALUE (PARAM_VECT_EPILOGUES_NOMASK))
8494 epilogue = NULL;
8496 if (epilogue)
8498 auto_vector_sizes vector_sizes;
8499 targetm.vectorize.autovectorize_vector_sizes (&vector_sizes);
8500 unsigned int next_size = 0;
8502 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
8503 && LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) >= 0
8504 && known_eq (vf, lowest_vf))
8506 unsigned int eiters
8507 = (LOOP_VINFO_INT_NITERS (loop_vinfo)
8508 - LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo));
8509 eiters = eiters % lowest_vf;
8510 epilogue->nb_iterations_upper_bound = eiters - 1;
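/* A hedged numeric example (values hypothetical): with 103 known
   iterations, 3 iterations peeled for alignment and lowest_vf == 8,
   eiters == (103 - 3) % 8 == 4, so the epilogue runs at most 4 times
   and its latch-count upper bound is set to 3 above.  */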
8512 unsigned int ratio;
8513 while (next_size < vector_sizes.length ()
8514 && !(constant_multiple_p (current_vector_size,
8515 vector_sizes[next_size], &ratio)
8516 && eiters >= lowest_vf / ratio))
8517 next_size += 1;
8519 else
8520 while (next_size < vector_sizes.length ()
8521 && maybe_lt (current_vector_size, vector_sizes[next_size]))
8522 next_size += 1;
8524 if (next_size == vector_sizes.length ())
8525 epilogue = NULL;
8528 if (epilogue)
8530 epilogue->force_vectorize = loop->force_vectorize;
8531 epilogue->safelen = loop->safelen;
8532 epilogue->dont_vectorize = false;
8534 /* We may need to if-convert epilogue to vectorize it. */
8535 if (LOOP_VINFO_SCALAR_LOOP (loop_vinfo))
8536 tree_if_conversion (epilogue);
8539 return epilogue;
8542 /* The code below tries to perform a simple optimization - revert
8543 if-conversion for masked stores: if the mask of a store is zero, do
8544 not perform the store and, if possible, skip the stored-value producers too.
8545 For example,
8546 for (i=0; i<n; i++)
8547 if (c[i])
8549 p1[i] += 1;
8550 p2[i] = p3[i] +2;
8552 this transformation will produce the following semi-hammock:
8554 if (!mask__ifc__42.18_165 == { 0, 0, 0, 0, 0, 0, 0, 0 })
8556 vect__11.19_170 = MASK_LOAD (vectp_p1.20_168, 0B, mask__ifc__42.18_165);
8557 vect__12.22_172 = vect__11.19_170 + vect_cst__171;
8558 MASK_STORE (vectp_p1.23_175, 0B, mask__ifc__42.18_165, vect__12.22_172);
8559 vect__18.25_182 = MASK_LOAD (vectp_p3.26_180, 0B, mask__ifc__42.18_165);
8560 vect__19.28_184 = vect__18.25_182 + vect_cst__183;
8561 MASK_STORE (vectp_p2.29_187, 0B, mask__ifc__42.18_165, vect__19.28_184);
8565 void
8566 optimize_mask_stores (struct loop *loop)
8568 basic_block *bbs = get_loop_body (loop);
8569 unsigned nbbs = loop->num_nodes;
8570 unsigned i;
8571 basic_block bb;
8572 struct loop *bb_loop;
8573 gimple_stmt_iterator gsi;
8574 gimple *stmt;
8575 auto_vec<gimple *> worklist;
8577 vect_location = find_loop_location (loop);
8578 /* Pick up all masked stores in loop if any. */
8579 for (i = 0; i < nbbs; i++)
8581 bb = bbs[i];
8582 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
8583 gsi_next (&gsi))
8585 stmt = gsi_stmt (gsi);
8586 if (gimple_call_internal_p (stmt, IFN_MASK_STORE))
8587 worklist.safe_push (stmt);
8591 free (bbs);
8592 if (worklist.is_empty ())
8593 return;
8595 /* Loop has masked stores. */
8596 while (!worklist.is_empty ())
8598 gimple *last, *last_store;
8599 edge e, efalse;
8600 tree mask;
8601 basic_block store_bb, join_bb;
8602 gimple_stmt_iterator gsi_to;
8603 tree vdef, new_vdef;
8604 gphi *phi;
8605 tree vectype;
8606 tree zero;
8608 last = worklist.pop ();
8609 mask = gimple_call_arg (last, 2);
8610 bb = gimple_bb (last);
8611 /* Create then_bb and the if-then structure in the CFG; then_bb belongs
8612 to the same loop as if_bb. That loop can be different from LOOP when
8613 a two-level loop nest is vectorized and the mask_store belongs to the
8614 inner loop. */
8615 e = split_block (bb, last);
8616 bb_loop = bb->loop_father;
8617 gcc_assert (loop == bb_loop || flow_loop_nested_p (loop, bb_loop));
8618 join_bb = e->dest;
8619 store_bb = create_empty_bb (bb);
8620 add_bb_to_loop (store_bb, bb_loop);
8621 e->flags = EDGE_TRUE_VALUE;
8622 efalse = make_edge (bb, store_bb, EDGE_FALSE_VALUE);
8623 /* Put STORE_BB to likely part. */
8624 efalse->probability = profile_probability::unlikely ();
8625 store_bb->count = efalse->count ();
8626 make_single_succ_edge (store_bb, join_bb, EDGE_FALLTHRU);
8627 if (dom_info_available_p (CDI_DOMINATORS))
8628 set_immediate_dominator (CDI_DOMINATORS, store_bb, bb);
8629 if (dump_enabled_p ())
8630 dump_printf_loc (MSG_NOTE, vect_location,
8631 "Create new block %d to sink mask stores.",
8632 store_bb->index);
8633 /* Create vector comparison with boolean result. */
8634 vectype = TREE_TYPE (mask);
8635 zero = build_zero_cst (vectype);
8636 stmt = gimple_build_cond (EQ_EXPR, mask, zero, NULL_TREE, NULL_TREE);
8637 gsi = gsi_last_bb (bb);
8638 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
8639 /* Create new PHI node for vdef of the last masked store:
8640 .MEM_2 = VDEF <.MEM_1>
8641 will be converted to
8642 .MEM.3 = VDEF <.MEM_1>
8643 and new PHI node will be created in join bb
8644 .MEM_2 = PHI <.MEM_1, .MEM_3>
8646 vdef = gimple_vdef (last);
8647 new_vdef = make_ssa_name (gimple_vop (cfun), last);
8648 gimple_set_vdef (last, new_vdef);
8649 phi = create_phi_node (vdef, join_bb);
8650 add_phi_arg (phi, new_vdef, EDGE_SUCC (store_bb, 0), UNKNOWN_LOCATION);
8652 /* Put all masked stores with the same mask to STORE_BB if possible. */
8653 while (true)
8655 gimple_stmt_iterator gsi_from;
8656 gimple *stmt1 = NULL;
8658 /* Move masked store to STORE_BB. */
8659 last_store = last;
8660 gsi = gsi_for_stmt (last);
8661 gsi_from = gsi;
8662 /* Shift GSI to the previous stmt for further traversal. */
8663 gsi_prev (&gsi);
8664 gsi_to = gsi_start_bb (store_bb);
8665 gsi_move_before (&gsi_from, &gsi_to);
8667 /* Set up GSI_TO at the start of the (now non-empty) block. */
8667 gsi_to = gsi_start_bb (store_bb);
8668 if (dump_enabled_p ())
8669 dump_printf_loc (MSG_NOTE, vect_location,
8670 "Move stmt to created bb\n%G", last);
8671 /* Move all stored value producers if possible. */
8672 while (!gsi_end_p (gsi))
8674 tree lhs;
8675 imm_use_iterator imm_iter;
8676 use_operand_p use_p;
8677 bool res;
8679 /* Skip debug statements. */
8680 if (is_gimple_debug (gsi_stmt (gsi)))
8682 gsi_prev (&gsi);
8683 continue;
8685 stmt1 = gsi_stmt (gsi);
8686 /* Do not consider statements writing to memory or having
8687 volatile operand. */
8688 if (gimple_vdef (stmt1)
8689 || gimple_has_volatile_ops (stmt1))
8690 break;
8691 gsi_from = gsi;
8692 gsi_prev (&gsi);
8693 lhs = gimple_get_lhs (stmt1);
8694 if (!lhs)
8695 break;
8697 /* LHS of vectorized stmt must be SSA_NAME. */
8698 if (TREE_CODE (lhs) != SSA_NAME)
8699 break;
8701 if (!VECTOR_TYPE_P (TREE_TYPE (lhs)))
8703 /* Remove dead scalar statement. */
8704 if (has_zero_uses (lhs))
8706 gsi_remove (&gsi_from, true);
8707 continue;
8711 /* Check that LHS does not have uses outside of STORE_BB. */
8712 res = true;
8713 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
8715 gimple *use_stmt;
8716 use_stmt = USE_STMT (use_p);
8717 if (is_gimple_debug (use_stmt))
8718 continue;
8719 if (gimple_bb (use_stmt) != store_bb)
8721 res = false;
8722 break;
8725 if (!res)
8726 break;
8728 if (gimple_vuse (stmt1)
8729 && gimple_vuse (stmt1) != gimple_vuse (last_store))
8730 break;
8732 /* Can move STMT1 to STORE_BB. */
8733 if (dump_enabled_p ())
8734 dump_printf_loc (MSG_NOTE, vect_location,
8735 "Move stmt to created bb\n%G", stmt1);
8736 gsi_move_before (&gsi_from, &gsi_to);
8737 /* Shift GSI_TO for further insertion. */
8738 gsi_prev (&gsi_to);
8740 /* Put other masked stores with the same mask to STORE_BB. */
8741 if (worklist.is_empty ()
8742 || gimple_call_arg (worklist.last (), 2) != mask
8743 || worklist.last () != stmt1)
8744 break;
8745 last = worklist.pop ();
8747 add_phi_arg (phi, gimple_vuse (last_store), e, UNKNOWN_LOCATION);